author     2014-07-03 21:46:10 +0000
committer  2014-07-02 20:49:53 +0000
commit     ca8ff32bbb1f034b3b1f25de1fe20a9015bc87ec
tree       b8758c0d0a0ecd2f902a53a0fbb1b5014a153c6f
parent     3ee86bcbbc29f17b0243954a52dcda96b09411e0
parent     a77ee5103532abb197f492c14a9e6fb437054e2a
Merge "x86_64: TargetReg update for x86"
 compiler/dex/quick/arm64/codegen_arm64.h |   4
 compiler/dex/quick/codegen_util.cc       |   6
 compiler/dex/quick/gen_common.cc         | 117
 compiler/dex/quick/gen_invoke.cc         | 247
 compiler/dex/quick/gen_loadstore.cc      |  18
 compiler/dex/quick/mir_to_lir.cc         |  18
 compiler/dex/quick/mir_to_lir.h          |  18
 compiler/dex/quick/ralloc_util.cc        |   8
 compiler/dex/quick/x86/call_x86.cc       |  29
 compiler/dex/quick/x86/codegen_x86.h     |  58
 compiler/dex/quick/x86/fp_x86.cc         |  28
 compiler/dex/quick/x86/int_x86.cc        |  92
 compiler/dex/quick/x86/target_x86.cc     |  99
 compiler/dex/quick/x86/x86_lir.h         |   6
 14 files changed, 402 insertions(+), 346 deletions(-)
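The thrust of the merge is to make register width explicit at every `TargetReg()` call site: 32-bit versus 64-bit views, heap references, native pointers, and register pairs each get their own accessor. A minimal compilable sketch of the overload set involved — the `RegStorage` and enum bodies are stubs for illustration, and the pure-virtual/default split is an assumption; the real declarations are in the `compiler/dex/quick/mir_to_lir.h` hunk below:

```cpp
// Stub stand-ins for the real ART types (illustration only).
struct RegStorage {
  static RegStorage MakeRegPair(RegStorage low, RegStorage high) { return low; }
  bool Is64Bit() const { return false; }
};
enum SpecialTargetRegister { kArg0, kArg1, kArg2, kArg3, kRet0, kSp, kSelf, kInvokeTgt };

class Mir2Lir {
 public:
  virtual ~Mir2Lir() {}
  // Width chosen explicitly by the caller: TargetReg(kArg0, false) is the
  // 32-bit view, TargetReg(kArg0, true) the 64-bit view.
  virtual RegStorage TargetReg(SpecialTargetRegister reg, bool is_64bit) = 0;
  virtual RegStorage TargetReg(SpecialTargetRegister reg) { return TargetReg(reg, false); }

  // Pair overload: replaces hand-written
  // RegStorage::MakeRegPair(TargetReg(a), TargetReg(b)) at call sites.
  virtual RegStorage TargetReg(SpecialTargetRegister reg1, SpecialTargetRegister reg2) {
    return RegStorage::MakeRegPair(TargetReg(reg1, false), TargetReg(reg2, false));
  }

  // A register wide enough for a heap reference (ArtMethod*, Class, arrays).
  virtual RegStorage TargetRefReg(SpecialTargetRegister reg) { return TargetReg(reg); }

  // A register wide enough for a native pointer (kSp, kSelf, kInvokeTgt).
  virtual RegStorage TargetPtrReg(SpecialTargetRegister reg) { return TargetReg(reg); }
};
```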
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h index 7db6ab69f2..8692c6c678 100644 --- a/compiler/dex/quick/arm64/codegen_arm64.h +++ b/compiler/dex/quick/arm64/codegen_arm64.h @@ -110,6 +110,10 @@ class Arm64Mir2Lir FINAL : public Mir2Lir { RegStorage reg = TargetReg(symbolic_reg); return (reg.Is64Bit() ? reg : As64BitReg(reg)); } + RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE { + RegStorage reg = TargetReg(symbolic_reg); + return (reg.Is64Bit() ? reg : As64BitReg(reg)); + } RegStorage GetArgMappingToPhysicalReg(int arg_num); RegLocation GetReturnAlt(); RegLocation GetReturnWideAlt(); diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 5ba0d3f5e4..5870d22208 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -1184,8 +1184,8 @@ void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType t // resolve these invokes to the same method, so we don't care which one we record here. data_target->operands[2] = type; } - // TODO: This is actually a pointer, not a reference. - LIR* load_pc_rel = OpPcRelLoad(TargetRefReg(symbolic_reg), data_target); + // Loads a code pointer. Code from oat file can be mapped anywhere. + LIR* load_pc_rel = OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target); AppendLIR(load_pc_rel); DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target); } @@ -1201,6 +1201,7 @@ void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType // resolve these invokes to the same method, so we don't care which one we record here. data_target->operands[2] = type; } + // Loads an ArtMethod pointer, which is a reference as it lives in the heap. LIR* load_pc_rel = OpPcRelLoad(TargetRefReg(symbolic_reg), data_target); AppendLIR(load_pc_rel); DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target); @@ -1212,6 +1213,7 @@ void Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_re if (data_target == nullptr) { data_target = AddWordData(&class_literal_list_, type_idx); } + // Loads a Class pointer, which is a reference as it lives in the heap. LIR* load_pc_rel = OpPcRelLoad(TargetRefReg(symbolic_reg), data_target); AppendLIR(load_pc_rel); } diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index fe905623b2..dafefea0a3 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -368,7 +368,7 @@ static void GenNewArrayImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, if (!use_direct_type_ptr) { mir_to_lir->LoadClassType(type_idx, kArg0); func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved); - mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset, mir_to_lir->TargetReg(kArg0), + mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset, mir_to_lir->TargetReg(kArg0, false), rl_src, true); } else { // Use the direct pointer. @@ -431,8 +431,8 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { } else { GenFilledNewArrayCall<4>(this, cu_, elems, type_idx); } - FreeTemp(TargetReg(kArg2)); - FreeTemp(TargetReg(kArg1)); + FreeTemp(TargetReg(kArg2, false)); + FreeTemp(TargetReg(kArg1, false)); /* * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the * return region. 
Because AllocFromCode placed the new array @@ -440,7 +440,8 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { * added, it may be necessary to additionally copy all return * values to a home location in thread-local storage */ - LockTemp(TargetReg(kRet0)); + RegStorage ref_reg = TargetRefReg(kRet0); + LockTemp(ref_reg); // TODO: use the correct component size, currently all supported types // share array alignment with ints (see comment at head of function) @@ -460,7 +461,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { RegLocation loc = UpdateLoc(info->args[i]); if (loc.location == kLocPhysReg) { ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg); + Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg); } } /* @@ -480,7 +481,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { break; case kX86: case kX86_64: - FreeTemp(TargetReg(kRet0)); + FreeTemp(ref_reg); r_val = AllocTemp(); break; case kMips: @@ -490,9 +491,9 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { } // Set up source pointer RegLocation rl_first = info->args[0]; - OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low)); + OpRegRegImm(kOpAdd, r_src, TargetPtrReg(kSp), SRegOffset(rl_first.s_reg_low)); // Set up the target pointer - OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0), + OpRegRegImm(kOpAdd, r_dst, ref_reg, mirror::Array::DataOffset(component_size).Int32Value()); // Set up the loop counter (known to be > 0) LoadConstant(r_idx, elems - 1); @@ -510,14 +511,14 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { OpDecAndBranch(kCondGe, r_idx, target); if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) { // Restore the target pointer - OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst, + OpRegRegImm(kOpAdd, ref_reg, r_dst, -mirror::Array::DataOffset(component_size).Int32Value()); } } else if (!info->is_range) { // TUNING: interleave for (int i = 0; i < elems; i++) { RegLocation rl_arg = LoadValue(info->args[i], kCoreReg); - Store32Disp(TargetReg(kRet0), + Store32Disp(ref_reg, mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg); // If the LoadValue caused a temp to be allocated, free it if (IsTemp(rl_arg.reg)) { @@ -552,7 +553,7 @@ class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath { storage_index_, true); } // Copy helper's result into r_base, a no-op on all but MIPS. - m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0)); + m2l_->OpRegCopy(r_base_, m2l_->TargetRefReg(kRet0)); m2l_->OpUnconditionalBranch(cont_); } @@ -617,7 +618,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, // The slow path is invoked if the r_base is NULL or the class pointed // to by it is not initialized. LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL); - RegStorage r_tmp = TargetReg(kArg2); + RegStorage r_tmp = TargetReg(kArg2, false); LockTemp(r_tmp); LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base, mirror::Class::StatusOffset().Int32Value(), @@ -693,10 +694,10 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, // May do runtime call so everything to home locations. FlushAllRegs(); // Using fixed register to sync with possible call to runtime support. 
- RegStorage r_method = TargetReg(kArg1); + RegStorage r_method = TargetRefReg(kArg1); LockTemp(r_method); LoadCurrMethodDirect(r_method); - r_base = TargetReg(kArg0); + r_base = TargetRefReg(kArg0); LockTemp(r_base); LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base, kNotVolatile); @@ -710,7 +711,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, // The slow path is invoked if the r_base is NULL or the class pointed // to by it is not initialized. LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL); - RegStorage r_tmp = TargetReg(kArg2); + RegStorage r_tmp = TargetReg(kArg2, false); LockTemp(r_tmp); LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base, mirror::Class::StatusOffset().Int32Value(), @@ -954,7 +955,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_, rl_method_.reg, true); } - m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0)); + m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetRefReg(kRet0)); m2l_->OpUnconditionalBranch(cont_); } @@ -1071,10 +1072,10 @@ static void GenNewInstanceImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, uint32_ mir_to_lir->LoadClassType(type_idx, kArg0); if (!is_type_initialized) { func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved); - mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0), true); + mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetRefReg(kArg0), true); } else { func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized); - mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0), true); + mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetRefReg(kArg0), true); } } else { // Use the direct pointer. @@ -1122,9 +1123,6 @@ void Mir2Lir::GenThrow(RegLocation rl_src) { } } -#define IsSameReg(r1, r2) \ - (GetRegInfo(r1)->Master()->GetReg().GetReg() == GetRegInfo(r2)->Master()->GetReg().GetReg()) - // For final classes there are no sub-classes to check and so we can answer the instance-of // question with simple comparisons. 
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest, @@ -1209,15 +1207,15 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess), type_idx, true); } - OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path - LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref + OpRegCopy(class_reg, TargetRefReg(kRet0)); // Align usage with fast path + LoadValueDirectFixed(rl_src, TargetRefReg(kArg0)); // kArg0 <= ref } else if (use_declaring_class) { - LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref + LoadValueDirectFixed(rl_src, TargetRefReg(kArg0)); // kArg0 <= ref LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg, kNotVolatile); } else { // Load dex cache entry into class_reg (kArg2) - LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref + LoadValueDirectFixed(rl_src, TargetRefReg(kArg0)); // kArg0 <= ref LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg, kNotVolatile); int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); @@ -1233,7 +1231,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true); } OpRegCopy(TargetRefReg(kArg2), TargetRefReg(kRet0)); // Align usage with fast path - LoadValueDirectFixed(rl_src, TargetReg(kArg0)); /* reload Ref */ + LoadValueDirectFixed(rl_src, TargetRefReg(kArg0)); /* reload Ref */ // Rejoin code paths LIR* hop_target = NewLIR0(kPseudoTargetLabel); hop_branch->target = hop_target; @@ -1245,7 +1243,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know // On MIPS rArg0 != rl_result, place false in result if branch is taken. LoadConstant(rl_result.reg, 0); } - LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL); + LIR* branch1 = OpCmpImmBranch(kCondEq, TargetRefReg(kArg0), 0, NULL); /* load object->klass_ */ DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); @@ -1256,14 +1254,14 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know if (type_known_final) { // rl_result == ref == null == 0. if (cu_->instruction_set == kThumb2) { - OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same? + OpRegReg(kOpCmp, TargetRefReg(kArg1), TargetRefReg(kArg2)); // Same? LIR* it = OpIT(kCondEq, "E"); // if-convert the test LoadConstant(rl_result.reg, 1); // .eq case - load true LoadConstant(rl_result.reg, 0); // .ne case - load false OpEndIT(it); } else { LoadConstant(rl_result.reg, 0); // ne case - load false - branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL); + branchover = OpCmpBranch(kCondNe, TargetRefReg(kArg1), TargetRefReg(kArg2), NULL); LoadConstant(rl_result.reg, 1); // eq case - load true } } else { @@ -1274,11 +1272,11 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know LIR* it = nullptr; if (!type_known_abstract) { /* Uses conditional nullification */ - OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same? + OpRegReg(kOpCmp, TargetRefReg(kArg1), TargetRefReg(kArg2)); // Same? 
it = OpIT(kCondEq, "EE"); // if-convert the test - LoadConstant(TargetReg(kArg0), 1); // .eq case - load true + LoadConstant(TargetReg(kArg0, false), 1); // .eq case - load true } - OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class + OpRegCopy(TargetRefReg(kArg0), TargetRefReg(kArg2)); // .ne case - arg0 <= class OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class) if (it != nullptr) { OpEndIT(it); @@ -1288,12 +1286,12 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know if (!type_known_abstract) { /* Uses branchovers */ LoadConstant(rl_result.reg, 1); // assume true - branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL); + branchover = OpCmpBranch(kCondEq, TargetRefReg(kArg1), TargetRefReg(kArg2), NULL); } RegStorage r_tgt = cu_->target64 ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) : LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial)); - OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class + OpRegCopy(TargetRefReg(kArg0), TargetRefReg(kArg2)); // .ne case - arg0 <= class OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class) FreeTemp(r_tgt); } @@ -1424,15 +1422,15 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ GenerateTargetLabel(); if (load_) { - m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), - m2l_->TargetReg(kArg1), kNotVolatile); + m2l_->LoadRefDisp(m2l_->TargetRefReg(kArg0), mirror::Object::ClassOffset().Int32Value(), + m2l_->TargetRefReg(kArg1), kNotVolatile); } if (m2l_->cu_->target64) { - m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast), m2l_->TargetReg(kArg2), - m2l_->TargetReg(kArg1), true); + m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast), m2l_->TargetRefReg(kArg2), + m2l_->TargetRefReg(kArg1), true); } else { - m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetReg(kArg2), - m2l_->TargetReg(kArg1), true); + m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetRefReg(kArg2), + m2l_->TargetRefReg(kArg1), true); } m2l_->OpUnconditionalBranch(cont_); @@ -1444,7 +1442,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ if (type_known_abstract) { // Easier case, run slow path if target is non-null (slow path will load from target) - LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, nullptr); + LIR* branch = OpCmpImmBranch(kCondNe, TargetRefReg(kArg0), 0, nullptr); LIR* cont = NewLIR0(kPseudoTargetLabel); AddSlowPath(new (arena_) SlowPath(this, branch, cont, true)); } else { @@ -1453,7 +1451,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // slow path if the classes are not equal. /* Null is OK - continue */ - LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, nullptr); + LIR* branch1 = OpCmpImmBranch(kCondEq, TargetRefReg(kArg0), 0, nullptr); /* load object->klass_ */ DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); LoadRefDisp(TargetRefReg(kArg0), mirror::Object::ClassOffset().Int32Value(), @@ -1675,13 +1673,13 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, // If we haven't already generated the code use the callout function. if (!done) { FlushAllRegs(); /* Send everything to home location */ - LoadValueDirectFixed(rl_src2, TargetReg(kArg1)); + LoadValueDirectFixed(rl_src2, TargetReg(kArg1, false)); RegStorage r_tgt = cu_->target64 ? 
CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod)) : CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod)); - LoadValueDirectFixed(rl_src1, TargetReg(kArg0)); + LoadValueDirectFixed(rl_src1, TargetReg(kArg0, false)); if (check_zero) { - GenDivZeroCheck(TargetReg(kArg1)); + GenDivZeroCheck(TargetReg(kArg1, false)); } // NOTE: callout here is not a safepoint. if (cu_->target64) { @@ -1945,13 +1943,13 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re if (!done) { FlushAllRegs(); /* Everything to home location. */ - LoadValueDirectFixed(rl_src, TargetReg(kArg0)); - Clobber(TargetReg(kArg0)); + LoadValueDirectFixed(rl_src, TargetReg(kArg0, false)); + Clobber(TargetReg(kArg0, false)); if (cu_->target64) { - CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0), lit, + CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0, false), lit, false); } else { - CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0), lit, + CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0, false), lit, false); } if (is_div) @@ -1985,7 +1983,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc bool call_out = false; bool check_zero = false; ThreadOffset<pointer_size> func_offset(-1); - int ret_reg = mir_to_lir->TargetReg(kRet0).GetReg(); + int ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg(); switch (opcode) { case Instruction::NOT_LONG: @@ -2033,7 +2031,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc return; } else { call_out = true; - ret_reg = mir_to_lir->TargetReg(kRet0).GetReg(); + ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg(); func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmul); } break; @@ -2045,7 +2043,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc } call_out = true; check_zero = true; - ret_reg = mir_to_lir->TargetReg(kRet0).GetReg(); + ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg(); func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLdiv); break; case Instruction::REM_LONG: @@ -2058,8 +2056,8 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc check_zero = true; func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod); /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */ - ret_reg = (cu->instruction_set == kThumb2) ? mir_to_lir->TargetReg(kArg2).GetReg() : - mir_to_lir->TargetReg(kRet0).GetReg(); + ret_reg = (cu->instruction_set == kThumb2) ? 
mir_to_lir->TargetReg(kArg2, false).GetReg() : + mir_to_lir->TargetReg(kRet0, false).GetReg(); break; case Instruction::AND_LONG_2ADDR: case Instruction::AND_LONG: @@ -2102,14 +2100,11 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc } else { mir_to_lir->FlushAllRegs(); /* Send everything to home location */ if (check_zero) { - RegStorage r_tmp1 = RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg0), - mir_to_lir->TargetReg(kArg1)); - RegStorage r_tmp2 = RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg2), - mir_to_lir->TargetReg(kArg3)); + RegStorage r_tmp1 = mir_to_lir->TargetReg(kArg0, kArg1); + RegStorage r_tmp2 = mir_to_lir->TargetReg(kArg2, kArg3); mir_to_lir->LoadValueDirectWideFixed(rl_src2, r_tmp2); RegStorage r_tgt = mir_to_lir->CallHelperSetup(func_offset); - mir_to_lir->GenDivZeroCheckWide(RegStorage::MakeRegPair(mir_to_lir->TargetReg(kArg2), - mir_to_lir->TargetReg(kArg3))); + mir_to_lir->GenDivZeroCheckWide(mir_to_lir->TargetReg(kArg2, kArg3)); mir_to_lir->LoadValueDirectWideFixed(rl_src1, r_tmp1); // NOTE: callout here is not a safepoint mir_to_lir->CallHelper(r_tgt, func_offset, false /* not safepoint */); @@ -2117,7 +2112,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false); } // Adjust return regs in to handle case of rem returning kArg2/kArg3 - if (ret_reg == mir_to_lir->TargetReg(kRet0).GetReg()) + if (ret_reg == mir_to_lir->TargetReg(kRet0, false).GetReg()) rl_result = mir_to_lir->GetReturnWide(kCoreReg); else rl_result = mir_to_lir->GetReturnWideAlt(); diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index c75e681683..5631721465 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -132,7 +132,7 @@ INSTANTIATE(void Mir2Lir::CallRuntimeHelper, bool safepoint_pc) template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - LoadConstant(TargetReg(kArg0), arg0); + LoadConstant(TargetReg(kArg0, false), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -142,7 +142,7 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - OpRegCopy(TargetReg(kArg0), arg0); + OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -153,14 +153,13 @@ void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_off RegLocation arg0, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); if (arg0.wide == 0) { - LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0)); + LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0)); } else { RegStorage r_tmp; if (cu_->target64) { - r_tmp = RegStorage::Solo64(TargetReg(kArg0).GetReg()); + r_tmp = TargetReg(kArg0, true); } else { - r_tmp = RegStorage::MakeRegPair(TargetReg(arg0.fp ? kFArg0 : kArg0), - TargetReg(arg0.fp ? kFArg1 : kArg1)); + r_tmp = TargetReg(arg0.fp ? kFArg0 : kArg0, arg0.fp ? 
kFArg1 : kArg1); } LoadValueDirectWideFixed(arg0, r_tmp); } @@ -173,8 +172,8 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - LoadConstant(TargetReg(kArg0), arg0); - LoadConstant(TargetReg(kArg1), arg1); + LoadConstant(TargetReg(kArg0, false), arg0); + LoadConstant(TargetReg(kArg1, false), arg1); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -193,14 +192,14 @@ void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_ } else { if (cu_->instruction_set == kMips) { // skip kArg1 for stack alignment. - r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)); + r_tmp = TargetReg(kArg2, kArg3); } else { - r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2)); + r_tmp = TargetReg(kArg1, kArg2); } } LoadValueDirectWideFixed(arg1, r_tmp); } - LoadConstant(TargetReg(kArg0), arg0); + LoadConstant(TargetReg(kArg0, false), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -213,7 +212,7 @@ void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_ RegStorage r_tgt = CallHelperSetup(helper_offset); DCHECK(!arg0.wide); LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0)); - LoadConstant(TargetReg(kArg1), arg1); + LoadConstant(TargetReg(kArg1, false), arg1); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -225,7 +224,7 @@ void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg1, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1); - LoadConstant(TargetReg(kArg0), arg0); + LoadConstant(TargetReg(kArg0, false), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -235,8 +234,8 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, int arg1, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - OpRegCopy(TargetReg(kArg0), arg0); - LoadConstant(TargetReg(kArg1), arg1); + OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); + LoadConstant(TargetReg(kArg1, false), arg1); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -246,8 +245,8 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - LoadCurrMethodDirect(TargetReg(kArg1)); - LoadConstant(TargetReg(kArg0), arg0); + LoadCurrMethodDirect(TargetRefReg(kArg1)); + LoadConstant(TargetReg(kArg0, false), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -257,7 +256,7 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - DCHECK(!IsSameReg(TargetReg(kArg1), arg0)); + DCHECK(!IsSameReg(TargetReg(kArg1, arg0.Is64Bit()), arg0)); if (TargetReg(kArg0, arg0.Is64Bit()).NotExactlyEquals(arg0)) { OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); } @@ -272,7 +271,7 @@ void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> h RegStorage arg0, RegLocation arg2, bool safepoint_pc) { RegStorage 
r_tgt = CallHelperSetup(helper_offset); - DCHECK(!IsSameReg(TargetReg(kArg1), arg0)); + DCHECK(!IsSameReg(TargetReg(kArg1, arg0.Is64Bit()), arg0)); if (TargetReg(kArg0, arg0.Is64Bit()).NotExactlyEquals(arg0)) { OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); } @@ -289,7 +288,7 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> RegLocation arg0, RegLocation arg1, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - if (cu_->instruction_set == kArm64) { + if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) { RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0); RegStorage arg1_reg; @@ -311,78 +310,47 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> LoadValueDirectWideFixed(arg1, arg1_reg); } } else { + DCHECK(!cu_->target64); if (arg0.wide == 0) { - LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0)); + LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0, false) : TargetReg(kArg0, false)); if (arg1.wide == 0) { if (cu_->instruction_set == kMips) { - LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1)); - } else if (cu_->instruction_set == kArm64) { - LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg1) : TargetReg(kArg1)); - } else if (cu_->instruction_set == kX86_64) { - if (arg0.fp) { - LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg1) : TargetReg(kArg0)); - } else { - LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg0) : TargetReg(kArg1)); - } + LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2, false) : TargetReg(kArg1, false)); } else { - LoadValueDirectFixed(arg1, TargetReg(kArg1)); + LoadValueDirectFixed(arg1, TargetReg(kArg1, false)); } } else { if (cu_->instruction_set == kMips) { RegStorage r_tmp; if (arg1.fp) { - r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3)); + r_tmp = TargetReg(kFArg2, kFArg3); } else { // skip kArg1 for stack alignment. - r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)); + r_tmp = TargetReg(kArg2, kArg3); } LoadValueDirectWideFixed(arg1, r_tmp); } else { RegStorage r_tmp; - if (cu_->target64) { - r_tmp = RegStorage::Solo64(TargetReg(kArg1).GetReg()); - } else { - r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2)); - } + r_tmp = TargetReg(kArg1, kArg2); LoadValueDirectWideFixed(arg1, r_tmp); } } } else { RegStorage r_tmp; if (arg0.fp) { - if (cu_->target64) { - r_tmp = RegStorage::FloatSolo64(TargetReg(kFArg0).GetReg()); - } else { - r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg0), TargetReg(kFArg1)); - } + r_tmp = TargetReg(kFArg0, kFArg1); } else { - if (cu_->target64) { - r_tmp = RegStorage::Solo64(TargetReg(kArg0).GetReg()); - } else { - r_tmp = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1)); - } + r_tmp = TargetReg(kArg0, kArg1); } LoadValueDirectWideFixed(arg0, r_tmp); if (arg1.wide == 0) { - if (cu_->target64) { - LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg1) : TargetReg(kArg1)); - } else { - LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2)); - } + LoadValueDirectFixed(arg1, arg1.fp ? 
TargetReg(kFArg2, false) : TargetReg(kArg2, false)); } else { RegStorage r_tmp; if (arg1.fp) { - if (cu_->target64) { - r_tmp = RegStorage::FloatSolo64(TargetReg(kFArg1).GetReg()); - } else { - r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3)); - } + r_tmp = TargetReg(kFArg2, kFArg3); } else { - if (cu_->target64) { - r_tmp = RegStorage::Solo64(TargetReg(kArg1).GetReg()); - } else { - r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)); - } + r_tmp = TargetReg(kArg2, kArg3); } LoadValueDirectWideFixed(arg1, r_tmp); } @@ -395,8 +363,8 @@ INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation a RegLocation arg1, bool safepoint_pc) void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) { - if (IsSameReg(arg1, TargetReg(kArg0))) { - if (IsSameReg(arg0, TargetReg(kArg1))) { + if (IsSameReg(arg1, TargetReg(kArg0, arg1.Is64Bit()))) { + if (IsSameReg(arg0, TargetReg(kArg1, arg0.Is64Bit()))) { // Swap kArg0 and kArg1 with kArg2 as temp. OpRegCopy(TargetReg(kArg2, arg1.Is64Bit()), arg1); OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); @@ -427,7 +395,7 @@ void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offse RegStorage arg1, int arg2, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); CopyToArgumentRegs(arg0, arg1); - LoadConstant(TargetReg(kArg2), arg2); + LoadConstant(TargetReg(kArg2, false), arg2); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -439,8 +407,8 @@ void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> h int arg0, RegLocation arg2, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2)); - LoadCurrMethodDirect(TargetReg(kArg1)); - LoadConstant(TargetReg(kArg0, arg0), arg0); + LoadCurrMethodDirect(TargetRefReg(kArg1)); + LoadConstant(TargetReg(kArg0, false), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -451,9 +419,9 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg2, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - LoadCurrMethodDirect(TargetReg(kArg1)); - LoadConstant(TargetReg(kArg2), arg2); - LoadConstant(TargetReg(kArg0), arg0); + LoadCurrMethodDirect(TargetRefReg(kArg1)); + LoadConstant(TargetReg(kArg2, false), arg2); + LoadConstant(TargetReg(kArg0, false), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -474,11 +442,11 @@ void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_si if (cu_->target64) { r_tmp = TargetReg(kArg2, true); } else { - r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)); + r_tmp = TargetReg(kArg2, kArg3); } LoadValueDirectWideFixed(arg2, r_tmp); } - LoadConstant(TargetReg(kArg0), arg0); + LoadConstant(TargetReg(kArg0, false), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -522,7 +490,7 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { StoreValue(rl_method, rl_src); // If Method* has been promoted, explicitly flush if (rl_method.location == kLocPhysReg) { - StoreRefDisp(TargetReg(kSp), 0, rl_src.reg, kNotVolatile); + StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile); } if (cu_->num_ins == 0) { @@ -585,15 +553,15 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { 
} } if (need_flush) { - Store32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), reg); + Store32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg); } } else { // If arriving in frame & promoted if (v_map->core_location == kLocPhysReg) { - Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg)); + Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg)); } if (v_map->fp_location == kLocPhysReg) { - Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->fp_reg)); + Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->fp_reg)); } } } @@ -614,13 +582,13 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, case 0: // Get the current Method* [sets kArg0] if (direct_code != static_cast<uintptr_t>(-1)) { if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { - cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code); + cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code); } } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { cg->LoadCodeAddress(target_method, type, kInvokeTgt); } if (direct_method != static_cast<uintptr_t>(-1)) { - cg->LoadConstant(cg->TargetReg(kArg0), direct_method); + cg->LoadConstant(cg->TargetRefReg(kArg0), direct_method); } else { cg->LoadMethodAddress(target_method, type, kArg0); } @@ -643,7 +611,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, // Set up direct code if known. if (direct_code != 0) { if (direct_code != static_cast<uintptr_t>(-1)) { - cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code); + cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code); } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds()); cg->LoadCodeAddress(target_method, type, kInvokeTgt); @@ -662,7 +630,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, if (direct_code == 0) { cg->LoadWordDisp(arg0_ref, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(), - cg->TargetReg(kInvokeTgt)); + cg->TargetPtrReg(kInvokeTgt)); } break; } @@ -700,17 +668,17 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, cg->GenNullCheck(cg->TargetRefReg(kArg1), info->opt_flags); // get this->klass_ [use kArg1, set kInvokeTgt] cg->LoadRefDisp(cg->TargetRefReg(kArg1), mirror::Object::ClassOffset().Int32Value(), - cg->TargetReg(kInvokeTgt), + cg->TargetPtrReg(kInvokeTgt), kNotVolatile); cg->MarkPossibleNullPointerException(info->opt_flags); break; case 2: // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt] - cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(), - cg->TargetReg(kInvokeTgt), + cg->LoadRefDisp(cg->TargetPtrReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(), + cg->TargetPtrReg(kInvokeTgt), kNotVolatile); break; case 3: // Get target method [use kInvokeTgt, set kArg0] - cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), + cg->LoadRefDisp(cg->TargetPtrReg(kInvokeTgt), ObjArray::OffsetOfElement(method_idx).Int32Value(), cg->TargetRefReg(kArg0), kNotVolatile); @@ -719,7 +687,7 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { cg->LoadWordDisp(cg->TargetRefReg(kArg0), mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(), - cg->TargetReg(kInvokeTgt)); + cg->TargetPtrReg(kInvokeTgt)); break; } // 
Intentional fallthrough for X86 @@ -744,9 +712,9 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, switch (state) { case 0: // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)] CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds()); - cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index); + cg->LoadConstant(cg->TargetReg(kHiddenArg, false), target_method.dex_method_index); if (cu->instruction_set == kX86) { - cg->OpRegCopy(cg->TargetReg(kHiddenFpArg), cg->TargetReg(kHiddenArg)); + cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, false), cg->TargetReg(kHiddenArg, false)); } break; case 1: { // Get "this" [set kArg1] @@ -758,19 +726,19 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, cg->GenNullCheck(cg->TargetRefReg(kArg1), info->opt_flags); // Get this->klass_ [use kArg1, set kInvokeTgt] cg->LoadRefDisp(cg->TargetRefReg(kArg1), mirror::Object::ClassOffset().Int32Value(), - cg->TargetReg(kInvokeTgt), + cg->TargetPtrReg(kInvokeTgt), kNotVolatile); cg->MarkPossibleNullPointerException(info->opt_flags); break; case 3: // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt] // NOTE: native pointer. - cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(), - cg->TargetReg(kInvokeTgt), + cg->LoadRefDisp(cg->TargetPtrReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(), + cg->TargetPtrReg(kInvokeTgt), kNotVolatile); break; case 4: // Get target method [use kInvokeTgt, set kArg0] // NOTE: native pointer. - cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), + cg->LoadRefDisp(cg->TargetPtrReg(kInvokeTgt), ObjArray::OffsetOfElement(method_idx % ClassLinker::kImtSize).Int32Value(), cg->TargetRefReg(kArg0), kNotVolatile); @@ -779,7 +747,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { cg->LoadWordDisp(cg->TargetRefReg(kArg0), mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(), - cg->TargetReg(kInvokeTgt)); + cg->TargetPtrReg(kInvokeTgt)); break; } // Intentional fallthrough for X86 @@ -801,11 +769,11 @@ static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset<po if (state == 0) { if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { // Load trampoline target - cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt)); + cg->LoadWordDisp(cg->TargetPtrReg(kSelf), trampoline.Int32Value(), cg->TargetPtrReg(kInvokeTgt)); } // Load kArg0 with method index CHECK_EQ(cu->dex_file, target_method.dex_file); - cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index); + cg->LoadConstant(cg->TargetReg(kArg0, false), target_method.dex_method_index); return 1; } return -1; @@ -884,7 +852,7 @@ int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state, uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, InvokeType type, bool skip_this) { int last_arg_reg = 3 - 1; - int arg_regs[3] = {TargetReg(kArg1).GetReg(), TargetReg(kArg2).GetReg(), TargetReg(kArg3).GetReg()}; + int arg_regs[3] = {TargetReg(kArg1, false).GetReg(), TargetReg(kArg2, false).GetReg(), TargetReg(kArg3, false).GetReg()}; int next_reg = 0; int next_arg = 0; @@ -959,17 +927,17 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, } } else { // kArg2 & rArg3 can safely be used here - reg = TargetReg(kArg3); + reg = TargetReg(kArg3, false); { ScopedMemRefType 
mem_ref_type(this, ResourceMask::kDalvikReg); - Load32Disp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg); + Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg); } call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); } { ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - Store32Disp(TargetReg(kSp), (next_use + 1) * 4, reg); + Store32Disp(TargetPtrReg(kSp), (next_use + 1) * 4, reg); } call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); @@ -983,8 +951,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, if (rl_arg.location == kLocPhysReg) { arg_reg = rl_arg.reg; } else { - arg_reg = rl_arg.wide ? RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)) : - TargetReg(kArg2); + arg_reg = rl_arg.wide ? TargetReg(kArg2, kArg3) : TargetReg(kArg2, false); if (rl_arg.wide) { LoadValueDirectWideFixed(rl_arg, arg_reg); } else { @@ -997,10 +964,10 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, { ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); if (rl_arg.wide) { - StoreBaseDisp(TargetReg(kSp), outs_offset, arg_reg, k64, kNotVolatile); + StoreBaseDisp(TargetPtrReg(kSp), outs_offset, arg_reg, k64, kNotVolatile); next_use += 2; } else { - Store32Disp(TargetReg(kSp), outs_offset, arg_reg); + Store32Disp(TargetPtrReg(kSp), outs_offset, arg_reg); next_use++; } } @@ -1015,13 +982,13 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, if (pcrLabel) { if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { - *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags); + *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags); } else { *pcrLabel = nullptr; // In lieu of generating a check for kArg1 being null, we need to // perform a load when doing implicit checks. 
RegStorage tmp = AllocTemp(); - Load32Disp(TargetReg(kArg1), 0, tmp); + Load32Disp(TargetRefReg(kArg1), 0, tmp); MarkPossibleNullPointerException(info->opt_flags); FreeTemp(tmp); } @@ -1067,14 +1034,14 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, loc = UpdateLocWide(loc); if ((next_arg >= 2) && (loc.location == kLocPhysReg)) { ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile); + StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile); } next_arg += 2; } else { loc = UpdateLoc(loc); if ((next_arg >= 3) && (loc.location == kLocPhysReg)) { ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg); + Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg); } next_arg++; } @@ -1095,23 +1062,23 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, // Use vldm/vstm pair using kArg3 as a temp call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); - OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset); + OpRegRegImm(kOpAdd, TargetRefReg(kArg3), TargetPtrReg(kSp), start_offset); LIR* ld = nullptr; { ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - ld = OpVldm(TargetReg(kArg3), regs_left_to_pass_via_stack); + ld = OpVldm(TargetRefReg(kArg3), regs_left_to_pass_via_stack); } // TUNING: loosen barrier ld->u.m.def_mask = &kEncodeAll; call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); - OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4)); + OpRegRegImm(kOpAdd, TargetRefReg(kArg3), TargetPtrReg(kSp), 4 /* Method* */ + (3 * 4)); call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); LIR* st = nullptr; { ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - st = OpVstm(TargetReg(kArg3), regs_left_to_pass_via_stack); + st = OpVstm(TargetRefReg(kArg3), regs_left_to_pass_via_stack); } st->u.m.def_mask = &kEncodeAll; call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, @@ -1160,23 +1127,23 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0; if (src_is_16b_aligned) { - ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP); + ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovA128FP); } else if (src_is_8b_aligned) { - ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP); - ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1), + ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovLo128FP); + ld2 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset + (bytes_to_move >> 1), kMovHi128FP); } else { - ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP); + ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovU128FP); } if (dest_is_16b_aligned) { - st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP); + st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovA128FP); } else if (dest_is_8b_aligned) { - st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP); - st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1), + st1 = 
OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovLo128FP); + st2 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset + (bytes_to_move >> 1), temp, kMovHi128FP); } else { - st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP); + st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovU128FP); } // TODO If we could keep track of aliasing information for memory accesses that are wider @@ -1210,11 +1177,11 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, // Instead of allocating a new temp, simply reuse one of the registers being used // for argument passing. - RegStorage temp = TargetReg(kArg3); + RegStorage temp = TargetReg(kArg3, false); // Now load the argument VR and store to the outs. - Load32Disp(TargetReg(kSp), current_src_offset, temp); - Store32Disp(TargetReg(kSp), current_dest_offset, temp); + Load32Disp(TargetPtrReg(kSp), current_src_offset, temp); + Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp); } current_src_offset += bytes_to_move; @@ -1223,14 +1190,14 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, } } else { // Generate memcpy - OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset); - OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset); + OpRegRegImm(kOpAdd, TargetRefReg(kArg0), TargetPtrReg(kSp), outs_offset); + OpRegRegImm(kOpAdd, TargetRefReg(kArg1), TargetPtrReg(kSp), start_offset); if (cu_->target64) { - CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetReg(kArg0), - TargetReg(kArg1), (info->num_arg_words - 3) * 4, false); + CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetRefReg(kArg0), + TargetRefReg(kArg1), (info->num_arg_words - 3) * 4, false); } else { - CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetReg(kArg0), - TargetReg(kArg1), (info->num_arg_words - 3) * 4, false); + CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetRefReg(kArg0), + TargetRefReg(kArg1), (info->num_arg_words - 3) * 4, false); } } @@ -1242,13 +1209,13 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, direct_code, direct_method, type); if (pcrLabel) { if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { - *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags); + *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags); } else { *pcrLabel = nullptr; // In lieu of generating a check for kArg1 being null, we need to // perform a load when doing implicit checks. 
RegStorage tmp = AllocTemp(); - Load32Disp(TargetReg(kArg1), 0, tmp); + Load32Disp(TargetRefReg(kArg1), 0, tmp); MarkPossibleNullPointerException(info->opt_flags); FreeTemp(tmp); } @@ -1565,9 +1532,9 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { ClobberCallerSave(); LockCallTemps(); // Using fixed registers - RegStorage reg_ptr = TargetReg(kArg0); - RegStorage reg_char = TargetReg(kArg1); - RegStorage reg_start = TargetReg(kArg2); + RegStorage reg_ptr = TargetRefReg(kArg0); + RegStorage reg_char = TargetReg(kArg1, false); + RegStorage reg_start = TargetReg(kArg2, false); LoadValueDirectFixed(rl_obj, reg_ptr); LoadValueDirectFixed(rl_char, reg_char); @@ -1609,8 +1576,8 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { } ClobberCallerSave(); LockCallTemps(); // Using fixed registers - RegStorage reg_this = TargetReg(kArg0); - RegStorage reg_cmp = TargetReg(kArg1); + RegStorage reg_this = TargetRefReg(kArg0); + RegStorage reg_cmp = TargetRefReg(kArg1); RegLocation rl_this = info->args[0]; RegLocation rl_cmp = info->args[1]; @@ -1657,11 +1624,11 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) { case kThumb2: // Fall-through. case kMips: - Load32Disp(TargetReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg); + Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg); break; case kArm64: - Load32Disp(TargetReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg); + Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg); break; case kX86: @@ -1695,7 +1662,7 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info, RegLocation rl_object = LoadValue(rl_src_obj, kRefReg); RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg); - RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); + RegLocation rl_result = EvalLoc(rl_dest, rl_dest.ref ? kRefReg : kCoreReg, true); if (is_long) { if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) { LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg, k64); @@ -1875,7 +1842,7 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) { } LIR* call_inst; if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) { - call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt)); + call_inst = OpReg(kOpBlx, TargetPtrReg(kInvokeTgt)); } else { if (fast_path) { if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) { @@ -1883,7 +1850,7 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) { call_inst = reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method, info->type); } else { - call_inst = OpMem(kOpBlx, TargetReg(kArg0), + call_inst = OpMem(kOpBlx, TargetRefReg(kArg0), mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()); } } else { diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc index 1cddeb9771..e5798fdc0b 100644 --- a/compiler/dex/quick/gen_loadstore.cc +++ b/compiler/dex/quick/gen_loadstore.cc @@ -66,7 +66,7 @@ void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) { } else { // Lives in the frame, need to store. 
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32, kNotVolatile); + StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32, kNotVolatile); } if (!zero_reg.Valid()) { FreeTemp(temp_reg); @@ -93,9 +93,9 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) { (rl_src.location == kLocCompilerTemp)); ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); if (rl_src.ref) { - LoadRefDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, kNotVolatile); + LoadRefDisp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, kNotVolatile); } else { - Load32Disp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest); + Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest); } } } @@ -126,7 +126,7 @@ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest) { DCHECK((rl_src.location == kLocDalvikFrame) || (rl_src.location == kLocCompilerTemp)); ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - LoadBaseDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64, kNotVolatile); + LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64, kNotVolatile); } } @@ -215,9 +215,9 @@ void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) { def_start = last_lir_insn_; ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); if (rl_dest.ref) { - StoreRefDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, kNotVolatile); + StoreRefDisp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, kNotVolatile); } else { - Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg); + Store32Disp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg); } MarkClean(rl_dest); def_end = last_lir_insn_; @@ -305,7 +305,7 @@ void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) { DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1), mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low))); ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64, kNotVolatile); + StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64, kNotVolatile); MarkClean(rl_dest); def_end = last_lir_insn_; MarkDefWide(rl_dest, def_start, def_end); @@ -333,7 +333,7 @@ void Mir2Lir::StoreFinalValue(RegLocation rl_dest, RegLocation rl_src) { if (IsDirty(rl_dest.reg) && LiveOut(rl_dest.s_reg_low)) { LIR *def_start = last_lir_insn_; ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg); + Store32Disp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg); MarkClean(rl_dest); LIR *def_end = last_lir_insn_; if (!rl_dest.ref) { @@ -369,7 +369,7 @@ void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) { DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1), mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low))); ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64, kNotVolatile); + StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64, kNotVolatile); MarkClean(rl_dest); LIR *def_end = last_lir_insn_; MarkDefWide(rl_dest, def_start, def_end); diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index 237288e7b1..caadc0ad89 100644 --- 
a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -92,7 +92,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide) if (!reg_arg.Valid()) { RegStorage new_reg = wide ? AllocTypedTempWide(false, reg_class) : AllocTypedTemp(false, reg_class); - LoadBaseDisp(TargetReg(kSp), offset, new_reg, wide ? k64 : k32, kNotVolatile); + LoadBaseDisp(TargetPtrReg(kSp), offset, new_reg, wide ? k64 : k32, kNotVolatile); return new_reg; } else { // Check if we need to copy the arg to a different reg_class. @@ -120,7 +120,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide) // If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg. if (!reg_arg_low.Valid()) { RegStorage new_regs = AllocTypedTempWide(false, reg_class); - LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64, kNotVolatile); + LoadBaseDisp(TargetPtrReg(kSp), offset, new_regs, k64, kNotVolatile); return new_regs; // The reg_class is OK, we can return. } else { // Assume that no ABI allows splitting a wide fp reg between a narrow fp reg and memory, @@ -128,7 +128,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide) DCHECK(!reg_arg_low.IsFloat()); reg_arg_high = AllocTemp(); int offset_high = offset + sizeof(uint32_t); - Load32Disp(TargetReg(kSp), offset_high, reg_arg_high); + Load32Disp(TargetPtrReg(kSp), offset_high, reg_arg_high); // Continue below to check the reg_class. } } @@ -140,7 +140,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide) // conceivably break this assumption but Android supports only little-endian architectures. DCHECK(!wide); reg_arg_low = AllocTypedTemp(false, reg_class); - Load32Disp(TargetReg(kSp), offset, reg_arg_low); + Load32Disp(TargetPtrReg(kSp), offset, reg_arg_low); return reg_arg_low; // The reg_class is OK, we can return. 
  }
@@ -185,7 +185,7 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
    if (reg.Valid()) {
      OpRegCopy(rl_dest.reg, reg);
    } else {
-     Load32Disp(TargetReg(kSp), offset, rl_dest.reg);
+     Load32Disp(TargetPtrReg(kSp), offset, rl_dest.reg);
    }
  } else {
    if (cu_->target64) {
@@ -193,7 +193,7 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
      if (reg.Valid()) {
        OpRegCopy(rl_dest.reg, reg);
      } else {
-       LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
+       LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
      }
      return;
    }
@@ -206,12 +206,12 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
    } else if (reg_arg_low.Valid() && !reg_arg_high.Valid()) {
      OpRegCopy(rl_dest.reg, reg_arg_low);
      int offset_high = offset + sizeof(uint32_t);
-     Load32Disp(TargetReg(kSp), offset_high, rl_dest.reg.GetHigh());
+     Load32Disp(TargetPtrReg(kSp), offset_high, rl_dest.reg.GetHigh());
    } else if (!reg_arg_low.Valid() && reg_arg_high.Valid()) {
      OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
-     Load32Disp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
+     Load32Disp(TargetPtrReg(kSp), offset, rl_dest.reg.GetLow());
    } else {
-     LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
+     LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
    }
  }
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 8ebd64af7a..47844cbb34 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1202,6 +1202,16 @@ class Mir2Lir : public Backend {
    }

    /**
+    * @brief Portable way of getting a special register pair from the backend.
+    * @param reg1 Enumeration describing the purpose of the first register.
+    * @param reg2 Enumeration describing the purpose of the second register.
+    * @return Return the #RegStorage corresponding to the given purposes.
+    */
+   virtual RegStorage TargetReg(SpecialTargetRegister reg1, SpecialTargetRegister reg2) {
+     return RegStorage::MakeRegPair(TargetReg(reg1, false), TargetReg(reg2, false));
+   }
+
+   /**
    * @brief Portable way of getting a special register for storing a reference.
    * @see TargetReg()
    */
@@ -1209,6 +1219,14 @@ class Mir2Lir : public Backend {
      return TargetReg(reg);
    }

+   /**
+    * @brief Portable way of getting a special register for storing a pointer.
+    * @see TargetReg()
+    */
+   virtual RegStorage TargetPtrReg(SpecialTargetRegister reg) {
+     return TargetReg(reg);
+   }
+
    // Get a reg storage corresponding to the wide & ref flags of the reg location.
    virtual RegStorage TargetReg(SpecialTargetRegister reg, RegLocation loc) {
      if (loc.ref) {
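The new Mir2Lir virtuals default TargetRefReg() and TargetPtrReg() to the plain TargetReg(), which is already correct for 32-bit backends; 64-bit backends override them to hand back the wide view of the same register. A sketch of that dispatch with simplified stand-in types (not the real Mir2Lir interface):

    #include <iostream>

    struct RegStorage { int raw; bool is64; };

    class Backend {
     public:
      virtual ~Backend() {}
      virtual RegStorage TargetReg(int symbolic) { return {symbolic, false}; }
      // Default: a pointer register is just the plain register (fine for 32-bit).
      virtual RegStorage TargetPtrReg(int symbolic) { return TargetReg(symbolic); }
    };

    class Backend64 : public Backend {
     public:
      // A 64-bit target returns the 64-bit view for pointer-typed uses.
      RegStorage TargetPtrReg(int symbolic) override {
        RegStorage r = TargetReg(symbolic);
        r.is64 = true;
        return r;
      }
    };

    int main() {
      Backend64 b;
      std::cout << b.TargetPtrReg(0).is64 << "\n";  // prints 1
      return 0;
    }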
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 6bedae868c..13bd4432d7 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -757,7 +757,7 @@ void Mir2Lir::FlushRegWide(RegStorage reg) {
      }
      int v_reg = mir_graph_->SRegToVReg(info1->SReg());
      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-     StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
+     StoreBaseDisp(TargetPtrReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
    }
  } else {
    RegisterInfo* info = GetRegInfo(reg);
@@ -765,7 +765,7 @@ void Mir2Lir::FlushRegWide(RegStorage reg) {
      info->SetIsDirty(false);
      int v_reg = mir_graph_->SRegToVReg(info->SReg());
      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-     StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
+     StoreBaseDisp(TargetPtrReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
    }
  }
}
@@ -777,7 +777,7 @@ void Mir2Lir::FlushReg(RegStorage reg) {
    info->SetIsDirty(false);
    int v_reg = mir_graph_->SRegToVReg(info->SReg());
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-   StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, kWord, kNotVolatile);
+   StoreBaseDisp(TargetPtrReg(kSp), VRegOffset(v_reg), reg, kWord, kNotVolatile);
  }
}
@@ -1313,7 +1313,7 @@ void Mir2Lir::DoPromotion() {
    RegLocation *curr = &mir_graph_->reg_location_[i];
    int p_map_idx = SRegToPMap(curr->s_reg_low);
    int reg_num = curr->fp ? promotion_map_[p_map_idx].fp_reg : promotion_map_[p_map_idx].core_reg;
-   bool wide = curr->wide || (cu_->target64 && curr->ref && cu_->instruction_set != kX86_64);
+   bool wide = curr->wide || (cu_->target64 && curr->ref);
    RegStorage reg = RegStorage::InvalidReg();
    if (curr->fp && promotion_map_[p_map_idx].fp_location == kLocPhysReg) {
      if (wide && cu_->instruction_set == kThumb2) {
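The DoPromotion() change above drops the kX86_64 carve-out: now that the x86-64 backend can produce wide register views for references (see the codegen_x86.h changes below), a promoted reference on any 64-bit target is treated as wide. The rule in isolation, as an illustrative helper:

    // Promotion width after this change: a reference promoted on any 64-bit
    // target counts as wide, so it occupies a 64-bit register; previously
    // x86-64 was excluded from that rule.
    bool PromoteAsWide(bool is_wide_vreg, bool is_ref, bool target64) {
      return is_wide_vreg || (target64 && is_ref);
    }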
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index bfbfa0e49a..6ca220cb2e 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -158,29 +158,33 @@ void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
  // Making a call - use explicit registers
  FlushAllRegs();   /* Everything to home location */
- LoadValueDirectFixed(rl_src, rs_rX86_ARG0);
+ RegStorage array_ptr = TargetRefReg(kArg0);
+ RegStorage payload = TargetPtrReg(kArg1);
+ RegStorage method_start = TargetPtrReg(kArg2);
+
+ LoadValueDirectFixed(rl_src, array_ptr);
  // Materialize a pointer to the fill data image
  if (base_of_code_ != nullptr) {
    // We can use the saved value.
    RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
    if (rl_method.wide) {
-     LoadValueDirectWide(rl_method, rs_rX86_ARG2);
+     LoadValueDirectWide(rl_method, method_start);
    } else {
-     LoadValueDirect(rl_method, rs_rX86_ARG2);
+     LoadValueDirect(rl_method, method_start);
    }
    store_method_addr_used_ = true;
  } else {
    // TODO(64) force to be 64-bit
-   NewLIR1(kX86StartOfMethod, rs_rX86_ARG2.GetReg());
+   NewLIR1(kX86StartOfMethod, method_start.GetReg());
  }
- NewLIR2(kX86PcRelAdr, rs_rX86_ARG1.GetReg(), WrapPointer(tab_rec));
- NewLIR2(cu_->target64 ? kX86Add64RR : kX86Add32RR, rs_rX86_ARG1.GetReg(), rs_rX86_ARG2.GetReg());
+ NewLIR2(kX86PcRelAdr, payload.GetReg(), WrapPointer(tab_rec));
+ OpRegReg(kOpAdd, payload, method_start);
  if (cu_->target64) {
-   CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData), rs_rX86_ARG0,
-                           rs_rX86_ARG1, true);
+   CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData), array_ptr,
+                           payload, true);
  } else {
-   CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData), rs_rX86_ARG0,
-                           rs_rX86_ARG1, true);
+   CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData), array_ptr,
+                           payload, true);
  }
}
@@ -291,11 +295,12 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  FlushIns(ArgLocs, rl_method);

  if (base_of_code_ != nullptr) {
+   RegStorage method_start = TargetPtrReg(kArg0);
    // We have been asked to save the address of the method start for later use.
-   setup_method_address_[0] = NewLIR1(kX86StartOfMethod, rs_rX86_ARG0.GetReg());
+   setup_method_address_[0] = NewLIR1(kX86StartOfMethod, method_start.GetReg());
    int displacement = SRegOffset(base_of_code_->s_reg_low);
    // Native pointer - must be natural word size.
-   setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0,
+   setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, method_start,
                                             cu_->target64 ? k64 : k32, kNotVolatile);
  }
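Both call_x86.cc hunks replace hard-coded physical names (rs_rX86_ARG0..2) with role-based queries, so the same code yields 32-bit views on x86 and 64-bit views on x86-64 without explicit width checks such as the removed kX86Add64RR/kX86Add32RR selection. A reduced sketch of that shape (stand-in types and numbering, not ART's):

    enum SpecialTargetRegister { kArg0, kArg1, kArg2 };

    struct RegStorage { int raw; };

    // Stand-ins for the Mir2Lir query methods used above.
    struct CodeGen {
      bool target64;
      RegStorage TargetRefReg(SpecialTargetRegister r) { return {r * 2 + (target64 ? 1 : 0)}; }
      RegStorage TargetPtrReg(SpecialTargetRegister r) { return TargetRefReg(r); }
    };

    // The helper picks registers by role once, up front, so the body never
    // needs to mention a physical register name again.
    void FillArrayDataShape(CodeGen* cg) {
      RegStorage array_ref    = cg->TargetRefReg(kArg0);  // heap reference
      RegStorage payload      = cg->TargetPtrReg(kArg1);  // native pointer
      RegStorage method_start = cg->TargetPtrReg(kArg2);  // native pointer
      (void)array_ref; (void)payload; (void)method_start;
    }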
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index afb618494d..646da7f22e 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -34,9 +34,11 @@ class X86Mir2Lir : public Mir2Lir {
  class InToRegStorageX86_64Mapper : public InToRegStorageMapper {
   public:
-   InToRegStorageX86_64Mapper() : cur_core_reg_(0), cur_fp_reg_(0) {}
+   explicit InToRegStorageX86_64Mapper(Mir2Lir* ml) : ml_(ml), cur_core_reg_(0), cur_fp_reg_(0) {}
    virtual ~InToRegStorageX86_64Mapper() {}
    virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide);
+  protected:
+   Mir2Lir* ml_;
   private:
    int cur_core_reg_;
    int cur_fp_reg_;
@@ -85,7 +87,22 @@ class X86Mir2Lir : public Mir2Lir {
  void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);

  // Required for target - register utilities.
- RegStorage TargetReg(SpecialTargetRegister reg);
+ RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
+ RegStorage TargetReg32(SpecialTargetRegister reg);
+ RegStorage TargetReg(SpecialTargetRegister symbolic_reg, bool is_wide) OVERRIDE {
+   RegStorage reg = TargetReg32(symbolic_reg);
+   if (is_wide) {
+     return (reg.Is64Bit()) ? reg : As64BitReg(reg);
+   } else {
+     return (reg.Is32Bit()) ? reg : As32BitReg(reg);
+   }
+ }
+ RegStorage TargetRefReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+   return TargetReg(symbolic_reg, cu_->target64);
+ }
+ RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+   return TargetReg(symbolic_reg, cu_->target64);
+ }
  RegStorage GetArgMappingToPhysicalReg(int arg_num);
  RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
  RegLocation GetReturnAlt();
@@ -388,6 +405,43 @@ class X86Mir2Lir : public Mir2Lir {
  std::vector<uint8_t>* ReturnCallFrameInformation();

 protected:
+ // Casting of RegStorage
+ RegStorage As32BitReg(RegStorage reg) {
+   DCHECK(!reg.IsPair());
+   if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+     if (kFailOnSizeError) {
+       LOG(FATAL) << "Expected 64b register " << reg.GetReg();
+     } else {
+       LOG(WARNING) << "Expected 64b register " << reg.GetReg();
+       return reg;
+     }
+   }
+   RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+                                   reg.GetRawBits() & RegStorage::kRegTypeMask);
+   DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+                             ->GetReg().GetReg(),
+             ret_val.GetReg());
+   return ret_val;
+ }
+
+ RegStorage As64BitReg(RegStorage reg) {
+   DCHECK(!reg.IsPair());
+   if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+     if (kFailOnSizeError) {
+       LOG(FATAL) << "Expected 32b register " << reg.GetReg();
+     } else {
+       LOG(WARNING) << "Expected 32b register " << reg.GetReg();
+       return reg;
+     }
+   }
+   RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+                                   reg.GetRawBits() & RegStorage::kRegTypeMask);
+   DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+                             ->GetReg().GetReg(),
+             ret_val.GetReg());
+   return ret_val;
+ }
+
  size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
                     int32_t raw_base, int32_t displacement);
  void CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg);
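The As32BitReg()/As64BitReg() helpers above are view casts: they keep the register number and type bits and rewrite only the shape field, optionally flagging a caller that passes the wrong width. A compilable miniature of the idiom (simplified RegStorage stand-in; the real mask values and layout differ):

    #include <cassert>
    #include <cstdint>

    struct RegStorage {
      static constexpr uint16_t kShape32 = 0x1000;
      static constexpr uint16_t kShape64 = 0x2000;
      static constexpr uint16_t kShapeMask = 0xF000;
      uint16_t bits;
      bool Is64Bit() const { return (bits & kShapeMask) == kShape64; }
    };

    // Rewrite only the shape field; the register number survives untouched.
    RegStorage As64BitReg(RegStorage reg) {
      assert(!reg.Is64Bit());  // expect a 32-bit view coming in
      return RegStorage{static_cast<uint16_t>(
          RegStorage::kShape64 | (reg.bits & ~RegStorage::kShapeMask))};
    }

    int main() {
      RegStorage eax{RegStorage::kShape32 | 0};  // 32-bit view of register 0
      assert(As64BitReg(eax).Is64Bit());         // now the RAX view
      return 0;
    }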
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 7454475555..4414d7c090 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -145,12 +145,12 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
    } else {
      // It must have been register promoted if it is not a temp but is still in physical
      // register. Since we need it to be in memory to convert, we place it there now.
-     StoreBaseDisp(TargetReg(kSp), src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
+     StoreBaseDisp(rs_rX86_SP, src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
    }
  }

  // Push the source virtual register onto the x87 stack.
- LIR *fild64 = NewLIR2NoDest(kX86Fild64M, TargetReg(kSp).GetReg(),
+ LIR *fild64 = NewLIR2NoDest(kX86Fild64M, rs_rX86_SP.GetReg(),
                              src_v_reg_offset + LOWORD_OFFSET);
  AnnotateDalvikRegAccess(fild64, (src_v_reg_offset + LOWORD_OFFSET) >> 2,
                          true /* is_load */, true /* is64bit */);
@@ -158,7 +158,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
  // Now pop off x87 stack and store it in the destination VR's stack location.
  int opcode = is_double ? kX86Fstp64M : kX86Fstp32M;
  int displacement = is_double ? dest_v_reg_offset + LOWORD_OFFSET : dest_v_reg_offset;
- LIR *fstp = NewLIR2NoDest(opcode, TargetReg(kSp).GetReg(), displacement);
+ LIR *fstp = NewLIR2NoDest(opcode, rs_rX86_SP.GetReg(), displacement);
  AnnotateDalvikRegAccess(fstp, displacement >> 2, false /* is_load */, is_double);

  /*
@@ -179,11 +179,11 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
   */
  rl_result = EvalLoc(rl_dest, kFPReg, true);
  if (is_double) {
-   LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+   LoadBaseDisp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);

    StoreFinalValueWide(rl_dest, rl_result);
  } else {
-   Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
+   Load32Disp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg);

    StoreFinalValue(rl_dest, rl_result);
  }
@@ -364,7 +364,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
    } else {
      // It must have been register promoted if it is not a temp but is still in physical
      // register. Since we need it to be in memory to convert, we place it there now.
-     StoreBaseDisp(TargetReg(kSp), src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
+     StoreBaseDisp(rs_rX86_SP, src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
                    kNotVolatile);
    }
  }
@@ -375,7 +375,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
      FlushSpecificReg(reg_info);
      ResetDef(rl_src2.reg);
    } else {
-     StoreBaseDisp(TargetReg(kSp), src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
+     StoreBaseDisp(rs_rX86_SP, src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
                    kNotVolatile);
    }
  }
@@ -383,12 +383,12 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
  int fld_opcode = is_double ? kX86Fld64M : kX86Fld32M;

  // Push the source virtual registers onto the x87 stack.
- LIR *fld_2 = NewLIR2NoDest(fld_opcode, TargetReg(kSp).GetReg(),
+ LIR *fld_2 = NewLIR2NoDest(fld_opcode, rs_rX86_SP.GetReg(),
                             src2_v_reg_offset + LOWORD_OFFSET);
  AnnotateDalvikRegAccess(fld_2, (src2_v_reg_offset + LOWORD_OFFSET) >> 2,
                          true /* is_load */, is_double /* is64bit */);

- LIR *fld_1 = NewLIR2NoDest(fld_opcode, TargetReg(kSp).GetReg(),
+ LIR *fld_1 = NewLIR2NoDest(fld_opcode, rs_rX86_SP.GetReg(),
                             src1_v_reg_offset + LOWORD_OFFSET);
  AnnotateDalvikRegAccess(fld_1, (src1_v_reg_offset + LOWORD_OFFSET) >> 2,
                          true /* is_load */, is_double /* is64bit */);
@@ -417,7 +417,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
  // Now store result in the destination VR's stack location.
  int displacement = dest_v_reg_offset + LOWORD_OFFSET;
  int opcode = is_double ? kX86Fst64M : kX86Fst32M;
- LIR *fst = NewLIR2NoDest(opcode, TargetReg(kSp).GetReg(), displacement);
+ LIR *fst = NewLIR2NoDest(opcode, rs_rX86_SP.GetReg(), displacement);
  AnnotateDalvikRegAccess(fst, displacement >> 2, false /* is_load */, is_double /* is64bit */);

  // Pop ST(1) and ST(0).
@@ -436,10 +436,10 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
  if (rl_result.location == kLocPhysReg) {
    rl_result = EvalLoc(rl_dest, kFPReg, true);
    if (is_double) {
-     LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+     LoadBaseDisp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
      StoreFinalValueWide(rl_dest, rl_result);
    } else {
-     Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
+     Load32Disp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg);
      StoreFinalValue(rl_dest, rl_result);
    }
  }
@@ -627,7 +627,7 @@ bool X86Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
    // Operate directly into memory.
    int displacement = SRegOffset(rl_dest.s_reg_low);
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-   LIR *lir = NewLIR3(kX86And32MI, TargetReg(kSp).GetReg(), displacement, 0x7fffffff);
+   LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement, 0x7fffffff);
    AnnotateDalvikRegAccess(lir, displacement >> 2, false /*is_load */, false /* is_64bit */);
    AnnotateDalvikRegAccess(lir, displacement >> 2, true /* is_load */, false /* is_64bit*/);
    return true;
@@ -682,7 +682,7 @@ bool X86Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
    // Operate directly into memory.
    int displacement = SRegOffset(rl_dest.s_reg_low);
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-   LIR *lir = NewLIR3(kX86And32MI, TargetReg(kSp).GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff);
+   LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff);
    AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, true /* is_load */, true /* is_64bit*/);
    AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, false /*is_load */, true /* is_64bit */);
    return true;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 14a18e5954..7a4ea26432 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -93,7 +93,7 @@ X86ConditionCode X86ConditionEncoding(ConditionCode cond) {
}

LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
- NewLIR2(kX86Cmp32RR, src1.GetReg(), src2.GetReg());
+ NewLIR2(src1.Is64Bit() ? kX86Cmp64RR : kX86Cmp32RR, src1.GetReg(), src2.GetReg());
  X86ConditionCode cc = X86ConditionEncoding(cond);
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
@@ -105,9 +105,13 @@ LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg,
                                int check_value, LIR* target) {
  if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
    // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
-   NewLIR2(kX86Test32RR, reg.GetReg(), reg.GetReg());
+   NewLIR2(reg.Is64Bit() ? kX86Test64RR : kX86Test32RR, reg.GetReg(), reg.GetReg());
  } else {
-   NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg.GetReg(), check_value);
+   if (reg.Is64Bit()) {
+     NewLIR2(IS_SIMM8(check_value) ? kX86Cmp64RI8 : kX86Cmp64RI, reg.GetReg(), check_value);
+   } else {
+     NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg.GetReg(), check_value);
+   }
  }
  X86ConditionCode cc = X86ConditionEncoding(cond);
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
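The two int_x86.cc hunks above make compare-and-branch width-aware: the opcode is now chosen from the operand's view, so a 64-bit register gets the REX.W form of cmp/test instead of silently comparing only the low 32 bits. The selection logic, restated in isolation (illustrative enum, not ART's opcode numbering):

    enum X86OpCode { kX86Cmp32RR, kX86Cmp64RR, kX86Test32RR, kX86Test64RR };

    struct Reg { bool is64; };

    // Pick the compare form from the operand's register view:
    // cmp r64, r64 for a 64-bit view, cmp r32, r32 otherwise.
    X86OpCode CmpOpcodeFor(Reg src) {
      return src.is64 ? kX86Cmp64RR : kX86Cmp32RR;
    }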
@@ -241,7 +245,7 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  // FIXME: depending on how you use registers you could get a false != mismatch when dealing
  // with different views of the same underlying physical resource (i.e. solo32 vs. solo64).
  const bool result_reg_same_as_src =
-     (rl_src.location == kLocPhysReg && rl_src.reg.GetReg() == rl_result.reg.GetReg());
+     (rl_src.location == kLocPhysReg && rl_src.reg.GetRegNum() == rl_result.reg.GetRegNum());
  const bool true_zero_case = (true_val == 0 && false_val != 0 && !result_reg_same_as_src);
  const bool false_zero_case = (false_val == 0 && true_val != 0 && !result_reg_same_as_src);
  const bool catch_all_case = !(true_zero_case || false_zero_case);
@@ -846,14 +850,14 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
      const size_t push_offset = (push_si ? 4u : 0u) + (push_di ? 4u : 0u);
      if (!obj_in_si && !obj_in_di) {
-       LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
+       LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
        // Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
        DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
        int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
        AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
      }
      if (!off_in_si && !off_in_di) {
-       LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
+       LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
        // Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
        DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
        int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
@@ -1008,23 +1012,23 @@ void X86Mir2Lir::GenArrayBoundsCheck(RegStorage index,
      RegStorage new_index = index_;
      // Move index out of kArg1, either directly to kArg0, or to kArg2.
      // TODO: clean-up to check not a number but with type
-     if (index_.GetRegNum() == m2l_->TargetReg(kArg1).GetRegNum()) {
-       if (array_base_.GetRegNum() == m2l_->TargetReg(kArg0).GetRegNum()) {
-         m2l_->OpRegCopy(m2l_->TargetReg(kArg2), index_);
-         new_index = m2l_->TargetReg(kArg2);
+     if (index_ == m2l_->TargetReg(kArg1, false)) {
+       if (array_base_ == m2l_->TargetRefReg(kArg0)) {
+         m2l_->OpRegCopy(m2l_->TargetReg(kArg2, false), index_);
+         new_index = m2l_->TargetReg(kArg2, false);
        } else {
-         m2l_->OpRegCopy(m2l_->TargetReg(kArg0), index_);
-         new_index = m2l_->TargetReg(kArg0);
+         m2l_->OpRegCopy(m2l_->TargetReg(kArg0, false), index_);
+         new_index = m2l_->TargetReg(kArg0, false);
        }
      }
      // Load array length to kArg1.
-     m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1), array_base_, len_offset_);
+     m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, false), array_base_, len_offset_);
      if (cu_->target64) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
-                                     new_index, m2l_->TargetReg(kArg1), true);
+                                     new_index, m2l_->TargetReg(kArg1, false), true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
-                                     new_index, m2l_->TargetReg(kArg1), true);
+                                     new_index, m2l_->TargetReg(kArg1, false), true);
      }
    }
@@ -1057,14 +1061,14 @@ void X86Mir2Lir::GenArrayBoundsCheck(int32_t index,
      GenerateTargetLabel(kPseudoThrowTarget);

      // Load array length to kArg1.
-     m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1), array_base_, len_offset_);
-     m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
+     m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, false), array_base_, len_offset_);
+     m2l_->LoadConstant(m2l_->TargetReg(kArg0, false), index_);
      if (cu_->target64) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
-                                     m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
+                                     m2l_->TargetReg(kArg0, false), m2l_->TargetReg(kArg1, false), true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
-                                     m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
+                                     m2l_->TargetReg(kArg0, false), m2l_->TargetReg(kArg1, false), true);
      }
    }
@@ -1406,7 +1410,7 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
    // RHS is in memory.
    DCHECK((rl_src.location == kLocDalvikFrame) ||
           (rl_src.location == kLocCompilerTemp));
-   int r_base = TargetReg(kSp).GetReg();
+   int r_base = rs_rX86_SP.GetReg();
    int displacement = SRegOffset(rl_src.s_reg_low);

    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1440,7 +1444,7 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instructi
    // Operate directly into memory.
    X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
-   int r_base = TargetReg(kSp).GetReg();
+   int r_base = rs_rX86_SP.GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2122,7 +2126,7 @@ bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
    if ((rl_dest.location == kLocDalvikFrame) ||
        (rl_dest.location == kLocCompilerTemp)) {
-     int r_base = TargetReg(kSp).GetReg();
+     int r_base = rs_rX86_SP.GetReg();
      int displacement = SRegOffset(rl_dest.s_reg_low);

      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2153,7 +2157,7 @@ bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
    // Can we just do this into memory?
    if ((rl_dest.location == kLocDalvikFrame) ||
        (rl_dest.location == kLocCompilerTemp)) {
-     int r_base = TargetReg(kSp).GetReg();
+     int r_base = rs_rX86_SP.GetReg();
      int displacement = SRegOffset(rl_dest.s_reg_low);

      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2271,7 +2275,8 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
  RegStorage result_reg = rl_result.reg;

  // For 32-bit, SETcc only works with EAX..EDX.
- if (result_reg == object.reg || !IsByteRegister(result_reg)) {
+ RegStorage object_32reg = object.reg.Is64Bit() ? As32BitReg(object.reg) : object.reg;
+ if (result_reg == object_32reg || !IsByteRegister(result_reg)) {
    result_reg = AllocateByteRegister();
  }
@@ -2337,8 +2342,10 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
  FlushAllRegs();
  // May generate a call - use explicit registers.
  LockCallTemps();
- LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 gets current Method*.
- RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*.
+ RegStorage method_reg = TargetRefReg(kArg1);  // kArg1 gets current Method*.
+ LoadCurrMethodDirect(method_reg);
+ RegStorage class_reg = TargetRefReg(kArg2);  // kArg2 will hold the Class*.
+ RegStorage ref_reg = TargetRefReg(kArg0);  // kArg0 will hold the ref.
  // Reference must end up in kArg0.
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
@@ -2350,16 +2357,16 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    }
-   OpRegCopy(class_reg, TargetReg(kRet0));
-   LoadValueDirectFixed(rl_src, TargetReg(kArg0));
+   OpRegCopy(class_reg, TargetRefReg(kRet0));
+   LoadValueDirectFixed(rl_src, ref_reg);
  } else if (use_declaring_class) {
-   LoadValueDirectFixed(rl_src, TargetReg(kArg0));
-   LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+   LoadValueDirectFixed(rl_src, ref_reg);
+   LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg, kNotVolatile);
  } else {
    // Load dex cache entry into class_reg (kArg2).
-   LoadValueDirectFixed(rl_src, TargetReg(kArg0));
-   LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+   LoadValueDirectFixed(rl_src, ref_reg);
+   LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg, kNotVolatile);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
@@ -2374,8 +2381,8 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
      } else {
        CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
      }
-     OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path.
-     LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* Reload Ref. */
+     OpRegCopy(class_reg, TargetRefReg(kRet0));  // Align usage with fast path.
+     LoadValueDirectFixed(rl_src, ref_reg);  /* Reload Ref. */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
@@ -2386,33 +2393,34 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
  // On x86-64 kArg0 is not EAX, so we have to copy ref from kArg0 to EAX.
  if (cu_->target64) {
-   OpRegCopy(rl_result.reg, TargetReg(kArg0));
+   OpRegCopy(rl_result.reg, ref_reg);
  }

  // For 32-bit, SETcc only works with EAX..EDX.
  DCHECK_LT(rl_result.reg.GetRegNum(), 4);

  // Is the class NULL?
- LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
+ LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);

+ RegStorage ref_class_reg = TargetRefReg(kArg1);  // kArg1 will hold the ref's Class*.
  /* Load object->klass_. */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
+ LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(), ref_class_reg,
              kNotVolatile);
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
  LIR* branchover = nullptr;
  if (type_known_final) {
    // Ensure top 3 bytes of result are 0.
    LoadConstant(rl_result.reg, 0);
-   OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));
+   OpRegReg(kOpCmp, ref_class_reg, class_reg);
    // Set the low byte of the result to 0 or 1 from the compare condition code.
    NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondEq);
  } else {
    if (!type_known_abstract) {
      LoadConstant(rl_result.reg, 1);  // Assume result succeeds.
-     branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
+     branchover = OpCmpBranch(kCondEq, ref_class_reg, class_reg, NULL);
    }
-   OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
+   OpRegCopy(TargetRefReg(kArg0), class_reg);
    if (cu_->target64) {
      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial));
    } else {
@@ -2552,7 +2560,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
  } else {
    if (shift_op) {
      // X86 doesn't require masking and must use ECX.
-     RegStorage t_reg = TargetReg(kCount);  // rCX
+     RegStorage t_reg = TargetReg(kCount, false);  // rCX
      LoadValueDirectFixed(rl_rhs, t_reg);
      if (is_two_addr) {
        // Can we do this directly into memory?
@@ -2740,7 +2748,7 @@ void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
  }

  // X86 doesn't require masking and must use ECX.
- RegStorage t_reg = TargetReg(kCount);  // rCX
+ RegStorage t_reg = TargetReg(kCount, false);  // rCX
  LoadValueDirectFixed(rl_shift, t_reg);
  if (is_two_addr) {
    // Can we do this directly into memory?
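One subtlety in the GenInstanceofFinal hunk above: 32-bit x86 SETcc can only write byte-addressable registers, and the object register may now arrive as a 64-bit view, so it is first narrowed to its 32-bit view before the aliasing test. A sketch of that guard (stand-in types; assumes registers 0..3 are the byte-addressable ones, as on 32-bit x86):

    struct Reg { int num; bool is64; };

    bool IsByteRegister(Reg r) { return r.num <= 3; }  // EAX..EBX class

    // Replace the result register when it aliases the (narrowed) object
    // register or cannot be written by SETcc.
    Reg PickSetccResult(Reg result, Reg object_32view, Reg (*alloc_byte_reg)()) {
      if (result.num == object_32view.num || !IsByteRegister(result)) {
        return alloc_byte_reg();
      }
      return result;
    }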
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 730a271737..f80e200bac 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -154,8 +154,7 @@ RegLocation X86Mir2Lir::LocCReturn() {
}

RegLocation X86Mir2Lir::LocCReturnRef() {
- // FIXME: return x86_loc_c_return_wide for x86_64 when wide refs supported.
- return x86_loc_c_return;
+ return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
@@ -170,8 +169,8 @@ RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

-// Return a target-dependent special register.
-RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+// Return a target-dependent special register for 32-bit.
+RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
@@ -204,6 +203,11 @@
  return res_reg;
}

+RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+ LOG(FATAL) << "Do not use this function!!!";
+ return RegStorage::InvalidReg();
+}
+
/*
 * Decode the register id.
 */
@@ -832,7 +836,7 @@ void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
-   int r_base = TargetReg(kSp).GetReg();
+   int r_base = rs_rX86_SP.GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -887,7 +891,7 @@ void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeT
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
- LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
+ LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
@@ -904,7 +908,7 @@ void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
- LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
+ LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
@@ -1746,29 +1750,22 @@ LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {

// ------------ ABI support: mapping of args to physical registers -------------
RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide) {
- const RegStorage coreArgMappingToPhysicalReg[] = {rs_rX86_ARG1, rs_rX86_ARG2, rs_rX86_ARG3, rs_rX86_ARG4, rs_rX86_ARG5};
- const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage);
- const RegStorage fpArgMappingToPhysicalReg[] = {rs_rX86_FARG0, rs_rX86_FARG1, rs_rX86_FARG2, rs_rX86_FARG3,
-                                                 rs_rX86_FARG4, rs_rX86_FARG5, rs_rX86_FARG6, rs_rX86_FARG7};
- const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage);
+ const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
+ const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
+ const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
+                                                            kFArg4, kFArg5, kFArg6, kFArg7};
+ const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);

- RegStorage result = RegStorage::InvalidReg();
  if (is_double_or_float) {
    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
-     result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
-     if (result.Valid()) {
-       result = is_wide ? RegStorage::FloatSolo64(result.GetReg()) : RegStorage::FloatSolo32(result.GetReg());
-     }
+     return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide);
    }
  } else {
    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-     result = coreArgMappingToPhysicalReg[cur_core_reg_++];
-     if (result.Valid()) {
-       result = is_wide ? RegStorage::Solo64(result.GetReg()) : RegStorage::Solo32(result.GetReg());
-     }
+     return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
    }
  }
- return result;
+ return RegStorage::InvalidReg();
}

RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
@@ -1806,7 +1803,7 @@ RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
-   InToRegStorageX86_64Mapper mapper;
+   InToRegStorageX86_64Mapper mapper(this);
    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
@@ -1847,13 +1844,13 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
- rl_src.reg = TargetReg(kArg0);
+ rl_src.reg = TargetRefReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
-   StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
+   StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetRefReg(kArg0)), kNotVolatile);
  }

  if (cu_->num_ins == 0) {
@@ -1890,9 +1887,9 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
      } else {
        // Needs flush.
        if (t_loc->ref) {
-         StoreRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kNotVolatile);
+         StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
        } else {
-         StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
+         StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
                        kNotVolatile);
        }
      }
@@ -1900,9 +1897,9 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
      // If arriving in frame & promoted.
      if (t_loc->location == kLocPhysReg) {
        if (t_loc->ref) {
-         LoadRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
+         LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
        } else {
-         LoadBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg,
+         LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
                       t_loc->wide ? k64 : k32, kNotVolatile);
        }
      }
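After the rewrite shown above, GetNextReg() no longer stores physical RegStorage values; it keeps symbolic roles and asks the backend to materialize the correctly sized view on demand, which is why the mapper now takes the Mir2Lir* in its constructor. A simplified model of the core-register path (stand-in types, illustrative numbering):

    #include <cstddef>

    enum SpecialTargetRegister { kArg1, kArg2, kArg3, kArg4, kArg5 };

    struct RegStorage { int raw; static RegStorage Invalid() { return {-1}; } };

    struct Backend {
      // Stand-in for Mir2Lir::TargetReg(symbolic, is_wide).
      RegStorage TargetReg(SpecialTargetRegister r, bool is_wide) {
        return {static_cast<int>(r) * 2 + (is_wide ? 1 : 0)};
      }
    };

    struct Mapper {
      Backend* ml_;
      size_t cur_core_reg_ = 0;
      RegStorage GetNextCoreReg(bool is_wide) {
        static const SpecialTargetRegister kCore[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
        if (cur_core_reg_ < sizeof(kCore) / sizeof(kCore[0])) {
          return ml_->TargetReg(kCore[cur_core_reg_++], is_wide);
        }
        return RegStorage::Invalid();  // caller passes this argument on the stack
      }
    };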
@@ -1974,7 +1971,7 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
  const int start_index = skip_this ? 1 : 0;

- InToRegStorageX86_64Mapper mapper;
+ InToRegStorageX86_64Mapper mapper(this);
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
@@ -1993,14 +1990,14 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
      loc = UpdateLocWide(loc);
      if (loc.location == kLocPhysReg) {
        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-       StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+       StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
      }
      next_arg += 2;
    } else {
      loc = UpdateLoc(loc);
      if (loc.location == kLocPhysReg) {
        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-       StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
+       StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
      }
      next_arg++;
    }
@@ -2057,23 +2054,23 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (src_is_16b_aligned) {
-         ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
+         ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
-         ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
-         ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
+         ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
+         ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
-         ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
+         ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
-         st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
+         st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
-         st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
-         st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
+         st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
+         st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
-         st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
+         st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
        }

        // TODO If we could keep track of aliasing information for memory accesses that are wider
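The 128-bit copy hunk above only swaps the base register, but the surrounding dispatch is worth restating: one aligned move when the 16-byte stack slot is aligned, a low/high pair at 8-byte alignment, and an unaligned move otherwise. As a compact helper (enum names reused from the diff, numbering illustrative):

    enum MoveKind { kMovA128FP, kMovLo128FP, kMovHi128FP, kMovU128FP };

    // Returns how many moves are needed and fills in which kinds:
    // movaps-style when 16B-aligned, a lo/hi pair at 8B, movups otherwise.
    int SelectCopyOps(bool aligned16, bool aligned8, MoveKind ops[2]) {
      if (aligned16) { ops[0] = kMovA128FP; return 1; }
      if (aligned8)  { ops[0] = kMovLo128FP; ops[1] = kMovHi128FP; return 2; }
      ops[0] = kMovU128FP; return 1;
    }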
@@ -2107,11 +2104,11 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
-       RegStorage temp = TargetReg(kArg3);
+       RegStorage temp = TargetReg(kArg3, false);

        // Now load the argument VR and store to the outs.
-       Load32Disp(TargetReg(kSp), current_src_offset, temp);
-       Store32Disp(TargetReg(kSp), current_dest_offset, temp);
+       Load32Disp(rs_rX86_SP, current_src_offset, temp);
+       Store32Disp(rs_rX86_SP, current_dest_offset, temp);
      }
      current_src_offset += bytes_to_move;
@@ -2123,8 +2120,8 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
  // Now handle rest not registers if they are
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
-   RegStorage regSingle = TargetReg(kArg2);
-   RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
+   RegStorage regSingle = TargetReg(kArg2, false);
+   RegStorage regWide = TargetReg(kArg3, true);
    for (int i = start_index;
         i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
@@ -2137,17 +2134,17 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (rl_arg.wide) {
          if (rl_arg.location == kLocPhysReg) {
-           StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
+           StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
          } else {
            LoadValueDirectWideFixed(rl_arg, regWide);
-           StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
+           StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
          }
        } else {
          if (rl_arg.location == kLocPhysReg) {
-           StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
+           StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
          } else {
            LoadValueDirectFixed(rl_arg, regSingle);
-           StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32, kNotVolatile);
+           StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
          }
        }
      }
@@ -2183,13 +2180,13 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                     direct_code, direct_method, type);
  if (pcrLabel) {
    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
-     *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
+     *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
-     Load32Disp(TargetReg(kArg1), 0, tmp);
+     Load32Disp(TargetRefReg(kArg1), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 7ff4f725d1..a52e842baf 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -353,6 +353,12 @@ const RegLocation x86_loc_c_return
const RegLocation x86_loc_c_return_wide
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
     RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
+const RegLocation x86_loc_c_return_ref
+    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
+     RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
+const RegLocation x86_64_loc_c_return_ref
+    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
+     RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_64_loc_c_return_wide
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
     RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};
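Reading the two new return-location literals: assuming RegLocation's positional fields follow its declaration order at this revision (location, wide, defined, is_const, fp, core, ref, high_word, home, reg, s_reg_low, orig_sreg), the 64-bit variant annotates as:

    const RegLocation x86_64_loc_c_return_ref
        {kLocPhysReg,   // location: value lives in a physical register
         0,             // wide: a reference occupies a single virtual register
         0, 0, 0, 0,    // defined, is_const, fp, core
         1,             // ref: marks the value as a heap reference for GC
         0, 1,          // high_word, home
         RegStorage(RegStorage::k64BitSolo, rAX),  // 64-bit RAX view
         INVALID_SREG, INVALID_SREG};

The 32-bit variant differs only in handing back the k32BitSolo EAX view, which is what lets LocCReturnRef() above choose between them with a single target64 test.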