Diffstat (limited to 'compiler')
27 files changed, 378 insertions, 390 deletions
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h index 6f4fa3ab50..ba4b5c356a 100644 --- a/compiler/dex/compiler_enums.h +++ b/compiler/dex/compiler_enums.h @@ -159,8 +159,10 @@ enum AssemblerStatus { }; enum OpSize { - kWord, - kLong, + kWord, // Natural word size of target (32/64). + k32, + k64, + kReference, // Object reference; compressed on 64-bit targets. kSingle, kDouble, kUnsignedHalf, diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 51419f4586..937e2585ef 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -42,6 +42,11 @@ void MIRGraph::DoConstantPropagation(BasicBlock* bb) { MIR* mir; for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) { + // Skip pass if BB has MIR without SSA representation. + if (mir->ssa_rep == NULL) { + return; + } + uint64_t df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode]; DecodedInstruction *d_insn = &mir->dalvikInsn; diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index d0d0e6b3a7..b374ed861e 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -128,7 +128,7 @@ void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, // Load the displacement from the switch table RegStorage disp_reg = AllocTemp(); - LoadBaseIndexed(table_base, keyReg, disp_reg, 2, kWord); + LoadBaseIndexed(table_base, keyReg, disp_reg, 2, k32); // ..and go! NOTE: No instruction set switch here - must stay Thumb2 LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg.GetReg()); @@ -180,6 +180,7 @@ void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { */ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { FlushAllRegs(); + // FIXME: need separate LoadValues for object references. LoadValueDirectFixed(rl_src, rs_r0); // Get obj LockCallTemps(); // Prepare for explicit register usage constexpr bool kArchVariantHasGoodBranchPredictor = false; // TODO: true if cortex-A15. @@ -193,7 +194,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL); } } - LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2); + Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2); NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2); MarkPossibleNullPointerException(opt_flags); LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r1, 0, NULL); @@ -219,7 +220,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { } else { // Explicit null-check as slow-path is entered using an IT. GenNullCheck(rs_r0, opt_flags); - LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2); + Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2); NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2); MarkPossibleNullPointerException(opt_flags); OpRegImm(kOpCmp, rs_r1, 0); @@ -248,7 +249,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { LoadValueDirectFixed(rl_src, rs_r0); // Get obj LockCallTemps(); // Prepare for explicit register usage LIR* null_check_branch = nullptr; - LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2); + Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2); constexpr bool kArchVariantHasGoodBranchPredictor = false; // TODO: true if cortex-A15. 
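The Load32Disp/Store32Disp calls replacing LoadWordDisp/StoreWordDisp throughout this change read as thin wrappers that pin the operand size explicitly instead of inheriting the target's natural word size. A minimal sketch of what such wrappers would look like (names and exact signatures are assumptions, modeled on the LoadBaseDisp/StoreBaseDisp calls visible in this diff):

    // Sketch: a 32-bit access is now spelled k32 on every target.
    LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
      return LoadBaseDisp(r_base, displacement, r_dest, k32, INVALID_SREG);
    }
    LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
      return StoreBaseDisp(r_base, displacement, r_src, k32);
    }
    // References get their own entry point so a 64-bit backend can later
    // switch to compressed references without touching any call site.
    LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
      return LoadBaseDisp(r_base, displacement, r_dest, kReference, INVALID_SREG);
    }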
if (kArchVariantHasGoodBranchPredictor) { if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) { @@ -259,11 +260,11 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL); } } - LoadWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1); + Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1); MarkPossibleNullPointerException(opt_flags); LoadConstantNoClobber(rs_r3, 0); LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r1, rs_r2, NULL); - StoreWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3); + Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3); LIR* unlock_success_branch = OpUnconditionalBranch(NULL); LIR* slow_path_target = NewLIR0(kPseudoTargetLabel); @@ -284,14 +285,14 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { } else { // Explicit null-check as slow-path is entered using an IT. GenNullCheck(rs_r0, opt_flags); - LoadWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1); // Get lock + Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1); // Get lock MarkPossibleNullPointerException(opt_flags); - LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2); + Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2); LoadConstantNoClobber(rs_r3, 0); // Is lock unheld on lock or held by us (==thread_id) on unlock? OpRegReg(kOpCmp, rs_r1, rs_r2); LIR* it = OpIT(kCondEq, "EE"); - StoreWordDisp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3); + Store32Disp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3); // Go expensive route - UnlockObjectFromCode(obj); LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(), rs_rARM_LR); @@ -307,9 +308,9 @@ void ArmMir2Lir::GenMoveException(RegLocation rl_dest) { int ex_offset = Thread::ExceptionOffset<4>().Int32Value(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); RegStorage reset_reg = AllocTemp(); - LoadWordDisp(rs_rARM_SELF, ex_offset, rl_result.reg); + Load32Disp(rs_rARM_SELF, ex_offset, rl_result.reg); LoadConstant(reset_reg, 0); - StoreWordDisp(rs_rARM_SELF, ex_offset, reset_reg); + Store32Disp(rs_rARM_SELF, ex_offset, reset_reg); FreeTemp(reset_reg); StoreValue(rl_dest, rl_result); } @@ -354,7 +355,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { if (!skip_overflow_check) { if (Runtime::Current()->ExplicitStackOverflowChecks()) { /* Load stack limit */ - LoadWordDisp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12); + Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12); } } /* Spill core callee saves */ @@ -391,6 +392,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow); // Load the entrypoint directly into the pc instead of doing a load + branch. Assumes // codegen and target are in thumb2 mode. + // NOTE: native pointer. m2l_->LoadWordDisp(rs_rARM_SELF, func_offset.Int32Value(), rs_rARM_PC); } @@ -421,7 +423,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { // a sub instruction. Otherwise we will get a temp allocation and the // code size will increase. 
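The monitor-exit fast path above reduces to comparing the object's lock word against the current thread's thin-lock id and, on a match, storing zero. A standalone model of that logic (hypothetical; a real thin lock word also carries state and count bits, and the memory barriers are elided):

    #include <cstdint>
    // Returns true if the thin lock was released inline; false means the
    // generated code must take the pUnlockObject slow path instead.
    static bool ThinUnlockFastPath(uint32_t* monitor_word, uint32_t thread_id) {
      if (*monitor_word != thread_id) {
        return false;  // inflated, or held by another thread
      }
      *monitor_word = 0;  // matches the conditional Store32Disp of rs_r3 (zero)
      return true;
    }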
OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, Thread::kStackOverflowReservedBytes); - LoadWordDisp(rs_r12, 0, rs_r12); + Load32Disp(rs_r12, 0, rs_r12); MarkPossibleStackOverflowException(); OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills); } diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h index b0bc11d458..a89b307d60 100644 --- a/compiler/dex/quick/arm/codegen_arm.h +++ b/compiler/dex/quick/arm/codegen_arm.h @@ -126,8 +126,6 @@ class ArmMir2Lir FINAL : public Mir2Lir { RegLocation rl_src2); void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2); - LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base, int offset, - ThrowKind kind); RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div); RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div); void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2); diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index d5b34a5c39..f47e693e4d 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -314,11 +314,11 @@ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_va /* * A common use of OpCmpImmBranch is for null checks, and using the Thumb 16-bit * compare-and-branch if zero is ideal if it will reach. However, because null checks - * branch forward to a launch pad, they will frequently not reach - and thus have to + * branch forward to a slow path, they will frequently not reach - and thus have to * be converted to a long form during assembly (which will trigger another assembly * pass). Here we estimate the branch distance for checks, and if large directly * generate the long form in an attempt to avoid an extra assembly pass. - * TODO: consider interspersing launchpads in code following unconditional branches. + * TODO: consider interspersing slowpaths in code following unconditional branches. */ bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget)); skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64); @@ -608,12 +608,6 @@ bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) return true; } -LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base, - int offset, ThrowKind kind) { - LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm"; - return NULL; -} - RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_div, bool check_zero) { LOG(FATAL) << "Unexpected use of GenDivRem for Arm"; @@ -684,18 +678,18 @@ bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) { RegLocation rl_dest = InlineTarget(info); RegLocation rl_address = LoadValue(rl_src_address, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - if (size == kLong) { + if (size == k64) { // Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0. 
if (rl_address.reg.GetReg() != rl_result.reg.GetLowReg()) { - LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow()); - LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh()); + Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow()); + Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh()); } else { - LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh()); - LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow()); + Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh()); + Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow()); } StoreValueWide(rl_dest, rl_result); } else { - DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord); + DCHECK(size == kSignedByte || size == kSignedHalf || size == k32); // Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0. LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG); StoreValue(rl_dest, rl_result); @@ -708,13 +702,13 @@ bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) { rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1] RegLocation rl_src_value = info->args[2]; // [size] value RegLocation rl_address = LoadValue(rl_src_address, kCoreReg); - if (size == kLong) { + if (size == k64) { // Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0. RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg); - StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), kWord); - StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), kWord); + StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32); + StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32); } else { - DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord); + DCHECK(size == kSignedByte || size == kSignedHalf || size == k32); // Unaligned store with STR and STRH is allowed on ARMv7 with SCTLR.A set to 0. RegLocation rl_value = LoadValue(rl_src_value, kCoreReg); StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size); @@ -1148,7 +1142,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, if (needs_range_check) { reg_len = AllocTemp(); /* Get len */ - LoadWordDisp(rl_array.reg, len_offset, reg_len); + Load32Disp(rl_array.reg, len_offset, reg_len); MarkPossibleNullPointerException(opt_flags); } else { ForceImplicitNullCheck(rl_array.reg, opt_flags); @@ -1217,7 +1211,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, bool constant_index = rl_index.is_const; int data_offset; - if (size == kLong || size == kDouble) { + if (size == k64 || size == kDouble) { data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value(); } else { data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value(); } @@ -1254,7 +1248,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, reg_len = AllocTemp(); // NOTE: max live temps(4) here.
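One subtlety in the GenInlinedPeek hunk above: the two 32-bit loads that fake an unaligned LDRD are ordered so the address register is never clobbered before its last use; when the address aliases the low half of the result pair, the high word is loaded first. A standalone illustration of the value the pair computes (assumes ARMv7 with SCTLR.A cleared, where unaligned LDR is legal, and a little-endian target):

    #include <cstdint>
    #include <cstring>
    static uint64_t Peek64(uintptr_t addr) {
      uint32_t lo, hi;
      std::memcpy(&lo, reinterpret_cast<const void*>(addr), sizeof(lo));      // Load32Disp(addr, 0, lo)
      std::memcpy(&hi, reinterpret_cast<const void*>(addr + 4), sizeof(hi));  // Load32Disp(addr, 4, hi)
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }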
/* Get len */ - LoadWordDisp(rl_array.reg, len_offset, reg_len); + Load32Disp(rl_array.reg, len_offset, reg_len); MarkPossibleNullPointerException(opt_flags); } else { ForceImplicitNullCheck(rl_array.reg, opt_flags); diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc index 1053a8fc41..305e89ba92 100644 --- a/compiler/dex/quick/arm/target_arm.cc +++ b/compiler/dex/quick/arm/target_arm.cc @@ -656,7 +656,7 @@ void ArmMir2Lir::FlushReg(RegStorage reg) { if (info->live && info->dirty) { info->dirty = false; int v_reg = mir_graph_->SRegToVReg(info->s_reg); - StoreBaseDisp(rs_rARM_SP, VRegOffset(v_reg), reg, kWord); + StoreBaseDisp(rs_rARM_SP, VRegOffset(v_reg), reg, k32); } } @@ -738,8 +738,8 @@ RegStorage ArmMir2Lir::LoadHelper(ThreadOffset<4> offset) { LIR* ArmMir2Lir::CheckSuspendUsingLoad() { RegStorage tmp = rs_r0; - LoadWordDisp(rs_rARM_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp); - LIR* load2 = LoadWordDisp(tmp, 0, tmp); + Load32Disp(rs_rARM_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp); + LIR* load2 = Load32Disp(tmp, 0, tmp); return load2; } diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc index 70cbdd2e31..6879ffc08a 100644 --- a/compiler/dex/quick/arm/utility_arm.cc +++ b/compiler/dex/quick/arm/utility_arm.cc @@ -699,23 +699,24 @@ LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora if (ARM_FPREG(r_dest.GetReg())) { if (ARM_SINGLEREG(r_dest.GetReg())) { - DCHECK((size == kWord) || (size == kSingle)); + DCHECK((size == k32) || (size == kSingle)); opcode = kThumb2Vldrs; size = kSingle; } else { DCHECK(ARM_DOUBLEREG(r_dest.GetReg())); - DCHECK((size == kLong) || (size == kDouble)); + DCHECK((size == k64) || (size == kDouble)); DCHECK_EQ((r_dest.GetReg() & 0x1), 0); opcode = kThumb2Vldrd; size = kDouble; } } else { if (size == kSingle) - size = kWord; + size = k32; } switch (size) { case kDouble: // fall-through + // Intentional fall-through. case kSingle: reg_ptr = AllocTemp(); if (scale) { @@ -727,7 +728,9 @@ LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora load = NewLIR3(opcode, r_dest.GetReg(), reg_ptr.GetReg(), 0); FreeTemp(reg_ptr); return load; - case kWord: + case k32: + // Intentional fall-through. + case kReference: opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR; break; case kUnsignedHalf: @@ -764,23 +767,24 @@ LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor if (ARM_FPREG(r_src.GetReg())) { if (ARM_SINGLEREG(r_src.GetReg())) { - DCHECK((size == kWord) || (size == kSingle)); + DCHECK((size == k32) || (size == kSingle)); opcode = kThumb2Vstrs; size = kSingle; } else { DCHECK(ARM_DOUBLEREG(r_src.GetReg())); - DCHECK((size == kLong) || (size == kDouble)); + DCHECK((size == k64) || (size == kDouble)); DCHECK_EQ((r_src.GetReg() & 0x1), 0); opcode = kThumb2Vstrd; size = kDouble; } } else { if (size == kSingle) - size = kWord; + size = k32; } switch (size) { case kDouble: // fall-through + // Intentional fall-through. case kSingle: reg_ptr = AllocTemp(); if (scale) { @@ -792,14 +796,18 @@ LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor store = NewLIR3(opcode, r_src.GetReg(), reg_ptr.GetReg(), 0); FreeTemp(reg_ptr); return store; - case kWord: + case k32: + // Intentional fall-through. + case kReference: opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR; break; case kUnsignedHalf: + // Intentional fall-through.
case kSignedHalf: opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR; break; case kUnsignedByte: + // Intentional fall-through. case kSignedByte: opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR; break; @@ -832,7 +840,8 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag bool null_pointer_safepoint = false; switch (size) { case kDouble: - case kLong: + // Intentional fall-through. + case k64: if (ARM_FPREG(dest_low_reg)) { // Note: following change to avoid using pairs for doubles, replace conversion w/ DCHECK. if (r_dest.IsPair()) { @@ -849,15 +858,18 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_base.GetReg(), displacement >> 2); } else { - load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), kWord, s_reg); + load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), k32, s_reg); null_pointer_safepoint = true; - LoadBaseDispBody(r_base, displacement + 4, r_dest.GetHigh(), kWord, INVALID_SREG); + LoadBaseDispBody(r_base, displacement + 4, r_dest.GetHigh(), k32, INVALID_SREG); } already_generated = true; } break; case kSingle: - case kWord: + // Intentional fall-through. + case k32: + // Intentional fall-through. + case kReference: if (ARM_FPREG(r_dest.GetReg())) { opcode = kThumb2Vldrs; if (displacement <= 1020) { @@ -953,13 +965,17 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, int s_reg) { - DCHECK(!((size == kLong) || (size == kDouble))); + DCHECK(!((size == k64) || (size == kDouble))); + // TODO: base this on target. + if (size == kWord) { + size = k32; + } return LoadBaseDispBody(r_base, displacement, r_dest, size, s_reg); } LIR* ArmMir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg) { - return LoadBaseDispBody(r_base, displacement, r_dest, kLong, s_reg); + return LoadBaseDispBody(r_base, displacement, r_dest, k64, s_reg); } @@ -975,16 +991,16 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora int src_low_reg = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg(); bool null_pointer_safepoint = false; switch (size) { - case kLong: + case k64: case kDouble: if (!ARM_FPREG(src_low_reg)) { if (displacement <= 1020) { store = NewLIR4(kThumb2StrdI8, r_src.GetLowReg(), r_src.GetHighReg(), r_base.GetReg(), displacement >> 2); } else { - store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), kWord); + store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), k32); null_pointer_safepoint = true; - StoreBaseDispBody(r_base, displacement + 4, r_src.GetHigh(), kWord); + StoreBaseDispBody(r_base, displacement + 4, r_src.GetHigh(), k32); } already_generated = true; } else { @@ -1001,7 +1017,8 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora } break; case kSingle: - case kWord: + case k32: + case kReference: if (ARM_FPREG(r_src.GetReg())) { DCHECK(ARM_SINGLEREG(r_src.GetReg())); opcode = kThumb2Vstrs; @@ -1082,12 +1099,16 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size) { - DCHECK(!((size == kLong) || (size == kDouble))); + // TODO: base this on target.
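The TODO here, and the identical one added to LoadBaseDisp above, presumably anticipates 64-bit backends: kWord should resolve to whatever the target's natural word is, rather than the hard-coded k32 in the lines that follow. A hypothetical sketch of that resolution (helper name and shape are assumptions):

    static OpSize ResolveWordSize(OpSize size, size_t target_pointer_size) {
      if (size == kWord) {
        return (target_pointer_size == 8) ? k64 : k32;  // natural word of the target
      }
      return size;
    }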
+ if (size == kWord) { + size = k32; + } + DCHECK(!((size == k64) || (size == kDouble))); return StoreBaseDispBody(r_base, displacement, r_src, size); } LIR* ArmMir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) { - return StoreBaseDispBody(r_base, displacement, r_src, kLong); + return StoreBaseDispBody(r_base, displacement, r_src, k64); } LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) { diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 6e6b8f0a30..677ee15462 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -926,7 +926,6 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena mir_graph_(mir_graph), switch_tables_(arena, 4, kGrowableArraySwitchTables), fill_array_data_(arena, 4, kGrowableArrayFillArrayData), - throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads), suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads), tempreg_info_(arena, 20, kGrowableArrayMisc), reginfo_map_(arena, 64, kGrowableArrayMisc), @@ -1118,7 +1117,7 @@ bool Mir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) { LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, int offset, int check_value, LIR* target) { // Handle this for architectures that can't compare to memory. - LoadWordDisp(base_reg, offset, temp_reg); + Load32Disp(base_reg, offset, temp_reg); LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target); return branch; } diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc index 8806e68b93..3ec31ba7d9 100644 --- a/compiler/dex/quick/dex_file_method_inliner.cc +++ b/compiler/dex/quick/dex_file_method_inliner.cc @@ -212,8 +212,8 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods INTRINSIC(JavaLangFloat, FloatToRawIntBits, F_I, kIntrinsicFloatCvt, 0), INTRINSIC(JavaLangFloat, IntBitsToFloat, I_F, kIntrinsicFloatCvt, 0), - INTRINSIC(JavaLangInteger, ReverseBytes, I_I, kIntrinsicReverseBytes, kWord), - INTRINSIC(JavaLangLong, ReverseBytes, J_J, kIntrinsicReverseBytes, kLong), + INTRINSIC(JavaLangInteger, ReverseBytes, I_I, kIntrinsicReverseBytes, k32), + INTRINSIC(JavaLangLong, ReverseBytes, J_J, kIntrinsicReverseBytes, k64), INTRINSIC(JavaLangShort, ReverseBytes, S_S, kIntrinsicReverseBytes, kSignedHalf), INTRINSIC(JavaLangMath, Abs, I_I, kIntrinsicAbsInt, 0), @@ -241,12 +241,12 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods INTRINSIC(JavaLangThread, CurrentThread, _Thread, kIntrinsicCurrentThread, 0), INTRINSIC(LibcoreIoMemory, PeekByte, J_B, kIntrinsicPeek, kSignedByte), - INTRINSIC(LibcoreIoMemory, PeekIntNative, J_I, kIntrinsicPeek, kWord), - INTRINSIC(LibcoreIoMemory, PeekLongNative, J_J, kIntrinsicPeek, kLong), + INTRINSIC(LibcoreIoMemory, PeekIntNative, J_I, kIntrinsicPeek, k32), + INTRINSIC(LibcoreIoMemory, PeekLongNative, J_J, kIntrinsicPeek, k64), INTRINSIC(LibcoreIoMemory, PeekShortNative, J_S, kIntrinsicPeek, kSignedHalf), INTRINSIC(LibcoreIoMemory, PokeByte, JB_V, kIntrinsicPoke, kSignedByte), - INTRINSIC(LibcoreIoMemory, PokeIntNative, JI_V, kIntrinsicPoke, kWord), - INTRINSIC(LibcoreIoMemory, PokeLongNative, JJ_V, kIntrinsicPoke, kLong), + INTRINSIC(LibcoreIoMemory, PokeIntNative, JI_V, kIntrinsicPoke, k32), + INTRINSIC(LibcoreIoMemory, PokeLongNative, JJ_V, kIntrinsicPoke, k64), INTRINSIC(LibcoreIoMemory, PokeShortNative, JS_V, kIntrinsicPoke, 
kSignedHalf), INTRINSIC(SunMiscUnsafe, CompareAndSwapInt, ObjectJII_Z, kIntrinsicCas, diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 6781a9bed4..313174d218 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -19,12 +19,17 @@ #include "dex/quick/mir_to_lir-inl.h" #include "entrypoints/quick/quick_entrypoints.h" #include "mirror/array.h" +#include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "verifier/method_verifier.h" #include <functional> namespace art { +// Shortcuts to repeatedly used long types. +typedef mirror::ObjectArray<mirror::Object> ObjArray; +typedef mirror::ObjectArray<mirror::Class> ClassArray; + /* * This source file contains "gen" codegen routines that should * be applicable to most targets. Only mid-level support utilities @@ -42,22 +47,6 @@ void Mir2Lir::GenBarrier() { barrier->u.m.def_mask = ENCODE_ALL; } -LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind) { - LIR* tgt; - LIR* branch; - if (c_code == kCondAl) { - tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, RegStorage::kInvalidRegVal, - imm_val); - branch = OpUnconditionalBranch(tgt); - } else { - tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg.GetReg(), imm_val); - branch = OpCmpImmBranch(c_code, reg, imm_val, tgt); - } - // Remember branch target - will process later - throw_launchpads_.Insert(tgt); - return branch; -} - void Mir2Lir::GenDivZeroException() { LIR* branch = OpUnconditionalBranch(nullptr); AddDivZeroCheckSlowPath(branch); @@ -204,23 +193,12 @@ void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) { // register with offset 0. This will cause a signal if the register contains 0 (null). RegStorage tmp = AllocTemp(); // TODO: for Mips, would be best to use rZERO as the bogus register target. - LIR* load = LoadWordDisp(reg, 0, tmp); + LIR* load = Load32Disp(reg, 0, tmp); FreeTemp(tmp); MarkSafepointPC(load); } } -/* Perform check on two registers */ -LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2, - ThrowKind kind) { - LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1.GetReg(), - reg2.GetReg()); - LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt); - // Remember branch target - will process later - throw_launchpads_.Insert(tgt); - return branch; -} - void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2, LIR* taken, LIR* fall_through) { @@ -426,7 +404,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { for (int i = 0; i < elems; i++) { RegLocation loc = UpdateLoc(info->args[i]); if (loc.location == kLocPhysReg) { - StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord); + Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg); } } /* @@ -463,8 +441,8 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { // Generate the copy loop.
Going backwards for convenience LIR* target = NewLIR0(kPseudoTargetLabel); // Copy next element - LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord); - StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord); + LoadBaseIndexed(r_src, r_idx, r_val, 2, k32); + StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32); FreeTemp(r_val); OpDecAndBranch(kCondGe, r_idx, target); if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) { @@ -476,9 +454,8 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { // TUNING: interleave for (int i = 0; i < elems; i++) { RegLocation rl_arg = LoadValue(info->args[i], kCoreReg); - StoreBaseDisp(TargetReg(kRet0), - mirror::Array::DataOffset(component_size).Int32Value() + i * 4, - rl_arg.reg, kWord); + Store32Disp(TargetReg(kRet0), + mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg); // If the LoadValue caused a temp to be allocated, free it if (IsTemp(rl_arg.reg)) { FreeTemp(rl_arg.reg); @@ -529,7 +506,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, // Fast path, static storage base is this method's class RegLocation rl_method = LoadCurrMethod(); r_base = AllocTemp(); - LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base); + LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base); if (IsTemp(rl_method.reg)) { FreeTemp(rl_method.reg); } @@ -546,9 +523,9 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, LoadCurrMethodDirect(r_method); r_base = TargetReg(kArg0); LockTemp(r_base); - LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base); - LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + - sizeof(int32_t*) * field_info.StorageIndex(), r_base); + LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base); + int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value(); + LoadRefDisp(r_base, offset_of_field, r_base); // r_base now points at static storage (Class*) or NULL if the type is not yet resolved. if (!field_info.IsInitialized() && (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) { @@ -588,8 +565,10 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, } if (is_long_or_double) { StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg); + } else if (rl_src.ref) { + StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg); } else { - StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg); + Store32Disp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg); } if (field_info.IsVolatile()) { // A load might follow the volatile store so insert a StoreLoad barrier. 
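The move from the hand-computed offset (DataOffset plus sizeof(int32_t*) times the index) to ObjArray::OffsetOfElement in the GenSput hunk above is more than cosmetic: sizeof on a C++ pointer type tracks the compiler's pointer width, while elements of a managed ObjectArray remain 32-bit references that may be compressed on 64-bit targets. A sketch of the distinction:

    #include <cstdint>
    // The stride must come from the managed component size, never from a
    // host or target C++ pointer type (sizeof(int32_t*) is 8 under LP64,
    // but the array packs 4-byte references).
    static int32_t ElementOffset(int32_t data_offset, int32_t component_size, int32_t index) {
      return data_offset + component_size * index;
    }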
@@ -620,7 +599,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, // Fast path, static storage base is this method's class RegLocation rl_method = LoadCurrMethod(); r_base = AllocTemp(); - LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base); + LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base); } else { // Medium path, static storage base in a different class which requires checks that the other // class is initialized @@ -633,9 +612,9 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, LoadCurrMethodDirect(r_method); r_base = TargetReg(kArg0); LockTemp(r_base); - LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base); - LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + - sizeof(int32_t*) * field_info.StorageIndex(), r_base); + LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base); + int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value(); + LoadRefDisp(r_base, offset_of_field, r_base); // r_base now points at static storage (Class*) or NULL if the type is not yet resolved. if (!field_info.IsInitialized() && (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) { @@ -668,8 +647,10 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, if (is_long_or_double) { LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG); + } else if (rl_result.ref) { + LoadRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg); } else { - LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg); + Load32Disp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg); } FreeTemp(r_base); @@ -728,31 +709,6 @@ void Mir2Lir::HandleSuspendLaunchPads() { } } -void Mir2Lir::HandleThrowLaunchPads() { - int num_elems = throw_launchpads_.Size(); - for (int i = 0; i < num_elems; i++) { - ResetRegPool(); - ResetDefTracking(); - LIR* lab = throw_launchpads_.Get(i); - current_dalvik_offset_ = lab->operands[1]; - AppendLIR(lab); - ThreadOffset<4> func_offset(-1); - int v1 = lab->operands[2]; - switch (lab->operands[0]) { - case kThrowNoSuchMethod: - OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1)); - func_offset = - QUICK_ENTRYPOINT_OFFSET(4, pThrowNoSuchMethod); - break; - default: - LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0]; - } - ClobberCallerSave(); - RegStorage r_tgt = CallHelperSetup(func_offset); - CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */, true /* UseLink */); - } -} - void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object) { @@ -800,7 +756,7 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, } else { rl_result = EvalLoc(rl_dest, reg_class, true); GenNullCheck(rl_obj.reg, opt_flags); - LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, kWord, + LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, k32, rl_obj.s_reg_low); MarkPossibleNullPointerException(opt_flags); if (field_info.IsVolatile()) { @@ -864,7 +820,7 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size, // There might have been a store before this volatile one so insert StoreStore barrier. 
GenMemBarrier(kStoreStore); } - StoreBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg, kWord); + Store32Disp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg); MarkPossibleNullPointerException(opt_flags); if (field_info.IsVolatile()) { // A load might follow the volatile store so insert a StoreLoad barrier. @@ -913,11 +869,9 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { // We don't need access checks, load type from dex cache int32_t dex_cache_offset = mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(); - LoadWordDisp(rl_method.reg, dex_cache_offset, res_reg); - int32_t offset_of_type = - mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*) - * type_idx); - LoadWordDisp(res_reg, offset_of_type, rl_result.reg); + Load32Disp(rl_method.reg, dex_cache_offset, res_reg); + int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); + Load32Disp(res_reg, offset_of_type, rl_result.reg); if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx) || SLOW_TYPE_PATH) { // Slow path, at runtime test if type is null and if so initialize @@ -963,8 +917,8 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { /* NOTE: Most strings should be available at compile time */ - int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() + - (sizeof(mirror::String*) * string_idx); + int32_t offset_of_string = mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx). + Int32Value(); if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache( *cu_->dex_file, string_idx) || SLOW_STRING_PATH) { // slow path, resolve string if not in dex cache @@ -982,11 +936,11 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { r_method = TargetReg(kArg2); LoadCurrMethodDirect(r_method); } - LoadWordDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), - TargetReg(kArg0)); + LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), + TargetReg(kArg0)); // Might call out to helper, which will return resolved string in kRet0 - LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0)); + Load32Disp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0)); if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) { // OpRegImm(kOpCmp, TargetReg(kRet0), 0); // Is resolved?
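GenConstClass and GenConstString above share one shape: an optimistic load from the dex cache, followed by a runtime null test only when the compiler driver cannot prove the entry is already resolved. A standalone model of that control flow (the resolve callback stands in for the pInitializeType/pResolveString entrypoints):

    template <typename T>
    static T* GetCachedOrResolve(T* const* dex_cache, uint32_t idx,
                                 T* (*resolve)(uint32_t)) {
      T* entry = dex_cache[idx];  // fast path: the Load32Disp of the cache element
      if (entry == nullptr) {     // test only emitted when resolution is not provable
        entry = resolve(idx);     // slow path: runtime call, result arrives in kRet0
      }
      return entry;
    }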
@@ -1036,8 +990,8 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { RegLocation rl_method = LoadCurrMethod(); RegStorage res_reg = AllocTemp(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg); - LoadWordDisp(res_reg, offset_of_string, rl_result.reg); + LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg); + Load32Disp(res_reg, offset_of_string, rl_result.reg); StoreValue(rl_dest, rl_result); } } @@ -1122,19 +1076,18 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re LoadCurrMethodDirect(check_class); if (use_declaring_class) { - LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class); - LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class); + LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class); + LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class); } else { - LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), - check_class); - LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class); - int32_t offset_of_type = - mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + - (sizeof(mirror::Class*) * type_idx); - LoadWordDisp(check_class, offset_of_type, check_class); + LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + check_class); + LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class); + int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); + LoadRefDisp(check_class, offset_of_type, check_class); } LIR* ne_branchover = NULL; + // FIXME: what should we be comparing here? compressed or decompressed references? if (cu_->instruction_set == kThumb2) { OpRegReg(kOpCmp, check_class, object_class); // Same? 
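The reason a single compare suffices in GenInstanceofFinal, continued by the OpIT below: a final class has no subclasses, so instanceof degenerates to klass pointer equality with no walk up the superclass chain. As a standalone model (and subject to the FIXME above, since both sides must use the same representation, compressed or not):

    static bool InstanceOfFinalClass(const void* object_klass, const void* final_klass) {
      return object_klass == final_klass;  // no subtype walk needed for a final class
    }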
LIR* it = OpIT(kCondEq, ""); // if-convert the test @@ -1180,17 +1133,15 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref } else if (use_declaring_class) { LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref - LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(), + LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg); } else { // Load dex cache entry into class_reg (kArg2) LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref - LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), - class_reg); - int32_t offset_of_type = - mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*) - * type_idx); - LoadWordDisp(class_reg, offset_of_type, class_reg); + LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + class_reg); + int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); + LoadRefDisp(class_reg, offset_of_type, class_reg); if (!can_assume_type_is_in_dex_cache) { // Need to test presence of type in dex cache at runtime LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL); @@ -1214,7 +1165,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know /* load object->klass_ */ DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); - LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1)); + LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1)); /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */ LIR* branchover = NULL; if (type_known_final) { @@ -1317,16 +1268,14 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ type_idx, TargetReg(kArg1), true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path } else if (use_declaring_class) { - LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(), - class_reg); + LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(), + class_reg); } else { // Load dex cache entry into class_reg (kArg2) - LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), - class_reg); - int32_t offset_of_type = - mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + - (sizeof(mirror::Class*) * type_idx); - LoadWordDisp(class_reg, offset_of_type, class_reg); + LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + class_reg); + int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); + LoadRefDisp(class_reg, offset_of_type, class_reg); if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) { // Need to test presence of type in dex cache at runtime LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL); @@ -1374,8 +1323,8 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ GenerateTargetLabel(); if (load_) { - m2l_->LoadWordDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), - m2l_->TargetReg(kArg1)); + m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), + m2l_->TargetReg(kArg1)); } m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetReg(kArg2), m2l_->TargetReg(kArg1), 
true); @@ -1401,7 +1350,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL); /* load object->klass_ */ DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); - LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1)); + LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1)); LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL); LIR* cont = NewLIR0(kPseudoTargetLabel); diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 758096b954..53b6ed420e 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -34,10 +34,10 @@ namespace art { * and "op" calls may be used here. */ -void Mir2Lir::AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume) { - class IntrinsicLaunchpadPath : public Mir2Lir::LIRSlowPath { +void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) { + class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath { public: - IntrinsicLaunchpadPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr) + IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr) : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) { } @@ -57,7 +57,7 @@ void Mir2Lir::AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume) { CallInfo* const info_; }; - AddSlowPath(new (arena_) IntrinsicLaunchpadPath(this, info, branch, resume)); + AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume)); } /* @@ -360,7 +360,11 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { rl_src.reg = TargetReg(kArg0); rl_src.home = false; MarkLive(rl_src.reg, rl_src.s_reg_low); - StoreValue(rl_method, rl_src); + if (rl_method.wide) { + StoreValueWide(rl_method, rl_src); + } else { + StoreValue(rl_method, rl_src); + } // If Method* has been promoted, explicitly flush if (rl_method.location == kLocPhysReg) { StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0)); @@ -425,16 +429,15 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { } } if (need_flush) { - StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kWord); + Store32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), reg); } } else { // If arriving in frame & promoted if (v_map->core_location == kLocPhysReg) { - LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), - RegStorage::Solo32(v_map->core_reg)); + Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg)); } if (v_map->fp_location == kLocPhysReg) { - LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg)); + Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg)); } } } @@ -476,9 +479,9 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, cg->LoadCurrMethodDirect(cg->TargetReg(kArg0)); break; case 1: // Get method->dex_cache_resolved_methods_ - cg->LoadWordDisp(cg->TargetReg(kArg0), - mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), - cg->TargetReg(kArg0)); + cg->LoadRefDisp(cg->TargetReg(kArg0), + mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), + cg->TargetReg(kArg0)); // Set up direct code if known. 
if (direct_code != 0) { if (direct_code != static_cast<unsigned int>(-1)) { @@ -491,9 +494,9 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, break; case 2: // Grab target method* CHECK_EQ(cu->dex_file, target_method.dex_file); - cg->LoadWordDisp(cg->TargetReg(kArg0), - mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + - (target_method.dex_method_index * 4), cg->TargetReg(kArg0)); + cg->LoadRefDisp(cg->TargetReg(kArg0), + mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + + (target_method.dex_method_index * 4), cg->TargetReg(kArg0)); break; case 3: // Grab the code from the method* if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { @@ -537,18 +540,18 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, case 1: // Is "this" null? [use kArg1] cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags); // get this->klass_ [use kArg1, set kInvokeTgt] - cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(), - cg->TargetReg(kInvokeTgt)); + cg->LoadRefDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(), + cg->TargetReg(kInvokeTgt)); cg->MarkPossibleNullPointerException(info->opt_flags); break; case 2: // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt] - cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(), - cg->TargetReg(kInvokeTgt)); + cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(), + cg->TargetReg(kInvokeTgt)); break; case 3: // Get target method [use kInvokeTgt, set kArg0] - cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) + - mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(), - cg->TargetReg(kArg0)); + cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) + + mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(), + cg->TargetReg(kArg0)); break; case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt] if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { @@ -592,15 +595,17 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, case 2: // Is "this" null? [use kArg1] cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags); // Get this->klass_ [use kArg1, set kInvokeTgt] - cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(), - cg->TargetReg(kInvokeTgt)); + cg->LoadRefDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(), + cg->TargetReg(kInvokeTgt)); cg->MarkPossibleNullPointerException(info->opt_flags); break; case 3: // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt] - cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(), - cg->TargetReg(kInvokeTgt)); + // NOTE: native pointer. + cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(), + cg->TargetReg(kInvokeTgt)); break; case 4: // Get target method [use kInvokeTgt, set kArg0] + // NOTE: native pointer. 
cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), ((method_idx % ClassLinker::kImtSize) * 4) + mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(), cg->TargetReg(kArg0)); @@ -753,11 +758,11 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, } else { // kArg2 & rArg3 can safely be used here reg = TargetReg(kArg3); - LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg); + Load32Disp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg); call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); } - StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord); + Store32Disp(TargetReg(kSp), (next_use + 1) * 4, reg); call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); next_use++; @@ -791,7 +796,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, StoreBaseDispWide(TargetReg(kSp), outs_offset, RegStorage::MakeRegPair(low_reg, high_reg)); next_use += 2; } else { - StoreWordDisp(TargetReg(kSp), outs_offset, low_reg); + Store32Disp(TargetReg(kSp), outs_offset, low_reg); next_use++; } call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, @@ -811,7 +816,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, // In lieu of generating a check for kArg1 being null, we need to // perform a load when doing implicit checks. RegStorage tmp = AllocTemp(); - LoadWordDisp(TargetReg(kArg1), 0, tmp); + Load32Disp(TargetReg(kArg1), 0, tmp); MarkPossibleNullPointerException(info->opt_flags); FreeTemp(tmp); } @@ -862,7 +867,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, } else { loc = UpdateLoc(loc); if ((next_arg >= 3) && (loc.location == kLocPhysReg)) { - StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord); + Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg); } next_arg++; } @@ -997,8 +1002,8 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, RegStorage temp = TargetReg(kArg3); // Now load the argument VR and store to the outs. - LoadWordDisp(TargetReg(kSp), current_src_offset, temp); - StoreWordDisp(TargetReg(kSp), current_dest_offset, temp); + Load32Disp(TargetReg(kSp), current_src_offset, temp); + Store32Disp(TargetReg(kSp), current_dest_offset, temp); } current_src_offset += bytes_to_move; @@ -1027,7 +1032,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, // In lieu of generating a check for kArg1 being null, we need to // perform a load when doing implicit checks. 
RegStorage tmp = AllocTemp(); - LoadWordDisp(TargetReg(kArg1), 0, tmp); + Load32Disp(TargetReg(kArg1), 0, tmp); MarkPossibleNullPointerException(info->opt_flags); FreeTemp(tmp); } @@ -1087,14 +1092,14 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) { reg_ptr = AllocTemp(); if (range_check) { reg_max = AllocTemp(); - LoadWordDisp(rl_obj.reg, count_offset, reg_max); + Load32Disp(rl_obj.reg, count_offset, reg_max); MarkPossibleNullPointerException(info->opt_flags); } - LoadWordDisp(rl_obj.reg, offset_offset, reg_off); + Load32Disp(rl_obj.reg, offset_offset, reg_off); MarkPossibleNullPointerException(info->opt_flags); - LoadWordDisp(rl_obj.reg, value_offset, reg_ptr); + Load32Disp(rl_obj.reg, value_offset, reg_ptr); if (range_check) { - // Set up a launch pad to allow retry in case of bounds violation */ + // Set up a slow path to allow retry in case of bounds violation */ OpRegReg(kOpCmp, rl_idx.reg, reg_max); FreeTemp(reg_max); range_check_branch = OpCondBranch(kCondUge, nullptr); @@ -1115,8 +1120,8 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) { } reg_off = AllocTemp(); reg_ptr = AllocTemp(); - LoadWordDisp(rl_obj.reg, offset_offset, reg_off); - LoadWordDisp(rl_obj.reg, value_offset, reg_ptr); + Load32Disp(rl_obj.reg, offset_offset, reg_off); + Load32Disp(rl_obj.reg, value_offset, reg_ptr); } if (rl_idx.is_const) { OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg)); @@ -1141,7 +1146,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) { if (range_check) { DCHECK(range_check_branch != nullptr); info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've already null checked. - AddIntrinsicLaunchpad(info, range_check_branch); + AddIntrinsicSlowPath(info, range_check_branch); } return true; } @@ -1158,7 +1163,7 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) { RegLocation rl_dest = InlineTarget(info); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); GenNullCheck(rl_obj.reg, info->opt_flags); - LoadWordDisp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg); + Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg); MarkPossibleNullPointerException(info->opt_flags); if (is_empty) { // dst = (dst == 0); @@ -1182,9 +1187,9 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) { return false; } RegLocation rl_src_i = info->args[0]; - RegLocation rl_dest = (size == kLong) ? InlineTargetWide(info) : InlineTarget(info); // result reg + RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info); // result reg RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - if (size == kLong) { + if (size == k64) { RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg); RegStorage r_i_low = rl_i.reg.GetLow(); if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) { @@ -1199,8 +1204,8 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) { } StoreValueWide(rl_dest, rl_result); } else { - DCHECK(size == kWord || size == kSignedHalf); - OpKind op = (size == kWord) ? kOpRev : kOpRevsh; + DCHECK(size == k32 || size == kSignedHalf); + OpKind op = (size == k32) ? kOpRev : kOpRevsh; RegLocation rl_i = LoadValue(rl_src_i, kCoreReg); OpRegReg(op, rl_result.reg, rl_i.reg); StoreValue(rl_dest, rl_result); @@ -1352,7 +1357,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { DCHECK(high_code_point_branch != nullptr); LIR* resume_tgt = NewLIR0(kPseudoTargetLabel); info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked. 
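In the GenInlinedCharAt hunk above, the bounds check is a single unsigned compare (kCondUge): reinterpreting the signed index as unsigned folds both idx < 0 and idx >= count into one branch to the intrinsic slow path. A standalone model, assuming a String layout with value/offset/count fields, per the offsets loaded above:

    #include <cstdint>
    static bool CharAtFastPath(const uint16_t* value, int32_t offset, int32_t count,
                               int32_t idx, uint16_t* out) {
      if (static_cast<uint32_t>(idx) >= static_cast<uint32_t>(count)) {
        return false;  // kCondUge taken: the slow path throws the range error
      }
      *out = value[offset + idx];  // the kUnsignedHalf indexed load
      return true;
    }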
- AddIntrinsicLaunchpad(info, high_code_point_branch, resume_tgt); + AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt); } else { DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0); DCHECK(high_code_point_branch == nullptr); @@ -1384,7 +1389,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked. // TUNING: check if rl_cmp.s_reg_low is already null checked LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr); - AddIntrinsicLaunchpad(info, cmp_null_check_branch); + AddIntrinsicSlowPath(info, cmp_null_check_branch); // NOTE: not a safepoint if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) { OpReg(kOpBlx, r_tgt); @@ -1402,7 +1407,7 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) { RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); ThreadOffset<4> offset = Thread::PeerOffset<4>(); if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) { - LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg); + Load32Disp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg); } else { CHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64); reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset); @@ -1429,7 +1434,7 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info, if (is_long) { if (cu_->instruction_set == kX86) { LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg.GetLow(), - rl_result.reg.GetHigh(), kLong, INVALID_SREG); + rl_result.reg.GetHigh(), k64, INVALID_SREG); } else { RegStorage rl_temp_offset = AllocTemp(); OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg); @@ -1437,7 +1442,7 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info, FreeTemp(rl_temp_offset.GetReg()); } } else { - LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, kWord); + LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32); } if (is_volatile) { @@ -1477,7 +1482,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long, rl_value = LoadValueWide(rl_src_value, kCoreReg); if (cu_->instruction_set == kX86) { StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg.GetLow(), - rl_value.reg.GetHigh(), kLong, INVALID_SREG); + rl_value.reg.GetHigh(), k64, INVALID_SREG); } else { RegStorage rl_temp_offset = AllocTemp(); OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg); @@ -1486,7 +1491,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long, } } else { rl_value = LoadValue(rl_src_value, kCoreReg); - StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, kWord); + StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32); } // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard. diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc index 208eadde12..9808f7f36f 100644 --- a/compiler/dex/quick/gen_loadstore.cc +++ b/compiler/dex/quick/gen_loadstore.cc @@ -65,7 +65,7 @@ void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) { OpRegCopy(RegStorage::Solo32(promotion_map_[pmap_index].core_reg), temp_reg); } else { // Lives in the frame, need to store. 
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, kWord); + StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32); } if (!zero_reg.Valid()) { FreeTemp(temp_reg); @@ -74,15 +74,6 @@ void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) { } } -/* Load a word at base + displacement. Displacement must be word multiple */ -LIR* Mir2Lir::LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) { - return LoadBaseDisp(r_base, displacement, r_dest, kWord, INVALID_SREG); -} - -LIR* Mir2Lir::StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) { - return StoreBaseDisp(r_base, displacement, r_src, kWord); -} - /* * Load a Dalvik register into a physical register. Take care when * using this routine, as it doesn't perform any bookkeeping regarding @@ -93,11 +84,17 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) { if (rl_src.location == kLocPhysReg) { OpRegCopy(r_dest, rl_src.reg); } else if (IsInexpensiveConstant(rl_src)) { + // On 64-bit targets, will sign extend. Make sure constant reference is always NULL. + DCHECK(!rl_src.ref || (mir_graph_->ConstantValue(rl_src) == 0)); LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src)); } else { DCHECK((rl_src.location == kLocDalvikFrame) || (rl_src.location == kLocCompilerTemp)); - LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest); + if (rl_src.ref) { + LoadRefDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest); + } else { + Load32Disp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest); + } } } @@ -194,7 +191,7 @@ void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) { ResetDefLoc(rl_dest); if (IsDirty(rl_dest.reg) && oat_live_out(rl_dest.s_reg_low)) { def_start = last_lir_insn_; - StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, kWord); + Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg); MarkClean(rl_dest); def_end = last_lir_insn_; if (!rl_dest.ref) { @@ -306,7 +303,7 @@ void Mir2Lir::StoreFinalValue(RegLocation rl_dest, RegLocation rl_src) { if (IsDirty(rl_dest.reg) && oat_live_out(rl_dest.s_reg_low)) { LIR *def_start = last_lir_insn_; - StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, kWord); + Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg); MarkClean(rl_dest); LIR *def_end = last_lir_insn_; if (!rl_dest.ref) { diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index a938478b3d..a237ac76b0 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -112,11 +112,11 @@ void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset, RegStorage r_key = AllocTemp(); LIR* loop_label = NewLIR0(kPseudoTargetLabel); LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL); - LoadWordDisp(r_base, 0, r_key); + Load32Disp(r_base, 0, r_key); OpRegImm(kOpAdd, r_base, 8); OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label); RegStorage r_disp = AllocTemp(); - LoadWordDisp(r_base, -4, r_disp); + Load32Disp(r_base, -4, r_disp); OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp); OpReg(kOpBx, rs_rRA); @@ -200,7 +200,7 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset, // Load the displacement from the switch table RegStorage r_disp = AllocTemp(); - LoadBaseIndexed(r_base, r_key, r_disp, 2, kWord); + LoadBaseIndexed(r_base, r_key, r_disp, 2, k32); // Add to rAP and go OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp); @@ 
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index a938478b3d..a237ac76b0 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -112,11 +112,11 @@ void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
   RegStorage r_key = AllocTemp();
   LIR* loop_label = NewLIR0(kPseudoTargetLabel);
   LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
-  LoadWordDisp(r_base, 0, r_key);
+  Load32Disp(r_base, 0, r_key);
   OpRegImm(kOpAdd, r_base, 8);
   OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
   RegStorage r_disp = AllocTemp();
-  LoadWordDisp(r_base, -4, r_disp);
+  Load32Disp(r_base, -4, r_disp);
   OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
   OpReg(kOpBx, rs_rRA);

@@ -200,7 +200,7 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,

   // Load the displacement from the switch table
   RegStorage r_disp = AllocTemp();
-  LoadBaseIndexed(r_base, r_key, r_disp, 2, kWord);
+  LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);

   // Add to rAP and go
   OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
@@ -263,9 +263,9 @@ void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
   int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   RegStorage reset_reg = AllocTemp();
-  LoadWordDisp(rs_rMIPS_SELF, ex_offset, rl_result.reg);
+  Load32Disp(rs_rMIPS_SELF, ex_offset, rl_result.reg);
   LoadConstant(reset_reg, 0);
-  StoreWordDisp(rs_rMIPS_SELF, ex_offset, reset_reg);
+  Store32Disp(rs_rMIPS_SELF, ex_offset, reset_reg);
   FreeTemp(reset_reg);
   StoreValue(rl_dest, rl_result);
 }
@@ -277,6 +277,7 @@ void MipsMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
   RegStorage reg_card_base = AllocTemp();
   RegStorage reg_card_no = AllocTemp();
   LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
+  // NOTE: native pointer.
   LoadWordDisp(rs_rMIPS_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
   OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
   StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
@@ -310,7 +311,7 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
   RegStorage new_sp = AllocTemp();
   if (!skip_overflow_check) {
     /* Load stack limit */
-    LoadWordDisp(rs_rMIPS_SELF, Thread::StackEndOffset<4>().Int32Value(), check_reg);
+    Load32Disp(rs_rMIPS_SELF, Thread::StackEndOffset<4>().Int32Value(), check_reg);
   }
   /* Spill core callee saves */
   SpillCoreRegs();
@@ -328,7 +329,7 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
       m2l_->ResetDefTracking();
       GenerateTargetLabel();
       // LR is offset 0 since we push in reverse order.
-      m2l_->LoadWordDisp(rs_rMIPS_SP, 0, rs_rRA);
+      m2l_->Load32Disp(rs_rMIPS_SP, 0, rs_rRA);
       m2l_->OpRegImm(kOpAdd, rs_rMIPS_SP, sp_displace_);
       m2l_->ClobberCallerSave();
       ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 40641d670d..da65f3424f 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -126,8 +126,6 @@ class MipsMir2Lir FINAL : public Mir2Lir {
                     RegLocation rl_src2);
     void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                     RegLocation rl_src2);
-    LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base, int offset,
-                        ThrowKind kind);
     RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
     void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 237572034d..88d5d2bc41 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -221,12 +221,6 @@ void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
   UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
 }

-LIR* MipsMir2Lir::GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
-                                 int offset, ThrowKind kind) {
-  LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
-  return NULL;
-}
-
 RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
                                    bool is_div) {
   NewLIR2(kMipsDiv, reg1.GetReg(), reg2.GetReg());
@@ -480,7 +474,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
   rl_array = LoadValue(rl_array, kCoreReg);
   rl_index = LoadValue(rl_index, kCoreReg);
-  if (size == kLong || size == kDouble) {
+  if (size == k64 || size == kDouble) {
     data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
   } else {
     data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
@@ -495,12 +489,12 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
   if (needs_range_check) {
     reg_len = AllocTemp();
     /* Get len */
-    LoadWordDisp(rl_array.reg, len_offset, reg_len);
+    Load32Disp(rl_array.reg, len_offset, reg_len);
   }
   /* reg_ptr -> array data */
   OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
   FreeTemp(rl_array.reg.GetReg());
-  if ((size == kLong) || (size == kDouble)) {
+  if ((size == k64) || (size == kDouble)) {
     if (scale) {
       RegStorage r_new_index = AllocTemp();
       OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
@@ -544,7 +538,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset;

-  if (size == kLong || size == kDouble) {
+  if (size == k64 || size == kDouble) {
     data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
   } else {
     data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
@@ -572,12 +566,12 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
       reg_len = AllocTemp();  // NOTE: max live temps(4) here.
       /* Get len */
-      LoadWordDisp(rl_array.reg, len_offset, reg_len);
+      Load32Disp(rl_array.reg, len_offset, reg_len);
     }
     /* reg_ptr -> array data */
     OpRegImm(kOpAdd, reg_ptr, data_offset);
     /* at this point, reg_ptr points to array, 2 live temps */
-    if ((size == kLong) || (size == kDouble)) {
+    if ((size == k64) || (size == kDouble)) {
      // TUNING: specific wide routine that can handle fp regs
       if (scale) {
         RegStorage r_new_index = AllocTemp();
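Both MIPS array paths above pick data_offset from the element width and shift the index by scale; the address they materialize in reg_ptr is plain arithmetic. A worked sketch under those assumptions (ElementAddress is a hypothetical illustration, not ART code):

    // addr = array_base + data_offset + (index << scale);
    // a k64/kDouble element uses scale 3 and the 8-byte element data offset.
    uintptr_t ElementAddress(uintptr_t array_base, int32_t index, int scale, int32_t data_offset) {
      return array_base + data_offset + (static_cast<uintptr_t>(index) << scale);
    }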
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 3e02faed55..7f4cd5e242 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -369,7 +369,7 @@ void MipsMir2Lir::FlushReg(RegStorage reg) {
   if (info->live && info->dirty) {
     info->dirty = false;
     int v_reg = mir_graph_->SRegToVReg(info->s_reg);
-    StoreBaseDisp(rs_rMIPS_SP, VRegOffset(v_reg), reg, kWord);
+    Store32Disp(rs_rMIPS_SP, VRegOffset(v_reg), reg);
   }
 }

@@ -531,12 +531,14 @@ void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
  * there is a trap in the shadow.  Allocate a temp register.
  */
 RegStorage MipsMir2Lir::LoadHelper(ThreadOffset<4> offset) {
+  // NOTE: native pointer.
   LoadWordDisp(rs_rMIPS_SELF, offset.Int32Value(), rs_rT9);
   return rs_rT9;
 }

 LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
   RegStorage tmp = AllocTemp();
+  // NOTE: native pointer.
   LoadWordDisp(rs_rMIPS_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
   LIR *inst = LoadWordDisp(tmp, 0, tmp);
   FreeTemp(tmp);
@@ -553,7 +555,7 @@ void MipsMir2Lir::SpillCoreRegs() {
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
       offset -= 4;
-      StoreWordDisp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
+      Store32Disp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
     }
   }
 }
@@ -567,7 +569,7 @@ void MipsMir2Lir::UnSpillCoreRegs() {
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
       offset -= 4;
-      LoadWordDisp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
+      Load32Disp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
     }
   }
   OpRegImm(kOpAdd, rs_rSP, frame_size_);
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index c959510025..12775e1a9e 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -357,11 +357,11 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor

   if (MIPS_FPREG(r_dest.GetReg())) {
     DCHECK(MIPS_SINGLEREG(r_dest.GetReg()));
-    DCHECK((size == kWord) || (size == kSingle));
+    DCHECK((size == k32) || (size == kSingle));
     size = kSingle;
   } else {
     if (size == kSingle)
-      size = kWord;
+      size = k32;
   }

   if (!scale) {
@@ -375,7 +375,8 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
     case kSingle:
       opcode = kMipsFlwc1;
       break;
-    case kWord:
+    case k32:
+    case kReference:
       opcode = kMipsLw;
       break;
     case kUnsignedHalf:
@@ -408,11 +409,11 @@ LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto

   if (MIPS_FPREG(r_src.GetReg())) {
     DCHECK(MIPS_SINGLEREG(r_src.GetReg()));
-    DCHECK((size == kWord) || (size == kSingle));
+    DCHECK((size == k32) || (size == kSingle));
     size = kSingle;
   } else {
     if (size == kSingle)
-      size = kWord;
+      size = k32;
   }

   if (!scale) {
@@ -426,7 +427,8 @@ LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
     case kSingle:
       opcode = kMipsFswc1;
       break;
-    case kWord:
+    case k32:
+    case kReference:
       opcode = kMipsSw;
       break;
     case kUnsignedHalf:
@@ -463,7 +465,7 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
   bool pair = false;

   switch (size) {
-    case kLong:
+    case k64:
     case kDouble:
       pair = true;
       opcode = kMipsLw;
@@ -481,8 +483,9 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
       short_form = IS_SIMM16_2WORD(displacement);
       DCHECK_EQ((displacement & 0x3), 0);
       break;
-    case kWord:
+    case k32:
     case kSingle:
+    case kReference:
       opcode = kMipsLw;
       if (MIPS_FPREG(r_dest.GetReg())) {
         opcode = kMipsFlwc1;
@@ -544,13 +547,17 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora

 LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                                OpSize size, int s_reg) {
+  // TODO: base this on target.
+  if (size == kWord) {
+    size = k32;
+  }
   return LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size, s_reg);
 }

 LIR* MipsMir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
                                    int s_reg) {
-  return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), kLong, s_reg);
+  return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), k64, s_reg);
 }

 LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
@@ -563,7 +570,7 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
   bool pair = false;

   switch (size) {
-    case kLong:
+    case k64:
     case kDouble:
       pair = true;
       opcode = kMipsSw;
@@ -580,8 +587,9 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
       short_form = IS_SIMM16_2WORD(displacement);
       DCHECK_EQ((displacement & 0x3), 0);
       break;
-    case kWord:
+    case k32:
     case kSingle:
+    case kReference:
       opcode = kMipsSw;
       if (MIPS_FPREG(r_src.GetReg())) {
         opcode = kMipsFswc1;
@@ -635,11 +643,15 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,

 LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                 OpSize size) {
+  // TODO: base this on target.
+  if (size == kWord) {
+    size = k32;
+  }
   return StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
 }

 LIR* MipsMir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
-  return StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), kLong);
+  return StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), k64);
 }

 LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
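Note the transitional shim in both LoadBaseDisp and StoreBaseDisp above: callers may still pass kWord, and the 32-bit MIPS backend narrows it to k32 before dispatch, with a TODO to derive the mapping from the target. The intent, expressed as a hypothetical helper (the patch inlines the check instead):

    // On a 32-bit target the natural word is 32 bits; a 64-bit backend
    // would map kWord to k64 here instead (the TODO in the patch).
    static OpSize NarrowWordToTarget(OpSize size) {
      return (size == kWord) ? k32 : size;
    }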
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 6fcdf70b12..6d3848841a 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -63,14 +63,14 @@ RegStorage Mir2Lir::LoadArg(int in_position, bool wide) {
     } else {
       reg_arg_high = AllocTemp();
       int offset_high = offset + sizeof(uint32_t);
-      LoadWordDisp(TargetReg(kSp), offset_high, reg_arg_high);
+      Load32Disp(TargetReg(kSp), offset_high, reg_arg_high);
     }
   }

   // If the low part is not in a register yet, we need to load it.
   if (!reg_arg_low.Valid()) {
     reg_arg_low = AllocTemp();
-    LoadWordDisp(TargetReg(kSp), offset, reg_arg_low);
+    Load32Disp(TargetReg(kSp), offset, reg_arg_low);
   }

   if (wide) {
@@ -96,7 +96,7 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
     if (reg.Valid()) {
       OpRegCopy(rl_dest.reg, reg);
     } else {
-      LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg);
+      Load32Disp(TargetReg(kSp), offset, rl_dest.reg);
     }
   } else {
     RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
@@ -107,10 +107,10 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
     } else if (reg_arg_low.Valid() && !reg_arg_high.Valid()) {
       OpRegCopy(rl_dest.reg, reg_arg_low);
       int offset_high = offset + sizeof(uint32_t);
-      LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.reg.GetHigh());
+      Load32Disp(TargetReg(kSp), offset_high, rl_dest.reg.GetHigh());
     } else if (!reg_arg_low.Valid() && reg_arg_high.Valid()) {
       OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
-      LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
+      Load32Disp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
     } else {
       LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg, INVALID_SREG);
     }
@@ -137,7 +137,7 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
   if (wide) {
     LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg, INVALID_SREG);
   } else {
-    LoadWordDisp(reg_obj, data.field_offset, rl_dest.reg);
+    Load32Disp(reg_obj, data.field_offset, rl_dest.reg);
   }
   if (data.is_volatile) {
     // Without context sensitive analysis, we must issue the most conservative barriers.
@@ -175,7 +175,7 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
   if (wide) {
     StoreBaseDispWide(reg_obj, data.field_offset, reg_src);
   } else {
-    StoreBaseDisp(reg_obj, data.field_offset, reg_src, kWord);
+    Store32Disp(reg_obj, data.field_offset, reg_src);
   }
   if (data.is_volatile) {
     // A load might follow the volatile store so insert a StoreLoad barrier.
@@ -449,7 +449,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       rl_src[0] = LoadValue(rl_src[0], kCoreReg);
       GenNullCheck(rl_src[0].reg, opt_flags);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      LoadWordDisp(rl_src[0].reg, len_offset, rl_result.reg);
+      Load32Disp(rl_src[0].reg, len_offset, rl_result.reg);
       MarkPossibleNullPointerException(opt_flags);
       StoreValue(rl_dest, rl_result);
       break;
@@ -562,11 +562,13 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
     }

     case Instruction::AGET_WIDE:
-      GenArrayGet(opt_flags, kLong, rl_src[0], rl_src[1], rl_dest, 3);
+      GenArrayGet(opt_flags, k64, rl_src[0], rl_src[1], rl_dest, 3);
       break;
-    case Instruction::AGET:
     case Instruction::AGET_OBJECT:
-      GenArrayGet(opt_flags, kWord, rl_src[0], rl_src[1], rl_dest, 2);
+      GenArrayGet(opt_flags, kReference, rl_src[0], rl_src[1], rl_dest, 2);
+      break;
+    case Instruction::AGET:
+      GenArrayGet(opt_flags, k32, rl_src[0], rl_src[1], rl_dest, 2);
       break;
     case Instruction::AGET_BOOLEAN:
       GenArrayGet(opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
@@ -581,10 +583,10 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       GenArrayGet(opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
       break;
     case Instruction::APUT_WIDE:
-      GenArrayPut(opt_flags, kLong, rl_src[1], rl_src[2], rl_src[0], 3, false);
+      GenArrayPut(opt_flags, k64, rl_src[1], rl_src[2], rl_src[0], 3, false);
       break;
     case Instruction::APUT:
-      GenArrayPut(opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2, false);
+      GenArrayPut(opt_flags, k32, rl_src[1], rl_src[2], rl_src[0], 2, false);
       break;
     case Instruction::APUT_OBJECT: {
       bool is_null = mir_graph_->IsConstantNullRef(rl_src[0]);
@@ -597,7 +599,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       if (is_null || is_safe) {
         // Store of constant null doesn't require an assignability test and can be generated inline
         // without fixed register usage or a card mark.
-        GenArrayPut(opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2, !is_null);
+        GenArrayPut(opt_flags, kReference, rl_src[1], rl_src[2], rl_src[0], 2, !is_null);
       } else {
         GenArrayObjPut(opt_flags, rl_src[1], rl_src[2], rl_src[0]);
       }
@@ -613,15 +615,15 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       break;

     case Instruction::IGET_OBJECT:
-      GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, true);
+      GenIGet(mir, opt_flags, kReference, rl_dest, rl_src[0], false, true);
       break;

     case Instruction::IGET_WIDE:
-      GenIGet(mir, opt_flags, kLong, rl_dest, rl_src[0], true, false);
+      GenIGet(mir, opt_flags, k64, rl_dest, rl_src[0], true, false);
       break;

     case Instruction::IGET:
-      GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, false);
+      GenIGet(mir, opt_flags, k32, rl_dest, rl_src[0], false, false);
       break;

     case Instruction::IGET_CHAR:
@@ -638,15 +640,15 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       break;

     case Instruction::IPUT_WIDE:
-      GenIPut(mir, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
+      GenIPut(mir, opt_flags, k64, rl_src[0], rl_src[1], true, false);
       break;

     case Instruction::IPUT_OBJECT:
-      GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
+      GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1], false, true);
       break;

     case Instruction::IPUT:
-      GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
+      GenIPut(mir, opt_flags, k32, rl_src[0], rl_src[1], false, false);
       break;

     case Instruction::IPUT_BOOLEAN:
@@ -1097,8 +1099,6 @@ void Mir2Lir::MethodMIR2LIR() {

   cu_->NewTimingSplit("Launchpads");
   HandleSuspendLaunchPads();
-
-  HandleThrowLaunchPads();
 }

 //
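The opcode rewiring above is mechanical but easy to misread in diff form: the _WIDE opcodes now use k64, the _OBJECT opcodes use kReference, and the plain 32-bit forms use k32. Summarized as a stand-alone mapping (an illustrative helper, not code from the patch):

    // Hypothetical summary of the dispatch in CompileDalvikInstruction above.
    static OpSize SizeForArrayOp(Instruction::Code op) {
      switch (op) {
        case Instruction::AGET_WIDE:   return k64;         // two 32-bit slots
        case Instruction::AGET_OBJECT: return kReference;  // compressed on 64-bit targets
        default:                       return k32;         // plain 32-bit payload
      }
    }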
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 1f69eb5aa2..8d593ae664 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -109,6 +109,11 @@ typedef uint32_t CodeOffset;  // Native code offset in bytes.
 #define REG_USE23            (REG_USE2 | REG_USE3)
 #define REG_USE123           (REG_USE1 | REG_USE2 | REG_USE3)

+// TODO: #includes need a cleanup
+#ifndef INVALID_SREG
+#define INVALID_SREG (-1)
+#endif
+
 struct BasicBlock;
 struct CallInfo;
 struct CompilationUnit;
@@ -554,12 +559,11 @@ class Mir2Lir : public Backend {
     RegisterInfo* GetRegInfo(int reg);

     // Shared by all targets - implemented in gen_common.cc.
-    void AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume = nullptr);
+    void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
     bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                           RegLocation rl_src, RegLocation rl_dest, int lit);
     bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
     void HandleSuspendLaunchPads();
-    void HandleThrowLaunchPads();
     void HandleSlowPaths();
     void GenBarrier();
     void GenDivZeroException();
@@ -576,7 +580,6 @@ class Mir2Lir : public Backend {
     LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind);
     LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
     LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
-    LIR* GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2, ThrowKind kind);
     void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                              RegLocation rl_src2, LIR* taken, LIR* fall_through);
     void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
@@ -725,14 +728,42 @@ class Mir2Lir : public Backend {
     RegLocation LoadCurrMethod();
     void LoadCurrMethodDirect(RegStorage r_tgt);
     LIR* LoadConstant(RegStorage r_dest, int value);
-    LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest);
+    // Natural word size.
+    LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
+      return LoadBaseDisp(r_base, displacement, r_dest, kWord, INVALID_SREG);
+    }
+    // Load 32 bits, regardless of target.
+    LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
+      return LoadBaseDisp(r_base, displacement, r_dest, k32, INVALID_SREG);
+    }
+    // Load a reference at base + displacement and decompress into register.
+    LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
+      return LoadBaseDisp(r_base, displacement, r_dest, kReference, INVALID_SREG);
+    }
+    // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
     RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
+    // Load Dalvik value with 64-bit memory storage.
     RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
+    // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
     void LoadValueDirect(RegLocation rl_src, RegStorage r_dest);
+    // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
     void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest);
+    // Load Dalvik value with 64-bit memory storage.
     void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest);
+    // Load Dalvik value with 64-bit memory storage.
     void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
-    LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src);
+    // Store an item of natural word size.
+    LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
+      return StoreBaseDisp(r_base, displacement, r_src, kWord);
+    }
+    // Store an uncompressed reference into a compressed 32-bit container.
+    LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src) {
+      return StoreBaseDisp(r_base, displacement, r_src, kReference);
+    }
+    // Store 32 bits, regardless of target.
+    LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
+      return StoreBaseDisp(r_base, displacement, r_src, k32);
+    }

     /**
      * @brief Used to do the final store in the destination as per bytecode semantics.
@@ -935,8 +966,6 @@ class Mir2Lir : public Backend {
                             RegLocation rl_src2) = 0;
     virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) = 0;
-    virtual LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
-                                int offset, ThrowKind kind) = 0;
     virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                   bool is_div) = 0;
     virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
@@ -1246,7 +1275,6 @@ class Mir2Lir : public Backend {
     MIRGraph* const mir_graph_;
     GrowableArray<SwitchTable*> switch_tables_;
     GrowableArray<FillArrayData*> fill_array_data_;
-    GrowableArray<LIR*> throw_launchpads_;
     GrowableArray<LIR*> suspend_launchpads_;
     GrowableArray<RegisterInfo*> tempreg_info_;
     GrowableArray<RegisterInfo*> reginfo_map_;
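With these helpers in place, mir_to_lir.h now distinguishes three widths that the old LoadWordDisp conflated. Hypothetical call sites showing the intended division of labor (register and offset names here are placeholders):

    Load32Disp(rs_self, thin_lock_id_offset, r_dest);  // always a 32-bit value
    LoadRefDisp(rs_self, field_offset, r_obj);         // 32-bit slot; decompressed on 64-bit
    LoadWordDisp(rs_self, entrypoint_offset, r_tgt);   // native pointer: natural word size

This is why the card-table and suspend-trigger loads earlier in the diff keep LoadWordDisp under a "NOTE: native pointer" comment while thin-lock ids and Dalvik slots move to Load32Disp.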
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 729b30d621..00831099fc 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -239,7 +239,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
     // mov esp, ebp
     // in case a signal comes in that's not using an alternate signal stack and the large frame may
     // have moved us outside of the reserved area at the end of the stack.
-    // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
+    // cmp rX86_SP, fs:[stack_end_]; jcc throw_slowpath
     OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset<4>());
     LIR* branch = OpCondBranch(kCondUlt, nullptr);
     AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_ - 4));
@@ -251,7 +251,8 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
     // We have been asked to save the address of the method start for later use.
     setup_method_address_[0] = NewLIR1(kX86StartOfMethod, rX86_ARG0);
     int displacement = SRegOffset(base_of_code_->s_reg_low);
-    setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0, kWord);
+    // Native pointer - must be natural word size.
+    setup_method_address_[1] = StoreWordDisp(rs_rX86_SP, displacement, rs_rX86_ARG0);
   }

   FreeTemp(rX86_ARG0);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index b802591347..fb61627c9e 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -127,10 +127,6 @@ class X86Mir2Lir FINAL : public Mir2Lir {
                     RegLocation rl_src2);
     void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                     RegLocation rl_src2);
-    LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base, int offset,
-                        ThrowKind kind);
-    LIR* GenMemImmedCheck(ConditionCode c_code, RegStorage base, int offset, int check_value,
-                          ThrowKind kind);
     // TODO: collapse reg_lo, reg_hi
     RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index ee5387f050..f7b0c9d892 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -193,7 +193,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
     } else {
       rl_result = EvalLoc(rl_dest, kFPReg, true);

-      LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
+      Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);

       StoreFinalValue(rl_dest, rl_result);
     }
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index a23a3bf6b8..5ba9709187 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -24,34 +24,6 @@ namespace art {

 /*
- * Perform register memory operation.
- */
-LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
-                                int offset, ThrowKind kind) {
-  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind,
-                    current_dalvik_offset_, reg1.GetReg(), base.GetReg(), offset);
-  OpRegMem(kOpCmp, reg1, base, offset);
-  LIR* branch = OpCondBranch(c_code, tgt);
-  // Remember branch target - will process later
-  throw_launchpads_.Insert(tgt);
-  return branch;
-}
-
-/*
- * Perform a compare of memory to immediate value
- */
-LIR* X86Mir2Lir::GenMemImmedCheck(ConditionCode c_code, RegStorage base, int offset,
-                                  int check_value, ThrowKind kind) {
-  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind,
-                    current_dalvik_offset_, base.GetReg(), check_value, 0);
-  NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base.GetReg(), offset, check_value);
-  LIR* branch = OpCondBranch(c_code, tgt);
-  // Remember branch target - will process later
-  throw_launchpads_.Insert(tgt);
-  return branch;
-}
-
-/*
  * Compare two 64-bit values
  *    x = y     return  0
  *    x < y     return -1
@@ -704,15 +676,15 @@ bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {

 bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
   RegLocation rl_src_address = info->args[0];  // long address
   rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
-  RegLocation rl_dest = size == kLong ? InlineTargetWide(info) : InlineTarget(info);
+  RegLocation rl_dest = size == k64 ? InlineTargetWide(info) : InlineTarget(info);
   RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (size == kLong) {
+  if (size == k64) {
     // Unaligned access is allowed on x86.
     LoadBaseDispWide(rl_address.reg, 0, rl_result.reg, INVALID_SREG);
     StoreValueWide(rl_dest, rl_result);
   } else {
-    DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
+    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
     // Unaligned access is allowed on x86.
     LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
     StoreValue(rl_dest, rl_result);
@@ -725,12 +697,12 @@ bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
   rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
   RegLocation rl_src_value = info->args[2];  // [size] value
   RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
-  if (size == kLong) {
+  if (size == k64) {
     // Unaligned access is allowed on x86.
     RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
     StoreBaseDispWide(rl_address.reg, 0, rl_value.reg);
   } else {
-    DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
+    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
     // Unaligned access is allowed on x86.
     RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
     StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
@@ -780,6 +752,7 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
     int srcObjSp = IsInReg(this, rl_src_obj, rs_rSI) ? 0
                 : (IsInReg(this, rl_src_obj, rs_rDI) ? 4
                 : (SRegOffset(rl_src_obj.s_reg_low) + push_offset));
+    // FIXME: needs 64-bit update.
     LoadWordDisp(TargetReg(kSp), srcObjSp, rs_rDI);
     int srcOffsetSp = IsInReg(this, rl_src_offset, rs_rSI) ? 0
                    : (IsInReg(this, rl_src_offset, rs_rDI) ? 4
@@ -1024,7 +997,7 @@ void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int
       NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
       break;
     case 1:
-      LoadBaseDisp(rs_rX86_SP, displacement, dest, kWord, sreg);
+      LoadBaseDisp(rs_rX86_SP, displacement, dest, k32, sreg);
      break;
    default:
       m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(), rX86_SP,
@@ -1130,7 +1103,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
     NewLIR2(kX86Mov32RR, r1, rl_src1.reg.GetHighReg());
   } else {
     LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1,
-                 kWord, GetSRegHi(rl_src1.s_reg_low));
+                 k32, GetSRegHi(rl_src1.s_reg_low));
   }

   if (is_square) {
@@ -1153,7 +1126,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
       NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetHighReg());
     } else {
       LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0,
-                   kWord, GetSRegHi(rl_src2.s_reg_low));
+                   k32, GetSRegHi(rl_src2.s_reg_low));
     }

     // EAX <- EAX * 1L  (2H * 1L)
@@ -1185,7 +1158,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
     NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetLowReg());
   } else {
     LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0,
-                 kWord, rl_src2.s_reg_low);
+                 k32, rl_src2.s_reg_low);
   }

   // EDX:EAX <- 2L * 1L (double precision)
@@ -1405,7 +1378,7 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
   rl_array = LoadValue(rl_array, kCoreReg);

   int data_offset;
-  if (size == kLong || size == kDouble) {
+  if (size == k64 || size == kDouble) {
     data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
   } else {
     data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
@@ -1434,7 +1407,7 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
     }
   }
   rl_result = EvalLoc(rl_dest, reg_class, true);
-  if ((size == kLong) || (size == kDouble)) {
+  if ((size == k64) || (size == kDouble)) {
     LoadBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_result.reg.GetLow(),
                         rl_result.reg.GetHigh(), size, INVALID_SREG);
     StoreValueWide(rl_dest, rl_result);
@@ -1455,7 +1428,7 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset;

-  if (size == kLong || size == kDouble) {
+  if (size == k64 || size == kDouble) {
     data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
   } else {
     data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
@@ -1484,7 +1457,7 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
       GenArrayBoundsCheck(rl_index.reg, rl_array.reg, len_offset);
     }
   }
-  if ((size == kLong) || (size == kDouble)) {
+  if ((size == k64) || (size == kDouble)) {
     rl_src = LoadValueWide(rl_src, reg_class);
   } else {
     rl_src = LoadValue(rl_src, reg_class);
@@ -1871,22 +1844,22 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,

   if (rl_method.location == kLocPhysReg) {
     if (use_declaring_class) {
-      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                    check_class);
     } else {
-      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                    check_class);
-      LoadWordDisp(check_class, offset_of_type, check_class);
+      LoadRefDisp(check_class, offset_of_type, check_class);
     }
   } else {
     LoadCurrMethodDirect(check_class);
     if (use_declaring_class) {
-      LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+      LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                  check_class);
     } else {
-      LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+      LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                   check_class);
-      LoadWordDisp(check_class, offset_of_type, check_class);
+      LoadRefDisp(check_class, offset_of_type, check_class);
     }
   }
@@ -1927,17 +1900,17 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
     LoadValueDirectFixed(rl_src, TargetReg(kArg0));
   } else if (use_declaring_class) {
     LoadValueDirectFixed(rl_src, TargetReg(kArg0));
-    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                  class_reg);
   } else {
     // Load dex cache entry into class_reg (kArg2).
     LoadValueDirectFixed(rl_src, TargetReg(kArg0));
-    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                  class_reg);
     int32_t offset_of_type = mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
         (sizeof(mirror::Class*) * type_idx);
-    LoadWordDisp(class_reg, offset_of_type, class_reg);
+    LoadRefDisp(class_reg, offset_of_type, class_reg);
     if (!can_assume_type_is_in_dex_cache) {
       // Need to test presence of type in dex cache at runtime.
       LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
@@ -1961,7 +1934,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
   /* Load object->klass_. */
   DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
-  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+  LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
   /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
   LIR* branchover = nullptr;
   if (type_known_final) {
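Every hop in the instanceof paths above (method to dex cache to class) is now explicitly a heap-reference load via LoadRefDisp, while the entry offset itself stays plain integer math. Assuming 32-bit heap references, the offset computed in the hunk above expands to:

    // From the hunk above: byte offset of entry type_idx in the resolved-types array.
    int32_t offset_of_type = mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    // e.g. with 4-byte refs and type_idx == 5: the array's data offset plus 20 bytes.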
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 5a8ad7a2b4..3e3fa72150 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -369,12 +369,13 @@ void X86Mir2Lir::FlushRegWide(RegStorage reg) {
 }

 void X86Mir2Lir::FlushReg(RegStorage reg) {
+  // FIXME: need to handle 32 bits in 64-bit register as well as wide values held in single reg.
   DCHECK(!reg.IsPair());
   RegisterInfo* info = GetRegInfo(reg.GetReg());
   if (info->live && info->dirty) {
     info->dirty = false;
     int v_reg = mir_graph_->SRegToVReg(info->s_reg);
-    StoreBaseDisp(rs_rX86_SP, VRegOffset(v_reg), reg, kWord);
+    StoreBaseDisp(rs_rX86_SP, VRegOffset(v_reg), reg, k32);
   }
 }

@@ -1033,14 +1034,14 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
     info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

   // Does the character fit in 16 bits?
-  LIR* launchpad_branch = nullptr;
+  LIR* slowpath_branch = nullptr;
   if (rl_char.is_const) {
     // We need the value in EAX.
     LoadConstantNoClobber(rs_rAX, char_value);
   } else {
     // Character is not a constant; compare at runtime.
     LoadValueDirectFixed(rl_char, rs_rAX);
-    launchpad_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
+    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
   }

   // From here down, we know that we are looking for a char that fits in 16 bits.
@@ -1061,7 +1062,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
   NewLIR1(kX86Push32R, rDI);

   // Compute the number of words to search in to rCX.
-  LoadWordDisp(rs_rDX, count_offset, rs_rCX);
+  Load32Disp(rs_rDX, count_offset, rs_rCX);
   LIR *length_compare = nullptr;
   int start_value = 0;
   bool is_index_on_stack = false;
@@ -1101,7 +1102,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
     } else {
       // Load the start index from stack, remembering that we pushed EDI.
       int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
-      LoadWordDisp(rs_rX86_SP, displacement, rs_rBX);
+      Load32Disp(rs_rX86_SP, displacement, rs_rBX);
       OpRegReg(kOpXor, rs_rDI, rs_rDI);
       OpRegReg(kOpCmp, rs_rBX, rs_rDI);
       OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);
@@ -1120,8 +1121,8 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {

   // Load the address of the string into EBX.
   // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
-  LoadWordDisp(rs_rDX, value_offset, rs_rDI);
-  LoadWordDisp(rs_rDX, offset_offset, rs_rBX);
+  Load32Disp(rs_rDX, value_offset, rs_rDI);
+  Load32Disp(rs_rDX, offset_offset, rs_rBX);
   OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset);

   // Now compute into EDI where the search will start.
@@ -1167,9 +1168,9 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
   NewLIR1(kX86Pop32R, rDI);

   // Out of line code returns here.
-  if (launchpad_branch != nullptr) {
+  if (slowpath_branch != nullptr) {
     LIR *return_point = NewLIR0(kPseudoTargetLabel);
-    AddIntrinsicLaunchpad(info, launchpad_branch, return_point);
+    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
   }

   StoreValue(rl_dest, rl_return);
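GenInlinedIndexOf shows the renamed intrinsic slow-path pattern end to end: record the guard branch, emit the inline fast path, then hand both the branch and a resume label to AddIntrinsicSlowPath. Reduced to its skeleton, using only calls that appear in the hunks above:

    // Guard: a runtime char that does not fit in 16 bits takes the slow path.
    LIR* slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
    // ... inline fast-path search ...
    LIR* return_point = NewLIR0(kPseudoTargetLabel);  // out-of-line code resumes here
    AddIntrinsicSlowPath(info, slowpath_branch, return_point);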
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index e9faa7ff53..00bebd2983 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -554,7 +554,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
   bool is64bit = false;
   X86OpCode opcode = kX86Nop;
   switch (size) {
-    case kLong:
+    case k64:
     case kDouble:
       // TODO: use regstorage attributes here.
       is64bit = true;
@@ -567,8 +567,9 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
       // TODO: double store is to unaligned address
       DCHECK_EQ((displacement & 0x3), 0);
       break;
-    case kWord:
+    case k32:
     case kSingle:
+    case kReference:  // TODO: update for reference decompression on 64-bit targets.
       opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
       if (X86_FPREG(r_dest.GetReg())) {
         opcode = is_array ? kX86MovssRA : kX86MovssRM;
@@ -669,6 +670,10 @@ LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora

 LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement,
                               RegStorage r_dest, OpSize size, int s_reg) {
+  // TODO: base this on target.
+  if (size == kWord) {
+    size = k32;
+  }
   return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
                              r_dest, RegStorage::InvalidReg(), size, s_reg);
 }

@@ -676,7 +681,7 @@ LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement,
 LIR* X86Mir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
                                   int s_reg) {
   return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
-                             r_dest.GetLow(), r_dest.GetHigh(), kLong, s_reg);
+                             r_dest.GetLow(), r_dest.GetHigh(), k64, s_reg);
 }

 LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
@@ -690,7 +695,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
   bool is64bit = false;
   X86OpCode opcode = kX86Nop;
   switch (size) {
-    case kLong:
+    case k64:
     case kDouble:
       is64bit = true;
       if (X86_FPREG(r_src.GetReg())) {
@@ -702,8 +707,9 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
       // TODO: double store is to unaligned address
       DCHECK_EQ((displacement & 0x3), 0);
       break;
-    case kWord:
+    case k32:
     case kSingle:
+    case kReference:
       opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
       if (X86_FPREG(r_src.GetReg())) {
         opcode = is_array ? kX86MovssAR : kX86MovssMR;
@@ -763,13 +769,17 @@ LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor

 LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement,
                                RegStorage r_src, OpSize size) {
-  return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src,
-                              RegStorage::InvalidReg(), size, INVALID_SREG);
+  // TODO: base this on target.
+  if (size == kWord) {
+    size = k32;
+  }
+  return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src,
+                              RegStorage::InvalidReg(), size, INVALID_SREG);
 }

 LIR* X86Mir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
   return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
-                              r_src.GetLow(), r_src.GetHigh(), kLong, INVALID_SREG);
+                              r_src.GetLow(), r_src.GetHigh(), k64, INVALID_SREG);
 }

 /*
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index a7d1f0e5a5..b33a62e67f 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -31,7 +31,6 @@ enum OatListKind {
   kGrowableArrayDfsOrder,
   kGrowableArrayDfsPostOrder,
   kGrowableArrayDomPostOrderTraversal,
-  kGrowableArrayThrowLaunchPads,
   kGrowableArraySuspendLaunchPads,
   kGrowableArraySwitchTables,
   kGrowableArrayFillArrayData,
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 7d02c7c8a8..9507e1207a 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1493,7 +1493,7 @@ void X86_64Assembler::EmitOptionalRex(bool force, bool w, bool r, bool x, bool b
 }

 void X86_64Assembler::EmitOptionalRex32(CpuRegister reg) {
-  EmitOptionalRex(false, false, reg.NeedsRex(), false, false);
+  EmitOptionalRex(false, false, false, false, reg.NeedsRex());
 }

 void X86_64Assembler::EmitOptionalRex32(CpuRegister dst, CpuRegister src) {
@@ -1540,8 +1540,9 @@ void X86_64Assembler::EmitOptionalRex32(XmmRegister dst, const Operand& operand)
 }

 void X86_64Assembler::EmitRex64(CpuRegister reg) {
-  EmitOptionalRex(false, true, reg.NeedsRex(), false, false);
+  EmitOptionalRex(false, true, false, false, reg.NeedsRex());
 }
+
 void X86_64Assembler::EmitRex64(CpuRegister dst, CpuRegister src) {
   EmitOptionalRex(false, true, dst.NeedsRex(), false, src.NeedsRex());
 }
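The assembler fix above is subtle: EmitOptionalRex takes its extension bits in (force, w, r, x, b) order, and a lone register operand is encoded in ModRM.rm or the opcode's register field, so its high bit belongs in REX.B, not REX.R. A sketch of the prefix layout, assuming standard x86-64 encoding rather than anything specific to this patch:

    // REX prefix: 0100WRXB.  W = 64-bit operand size; R extends ModRM.reg,
    // X extends SIB.index, B extends ModRM.rm / the opcode register field.
    uint8_t MakeRex(bool w, bool r, bool x, bool b) {
      return static_cast<uint8_t>(0x40 | (w << 3) | (r << 2) | (x << 1) | (b << 0));
    }
    // A single-register form uses ModRM.rm, so its bit must go to B - hence the
    // reordered arguments in EmitOptionalRex32(CpuRegister) and EmitRex64 above.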