Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc     28
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc  14
-rw-r--r--  compiler/dex/quick/codegen_util.cc      3
-rw-r--r--  compiler/dex/quick/gen_common.cc       14
-rw-r--r--  compiler/dex/quick/gen_invoke.cc       28
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc        1
-rw-r--r--  compiler/dex/quick/mir_to_lir.h         1
7 files changed, 72 insertions, 17 deletions
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 175fc06efb..d6724f1382 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -183,15 +183,18 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
   LockCallTemps();  // Prepare for explicit register usage
   constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
   if (kArchVariantHasGoodBranchPredictor) {
-    LIR* null_check_branch;
+    LIR* null_check_branch = nullptr;
     if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
       null_check_branch = nullptr;  // No null check.
     } else {
       // If the null-check fails its handled by the slow-path to reduce exception related meta-data.
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+      if (Runtime::Current()->ExplicitNullChecks()) {
+        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+      }
     }
     LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), rs_r2);
     NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
+    MarkPossibleNullPointerException(opt_flags);
     LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r1, 0, NULL);
     NewLIR4(kThumb2Strex, r1, r2, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
     LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
@@ -216,8 +219,8 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
     // Explicit null-check as slow-path is entered using an IT.
     GenNullCheck(rs_r0, opt_flags);
     LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), rs_r2);
-    MarkPossibleNullPointerException(opt_flags);
     NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
+    MarkPossibleNullPointerException(opt_flags);
     OpRegImm(kOpCmp, rs_r1, 0);
     OpIT(kCondEq, "");
     NewLIR4(kThumb2Strex/*eq*/, r1, r2, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
@@ -241,7 +244,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
   FlushAllRegs();
   LoadValueDirectFixed(rl_src, rs_r0);  // Get obj
   LockCallTemps();  // Prepare for explicit register usage
-  LIR* null_check_branch;
+  LIR* null_check_branch = nullptr;
   LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), rs_r2);
   constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
   if (kArchVariantHasGoodBranchPredictor) {
@@ -249,9 +252,12 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
       null_check_branch = nullptr;  // No null check.
     } else {
       // If the null-check fails its handled by the slow-path to reduce exception related meta-data.
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+      if (Runtime::Current()->ExplicitNullChecks()) {
+        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+      }
     }
     LoadWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
+    MarkPossibleNullPointerException(opt_flags);
     LoadConstantNoClobber(rs_r3, 0);
     LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r1, rs_r2, NULL);
     StoreWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
@@ -404,11 +410,17 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
       }
     } else {
       // Implicit stack overflow check.
-      // Generate a load from [sp, #-framesize].  If this is in the stack
+      // Generate a load from [sp, #-overflowsize].  If this is in the stack
       // redzone we will get a segmentation fault.
-      OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
-      LoadWordDisp(rs_rARM_SP, 0, rs_rARM_LR);
+      //
+      // Caveat coder: if someone changes the kStackOverflowReservedBytes value
+      // we need to make sure that it's loadable in an immediate field of
+      // a sub instruction.  Otherwise we will get a temp allocation and the
+      // code size will increase.
+      OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, Thread::kStackOverflowReservedBytes);
+      LoadWordDisp(rs_r12, 0, rs_r12);
       MarkPossibleStackOverflowException();
+      OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
     }
   } else {
     OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 1ec0a2c65d..8df5b25ebd 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -828,6 +828,7 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
   int encoded_disp = displacement;
   bool already_generated = false;
   int dest_low_reg = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
+  bool null_pointer_safepoint = false;
   switch (size) {
     case kDouble:
     case kLong:
@@ -848,6 +849,7 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
                           displacement >> 2);
       } else {
         load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), kWord, s_reg);
+        null_pointer_safepoint = true;
         LoadBaseDispBody(r_base, displacement + 4, r_dest.GetHigh(), kWord, INVALID_SREG);
       }
       already_generated = true;
@@ -939,6 +941,11 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
   // TODO: in future may need to differentiate Dalvik accesses w/ spills
   if (r_base == rs_rARM_SP) {
     AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
+  } else {
+    // We might need to generate a safepoint if we have two store instructions (wide or double).
+    if (!Runtime::Current()->ExplicitNullChecks() && null_pointer_safepoint) {
+      MarkSafepointPC(load);
+    }
   }
   return load;
 }
@@ -965,6 +972,7 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
   int encoded_disp = displacement;
   bool already_generated = false;
   int src_low_reg = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();
+  bool null_pointer_safepoint = false;
   switch (size) {
     case kLong:
     case kDouble:
@@ -974,6 +982,7 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
                         displacement >> 2);
     } else {
       store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), kWord);
+      null_pointer_safepoint = true;
       StoreBaseDispBody(r_base, displacement + 4, r_src.GetHigh(), kWord);
     }
     already_generated = true;
@@ -1061,6 +1070,11 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
   // TODO: In future, may need to differentiate Dalvik & spill accesses
   if (r_base == rs_rARM_SP) {
     AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
+  } else {
+    // We might need to generate a safepoint if we have two store instructions (wide or double).
+    if (!Runtime::Current()->ExplicitNullChecks() && null_pointer_safepoint) {
+      MarkSafepointPC(store);
+    }
   }
   return store;
 }
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 717ad86a75..4c6c7a45b4 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -702,7 +702,8 @@ void Mir2Lir::CreateNativeGcMap() {
     uint32_t native_offset = it.NativePcOffset();
     uint32_t dex_pc = it.DexPc();
     const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
-    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
+    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
+        ": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
     native_gc_map_builder.AddEntry(native_offset, references);
   }
 }
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 2afa5ca815..866ce5f397 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -74,14 +74,19 @@ LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, T
 /* Perform null-check on a register.  */
 LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
   if (Runtime::Current()->ExplicitNullChecks()) {
-    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
-      return NULL;
-    }
-    return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
+    return GenExplicitNullCheck(m_reg, opt_flags);
   }
   return nullptr;
 }
 
+/* Perform an explicit null-check on a register.  */
+LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
+  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+    return NULL;
+  }
+  return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
+}
+
 void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
   if (!Runtime::Current()->ExplicitNullChecks()) {
     if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
@@ -732,6 +737,7 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
       OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
       rl_result = EvalLoc(rl_dest, reg_class, true);
       LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
+      MarkPossibleNullPointerException(opt_flags);
       if (field_info.IsVolatile()) {
         // Without context sensitive analysis, we must issue the most conservative barriers.
         // In this case, either a load or store may follow so we issue both barriers.
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index a0242d514d..7689b51f96 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -781,7 +781,17 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                type, skip_this);
 
   if (pcrLabel) {
-    *pcrLabel = GenNullCheck(TargetReg(kArg1), info->opt_flags);
+    if (Runtime::Current()->ExplicitNullChecks()) {
+      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
+    } else {
+      *pcrLabel = nullptr;
+      // In lieu of generating a check for kArg1 being null, we need to
+      // perform a load when doing implicit checks.
+      RegStorage tmp = AllocTemp();
+      LoadWordDisp(TargetReg(kArg1), 0, tmp);
+      MarkPossibleNullPointerException(info->opt_flags);
+      FreeTemp(tmp);
+    }
   }
   return call_state;
 }
@@ -987,7 +997,17 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
     call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                 direct_code, direct_method, type);
   if (pcrLabel) {
-    *pcrLabel = GenNullCheck(TargetReg(kArg1), info->opt_flags);
+    if (Runtime::Current()->ExplicitNullChecks()) {
+      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
+    } else {
+      *pcrLabel = nullptr;
+      // In lieu of generating a check for kArg1 being null, we need to
+      // perform a load when doing implicit checks.
+      RegStorage tmp = AllocTemp();
+      LoadWordDisp(TargetReg(kArg1), 0, tmp);
+      MarkPossibleNullPointerException(info->opt_flags);
+      FreeTemp(tmp);
+    }
   }
   return call_state;
 }
@@ -1299,7 +1319,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
     LoadValueDirectFixed(rl_start, reg_start);
   }
   RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf));
-  GenNullCheck(reg_ptr, info->opt_flags);
+  GenExplicitNullCheck(reg_ptr, info->opt_flags);
   LIR* high_code_point_branch =
       rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
   // NOTE: not a safepoint
@@ -1337,7 +1357,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
   LoadValueDirectFixed(rl_cmp, reg_cmp);
   RegStorage r_tgt = (cu_->instruction_set != kX86) ?
       LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : RegStorage::InvalidReg();
-  GenNullCheck(reg_this, info->opt_flags);
+  GenExplicitNullCheck(reg_this, info->opt_flags);
   info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
   // TUNING: check if rl_cmp.s_reg_low is already null checked
   LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index e81a037c7b..cd3dadbc74 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -446,6 +446,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       GenNullCheck(rl_src[0].reg, opt_flags);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
       LoadWordDisp(rl_src[0].reg, len_offset, rl_result.reg);
+      MarkPossibleNullPointerException(opt_flags);
       StoreValue(rl_dest, rl_result);
       break;
 
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 8614151698..10f431f938 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -564,6 +564,7 @@ class Mir2Lir : public Backend {
     void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
     LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind);
     LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
+    LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
    LIR* GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2, ThrowKind kind);
     void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2,
                              LIR* taken, LIR* fall_through);
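Background note on the pattern the hunks above keep repeating: with implicit null checks, the backend stops emitting a compare-and-branch before the first use of an object and instead lets the first load through the (possibly null) reference fault; MarkPossibleNullPointerException records that instruction as a safepoint so the runtime's SIGSEGV handling can turn the fault into a NullPointerException. The sketch below is only a user-space analogy of that idea, not ART code: every name in it (Object, LoadMonitorImplicitCheck, g_fault_return) is invented, and it assumes Linux/POSIX signal behavior.

// Illustrative sketch only -- not ART code. Shows the "dereference first,
// recover in a SIGSEGV handler" idea behind implicit null checks.
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

namespace {

sigjmp_buf g_fault_return;  // Where to resume after a "null pointer fault".

void SegvHandler(int /*signum*/) {
  // A real runtime would map the faulting PC back to a recorded safepoint
  // (what MarkPossibleNullPointerException sets up) and throw
  // NullPointerException; here we just unwind back into main.
  siglongjmp(g_fault_return, 1);
}

struct Object {
  int monitor;  // Stand-in for the field at mirror::Object::MonitorOffset().
};

// Volatile pointer so the compiler cannot fold the null dereference away.
Object* volatile g_obj = nullptr;

// "Generated code": loads a field with no explicit null test in front of it.
int LoadMonitorImplicitCheck() {
  return g_obj->monitor;  // Faults when g_obj is null.
}

}  // namespace

int main() {
  struct sigaction sa = {};
  sa.sa_handler = SegvHandler;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGSEGV, &sa, nullptr);

  Object on_heap = {42};
  g_obj = &on_heap;
  printf("non-null load: %d\n", LoadMonitorImplicitCheck());

  g_obj = nullptr;
  if (sigsetjmp(g_fault_return, 1) == 0) {
    printf("null load: %d\n", LoadMonitorImplicitCheck());
  } else {
    printf("implicit null check fired (runtime would throw NullPointerException)\n");
  }
  return 0;
}

In the diff this is why GenDalvikArgsNoRange/GenDalvikArgsRange allocate a temp and emit LoadWordDisp(TargetReg(kArg1), 0, tmp) when explicit checks are off: if nothing else is guaranteed to load through the receiver, a probe load is emitted just so the fault can occur at a known safepoint.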
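The GenEntrySequence hunk applies the same idea to stack overflow: instead of moving sp by the frame size and loading through it, the new prologue probes sp - Thread::kStackOverflowReservedBytes into the scratch register r12, marks that load as a possible stack-overflow fault site, and only then commits the frame. The toy sketch below just prints the sequence that hunk emits; the reserved-bytes constant is an illustrative placeholder, not the real value.

// Toy emitter sketch -- prints the ARM sequence of the new implicit
// stack-overflow probe; constant and function names are illustrative.
#include <cstdio>

// Assumed placeholder; the real value is Thread::kStackOverflowReservedBytes.
constexpr unsigned kStackOverflowReservedBytes = 8 * 1024;

void EmitImplicitOverflowProbe(unsigned frame_size_without_spills) {
  // OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, Thread::kStackOverflowReservedBytes)
  std::printf("  sub r12, sp, #%u\n", kStackOverflowReservedBytes);
  // LoadWordDisp(rs_r12, 0, rs_r12): faults if the address lies in the redzone.
  std::printf("  ldr r12, [r12, #0]   @ probe; faults on overflow\n");
  // MarkPossibleStackOverflowException() records this PC as a fault site.
  std::printf("  @ <-- stack-overflow safepoint recorded here\n");
  // OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills): commit the frame.
  std::printf("  sub sp, sp, #%u\n", frame_size_without_spills);
}

int main() {
  EmitImplicitOverflowProbe(64);
  return 0;
}

Because the probe distance is now the fixed reserved size rather than the frame size, the new comment in the hunk warns that kStackOverflowReservedBytes must stay encodable as a sub immediate, or the backend falls back to a temp and the prologue grows.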