diff options
126 files changed, 1976 insertions, 1642 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index a34058864c..ee51fcd92e 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -273,7 +273,7 @@ define define-art-gtest-rule-host .PHONY: $$(gtest_rule) $$(gtest_rule): $$(gtest_exe) $$(ART_GTEST_$(1)_HOST_DEPS) $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) $$(gtest_deps) - $(hide) ($$(call ART_TEST_SKIP,$$@) && $$< && $$(call ART_TEST_PASSED,$$@)) \ + $(hide) ($$(call ART_TEST_SKIP,$$@) && LD_PRELOAD=libsigchain$$(ART_HOST_SHLIB_EXTENSION) $$< && $$(call ART_TEST_PASSED,$$@)) \ || $$(call ART_TEST_FAILED,$$@) ART_TEST_HOST_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule) diff --git a/build/Android.oat.mk b/build/Android.oat.mk index 61a2cde534..10936a45d6 100644 --- a/build/Android.oat.mk +++ b/build/Android.oat.mk @@ -48,11 +48,6 @@ ifneq ($(HOST_PREFER_32_BIT),true) $(eval $(call create-core-oat-host-rules,2ND_)) endif -IMPLICIT_CHECKS_arm := null,stack -IMPLICIT_CHECKS_arm64 := none -IMPLICIT_CHECKS_x86 := none -IMPLICIT_CHECKS_x86_64 := none -IMPLICIT_CHECKS_mips := none define create-core-oat-target-rules $$($(1)TARGET_CORE_IMG_OUT): $$($(1)TARGET_CORE_DEX_FILES) $$(DEX2OATD_DEPENDENCY) @echo "target dex2oat: $$@ ($$?)" @@ -63,7 +58,6 @@ $$($(1)TARGET_CORE_IMG_OUT): $$($(1)TARGET_CORE_DEX_FILES) $$(DEX2OATD_DEPENDENC --oat-location=$$($(1)TARGET_CORE_OAT) --image=$$($(1)TARGET_CORE_IMG_OUT) \ --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(1)TARGET_ARCH) \ --instruction-set-features=$$($(1)TARGET_INSTRUCTION_SET_FEATURES) \ - --implicit-checks=$(IMPLICIT_CHECKS_$($(1)TARGET_ARCH)) \ --android-root=$$(PRODUCT_OUT)/system --include-patch-information # This "renaming" eases declaration in art/Android.mk diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index 01e17bf44f..6b96e929aa 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -190,7 +190,7 @@ 
void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { null_check_branch = nullptr; // No null check. } else { // If the null-check fails its handled by the slow-path to reduce exception related meta-data. - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL); } } @@ -261,7 +261,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { null_check_branch = nullptr; // No null check. } else { // If the null-check fails its handled by the slow-path to reduce exception related meta-data. - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL); } } @@ -362,7 +362,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { Thread::kStackOverflowSignalReservedBytes; bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes); if (!skip_overflow_check) { - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) { if (!large_frame) { /* Load stack limit */ LockTemp(rs_r12); @@ -401,7 +401,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { const int spill_size = spill_count * 4; const int frame_size_without_spills = frame_size_ - spill_size; if (!skip_overflow_check) { - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) { class StackOverflowSlowPath : public LIRSlowPath { public: StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace) diff --git a/compiler/dex/quick/arm64/call_arm64.cc 
b/compiler/dex/quick/arm64/call_arm64.cc index 8117c62954..d946ee39ef 100644 --- a/compiler/dex/quick/arm64/call_arm64.cc +++ b/compiler/dex/quick/arm64/call_arm64.cc @@ -202,7 +202,7 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { null_check_branch = nullptr; // No null check. } else { // If the null-check fails its handled by the slow-path to reduce exception related meta-data. - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL); } } @@ -250,7 +250,7 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { null_check_branch = nullptr; // No null check. } else { // If the null-check fails its handled by the slow-path to reduce exception related meta-data. - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL); } } @@ -338,7 +338,7 @@ void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) const int frame_size_without_spills = frame_size_ - spill_size; if (!skip_overflow_check) { - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) { if (!large_frame) { // Load stack limit LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_x9); @@ -371,7 +371,7 @@ void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) } if (!skip_overflow_check) { - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) { class StackOverflowSlowPath: public LIRSlowPath { public: StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) : 
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h index c62b4fe372..f51145cfe9 100644 --- a/compiler/dex/quick/arm64/codegen_arm64.h +++ b/compiler/dex/quick/arm64/codegen_arm64.h @@ -100,7 +100,7 @@ class Arm64Mir2Lir FINAL : public Mir2Lir { RegStorage r_src, OpSize size) OVERRIDE; void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE; LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, - int offset, int check_value, LIR* target) OVERRIDE; + int offset, int check_value, LIR* target, LIR** compare) OVERRIDE; // Required for target - register utilities. RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE; diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc index 6dc4a7ab51..2b78e81f46 100644 --- a/compiler/dex/quick/arm64/int_arm64.cc +++ b/compiler/dex/quick/arm64/int_arm64.cc @@ -296,7 +296,8 @@ LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_ LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, int offset, int check_value, - LIR* target) { + LIR* target, LIR** compare) { + DCHECK(compare == nullptr); // It is possible that temp register is 64-bit. (ArgReg or RefReg) // Always compare 32-bit value no matter what temp_reg is. 
if (temp_reg.Is64Bit()) { diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc index c3bbb4497b..f1dc77ad28 100644 --- a/compiler/dex/quick/arm64/target_arm64.cc +++ b/compiler/dex/quick/arm64/target_arm64.cc @@ -1191,7 +1191,7 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); if (pcrLabel) { - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags); } else { *pcrLabel = nullptr; diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 60d25890d4..463f277e54 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -1172,9 +1172,12 @@ bool Mir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) { } LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, - int offset, int check_value, LIR* target) { + int offset, int check_value, LIR* target, LIR** compare) { // Handle this for architectures that can't compare to memory. - Load32Disp(base_reg, offset, temp_reg); + LIR* inst = Load32Disp(base_reg, offset, temp_reg); + if (compare != nullptr) { + *compare = inst; + } LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target); return branch; } diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 5d1b1fb072..502859a72b 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -176,7 +176,7 @@ LIR* Mir2Lir::GenNullCheck(RegStorage reg) { /* Perform null-check on a register. 
*/ LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) { - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { return GenExplicitNullCheck(m_reg, opt_flags); } return nullptr; @@ -191,16 +191,17 @@ LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) { } void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) { - if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { return; } + // Insert after last instruction. MarkSafepointPC(last_lir_insn_); } } void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) { - if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { return; } @@ -209,13 +210,13 @@ void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) { } void Mir2Lir::MarkPossibleStackOverflowException() { - if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) { + if (cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) { MarkSafepointPC(last_lir_insn_); } } void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) { - if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { return; } @@ -622,7 +623,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, LockTemp(r_tmp); LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base, 
mirror::Class::StatusOffset().Int32Value(), - mirror::Class::kStatusInitialized, NULL); + mirror::Class::kStatusInitialized, nullptr, nullptr); LIR* cont = NewLIR0(kPseudoTargetLabel); AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont, @@ -718,7 +719,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, LockTemp(r_tmp); LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base, mirror::Class::StatusOffset().Int32Value(), - mirror::Class::kStatusInitialized, NULL); + mirror::Class::kStatusInitialized, nullptr, nullptr); LIR* cont = NewLIR0(kPseudoTargetLabel); AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont, @@ -2196,7 +2197,7 @@ class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath { /* Check if we need to check for pending suspend request */ void Mir2Lir::GenSuspendTest(int opt_flags) { - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) { if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { return; } @@ -2216,7 +2217,7 @@ void Mir2Lir::GenSuspendTest(int opt_flags) { /* Check if we need to check for pending suspend request */ void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) { - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) { if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { OpUnconditionalBranch(target); return; diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 9dedeae071..8ce6e1a206 100755 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -957,21 +957,35 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, type, skip_this); if (pcrLabel) { - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if 
(!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags); } else { *pcrLabel = nullptr; + if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && + (info->opt_flags & MIR_IGNORE_NULL_CHECK)) { + return call_state; + } // In lieu of generating a check for kArg1 being null, we need to // perform a load when doing implicit checks. - RegStorage tmp = AllocTemp(); - Load32Disp(TargetReg(kArg1, kRef), 0, tmp); - MarkPossibleNullPointerException(info->opt_flags); - FreeTemp(tmp); + GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags); } } return call_state; } +// Default implementation of implicit null pointer check. +// Overridden by arch specific as necessary. +void Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) { + if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { + return; + } + RegStorage tmp = AllocTemp(); + Load32Disp(reg, 0, tmp); + MarkPossibleNullPointerException(opt_flags); + FreeTemp(tmp); +} + + /* * May have 0+ arguments (also used for jumbo). Note that * source virtual registers may be in physical registers, so may @@ -1186,16 +1200,17 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); if (pcrLabel) { - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags); } else { *pcrLabel = nullptr; + if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && + (info->opt_flags & MIR_IGNORE_NULL_CHECK)) { + return call_state; + } // In lieu of generating a check for kArg1 being null, we need to // perform a load when doing implicit checks. 
- RegStorage tmp = AllocTemp(); - Load32Disp(TargetReg(kArg1, kRef), 0, tmp); - MarkPossibleNullPointerException(info->opt_flags); - FreeTemp(tmp); + GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags); } } return call_state; @@ -1353,11 +1368,14 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) { // On x86, we can compare to memory directly // Set up a launch pad to allow retry in case of bounds violation */ if (rl_idx.is_const) { + LIR* comparison; range_check_branch = OpCmpMemImmBranch( kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset, - mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr); - } else { + mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison); + MarkPossibleNullPointerExceptionAfter(0, comparison); + } else { OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset); + MarkPossibleNullPointerException(0); range_check_branch = OpCondBranch(kCondUge, nullptr); } } diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index b1f0b13e81..ed7fcdd556 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -18,7 +18,6 @@ #include "dex/dataflow_iterator-inl.h" #include "dex/quick/dex_file_method_inliner.h" #include "mir_to_lir-inl.h" -#include "object_utils.h" #include "thread-inl.h" namespace art { diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index 33e980fec7..5b5663353c 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -838,6 +838,7 @@ class Mir2Lir : public Backend { LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind); LIR* GenNullCheck(RegStorage m_reg, int opt_flags); LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags); + virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags); void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2, LIR* taken, LIR* fall_through); void 
GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, @@ -1148,10 +1149,12 @@ class Mir2Lir : public Backend { * @param base_reg The register holding the base address. * @param offset The offset from the base. * @param check_value The immediate to compare to. + * @param target branch target (or nullptr) + * @param compare output for getting LIR for comparison (or nullptr) * @returns The branch instruction that was generated. */ virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, - int offset, int check_value, LIR* target); + int offset, int check_value, LIR* target, LIR** compare); // Required for target - codegen helpers. virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc index 8df5b6dfdf..ebe3f0a9fc 100644 --- a/compiler/dex/quick/x86/assemble_x86.cc +++ b/compiler/dex/quick/x86/assemble_x86.cc @@ -271,21 +271,22 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0, { kX86Shrd64RRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01 | SETS_CCODES, { REX_W, 0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd64RRI", "!0r,!1r,!2d" }, { kX86Shrd64MRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { REX_W, 0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd64MRI", "[!0r+!1d],!2r,!3d" }, - { kX86Test8RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8RI", "!0r,!1d" }, - { kX86Test8MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8MI", "[!0r+!1d],!2d" }, - { kX86Test8AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" }, - { kX86Test16RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16RI", "!0r,!1d" }, - { kX86Test16MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 
0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16MI", "[!0r+!1d],!2d" }, - { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16AI", "[!0r+!1r<<!2d+!3d],!4d" }, - { kX86Test32RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32RI", "!0r,!1d" }, - { kX86Test32MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32MI", "[!0r+!1d],!2d" }, - { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" }, + { kX86Test8RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8RI", "!0r,!1d" }, + { kX86Test8MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8MI", "[!0r+!1d],!2d" }, + { kX86Test8AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" }, + { kX86Test16RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16RI", "!0r,!1d" }, + { kX86Test16MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16MI", "[!0r+!1d],!2d" }, + { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16AI", "[!0r+!1r<<!2d+!3d],!4d" }, + { kX86Test32RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32RI", "!0r,!1d" }, + { kX86Test32MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32MI", "[!0r+!1d],!2d" }, + { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" }, { kX86Test64RI, kRegImm, IS_BINARY_OP | 
REG_USE0 | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64RI", "!0r,!1d" }, { kX86Test64MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64MI", "[!0r+!1d],!2d" }, { kX86Test64AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64AI", "[!0r+!1r<<!2d+!3d],!4d" }, - { kX86Test32RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RR", "!0r,!1r" }, + { kX86Test32RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RR", "!0r,!1r" }, { kX86Test64RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { REX_W, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test64RR", "!0r,!1r" }, + { kX86Test32RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RM", "!0r,[!1r+!1d]" }, #define UNARY_ENCODING_MAP(opname, modrm, is_store, sets_ccodes, \ reg, reg_kind, reg_flags, \ diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc index b7441d7649..40dd9cc105 100644 --- a/compiler/dex/quick/x86/call_x86.cc +++ b/compiler/dex/quick/x86/call_x86.cc @@ -222,15 +222,28 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { LockTemp(rs_rX86_ARG1); LockTemp(rs_rX86_ARG2); - /* Build frame, return address already on stack */ - stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set)); - /* * We can safely skip the stack overflow check if we're * a leaf *and* our frame size < fudge factor. */ - const bool skip_overflow_check = mir_graph_->MethodIsLeaf() && - !IsLargeFrame(frame_size_, cu_->target64 ? kX86_64 : kX86); + InstructionSet isa = cu_->target64 ? 
kX86_64 : kX86; + const bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, isa); + + // If we doing an implicit stack overflow check, perform the load immediately + // before the stack pointer is decremented and anything is saved. + if (!skip_overflow_check && + cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) { + // Implicit stack overflow check. + // test eax,[esp + -overflow] + int overflow = GetStackOverflowReservedBytes(isa); + NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rX86_SP.GetReg(), -overflow); + MarkPossibleStackOverflowException(); + } + + /* Build frame, return address already on stack */ + stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ - + GetInstructionSetPointerSize(cu_->instruction_set)); + NewLIR0(kPseudoMethodEntry); /* Spill core callee saves */ SpillCoreRegs(); @@ -260,25 +273,27 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { private: const size_t sp_displace_; }; - // TODO: for large frames we should do something like: - // spill ebp - // lea ebp, [esp + frame_size] - // cmp ebp, fs:[stack_end_] - // jcc stack_overflow_exception - // mov esp, ebp - // in case a signal comes in that's not using an alternate signal stack and the large frame may - // have moved us outside of the reserved area at the end of the stack. 
- // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath - if (cu_->target64) { - OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>()); - } else { - OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>()); - } - LIR* branch = OpCondBranch(kCondUlt, nullptr); - AddSlowPath( + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) { + // TODO: for large frames we should do something like: + // spill ebp + // lea ebp, [esp + frame_size] + // cmp ebp, fs:[stack_end_] + // jcc stack_overflow_exception + // mov esp, ebp + // in case a signal comes in that's not using an alternate signal stack and the large frame + // may have moved us outside of the reserved area at the end of the stack. + // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath + if (cu_->target64) { + OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>()); + } else { + OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>()); + } + LIR* branch = OpCondBranch(kCondUlt, nullptr); + AddSlowPath( new(arena_)StackOverflowSlowPath(this, branch, frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set))); + } } FlushIns(ArgLocs, rl_method); @@ -318,4 +333,14 @@ void X86Mir2Lir::GenSpecialExitSequence() { NewLIR0(kX86Ret); } +void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) { + if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { + return; + } + // Implicit null pointer check. 
+ // test eax,[arg1+0] + NewLIR3(kX86Test32RM, rs_rAX.GetReg(), reg.GetReg(), 0); + MarkPossibleNullPointerException(opt_flags); +} + } // namespace art diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h index 1e80247996..cf4521ae61 100644 --- a/compiler/dex/quick/x86/codegen_x86.h +++ b/compiler/dex/quick/x86/codegen_x86.h @@ -85,6 +85,7 @@ class X86Mir2Lir : public Mir2Lir { LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement, RegStorage r_src, OpSize size) OVERRIDE; void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg); + void GenImplicitNullCheck(RegStorage reg, int opt_flags); // Required for target - register utilities. RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE; @@ -803,9 +804,11 @@ class X86Mir2Lir : public Mir2Lir { * @param base_reg The register holding the base address. * @param offset The offset from the base. * @param check_value The immediate to compare to. + * @param target branch target (or nullptr) + * @param compare output for getting LIR for comparison (or nullptr) */ LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, - int offset, int check_value, LIR* target); + int offset, int check_value, LIR* target, LIR** compare); /* * Can this operation be using core registers without temporaries? diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index 2f27482e55..3f1df189de 100755 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -1098,6 +1098,7 @@ void X86Mir2Lir::GenArrayBoundsCheck(RegStorage index, }; OpRegMem(kOpCmp, index, array_base, len_offset); + MarkPossibleNullPointerException(0); LIR* branch = OpCondBranch(kCondUge, nullptr); AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, array_base, len_offset)); @@ -1140,6 +1141,7 @@ void X86Mir2Lir::GenArrayBoundsCheck(int32_t index, }; NewLIR3(IS_SIMM8(index) ? 
kX86Cmp32MI8 : kX86Cmp32MI, array_base.GetReg(), len_offset, index); + MarkPossibleNullPointerException(0); LIR* branch = OpCondBranch(kCondLs, nullptr); AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, array_base, len_offset)); diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc index 1e4494bd30..06001d7f9d 100755 --- a/compiler/dex/quick/x86/target_x86.cc +++ b/compiler/dex/quick/x86/target_x86.cc @@ -884,8 +884,12 @@ RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) { } LIR* X86Mir2Lir::CheckSuspendUsingLoad() { - LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86"; - return nullptr; + // First load the pointer in fs:[suspend-trigger] into eax + // Then use a test instruction to indirect via that address. + NewLIR2(kX86Mov32RT, rs_rAX.GetReg(), cu_->target64 ? + Thread::ThreadSuspendTriggerOffset<8>().Int32Value() : + Thread::ThreadSuspendTriggerOffset<4>().Int32Value()); + return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0); } uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { @@ -1250,6 +1254,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { // Is the string non-NULL? LoadValueDirectFixed(rl_obj, rs_rDX); GenNullCheck(rs_rDX, info->opt_flags); + // uint32_t opt_flags = info->opt_flags; info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked. // Does the character fit in 16 bits? @@ -1276,12 +1281,20 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { // Character is in EAX. // Object pointer is in EDX. + // Compute the number of words to search in to rCX. + Load32Disp(rs_rDX, count_offset, rs_rCX); + + // Possible signal here due to null pointer dereference. + // Note that the signal handler will expect the top word of + // the stack to be the ArtMethod*. If the PUSH edi instruction + // below is ahead of the load above then this will not be true + // and the signal handler will not work. 
+ MarkPossibleNullPointerException(0); + // We need to preserve EDI, but have no spare registers, so push it on the stack. // We have to remember that all stack addresses after this are offset by sizeof(EDI). NewLIR1(kX86Push32R, rs_rDI.GetReg()); - // Compute the number of words to search in to rCX. - Load32Disp(rs_rDX, count_offset, rs_rCX); LIR *length_compare = nullptr; int start_value = 0; bool is_index_on_stack = false; @@ -2678,7 +2691,7 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); if (pcrLabel) { - if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { + if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags); } else { *pcrLabel = nullptr; diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc index 045e58e5a7..047a65d585 100644 --- a/compiler/dex/quick/x86/utility_x86.cc +++ b/compiler/dex/quick/x86/utility_x86.cc @@ -684,9 +684,9 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int } else { DCHECK(!r_dest.IsFloat()); // Make sure we're not still using a pair here. 
if (r_base == r_dest.GetLow()) { - load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(), + load = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(), displacement + HIWORD_OFFSET); - load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET); + load2 = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET); } else { load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET); load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(), @@ -712,16 +712,16 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int if (r_dest.GetHigh() == r_index) { // We can't use either register for the first load. RegStorage temp = AllocTemp(); - load2 = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale, + load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale, displacement + HIWORD_OFFSET); - load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale, + load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale, displacement + LOWORD_OFFSET); OpRegCopy(r_dest.GetHigh(), temp); FreeTemp(temp); } else { - load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale, + load = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale, displacement + HIWORD_OFFSET); - load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale, + load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale, displacement + LOWORD_OFFSET); } } else { @@ -744,6 +744,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int } } + // Always return first load generated as this might cause a fault if base is nullptr. 
return load; } @@ -878,9 +879,12 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r } LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, - int offset, int check_value, LIR* target) { - NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(), offset, - check_value); + int offset, int check_value, LIR* target, LIR** compare) { + LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(), + offset, check_value); + if (compare != nullptr) { + *compare = inst; + } LIR* branch = OpCondBranch(cond, target); return branch; } diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h index 56573810ca..17f9b916d4 100644 --- a/compiler/dex/quick/x86/x86_lir.h +++ b/compiler/dex/quick/x86/x86_lir.h @@ -499,6 +499,7 @@ enum X86OpCode { UnaryOpcode(kX86Test, RI, MI, AI), kX86Test32RR, kX86Test64RR, + kX86Test32RM, UnaryOpcode(kX86Not, R, M, A), UnaryOpcode(kX86Neg, R, M, A), UnaryOpcode(kX86Mul, DaR, DaM, DaA), diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h index e175d37914..89295f2786 100644 --- a/compiler/driver/compiler_driver-inl.h +++ b/compiler/driver/compiler_driver-inl.h @@ -20,12 +20,10 @@ #include "compiler_driver.h" #include "dex/compiler_ir.h" -#include "mirror/art_field.h" +#include "field_helper.h" #include "mirror/art_field-inl.h" -#include "mirror/art_method.h" #include "mirror/art_method-inl.h" #include "mirror/class_loader.h" -#include "mirror/dex_cache.h" #include "mirror/dex_cache-inl.h" #include "mirror/art_field-inl.h" #include "scoped_thread_state_change.h" diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 7014c3b3f8..9e88c8d875 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -35,7 +35,7 @@ #include "dex/quick/dex_file_method_inliner.h" #include "driver/compiler_options.h" 
#include "jni_internal.h" -#include "object_utils.h" +#include "object_lock.h" #include "profiler.h" #include "runtime.h" #include "gc/accounting/card_table-inl.h" diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h index 92b2feeb7f..c0f91d1646 100644 --- a/compiler/driver/compiler_options.h +++ b/compiler/driver/compiler_options.h @@ -56,9 +56,9 @@ class CompilerOptions { include_patch_information_(kDefaultIncludePatchInformation), top_k_profile_threshold_(kDefaultTopKProfileThreshold), include_debug_symbols_(kDefaultIncludeDebugSymbols), - explicit_null_checks_(true), - explicit_so_checks_(true), - explicit_suspend_checks_(true) + implicit_null_checks_(false), + implicit_so_checks_(false), + implicit_suspend_checks_(false) #ifdef ART_SEA_IR_MODE , sea_ir_mode_(false) #endif @@ -74,9 +74,9 @@ class CompilerOptions { bool include_patch_information, double top_k_profile_threshold, bool include_debug_symbols, - bool explicit_null_checks, - bool explicit_so_checks, - bool explicit_suspend_checks + bool implicit_null_checks, + bool implicit_so_checks, + bool implicit_suspend_checks #ifdef ART_SEA_IR_MODE , bool sea_ir_mode #endif @@ -91,9 +91,9 @@ class CompilerOptions { include_patch_information_(include_patch_information), top_k_profile_threshold_(top_k_profile_threshold), include_debug_symbols_(include_debug_symbols), - explicit_null_checks_(explicit_null_checks), - explicit_so_checks_(explicit_so_checks), - explicit_suspend_checks_(explicit_suspend_checks) + implicit_null_checks_(implicit_null_checks), + implicit_so_checks_(implicit_so_checks), + implicit_suspend_checks_(implicit_suspend_checks) #ifdef ART_SEA_IR_MODE , sea_ir_mode_(sea_ir_mode) #endif @@ -160,28 +160,28 @@ class CompilerOptions { return include_debug_symbols_; } - bool GetExplicitNullChecks() const { - return explicit_null_checks_; + bool GetImplicitNullChecks() const { + return implicit_null_checks_; } - void SetExplicitNullChecks(bool new_val) { - 
explicit_null_checks_ = new_val; + void SetImplicitNullChecks(bool new_val) { + implicit_null_checks_ = new_val; } - bool GetExplicitStackOverflowChecks() const { - return explicit_so_checks_; + bool GetImplicitStackOverflowChecks() const { + return implicit_so_checks_; } - void SetExplicitStackOverflowChecks(bool new_val) { - explicit_so_checks_ = new_val; + void SetImplicitStackOverflowChecks(bool new_val) { + implicit_so_checks_ = new_val; } - bool GetExplicitSuspendChecks() const { - return explicit_suspend_checks_; + bool GetImplicitSuspendChecks() const { + return implicit_suspend_checks_; } - void SetExplicitSuspendChecks(bool new_val) { - explicit_suspend_checks_ = new_val; + void SetImplicitSuspendChecks(bool new_val) { + implicit_suspend_checks_ = new_val; } #ifdef ART_SEA_IR_MODE @@ -208,9 +208,9 @@ class CompilerOptions { // When using a profile file only the top K% of the profiled samples will be compiled. double top_k_profile_threshold_; bool include_debug_symbols_; - bool explicit_null_checks_; - bool explicit_so_checks_; - bool explicit_suspend_checks_; + bool implicit_null_checks_; + bool implicit_so_checks_; + bool implicit_suspend_checks_; #ifdef ART_SEA_IR_MODE bool sea_ir_mode_; #endif diff --git a/compiler/image_test.cc b/compiler/image_test.cc index 982e6d4f2c..fe4fcd4177 100644 --- a/compiler/image_test.cc +++ b/compiler/image_test.cc @@ -25,7 +25,6 @@ #include "elf_fixup.h" #include "gc/space/image_space.h" #include "image_writer.h" -#include "implicit_check_options.h" #include "lock_word.h" #include "mirror/object-inl.h" #include "oat_writer.h" @@ -81,8 +80,6 @@ TEST_F(ImageTest, WriteRead) { t.NewTiming("WriteElf"); ScopedObjectAccess soa(Thread::Current()); SafeMap<std::string, std::string> key_value_store; - key_value_store.Put(ImplicitCheckOptions::kImplicitChecksOatHeaderKey, - ImplicitCheckOptions::Serialize(true, true, true)); OatWriter oat_writer(class_linker->GetBootClassPath(), 0, 0, compiler_driver_.get(), &timings, 
&key_value_store); bool success = compiler_driver_->WriteElf(GetTestAndroidRoot(), @@ -144,9 +141,6 @@ TEST_F(ImageTest, WriteRead) { std::string image("-Ximage:"); image.append(image_location.GetFilename()); options.push_back(std::make_pair(image.c_str(), reinterpret_cast<void*>(NULL))); - // Turn off implicit checks for this runtime, as we compiled the image with them off. - std::string explicit_checks("-implicit-checks:none"); - options.push_back(std::make_pair(explicit_checks.c_str(), reinterpret_cast<void*>(NULL))); if (!Runtime::Create(options, false)) { LOG(FATAL) << "Failed to create runtime"; diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 38b4100ebe..8ef2964270 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -51,7 +51,6 @@ #include "mirror/string-inl.h" #include "oat.h" #include "oat_file.h" -#include "object_utils.h" #include "runtime.h" #include "scoped_thread_state_change.h" #include "handle_scope-inl.h" diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 8aa7b76980..84f0b3c001 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -20,7 +20,6 @@ #include "dex/quick/dex_file_to_method_inliner_map.h" #include "dex/quick_compiler_callbacks.h" #include "entrypoints/quick/quick_entrypoints.h" -#include "implicit_check_options.h" #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" @@ -118,8 +117,6 @@ TEST_F(OatTest, WriteRead) { ScratchFile tmp; SafeMap<std::string, std::string> key_value_store; key_value_store.Put(OatHeader::kImageLocationKey, "lue.art"); - key_value_store.Put(ImplicitCheckOptions::kImplicitChecksOatHeaderKey, - ImplicitCheckOptions::Serialize(true, true, true)); OatWriter oat_writer(class_linker->GetBootClassPath(), 42U, 4096U, diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 3f2f925a02..6d861d4bfe 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -49,7 +49,6 @@ #include "gc/space/image_space.h" #include 
"gc/space/space-inl.h" #include "image_writer.h" -#include "implicit_check_options.h" #include "leb128.h" #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" @@ -57,7 +56,6 @@ #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "oat_writer.h" -#include "object_utils.h" #include "os.h" #include "runtime.h" #include "ScopedLocalRef.h" @@ -743,20 +741,6 @@ void ParseDouble(const std::string& option, char after_char, *parsed_value = value; } -void CheckExplicitCheckOptions(InstructionSet isa, bool* explicit_null_checks, - bool* explicit_so_checks, bool* explicit_suspend_checks) { - switch (isa) { - case kArm: - case kThumb2: - break; // All checks implemented, leave as is. - - default: // No checks implemented, reset all to explicit checks. - *explicit_null_checks = true; - *explicit_so_checks = true; - *explicit_suspend_checks = true; - } -} - static int dex2oat(int argc, char** argv) { #if defined(__linux__) && defined(__arm__) int major, minor; @@ -840,10 +824,10 @@ static int dex2oat(int argc, char** argv) { bool watch_dog_enabled = !kIsTargetBuild; bool generate_gdb_information = kIsDebugBuild; - bool explicit_null_checks = true; - bool explicit_so_checks = true; - bool explicit_suspend_checks = true; - bool has_explicit_checks_options = false; + // Checks are all explicit until we know the architecture. 
+ bool implicit_null_checks = false; + bool implicit_so_checks = false; + bool implicit_suspend_checks = false; for (int i = 0; i < argc; i++) { const StringPiece option(argv[i]); @@ -1020,31 +1004,6 @@ static int dex2oat(int argc, char** argv) { } else if (option.starts_with("--dump-cfg-passes=")) { std::string dump_passes = option.substr(strlen("--dump-cfg-passes=")).data(); PassDriverMEOpts::SetDumpPassList(dump_passes); - } else if (option.starts_with("--implicit-checks=")) { - std::string checks = option.substr(strlen("--implicit-checks=")).data(); - std::vector<std::string> checkvec; - Split(checks, ',', checkvec); - for (auto& str : checkvec) { - std::string val = Trim(str); - if (val == "none") { - explicit_null_checks = true; - explicit_so_checks = true; - explicit_suspend_checks = true; - } else if (val == "null") { - explicit_null_checks = false; - } else if (val == "suspend") { - explicit_suspend_checks = false; - } else if (val == "stack") { - explicit_so_checks = false; - } else if (val == "all") { - explicit_null_checks = false; - explicit_so_checks = false; - explicit_suspend_checks = false; - } else { - Usage("--implicit-checks passed non-recognized value %s", val.c_str()); - } - } - has_explicit_checks_options = true; } else if (option == "--include-patch-information") { include_patch_information = true; explicit_include_patch_information = true; @@ -1177,14 +1136,25 @@ static int dex2oat(int argc, char** argv) { Usage("Unknown --compiler-filter value %s", compiler_filter_string); } - ImplicitCheckOptions::CheckISASupport(instruction_set, &explicit_null_checks, &explicit_so_checks, - &explicit_suspend_checks); - if (!explicit_include_patch_information) { include_patch_information = (compiler_kind == Compiler::kQuick && CompilerOptions::kDefaultIncludePatchInformation); } + // Set the compilation target's implicit checks options. 
+ switch (instruction_set) { + case kArm: + case kThumb2: + case kX86: + implicit_null_checks = true; + implicit_so_checks = true; + break; + + default: + // Defaults are correct. + break; + } + std::unique_ptr<CompilerOptions> compiler_options(new CompilerOptions(compiler_filter, huge_method_threshold, large_method_threshold, @@ -1195,9 +1165,9 @@ static int dex2oat(int argc, char** argv) { include_patch_information, top_k_profile_threshold, include_debug_symbols, - explicit_null_checks, - explicit_so_checks, - explicit_suspend_checks + implicit_null_checks, + implicit_so_checks, + implicit_suspend_checks #ifdef ART_SEA_IR_MODE , compiler_options.sea_ir_ = true; @@ -1248,7 +1218,7 @@ static int dex2oat(int argc, char** argv) { } std::unique_ptr<VerificationResults> verification_results(new VerificationResults( - compiler_options.get())); + compiler_options.get())); DexFileToMethodInlinerMap method_inliner_map; QuickCompilerCallbacks callbacks(verification_results.get(), &method_inliner_map); runtime_options.push_back(std::make_pair("compilercallbacks", &callbacks)); @@ -1271,18 +1241,6 @@ static int dex2oat(int argc, char** argv) { } std::unique_ptr<Dex2Oat> dex2oat(p_dex2oat); - // TODO: Not sure whether it's a good idea to allow anything else but the runtime option in - // this case at all, as we'll have to throw away produced code for a mismatch. - if (!has_explicit_checks_options) { - if (ImplicitCheckOptions::CheckForCompiling(kRuntimeISA, instruction_set, &explicit_null_checks, - &explicit_so_checks, &explicit_suspend_checks)) { - compiler_options->SetExplicitNullChecks(explicit_null_checks); - compiler_options->SetExplicitStackOverflowChecks(explicit_so_checks); - compiler_options->SetExplicitSuspendChecks(explicit_suspend_checks); - } - } - - // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, // give it away now so that we don't starve GC. 
Thread* self = Thread::Current(); @@ -1385,14 +1343,6 @@ static int dex2oat(int argc, char** argv) { std::unique_ptr<SafeMap<std::string, std::string> > key_value_store( new SafeMap<std::string, std::string>()); - // Insert implicit check options. - key_value_store->Put(ImplicitCheckOptions::kImplicitChecksOatHeaderKey, - ImplicitCheckOptions::Serialize(compiler_options->GetExplicitNullChecks(), - compiler_options-> - GetExplicitStackOverflowChecks(), - compiler_options-> - GetExplicitSuspendChecks())); - // Insert some compiler things. std::ostringstream oss; for (int i = 0; i < argc; ++i) { diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index 88340222cf..b8f20f3650 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -29,6 +29,7 @@ #include "dex_file-inl.h" #include "dex_instruction.h" #include "disassembler.h" +#include "field_helper.h" #include "gc_map.h" #include "gc/space/image_space.h" #include "gc/space/large_object_space.h" @@ -45,7 +46,6 @@ #include "noop_compiler_callbacks.h" #include "oat.h" #include "oat_file-inl.h" -#include "object_utils.h" #include "os.h" #include "runtime.h" #include "safe_map.h" diff --git a/runtime/Android.mk b/runtime/Android.mk index 9d42eeae21..d2fc2298e2 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -19,291 +19,294 @@ LOCAL_PATH := $(call my-dir) include art/build/Android.common_build.mk LIBART_COMMON_SRC_FILES := \ - atomic.cc.arm \ - barrier.cc \ - base/allocator.cc \ - base/bit_vector.cc \ - base/hex_dump.cc \ - base/logging.cc \ - base/mutex.cc \ - base/scoped_flock.cc \ - base/stringpiece.cc \ - base/stringprintf.cc \ - base/timing_logger.cc \ - base/unix_file/fd_file.cc \ - base/unix_file/mapped_file.cc \ - base/unix_file/null_file.cc \ - base/unix_file/random_access_file_utils.cc \ - base/unix_file/string_file.cc \ - check_jni.cc \ - class_linker.cc \ - common_throws.cc \ - debugger.cc \ - dex_file.cc \ - dex_file_verifier.cc \ - dex_instruction.cc \ - elf_file.cc \ - gc/allocator/dlmalloc.cc 
\ - gc/allocator/rosalloc.cc \ - gc/accounting/card_table.cc \ - gc/accounting/gc_allocator.cc \ - gc/accounting/heap_bitmap.cc \ - gc/accounting/mod_union_table.cc \ - gc/accounting/remembered_set.cc \ - gc/accounting/space_bitmap.cc \ - gc/collector/concurrent_copying.cc \ - gc/collector/garbage_collector.cc \ - gc/collector/immune_region.cc \ - gc/collector/mark_compact.cc \ - gc/collector/mark_sweep.cc \ - gc/collector/partial_mark_sweep.cc \ - gc/collector/semi_space.cc \ - gc/collector/sticky_mark_sweep.cc \ - gc/gc_cause.cc \ - gc/heap.cc \ - gc/reference_processor.cc \ - gc/reference_queue.cc \ - gc/space/bump_pointer_space.cc \ - gc/space/dlmalloc_space.cc \ - gc/space/image_space.cc \ - gc/space/large_object_space.cc \ - gc/space/malloc_space.cc \ - gc/space/rosalloc_space.cc \ - gc/space/space.cc \ - gc/space/zygote_space.cc \ - hprof/hprof.cc \ - image.cc \ - indirect_reference_table.cc \ - instruction_set.cc \ - instrumentation.cc \ - intern_table.cc \ - interpreter/interpreter.cc \ - interpreter/interpreter_common.cc \ - interpreter/interpreter_switch_impl.cc \ - jdwp/jdwp_event.cc \ - jdwp/jdwp_expand_buf.cc \ - jdwp/jdwp_handler.cc \ - jdwp/jdwp_main.cc \ - jdwp/jdwp_request.cc \ - jdwp/jdwp_socket.cc \ - jdwp/object_registry.cc \ - jni_internal.cc \ - jobject_comparator.cc \ - mem_map.cc \ - memory_region.cc \ - mirror/art_field.cc \ - mirror/art_method.cc \ - mirror/array.cc \ - mirror/class.cc \ - mirror/dex_cache.cc \ - mirror/object.cc \ - mirror/reference.cc \ - mirror/stack_trace_element.cc \ - mirror/string.cc \ - mirror/throwable.cc \ - monitor.cc \ - native/dalvik_system_DexFile.cc \ - native/dalvik_system_VMDebug.cc \ - native/dalvik_system_VMRuntime.cc \ - native/dalvik_system_VMStack.cc \ - native/dalvik_system_ZygoteHooks.cc \ - native/java_lang_Class.cc \ - native/java_lang_DexCache.cc \ - native/java_lang_Object.cc \ - native/java_lang_Runtime.cc \ - native/java_lang_String.cc \ - native/java_lang_System.cc \ - 
native/java_lang_Thread.cc \ - native/java_lang_Throwable.cc \ - native/java_lang_VMClassLoader.cc \ - native/java_lang_ref_Reference.cc \ - native/java_lang_reflect_Array.cc \ - native/java_lang_reflect_Constructor.cc \ - native/java_lang_reflect_Field.cc \ - native/java_lang_reflect_Method.cc \ - native/java_lang_reflect_Proxy.cc \ - native/java_util_concurrent_atomic_AtomicLong.cc \ - native/org_apache_harmony_dalvik_ddmc_DdmServer.cc \ - native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc \ - native/sun_misc_Unsafe.cc \ - oat.cc \ - oat_file.cc \ - offsets.cc \ - os_linux.cc \ - parsed_options.cc \ - primitive.cc \ - quick_exception_handler.cc \ - quick/inline_method_analyser.cc \ - reference_table.cc \ - reflection.cc \ - runtime.cc \ - signal_catcher.cc \ - stack.cc \ - thread.cc \ - thread_list.cc \ - thread_pool.cc \ - throw_location.cc \ - trace.cc \ - transaction.cc \ - profiler.cc \ - fault_handler.cc \ - utf.cc \ - utils.cc \ - verifier/dex_gc_map.cc \ - verifier/instruction_flags.cc \ - verifier/method_verifier.cc \ - verifier/reg_type.cc \ - verifier/reg_type_cache.cc \ - verifier/register_line.cc \ - well_known_classes.cc \ - zip_archive.cc + atomic.cc.arm \ + barrier.cc \ + base/allocator.cc \ + base/bit_vector.cc \ + base/hex_dump.cc \ + base/logging.cc \ + base/mutex.cc \ + base/scoped_flock.cc \ + base/stringpiece.cc \ + base/stringprintf.cc \ + base/timing_logger.cc \ + base/unix_file/fd_file.cc \ + base/unix_file/mapped_file.cc \ + base/unix_file/null_file.cc \ + base/unix_file/random_access_file_utils.cc \ + base/unix_file/string_file.cc \ + check_jni.cc \ + class_linker.cc \ + common_throws.cc \ + debugger.cc \ + dex_file.cc \ + dex_file_verifier.cc \ + dex_instruction.cc \ + elf_file.cc \ + field_helper.cc \ + gc/allocator/dlmalloc.cc \ + gc/allocator/rosalloc.cc \ + gc/accounting/card_table.cc \ + gc/accounting/gc_allocator.cc \ + gc/accounting/heap_bitmap.cc \ + gc/accounting/mod_union_table.cc \ + gc/accounting/remembered_set.cc \ + 
gc/accounting/space_bitmap.cc \ + gc/collector/concurrent_copying.cc \ + gc/collector/garbage_collector.cc \ + gc/collector/immune_region.cc \ + gc/collector/mark_compact.cc \ + gc/collector/mark_sweep.cc \ + gc/collector/partial_mark_sweep.cc \ + gc/collector/semi_space.cc \ + gc/collector/sticky_mark_sweep.cc \ + gc/gc_cause.cc \ + gc/heap.cc \ + gc/reference_processor.cc \ + gc/reference_queue.cc \ + gc/space/bump_pointer_space.cc \ + gc/space/dlmalloc_space.cc \ + gc/space/image_space.cc \ + gc/space/large_object_space.cc \ + gc/space/malloc_space.cc \ + gc/space/rosalloc_space.cc \ + gc/space/space.cc \ + gc/space/zygote_space.cc \ + hprof/hprof.cc \ + image.cc \ + indirect_reference_table.cc \ + instruction_set.cc \ + instrumentation.cc \ + intern_table.cc \ + interpreter/interpreter.cc \ + interpreter/interpreter_common.cc \ + interpreter/interpreter_switch_impl.cc \ + jdwp/jdwp_event.cc \ + jdwp/jdwp_expand_buf.cc \ + jdwp/jdwp_handler.cc \ + jdwp/jdwp_main.cc \ + jdwp/jdwp_request.cc \ + jdwp/jdwp_socket.cc \ + jdwp/object_registry.cc \ + jni_internal.cc \ + jobject_comparator.cc \ + mem_map.cc \ + memory_region.cc \ + method_helper.cc \ + mirror/art_field.cc \ + mirror/art_method.cc \ + mirror/array.cc \ + mirror/class.cc \ + mirror/dex_cache.cc \ + mirror/object.cc \ + mirror/reference.cc \ + mirror/stack_trace_element.cc \ + mirror/string.cc \ + mirror/throwable.cc \ + monitor.cc \ + native/dalvik_system_DexFile.cc \ + native/dalvik_system_VMDebug.cc \ + native/dalvik_system_VMRuntime.cc \ + native/dalvik_system_VMStack.cc \ + native/dalvik_system_ZygoteHooks.cc \ + native/java_lang_Class.cc \ + native/java_lang_DexCache.cc \ + native/java_lang_Object.cc \ + native/java_lang_Runtime.cc \ + native/java_lang_String.cc \ + native/java_lang_System.cc \ + native/java_lang_Thread.cc \ + native/java_lang_Throwable.cc \ + native/java_lang_VMClassLoader.cc \ + native/java_lang_ref_Reference.cc \ + native/java_lang_reflect_Array.cc \ + 
native/java_lang_reflect_Constructor.cc \ + native/java_lang_reflect_Field.cc \ + native/java_lang_reflect_Method.cc \ + native/java_lang_reflect_Proxy.cc \ + native/java_util_concurrent_atomic_AtomicLong.cc \ + native/org_apache_harmony_dalvik_ddmc_DdmServer.cc \ + native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc \ + native/sun_misc_Unsafe.cc \ + oat.cc \ + oat_file.cc \ + object_lock.cc \ + offsets.cc \ + os_linux.cc \ + parsed_options.cc \ + primitive.cc \ + quick_exception_handler.cc \ + quick/inline_method_analyser.cc \ + reference_table.cc \ + reflection.cc \ + runtime.cc \ + signal_catcher.cc \ + stack.cc \ + thread.cc \ + thread_list.cc \ + thread_pool.cc \ + throw_location.cc \ + trace.cc \ + transaction.cc \ + profiler.cc \ + fault_handler.cc \ + utf.cc \ + utils.cc \ + verifier/dex_gc_map.cc \ + verifier/instruction_flags.cc \ + verifier/method_verifier.cc \ + verifier/reg_type.cc \ + verifier/reg_type_cache.cc \ + verifier/register_line.cc \ + well_known_classes.cc \ + zip_archive.cc LIBART_COMMON_SRC_FILES += \ - arch/context.cc \ - arch/memcmp16.cc \ - arch/arm/registers_arm.cc \ - arch/arm64/registers_arm64.cc \ - arch/x86/registers_x86.cc \ - arch/mips/registers_mips.cc \ - entrypoints/entrypoint_utils.cc \ - entrypoints/interpreter/interpreter_entrypoints.cc \ - entrypoints/jni/jni_entrypoints.cc \ - entrypoints/math_entrypoints.cc \ - entrypoints/portable/portable_alloc_entrypoints.cc \ - entrypoints/portable/portable_cast_entrypoints.cc \ - entrypoints/portable/portable_dexcache_entrypoints.cc \ - entrypoints/portable/portable_field_entrypoints.cc \ - entrypoints/portable/portable_fillarray_entrypoints.cc \ - entrypoints/portable/portable_invoke_entrypoints.cc \ - entrypoints/portable/portable_jni_entrypoints.cc \ - entrypoints/portable/portable_lock_entrypoints.cc \ - entrypoints/portable/portable_thread_entrypoints.cc \ - entrypoints/portable/portable_throw_entrypoints.cc \ - entrypoints/portable/portable_trampoline_entrypoints.cc \ - 
entrypoints/quick/quick_alloc_entrypoints.cc \ - entrypoints/quick/quick_cast_entrypoints.cc \ - entrypoints/quick/quick_deoptimization_entrypoints.cc \ - entrypoints/quick/quick_dexcache_entrypoints.cc \ - entrypoints/quick/quick_field_entrypoints.cc \ - entrypoints/quick/quick_fillarray_entrypoints.cc \ - entrypoints/quick/quick_instrumentation_entrypoints.cc \ - entrypoints/quick/quick_jni_entrypoints.cc \ - entrypoints/quick/quick_lock_entrypoints.cc \ - entrypoints/quick/quick_math_entrypoints.cc \ - entrypoints/quick/quick_thread_entrypoints.cc \ - entrypoints/quick/quick_throw_entrypoints.cc \ - entrypoints/quick/quick_trampoline_entrypoints.cc + arch/context.cc \ + arch/memcmp16.cc \ + arch/arm/registers_arm.cc \ + arch/arm64/registers_arm64.cc \ + arch/x86/registers_x86.cc \ + arch/mips/registers_mips.cc \ + entrypoints/entrypoint_utils.cc \ + entrypoints/interpreter/interpreter_entrypoints.cc \ + entrypoints/jni/jni_entrypoints.cc \ + entrypoints/math_entrypoints.cc \ + entrypoints/portable/portable_alloc_entrypoints.cc \ + entrypoints/portable/portable_cast_entrypoints.cc \ + entrypoints/portable/portable_dexcache_entrypoints.cc \ + entrypoints/portable/portable_field_entrypoints.cc \ + entrypoints/portable/portable_fillarray_entrypoints.cc \ + entrypoints/portable/portable_invoke_entrypoints.cc \ + entrypoints/portable/portable_jni_entrypoints.cc \ + entrypoints/portable/portable_lock_entrypoints.cc \ + entrypoints/portable/portable_thread_entrypoints.cc \ + entrypoints/portable/portable_throw_entrypoints.cc \ + entrypoints/portable/portable_trampoline_entrypoints.cc \ + entrypoints/quick/quick_alloc_entrypoints.cc \ + entrypoints/quick/quick_cast_entrypoints.cc \ + entrypoints/quick/quick_deoptimization_entrypoints.cc \ + entrypoints/quick/quick_dexcache_entrypoints.cc \ + entrypoints/quick/quick_field_entrypoints.cc \ + entrypoints/quick/quick_fillarray_entrypoints.cc \ + entrypoints/quick/quick_instrumentation_entrypoints.cc \ + 
entrypoints/quick/quick_jni_entrypoints.cc \ + entrypoints/quick/quick_lock_entrypoints.cc \ + entrypoints/quick/quick_math_entrypoints.cc \ + entrypoints/quick/quick_thread_entrypoints.cc \ + entrypoints/quick/quick_throw_entrypoints.cc \ + entrypoints/quick/quick_trampoline_entrypoints.cc # Source files that only compile with GCC. LIBART_GCC_ONLY_SRC_FILES := \ - interpreter/interpreter_goto_table_impl.cc + interpreter/interpreter_goto_table_impl.cc LIBART_TARGET_LDFLAGS := LIBART_HOST_LDFLAGS := LIBART_TARGET_SRC_FILES := \ - $(LIBART_COMMON_SRC_FILES) \ - base/logging_android.cc \ - jdwp/jdwp_adb.cc \ - monitor_android.cc \ - runtime_android.cc \ - thread_android.cc + $(LIBART_COMMON_SRC_FILES) \ + base/logging_android.cc \ + jdwp/jdwp_adb.cc \ + monitor_android.cc \ + runtime_android.cc \ + thread_android.cc LIBART_TARGET_SRC_FILES_arm := \ - arch/arm/context_arm.cc.arm \ - arch/arm/entrypoints_init_arm.cc \ - arch/arm/jni_entrypoints_arm.S \ - arch/arm/memcmp16_arm.S \ - arch/arm/portable_entrypoints_arm.S \ - arch/arm/quick_entrypoints_arm.S \ - arch/arm/arm_sdiv.S \ - arch/arm/thread_arm.cc \ - arch/arm/fault_handler_arm.cc + arch/arm/context_arm.cc.arm \ + arch/arm/entrypoints_init_arm.cc \ + arch/arm/jni_entrypoints_arm.S \ + arch/arm/memcmp16_arm.S \ + arch/arm/portable_entrypoints_arm.S \ + arch/arm/quick_entrypoints_arm.S \ + arch/arm/arm_sdiv.S \ + arch/arm/thread_arm.cc \ + arch/arm/fault_handler_arm.cc LIBART_TARGET_SRC_FILES_arm64 := \ - arch/arm64/context_arm64.cc \ - arch/arm64/entrypoints_init_arm64.cc \ - arch/arm64/jni_entrypoints_arm64.S \ - arch/arm64/memcmp16_arm64.S \ - arch/arm64/portable_entrypoints_arm64.S \ - arch/arm64/quick_entrypoints_arm64.S \ - arch/arm64/thread_arm64.cc \ - monitor_pool.cc \ - arch/arm64/fault_handler_arm64.cc + arch/arm64/context_arm64.cc \ + arch/arm64/entrypoints_init_arm64.cc \ + arch/arm64/jni_entrypoints_arm64.S \ + arch/arm64/memcmp16_arm64.S \ + arch/arm64/portable_entrypoints_arm64.S \ + 
arch/arm64/quick_entrypoints_arm64.S \ + arch/arm64/thread_arm64.cc \ + monitor_pool.cc \ + arch/arm64/fault_handler_arm64.cc LIBART_SRC_FILES_x86 := \ - arch/x86/context_x86.cc \ - arch/x86/entrypoints_init_x86.cc \ - arch/x86/jni_entrypoints_x86.S \ - arch/x86/portable_entrypoints_x86.S \ - arch/x86/quick_entrypoints_x86.S \ - arch/x86/thread_x86.cc \ - arch/x86/fault_handler_x86.cc + arch/x86/context_x86.cc \ + arch/x86/entrypoints_init_x86.cc \ + arch/x86/jni_entrypoints_x86.S \ + arch/x86/portable_entrypoints_x86.S \ + arch/x86/quick_entrypoints_x86.S \ + arch/x86/thread_x86.cc \ + arch/x86/fault_handler_x86.cc LIBART_TARGET_SRC_FILES_x86 := \ - $(LIBART_SRC_FILES_x86) + $(LIBART_SRC_FILES_x86) LIBART_SRC_FILES_x86_64 := \ - arch/x86_64/context_x86_64.cc \ - arch/x86_64/entrypoints_init_x86_64.cc \ - arch/x86_64/jni_entrypoints_x86_64.S \ - arch/x86_64/portable_entrypoints_x86_64.S \ - arch/x86_64/quick_entrypoints_x86_64.S \ - arch/x86_64/thread_x86_64.cc \ - monitor_pool.cc \ - arch/x86_64/fault_handler_x86_64.cc + arch/x86_64/context_x86_64.cc \ + arch/x86_64/entrypoints_init_x86_64.cc \ + arch/x86_64/jni_entrypoints_x86_64.S \ + arch/x86_64/portable_entrypoints_x86_64.S \ + arch/x86_64/quick_entrypoints_x86_64.S \ + arch/x86_64/thread_x86_64.cc \ + monitor_pool.cc \ + arch/x86_64/fault_handler_x86_64.cc LIBART_TARGET_SRC_FILES_x86_64 := \ - $(LIBART_SRC_FILES_x86_64) \ + $(LIBART_SRC_FILES_x86_64) \ LIBART_TARGET_SRC_FILES_mips := \ - arch/mips/context_mips.cc \ - arch/mips/entrypoints_init_mips.cc \ - arch/mips/jni_entrypoints_mips.S \ - arch/mips/memcmp16_mips.S \ - arch/mips/portable_entrypoints_mips.S \ - arch/mips/quick_entrypoints_mips.S \ - arch/mips/thread_mips.cc \ - arch/mips/fault_handler_mips.cc + arch/mips/context_mips.cc \ + arch/mips/entrypoints_init_mips.cc \ + arch/mips/jni_entrypoints_mips.S \ + arch/mips/memcmp16_mips.S \ + arch/mips/portable_entrypoints_mips.S \ + arch/mips/quick_entrypoints_mips.S \ + arch/mips/thread_mips.cc \ + 
arch/mips/fault_handler_mips.cc ifeq ($(TARGET_ARCH),mips64) $(info TODOMips64: $(LOCAL_PATH)/Android.mk Add mips64 specific runtime files) endif # TARGET_ARCH != mips64 LIBART_HOST_SRC_FILES := \ - $(LIBART_COMMON_SRC_FILES) \ - base/logging_linux.cc \ - monitor_linux.cc \ - runtime_linux.cc \ - thread_linux.cc + $(LIBART_COMMON_SRC_FILES) \ + base/logging_linux.cc \ + monitor_linux.cc \ + runtime_linux.cc \ + thread_linux.cc LIBART_HOST_SRC_FILES_32 := \ - $(LIBART_SRC_FILES_x86) + $(LIBART_SRC_FILES_x86) LIBART_HOST_SRC_FILES_64 := \ - $(LIBART_SRC_FILES_x86_64) + $(LIBART_SRC_FILES_x86_64) LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \ - arch/x86_64/registers_x86_64.h \ - base/mutex.h \ - dex_file.h \ - dex_instruction.h \ - gc/collector/gc_type.h \ - gc/space/space.h \ - gc/heap.h \ - indirect_reference_table.h \ - instruction_set.h \ - invoke_type.h \ - jdwp/jdwp.h \ - jdwp/jdwp_constants.h \ - lock_word.h \ - mirror/class.h \ - oat.h \ - object_callbacks.h \ - quick/inline_method_analyser.h \ - thread.h \ - thread_state.h \ - verifier/method_verifier.h + arch/x86_64/registers_x86_64.h \ + base/mutex.h \ + dex_file.h \ + dex_instruction.h \ + gc/collector/gc_type.h \ + gc/space/space.h \ + gc/heap.h \ + indirect_reference_table.h \ + instruction_set.h \ + invoke_type.h \ + jdwp/jdwp.h \ + jdwp/jdwp_constants.h \ + lock_word.h \ + mirror/class.h \ + oat.h \ + object_callbacks.h \ + quick/inline_method_analyser.h \ + thread.h \ + thread_state.h \ + verifier/method_verifier.h LIBART_CFLAGS := ifeq ($(ART_USE_PORTABLE_COMPILER),true) @@ -413,6 +416,7 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT LOCAL_STATIC_LIBRARIES := libziparchive libz else # host LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils + LOCAL_SHARED_LIBRARIES += libsigchain LOCAL_LDLIBS += -ldl -lpthread ifeq ($$(HOST_OS),linux) LOCAL_LDLIBS += -lrt diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc index 
2a82129511..e22c56ec69 100644 --- a/runtime/arch/arm/fault_handler_arm.cc +++ b/runtime/arch/arm/fault_handler_arm.cc @@ -46,9 +46,10 @@ static uint32_t GetInstructionSize(uint8_t* pc) { return instr_size; } -void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method, +void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context, + mirror::ArtMethod** out_method, uintptr_t* out_return_pc, uintptr_t* out_sp) { - struct ucontext *uc = (struct ucontext *)context; + struct ucontext* uc = reinterpret_cast<struct ucontext*>(context); struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); *out_sp = static_cast<uintptr_t>(sc->arm_sp); VLOG(signals) << "sp: " << *out_sp; @@ -114,7 +115,7 @@ bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) { uint32_t checkinst1 = 0xf8d90000 + Thread::ThreadSuspendTriggerOffset<4>().Int32Value(); uint16_t checkinst2 = 0x6800; - struct ucontext *uc = (struct ucontext *)context; + struct ucontext* uc = reinterpret_cast<struct ucontext*>(context); struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); uint8_t* ptr2 = reinterpret_cast<uint8_t*>(sc->arm_pc); uint8_t* ptr1 = ptr2 - 4; @@ -178,7 +179,7 @@ bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) { // to the overflow region below the protected region. bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) { - struct ucontext *uc = (struct ucontext *)context; + struct ucontext* uc = reinterpret_cast<struct ucontext*>(context); struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc; VLOG(signals) << "sigcontext: " << std::hex << sc; @@ -205,7 +206,7 @@ bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) { } // We know this is a stack overflow. 
We need to move the sp to the overflow region - // the exists below the protected region. Determine the address of the next + // that exists below the protected region. Determine the address of the next // available valid address below the protected region. uintptr_t prevsp = sp; sp = pregion; diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc index 74c3023aff..34eede605c 100644 --- a/runtime/arch/arm64/fault_handler_arm64.cc +++ b/runtime/arch/arm64/fault_handler_arm64.cc @@ -29,7 +29,8 @@ namespace art { -void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method, +void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context, + mirror::ArtMethod** out_method, uintptr_t* out_return_pc, uintptr_t* out_sp) { } diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc index 1ecd7d964b..5a64a698f1 100644 --- a/runtime/arch/mips/fault_handler_mips.cc +++ b/runtime/arch/mips/fault_handler_mips.cc @@ -29,7 +29,8 @@ namespace art { -void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method, +void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context, + mirror::ArtMethod** out_method, uintptr_t* out_return_pc, uintptr_t* out_sp) { } diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc index 7c1980e57b..435f280a6b 100644 --- a/runtime/arch/x86/fault_handler_x86.cc +++ b/runtime/arch/x86/fault_handler_x86.cc @@ -21,7 +21,21 @@ #include "globals.h" #include "base/logging.h" #include "base/hex_dump.h" +#include "mirror/art_method.h" +#include "mirror/art_method-inl.h" +#include "thread.h" +#include "thread-inl.h" +#if defined(__APPLE__) +#define ucontext __darwin_ucontext +#define CTX_ESP uc_mcontext->__ss.__esp +#define CTX_EIP uc_mcontext->__ss.__eip +#define CTX_EAX uc_mcontext->__ss.__eax +#else +#define CTX_ESP uc_mcontext.gregs[REG_ESP] +#define 
CTX_EIP uc_mcontext.gregs[REG_EIP] +#define CTX_EAX uc_mcontext.gregs[REG_EAX] +#endif // // X86 specific fault handler functions. @@ -29,19 +43,292 @@ namespace art { -void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method, +extern "C" void art_quick_throw_null_pointer_exception(); +extern "C" void art_quick_throw_stack_overflow_from_signal(); +extern "C" void art_quick_test_suspend(); + +// From the x86 disassembler... +enum SegmentPrefix { + kCs = 0x2e, + kSs = 0x36, + kDs = 0x3e, + kEs = 0x26, + kFs = 0x64, + kGs = 0x65, +}; + +// Get the size of an instruction in bytes. +static uint32_t GetInstructionSize(uint8_t* pc) { + uint8_t* instruction_start = pc; + bool have_prefixes = true; + bool two_byte = false; + + // Skip all the prefixes. + do { + switch (*pc) { + // Group 1 - lock and repeat prefixes: + case 0xF0: + case 0xF2: + case 0xF3: + // Group 2 - segment override prefixes: + case kCs: + case kSs: + case kDs: + case kEs: + case kFs: + case kGs: + // Group 3 - operand size override: + case 0x66: + // Group 4 - address size override: + case 0x67: + break; + default: + have_prefixes = false; + break; + } + if (have_prefixes) { + pc++; + } + } while (have_prefixes); + +#if defined(__x86_64__) + // Skip REX is present. + if (*pc >= 0x40 && *pc <= 0x4F) { + ++pc; + } +#endif + + // Check for known instructions. + uint32_t known_length = 0; + switch (*pc) { + case 0x83: // cmp [r + v], b: 4 byte instruction + known_length = 4; + break; + } + + if (known_length > 0) { + VLOG(signals) << "known instruction with length " << known_length; + return known_length; + } + + // Unknown instruction, work out length. + + // Work out if we have a ModR/M byte. + uint8_t opcode = *pc++; + if (opcode == 0xf) { + two_byte = true; + opcode = *pc++; + } + + bool has_modrm = false; // Is ModR/M byte present? + uint8_t hi = opcode >> 4; // Opcode high nybble. + uint8_t lo = opcode & 0b1111; // Opcode low nybble. 
+ + // From the Intel opcode tables. + if (two_byte) { + has_modrm = true; // TODO: all of these? + } else if (hi < 4) { + has_modrm = lo < 4 || (lo >= 8 && lo <= 0xb); + } else if (hi == 6) { + has_modrm = lo == 3 || lo == 9 || lo == 0xb; + } else if (hi == 8) { + has_modrm = lo != 0xd; + } else if (hi == 0xc) { + has_modrm = lo == 1 || lo == 2 || lo == 6 || lo == 7; + } else if (hi == 0xd) { + has_modrm = lo < 4; + } else if (hi == 0xf) { + has_modrm = lo == 6 || lo == 7; + } + + if (has_modrm) { + uint8_t modrm = *pc++; + uint8_t mod = (modrm >> 6) & 0b11; + uint8_t reg = (modrm >> 3) & 0b111; + switch (mod) { + case 0: + break; + case 1: + if (reg == 4) { + // SIB + 1 byte displacement. + pc += 2; + } else { + pc += 1; + } + break; + case 2: + // SIB + 4 byte displacement. + pc += 5; + break; + case 3: + break; + } + } + + VLOG(signals) << "calculated X86 instruction size is " << (pc - instruction_start); + return pc - instruction_start; +} + +void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context, + mirror::ArtMethod** out_method, uintptr_t* out_return_pc, uintptr_t* out_sp) { + struct ucontext* uc = reinterpret_cast<struct ucontext*>(context); + *out_sp = static_cast<uintptr_t>(uc->CTX_ESP); + VLOG(signals) << "sp: " << std::hex << *out_sp; + if (*out_sp == 0) { + return; + } + + // In the case of a stack overflow, the stack is not valid and we can't + // get the method from the top of the stack. However it's in EAX. + uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr); + uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>( + reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86)); + if (overflow_addr == fault_addr) { + *out_method = reinterpret_cast<mirror::ArtMethod*>(uc->CTX_EAX); + } else { + // The method is at the top of the stack. 
+ *out_method = reinterpret_cast<mirror::ArtMethod*>(reinterpret_cast<uintptr_t*>(*out_sp)[0]); + } + + uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP); + VLOG(signals) << HexDump(pc, 32, true, "PC "); + + uint32_t instr_size = GetInstructionSize(pc); + *out_return_pc = reinterpret_cast<uintptr_t>(pc + instr_size); } bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) { - return false; + struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); + uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP); + uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP); + + uint32_t instr_size = GetInstructionSize(pc); + // We need to arrange for the signal handler to return to the null pointer + // exception generator. The return address must be the address of the + // next instruction (this instruction + instruction size). The return address + // is on the stack at the top address of the current frame. + + // Push the return address onto the stack. + uint32_t retaddr = reinterpret_cast<uint32_t>(pc + instr_size); + uint32_t* next_sp = reinterpret_cast<uint32_t*>(sp - 4); + *next_sp = retaddr; + uc->CTX_ESP = reinterpret_cast<uint32_t>(next_sp); + + uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception); + VLOG(signals) << "Generating null pointer exception"; + return true; } +// A suspend check is done using the following instruction sequence: +// 0xf720f1df: 648B058C000000 mov eax, fs:[0x8c] ; suspend_trigger +// .. some intervening instructions. +// 0xf720f1e6: 8500 test eax, [eax] + +// The offset from fs is Thread::ThreadSuspendTriggerOffset(). +// To check for a suspend check, we examine the instructions that caused +// the fault. bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) { + // These are the instructions to check for. The first one is the mov eax, fs:[xxx] + // where xxx is the offset of the suspend trigger. 
+ uint32_t trigger = Thread::ThreadSuspendTriggerOffset<4>().Int32Value(); + + VLOG(signals) << "Checking for suspension point"; + uint8_t checkinst1[] = {0x64, 0x8b, 0x05, static_cast<uint8_t>(trigger & 0xff), + static_cast<uint8_t>((trigger >> 8) & 0xff), 0, 0}; + uint8_t checkinst2[] = {0x85, 0x00}; + + struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); + uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP); + uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP); + + if (pc[0] != checkinst2[0] || pc[1] != checkinst2[1]) { + // Second instruction is not correct (test eax,[eax]). + VLOG(signals) << "Not a suspension point"; + return false; + } + + // The first instruction can a little bit up the stream due to load hoisting + // in the compiler. + uint8_t* limit = pc - 100; // Compiler will hoist to a max of 20 instructions. + uint8_t* ptr = pc - sizeof(checkinst1); + bool found = false; + while (ptr > limit) { + if (memcmp(ptr, checkinst1, sizeof(checkinst1)) == 0) { + found = true; + break; + } + ptr -= 1; + } + + if (found) { + VLOG(signals) << "suspend check match"; + + // We need to arrange for the signal handler to return to the null pointer + // exception generator. The return address must be the address of the + // next instruction (this instruction + 2). The return address + // is on the stack at the top address of the current frame. + + // Push the return address onto the stack. + uint32_t retaddr = reinterpret_cast<uint32_t>(pc + 2); + uint32_t* next_sp = reinterpret_cast<uint32_t*>(sp - 4); + *next_sp = retaddr; + uc->CTX_ESP = reinterpret_cast<uint32_t>(next_sp); + + uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_test_suspend); + + // Now remove the suspend trigger that caused this fault. 
+ Thread::Current()->RemoveSuspendTrigger(); + VLOG(signals) << "removed suspend trigger invoking test suspend"; + return true; + } + VLOG(signals) << "Not a suspend check match, first instruction mismatch"; return false; } +// The stack overflow check is done using the following instruction: +// test eax, [esp+ -xxx] +// where 'xxx' is the size of the overflow area. +// +// This is done before any frame is established in the method. The return +// address for the previous method is on the stack at ESP. + bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) { - return false; + struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); + uintptr_t sp = static_cast<uintptr_t>(uc->CTX_ESP); + + uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr); + VLOG(signals) << "fault_addr: " << std::hex << fault_addr; + VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp << + ", fault_addr: " << fault_addr; + + uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86); + + Thread* self = Thread::Current(); + uintptr_t pregion = reinterpret_cast<uintptr_t>(self->GetStackEnd()) - + Thread::kStackOverflowProtectedSize; + + // Check that the fault address is the value expected for a stack overflow. + if (fault_addr != overflow_addr) { + VLOG(signals) << "Not a stack overflow"; + return false; + } + + // We know this is a stack overflow. We need to move the sp to the overflow region + // that exists below the protected region. Determine the address of the next + // available valid address below the protected region. + VLOG(signals) << "setting sp to overflow region at " << std::hex << pregion; + + // Since the compiler puts the implicit overflow + // check before the callee save instructions, the SP is already pointing to + // the previous frame. + + // Tell the stack overflow code where the new stack pointer should be. 
+ uc->CTX_EAX = pregion; + + // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from_signal. + uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow_from_signal); + + return true; } } // namespace art diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 24b9e465e8..68f46ad26c 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -173,6 +173,21 @@ NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode */ NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode +// On entry to this function, EAX contains the ESP value for the overflow region. +DEFINE_FUNCTION art_quick_throw_stack_overflow_from_signal + // Here, the ESP is above the protected region. We need to create a + // callee save frame and then move ESP down to the overflow region. + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context + mov %esp, %ecx // get current stack pointer + mov %eax, %esp // move ESP to the overflow region. + PUSH ecx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) + SETUP_GOT_NOSAVE // clobbers ebx (harmless here) + call PLT_SYMBOL(artThrowStackOverflowFromCode) // artThrowStackOverflowFromCode(Thread*, SP) + int3 // unreached +END_FUNCTION art_quick_throw_stack_overflow_from_signal + /* * Called by managed code, saves callee saves and then calls artThrowException * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception. 
diff --git a/runtime/arch/x86_64/fault_handler_x86_64.cc b/runtime/arch/x86_64/fault_handler_x86_64.cc index 233d3c7d1a..88ae7f3711 100644 --- a/runtime/arch/x86_64/fault_handler_x86_64.cc +++ b/runtime/arch/x86_64/fault_handler_x86_64.cc @@ -29,7 +29,8 @@ namespace art { -void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method, +void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context, + mirror::ArtMethod** out_method, uintptr_t* out_return_pc, uintptr_t* out_sp) { } diff --git a/runtime/base/macros.h b/runtime/base/macros.h index fe5a2ef4fd..fae9271d9e 100644 --- a/runtime/base/macros.h +++ b/runtime/base/macros.h @@ -176,6 +176,7 @@ char (&ArraySizeHelper(T (&array)[N]))[N]; #endif #define PURE __attribute__ ((__pure__)) +#define WARN_UNUSED __attribute__((warn_unused_result)) template<typename T> void UNUSED(const T&) {} diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc index fefb907422..a530594d7c 100644 --- a/runtime/check_jni.cc +++ b/runtime/check_jni.cc @@ -23,6 +23,7 @@ #include "class_linker.h" #include "class_linker-inl.h" #include "dex_file-inl.h" +#include "field_helper.h" #include "gc/space/space.h" #include "mirror/art_field-inl.h" #include "mirror/art_method-inl.h" @@ -31,7 +32,6 @@ #include "mirror/object_array-inl.h" #include "mirror/string-inl.h" #include "mirror/throwable.h" -#include "object_utils.h" #include "runtime.h" #include "scoped_thread_state_change.h" #include "thread.h" @@ -209,7 +209,7 @@ class ScopedCheck { // obj will be NULL. Otherwise, obj should always be non-NULL // and valid. 
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) { - Runtime::Current()->GetHeap()->DumpSpaces(); + Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR)); JniAbortF(function_name_, "field operation on invalid %s: %p", ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object); return; @@ -248,7 +248,7 @@ class ScopedCheck { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { mirror::Object* o = soa_.Decode<mirror::Object*>(java_object); if (o == nullptr || !Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) { - Runtime::Current()->GetHeap()->DumpSpaces(); + Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR)); JniAbortF(function_name_, "field operation on invalid %s: %p", ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object); return; @@ -628,7 +628,7 @@ class ScopedCheck { mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object); if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) { - Runtime::Current()->GetHeap()->DumpSpaces(); + Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR)); JniAbortF(function_name_, "%s is an invalid %s: %p (%p)", what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object, obj); return false; @@ -682,7 +682,7 @@ class ScopedCheck { mirror::Array* a = soa_.Decode<mirror::Array*>(java_array); if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(a)) { - Runtime::Current()->GetHeap()->DumpSpaces(); + Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR)); JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)", ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(), java_array, a); } else if (!a->IsArrayInstance()) { @@ -703,7 +703,7 @@ class ScopedCheck { } mirror::ArtField* f = soa_.DecodeField(fid); if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f) || !f->IsArtField()) { - Runtime::Current()->GetHeap()->DumpSpaces(); + Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR)); JniAbortF(function_name_, "invalid 
jfieldID: %p", fid); return nullptr; } @@ -717,7 +717,7 @@ class ScopedCheck { } mirror::ArtMethod* m = soa_.DecodeMethod(mid); if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m) || !m->IsArtMethod()) { - Runtime::Current()->GetHeap()->DumpSpaces(); + Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR)); JniAbortF(function_name_, "invalid jmethodID: %p", mid); return nullptr; } @@ -738,7 +738,7 @@ class ScopedCheck { mirror::Object* o = soa_.Decode<mirror::Object*>(java_object); if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) { - Runtime::Current()->GetHeap()->DumpSpaces(); + Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR)); // TODO: when we remove work_around_app_jni_bugs, this should be impossible. JniAbortF(function_name_, "native code passing in reference to invalid %s: %p", ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object); diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h index a40a2e4cd2..25eb3a342d 100644 --- a/runtime/class_linker-inl.h +++ b/runtime/class_linker-inl.h @@ -24,7 +24,6 @@ #include "mirror/dex_cache-inl.h" #include "mirror/iftable.h" #include "mirror/object_array.h" -#include "object_utils.h" #include "handle_scope-inl.h" namespace art { diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 2e51cf83b1..2c11f8b89c 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -42,8 +42,10 @@ #include "intern_table.h" #include "interpreter/interpreter.h" #include "leb128.h" +#include "method_helper.h" #include "oat.h" #include "oat_file.h" +#include "object_lock.h" #include "mirror/art_field-inl.h" #include "mirror/art_method-inl.h" #include "mirror/class.h" @@ -57,7 +59,6 @@ #include "mirror/reference-inl.h" #include "mirror/stack_trace_element.h" #include "mirror/string-inl.h" -#include "object_utils.h" #include "os.h" #include "runtime.h" #include "entrypoints/entrypoint_utils.h" @@ -824,9 +825,20 @@ bool ClassLinker::OpenDexFilesFromOat(const 
char* dex_location, const char* oat_ } } else { // TODO: What to lock here? + bool obsolete_file_cleanup_failed; open_oat_file.reset(FindOatFileContainingDexFileFromDexLocation(dex_location, dex_location_checksum_pointer, - kRuntimeISA, error_msgs)); + kRuntimeISA, error_msgs, + &obsolete_file_cleanup_failed)); + // There's no point in going forward and eventually try to regenerate the + // file if we couldn't remove the obsolete one. Mostly likely we will fail + // with the same error when trying to write the new file. + // In case the clean up failure is due to permission issues it's *mandatory* + // to stop to avoid regenerating under the wrong user. + // TODO: should we maybe do this only when we get permission issues? (i.e. EACCESS). + if (obsolete_file_cleanup_failed) { + return false; + } } needs_registering = true; } @@ -1084,7 +1096,9 @@ const OatFile* ClassLinker::FindOatFileContainingDexFileFromDexLocation( const char* dex_location, const uint32_t* const dex_location_checksum, InstructionSet isa, - std::vector<std::string>* error_msgs) { + std::vector<std::string>* error_msgs, + bool* obsolete_file_cleanup_failed) { + *obsolete_file_cleanup_failed = false; // Look for an existing file next to dex. for example, for // /foo/bar/baz.jar, look for /foo/bar/<isa>/baz.odex. std::string odex_filename(DexFilenameToOdexFilename(dex_location, isa)); @@ -1111,9 +1125,18 @@ const OatFile* ClassLinker::FindOatFileContainingDexFileFromDexLocation( if (oat_file != nullptr) { return oat_file; } + if (!open_failed && TEMP_FAILURE_RETRY(unlink(cache_location.c_str())) != 0) { - PLOG(FATAL) << "Failed to remove obsolete oat file from " << cache_location; + std::string error_msg = StringPrintf("Failed to remove obsolete file from %s when searching" + "for dex file %s: %s", + cache_location.c_str(), dex_location, strerror(errno)); + error_msgs->push_back(error_msg); + VLOG(class_linker) << error_msg; + // Let the caller know that we couldn't remove the obsolete file. 
+ // This is a good indication that further writes may fail as well. + *obsolete_file_cleanup_failed = true; } + std::string compound_msg = StringPrintf("Failed to open oat file from %s (error '%s') or %s " "(error '%s').", odex_filename.c_str(), error_msg.c_str(), cache_location.c_str(), cache_error_msg.c_str()); diff --git a/runtime/class_linker.h b/runtime/class_linker.h index d9b3d2541c..c17f88d6d8 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -571,7 +571,8 @@ class ClassLinker { const OatFile* FindOatFileContainingDexFileFromDexLocation(const char* location, const uint32_t* const location_checksum, InstructionSet isa, - std::vector<std::string>* error_msgs) + std::vector<std::string>* error_msgs, + bool* obsolete_file_cleanup_failed) LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_); // Find a verify an oat file with the given dex file. Will return nullptr when the oat file @@ -634,7 +635,7 @@ class ClassLinker { // retire a class, the version of the class in the table is returned and this may differ from // the class passed in. 
mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass) - __attribute__((warn_unused_result)) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index 7930b4876e..21fe0067ed 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -23,6 +23,7 @@ #include "common_runtime_test.h" #include "dex_file.h" #include "entrypoints/entrypoint_utils-inl.h" +#include "field_helper.h" #include "gc/heap.h" #include "mirror/art_field-inl.h" #include "mirror/art_method.h" diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index 8de3068dca..970593d119 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -16,6 +16,8 @@ #include "common_throws.h" +#include <sstream> + #include "base/logging.h" #include "class_linker-inl.h" #include "dex_file-inl.h" @@ -25,12 +27,9 @@ #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "object_utils.h" #include "thread.h" #include "verifier/method_verifier.h" -#include <sstream> - namespace art { static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer) diff --git a/runtime/debugger.cc b/runtime/debugger.cc index c95be0154a..4cf4c099b2 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -25,11 +25,13 @@ #include "class_linker-inl.h" #include "dex_file-inl.h" #include "dex_instruction.h" +#include "field_helper.h" #include "gc/accounting/card_table-inl.h" #include "gc/space/large_object_space.h" #include "gc/space/space-inl.h" #include "handle_scope.h" #include "jdwp/object_registry.h" +#include "method_helper.h" #include "mirror/art_field-inl.h" #include "mirror/art_method-inl.h" #include "mirror/class.h" @@ -39,7 +41,6 @@ 
#include "mirror/object_array-inl.h" #include "mirror/string-inl.h" #include "mirror/throwable.h" -#include "object_utils.h" #include "quick/inline_method_analyser.h" #include "reflection.h" #include "safe_map.h" diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h index 482ad47d5d..90c8fcf980 100644 --- a/runtime/entrypoints/entrypoint_utils-inl.h +++ b/runtime/entrypoints/entrypoint_utils-inl.h @@ -30,7 +30,6 @@ #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/throwable.h" -#include "object_utils.h" #include "handle_scope-inl.h" #include "thread.h" @@ -38,10 +37,9 @@ namespace art { // TODO: Fix no thread safety analysis when GCC can handle template specialization. template <const bool kAccessCheck> -ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx, - mirror::ArtMethod* method, - Thread* self, bool* slow_path) - NO_THREAD_SAFETY_ANALYSIS { +static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx, + mirror::ArtMethod* method, + Thread* self, bool* slow_path) { mirror::Class* klass = method->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx); if (UNLIKELY(klass == NULL)) { klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); @@ -88,9 +86,9 @@ ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx, } // TODO: Fix no thread safety analysis when annotalysis is smarter. -ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass, - Thread* self, bool* slow_path) - NO_THREAD_SAFETY_ANALYSIS { +static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass, + Thread* self, + bool* slow_path) { if (UNLIKELY(!klass->IsInitialized())) { StackHandleScope<1> hs(self); Handle<mirror::Class> h_class(hs.NewHandle(klass)); @@ -118,11 +116,10 @@ ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(m // check. 
// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter. template <bool kAccessCheck, bool kInstrumented> -ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, - mirror::ArtMethod* method, - Thread* self, - gc::AllocatorType allocator_type) - NO_THREAD_SAFETY_ANALYSIS { +static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, + mirror::ArtMethod* method, + Thread* self, + gc::AllocatorType allocator_type) { bool slow_path = false; mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self, &slow_path); if (UNLIKELY(slow_path)) { @@ -138,11 +135,10 @@ ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_id // Given the context of a calling Method and a resolved class, create an instance. // TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter. template <bool kInstrumented> -ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass, - mirror::ArtMethod* method, - Thread* self, - gc::AllocatorType allocator_type) - NO_THREAD_SAFETY_ANALYSIS { +static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass, + mirror::ArtMethod* method, + Thread* self, + gc::AllocatorType allocator_type) { DCHECK(klass != nullptr); bool slow_path = false; klass = CheckClassInitializedForObjectAlloc(klass, self, &slow_path); @@ -161,11 +157,10 @@ ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror:: // Given the context of a calling Method and an initialized class, create an instance. // TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter. 
template <bool kInstrumented> -ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass, - mirror::ArtMethod* method, - Thread* self, - gc::AllocatorType allocator_type) - NO_THREAD_SAFETY_ANALYSIS { +static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass, + mirror::ArtMethod* method, + Thread* self, + gc::AllocatorType allocator_type) { DCHECK(klass != nullptr); // Pass in false since the object can not be finalizable. return klass->Alloc<kInstrumented, false>(self, allocator_type); @@ -174,11 +169,10 @@ ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirro // TODO: Fix no thread safety analysis when GCC can handle template specialization. template <bool kAccessCheck> -ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx, - mirror::ArtMethod* method, - int32_t component_count, - bool* slow_path) - NO_THREAD_SAFETY_ANALYSIS { +static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx, + mirror::ArtMethod* method, + int32_t component_count, + bool* slow_path) { if (UNLIKELY(component_count < 0)) { ThrowNegativeArraySizeException(component_count); *slow_path = true; @@ -211,12 +205,11 @@ ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx, // check. // TODO: Fix no thread safety analysis when GCC can handle template specialization. 
template <bool kAccessCheck, bool kInstrumented> -ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, - mirror::ArtMethod* method, - int32_t component_count, - Thread* self, - gc::AllocatorType allocator_type) - NO_THREAD_SAFETY_ANALYSIS { +static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, + mirror::ArtMethod* method, + int32_t component_count, + Thread* self, + gc::AllocatorType allocator_type) { bool slow_path = false; mirror::Class* klass = CheckArrayAlloc<kAccessCheck>(type_idx, method, component_count, &slow_path); @@ -234,12 +227,11 @@ ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, } template <bool kAccessCheck, bool kInstrumented> -ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass, - mirror::ArtMethod* method, - int32_t component_count, - Thread* self, - gc::AllocatorType allocator_type) - NO_THREAD_SAFETY_ANALYSIS { +static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass, + mirror::ArtMethod* method, + int32_t component_count, + Thread* self, + gc::AllocatorType allocator_type) { DCHECK(klass != nullptr); if (UNLIKELY(component_count < 0)) { ThrowNegativeArraySizeException(component_count); @@ -476,8 +468,7 @@ EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kInterface); // Fast path field resolution that can't initialize classes or throw exceptions. 
static inline mirror::ArtField* FindFieldFast(uint32_t field_idx, mirror::ArtMethod* referrer, - FindFieldType type, size_t expected_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FindFieldType type, size_t expected_size) { mirror::ArtField* resolved_field = referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); if (UNLIKELY(resolved_field == nullptr)) { @@ -534,8 +525,7 @@ static inline mirror::ArtField* FindFieldFast(uint32_t field_idx, static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx, mirror::Object* this_object, mirror::ArtMethod* referrer, - bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool access_check, InvokeType type) { bool is_direct = type == kStatic || type == kDirect; if (UNLIKELY(this_object == NULL && !is_direct)) { return NULL; @@ -576,8 +566,7 @@ static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx, static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, mirror::ArtMethod* referrer, Thread* self, bool can_run_clinit, - bool verify_access) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool verify_access) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); mirror::Class* klass = class_linker->ResolveType(type_idx, referrer); if (UNLIKELY(klass == nullptr)) { @@ -611,14 +600,12 @@ static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, } static inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer, - uint32_t string_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t string_idx) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); return class_linker->ResolveString(string_idx, referrer); } -static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) - NO_THREAD_SAFETY_ANALYSIS /* SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) */ { +static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) { // Save any pending exception 
over monitor exit call. mirror::Throwable* saved_exception = NULL; ThrowLocation saved_throw_location; @@ -642,27 +629,7 @@ static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) } } -static inline void CheckReferenceResult(mirror::Object* o, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (o == NULL) { - return; - } - mirror::ArtMethod* m = self->GetCurrentMethod(NULL); - if (o == kInvalidIndirectRefObject) { - JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str()); - } - // Make sure that the result is an instance of the type this method was expected to return. - StackHandleScope<1> hs(self); - Handle<mirror::ArtMethod> h_m(hs.NewHandle(m)); - mirror::Class* return_type = MethodHelper(h_m).GetReturnType(); - - if (!o->InstanceOf(return_type)) { - JniAbortF(NULL, "attempt to return an instance of %s from %s", PrettyTypeOf(o).c_str(), - PrettyMethod(h_m.Get()).c_str()); - } -} - -static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static inline void CheckSuspend(Thread* thread) { for (;;) { if (thread->ReadFlag(kCheckpointRequest)) { thread->RunCheckpointFunction(); diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index d029df2c7f..0fa0e410bf 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -20,11 +20,11 @@ #include "class_linker-inl.h" #include "dex_file-inl.h" #include "gc/accounting/card_table-inl.h" +#include "method_helper-inl.h" #include "mirror/art_field-inl.h" #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "mirror/object_array-inl.h" #include "reflection.h" #include "scoped_thread_state_change.h" @@ -139,6 +139,25 @@ void ThrowStackOverflowError(Thread* self) { self->ResetDefaultStackEnd(!explicit_overflow_check); // Return to default stack size. 
} +void CheckReferenceResult(mirror::Object* o, Thread* self) { + if (o == NULL) { + return; + } + mirror::ArtMethod* m = self->GetCurrentMethod(NULL); + if (o == kInvalidIndirectRefObject) { + JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str()); + } + // Make sure that the result is an instance of the type this method was expected to return. + StackHandleScope<1> hs(self); + Handle<mirror::ArtMethod> h_m(hs.NewHandle(m)); + mirror::Class* return_type = MethodHelper(h_m).GetReturnType(); + + if (!o->InstanceOf(return_type)) { + JniAbortF(NULL, "attempt to return an instance of %s from %s", PrettyTypeOf(o).c_str(), + PrettyMethod(h_m.Get()).c_str()); + } +} + JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty, jobject rcvr_jobj, jobject interface_method_jobj, std::vector<jvalue>& args) { diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h index 11a67ac5dd..c5d67aa1df 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -45,12 +45,12 @@ template <const bool kAccessCheck> ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx, mirror::ArtMethod* method, Thread* self, bool* slow_path) - NO_THREAD_SAFETY_ANALYSIS; + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // TODO: Fix no thread safety analysis when annotalysis is smarter. ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass, Thread* self, bool* slow_path) - NO_THREAD_SAFETY_ANALYSIS; + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it // cannot be resolved, throw an error. If it can, use it to create an instance. 
@@ -61,7 +61,8 @@ template <bool kAccessCheck, bool kInstrumented> ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::ArtMethod* method, Thread* self, - gc::AllocatorType allocator_type); + gc::AllocatorType allocator_type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Given the context of a calling Method and a resolved class, create an instance. // TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter. @@ -70,7 +71,7 @@ ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror:: mirror::ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) - NO_THREAD_SAFETY_ANALYSIS; + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Given the context of a calling Method and an initialized class, create an instance. // TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter. @@ -78,7 +79,8 @@ template <bool kInstrumented> ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass, mirror::ArtMethod* method, Thread* self, - gc::AllocatorType allocator_type); + gc::AllocatorType allocator_type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // TODO: Fix no thread safety analysis when GCC can handle template specialization. @@ -87,7 +89,7 @@ ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, bool* slow_path) - NO_THREAD_SAFETY_ANALYSIS; + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If // it cannot be resolved, throw an error. If it can, use it to create an array. 
@@ -100,7 +102,7 @@ ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, int32_t component_count, Thread* self, gc::AllocatorType allocator_type) - NO_THREAD_SAFETY_ANALYSIS; + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template <bool kAccessCheck, bool kInstrumented> ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass, @@ -108,7 +110,7 @@ ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Cl int32_t component_count, Thread* self, gc::AllocatorType allocator_type) - NO_THREAD_SAFETY_ANALYSIS; + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, @@ -171,10 +173,11 @@ static inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer, uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); +// TODO: annotalysis disabled as monitor semantics are maintained in Java code. 
static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + NO_THREAD_SAFETY_ANALYSIS; -static inline void CheckReferenceResult(mirror::Object* o, Thread* self) +void CheckReferenceResult(mirror::Object* o, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc index 329c175986..64faf76213 100644 --- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc +++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc @@ -18,7 +18,6 @@ #include "interpreter/interpreter.h" #include "mirror/art_method-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "reflection.h" #include "runtime.h" #include "stack.h" diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc index bae4023c1d..edb3b723ef 100644 --- a/runtime/entrypoints/jni/jni_entrypoints.cc +++ b/runtime/entrypoints/jni/jni_entrypoints.cc @@ -18,7 +18,6 @@ #include "entrypoints/entrypoint_utils.h" #include "mirror/art_method-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "scoped_thread_state_change.h" #include "thread.h" diff --git a/runtime/entrypoints/portable/portable_throw_entrypoints.cc b/runtime/entrypoints/portable/portable_throw_entrypoints.cc index 9e36a05841..be6231cae5 100644 --- a/runtime/entrypoints/portable/portable_throw_entrypoints.cc +++ b/runtime/entrypoints/portable/portable_throw_entrypoints.cc @@ -80,7 +80,6 @@ extern "C" int32_t art_portable_find_catch_block_from_code(mirror::ArtMethod* cu } mirror::Class* exception_type = exception->GetClass(); StackHandleScope<1> hs(self); - MethodHelper mh(hs.NewHandle(current_method)); const DexFile::CodeItem* code_item = current_method->GetCodeItem(); 
DCHECK_LT(ti_offset, code_item->tries_size_); const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset); @@ -98,7 +97,8 @@ extern "C" int32_t art_portable_find_catch_block_from_code(mirror::ArtMethod* cu break; } // Does this catch exception type apply? - mirror::Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); + mirror::Class* iter_exception_type = + current_method->GetDexCacheResolvedTypes()->Get(iter_type_idx); if (UNLIKELY(iter_exception_type == NULL)) { // TODO: check, the verifier (class linker?) should take care of resolving all exception // classes early. diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc index 7ee869b67f..9f75b0fcf0 100644 --- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc +++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc @@ -22,7 +22,6 @@ #include "interpreter/interpreter.h" #include "mirror/art_method-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "scoped_thread_state_change.h" namespace art { diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc index 47fb9d66f8..f9f62c2721 100644 --- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc @@ -21,7 +21,6 @@ #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "stack.h" #include "thread.h" #include "verifier/method_verifier.h" diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc index 30e86097a6..653724989a 100644 --- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc @@ -21,7 +21,6 @@ #include "mirror/object.h" #include 
"mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "object_utils.h" #include "scoped_thread_state_change.h" #include "thread.h" #include "verify_object-inl.h" diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc index 4dcb1c8dc6..879010e36e 100644 --- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc @@ -18,7 +18,6 @@ #include "common_throws.h" #include "entrypoints/entrypoint_utils-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "thread.h" #include "well_known_classes.h" diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index f7cb1263f2..338bd06f7c 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -27,7 +27,6 @@ #include "mirror/dex_cache-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "object_utils.h" #include "runtime.h" #include "scoped_thread_state_change.h" diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc index 3112bc0a28..1b916284c5 100644 --- a/runtime/fault_handler.cc +++ b/runtime/fault_handler.cc @@ -15,23 +15,13 @@ */ #include "fault_handler.h" + #include <sys/mman.h> #include <sys/ucontext.h> -#include "base/macros.h" -#include "globals.h" -#include "base/logging.h" -#include "base/hex_dump.h" -#include "thread.h" -#include "mirror/art_method-inl.h" -#include "mirror/class-inl.h" -#include "mirror/dex_cache.h" -#include "mirror/object_array-inl.h" -#include "mirror/object-inl.h" -#include "object_utils.h" -#include "scoped_thread_state_change.h" -#ifdef HAVE_ANDROID_OS +#include "mirror/art_method.h" +#include "mirror/class.h" #include "sigchain.h" -#endif +#include "thread-inl.h" #include "verify_object-inl.h" namespace art { @@ -47,6 +37,7 @@ void 
art_sigsegv_fault() { // Signal handler called on SIGSEGV. static void art_fault_handler(int sig, siginfo_t* info, void* context) { + // std::cout << "handling fault in ART handler\n"; fault_manager.HandleFault(sig, info, context); } @@ -55,10 +46,6 @@ FaultManager::FaultManager() { } FaultManager::~FaultManager() { -#ifdef HAVE_ANDROID_OS - UnclaimSignalChain(SIGSEGV); -#endif - sigaction(SIGSEGV, &oldaction_, nullptr); // Restore old handler. } @@ -72,11 +59,12 @@ void FaultManager::Init() { #endif // Set our signal handler now. - sigaction(SIGSEGV, &action, &oldaction_); -#ifdef HAVE_ANDROID_OS + int e = sigaction(SIGSEGV, &action, &oldaction_); + if (e != 0) { + VLOG(signals) << "Failed to claim SEGV: " << strerror(errno); + } // Make sure our signal handler is called before any user handlers. ClaimSignalChain(SIGSEGV, &oldaction_); -#endif } void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) { @@ -84,8 +72,12 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) { // // If malloc calls abort, it will be holding its lock. // If the handler tries to call malloc, it will deadlock. + + // Also, there is only an 8K stack available here to logging can cause memory + // overwrite issues if you are unlucky. If you want to enable logging and + // are getting crashes, allocate more space for the alternate signal stack. VLOG(signals) << "Handling fault"; - if (IsInGeneratedCode(context, true)) { + if (IsInGeneratedCode(info, context, true)) { VLOG(signals) << "in generated code, looking for handler"; for (const auto& handler : generated_code_handlers_) { VLOG(signals) << "invoking Action on handler " << handler; @@ -101,11 +93,8 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) { } art_sigsegv_fault(); -#ifdef HAVE_ANDROID_OS + // Pass this on to the next handler in the chain, or the default if none. 
InvokeUserSignalHandler(sig, info, context); -#else - oldaction_.sa_sigaction(sig, info, context); -#endif } void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) { @@ -132,7 +121,7 @@ void FaultManager::RemoveHandler(FaultHandler* handler) { // This function is called within the signal handler. It checks that // the mutator_lock is held (shared). No annotalysis is done. -bool FaultManager::IsInGeneratedCode(void* context, bool check_dex_pc) { +bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool check_dex_pc) { // We can only be running Java code in the current thread if it // is in Runnable state. VLOG(signals) << "Checking for generated code"; @@ -161,7 +150,7 @@ bool FaultManager::IsInGeneratedCode(void* context, bool check_dex_pc) { // Get the architecture specific method address and return address. These // are in architecture specific files in arch/<arch>/fault_handler_<arch>. - GetMethodAndReturnPCAndSP(context, &method_obj, &return_pc, &sp); + GetMethodAndReturnPCAndSP(siginfo, context, &method_obj, &return_pc, &sp); // If we don't have a potential method, we're outta here. VLOG(signals) << "potential method: " << method_obj; @@ -242,12 +231,12 @@ JavaStackTraceHandler::JavaStackTraceHandler(FaultManager* manager) : FaultHandl bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) { // Make sure that we are in the generated code, but we may not have a dex pc. - if (manager_->IsInGeneratedCode(context, false)) { + if (manager_->IsInGeneratedCode(siginfo, context, false)) { LOG(ERROR) << "Dumping java stack trace for crash in generated code"; mirror::ArtMethod* method = nullptr; uintptr_t return_pc = 0; uintptr_t sp = 0; - manager_->GetMethodAndReturnPCAndSP(context, &method, &return_pc, &sp); + manager_->GetMethodAndReturnPCAndSP(siginfo, context, &method, &return_pc, &sp); Thread* self = Thread::Current(); // Inside of generated code, sp[0] is the method, so sp is the frame. 
StackReference<mirror::ArtMethod>* frame = diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h index 026f5b9c4a..71c99771cd 100644 --- a/runtime/fault_handler.h +++ b/runtime/fault_handler.h @@ -43,9 +43,10 @@ class FaultManager { void HandleFault(int sig, siginfo_t* info, void* context); void AddHandler(FaultHandler* handler, bool generated_code); void RemoveHandler(FaultHandler* handler); - void GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method, + void GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context, mirror::ArtMethod** out_method, uintptr_t* out_return_pc, uintptr_t* out_sp); - bool IsInGeneratedCode(void *context, bool check_dex_pc) NO_THREAD_SAFETY_ANALYSIS; + bool IsInGeneratedCode(siginfo_t* siginfo, void *context, bool check_dex_pc) + NO_THREAD_SAFETY_ANALYSIS; private: std::vector<FaultHandler*> generated_code_handlers_; diff --git a/runtime/field_helper.cc b/runtime/field_helper.cc new file mode 100644 index 0000000000..40daa6db3f --- /dev/null +++ b/runtime/field_helper.cc @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "field_helper.h" + +#include "class_linker-inl.h" +#include "dex_file.h" +#include "mirror/dex_cache.h" +#include "runtime.h" +#include "thread-inl.h" + +namespace art { + +mirror::Class* FieldHelper::GetType(bool resolve) { + uint32_t field_index = field_->GetDexFieldIndex(); + if (UNLIKELY(field_->GetDeclaringClass()->IsProxyClass())) { + return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(), + field_->GetTypeDescriptor()); + } + const DexFile* dex_file = field_->GetDexFile(); + const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index); + mirror::Class* type = field_->GetDexCache()->GetResolvedType(field_id.type_idx_); + if (resolve && (type == nullptr)) { + type = Runtime::Current()->GetClassLinker()->ResolveType(field_id.type_idx_, field_.Get()); + CHECK(type != nullptr || Thread::Current()->IsExceptionPending()); + } + return type; +} + +const char* FieldHelper::GetDeclaringClassDescriptor() { + uint32_t field_index = field_->GetDexFieldIndex(); + if (UNLIKELY(field_->GetDeclaringClass()->IsProxyClass())) { + DCHECK(field_->IsStatic()); + DCHECK_LT(field_index, 2U); + // 0 == Class[] interfaces; 1 == Class[][] throws; + declaring_class_descriptor_ = field_->GetDeclaringClass()->GetDescriptor(); + return declaring_class_descriptor_.c_str(); + } + const DexFile* dex_file = field_->GetDexFile(); + const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index); + return dex_file->GetFieldDeclaringClassDescriptor(field_id); +} + +} // namespace art diff --git a/runtime/field_helper.h b/runtime/field_helper.h new file mode 100644 index 0000000000..5eae55e09c --- /dev/null +++ b/runtime/field_helper.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_FIELD_HELPER_H_ +#define ART_RUNTIME_FIELD_HELPER_H_ + +#include "base/macros.h" +#include "handle.h" +#include "mirror/art_field.h" + +namespace art { + +class FieldHelper { + public: + explicit FieldHelper(Handle<mirror::ArtField> f) : field_(f) {} + + void ChangeField(mirror::ArtField* new_f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(new_f != nullptr); + field_.Assign(new_f); + } + + mirror::ArtField* GetField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return field_.Get(); + } + + mirror::Class* GetType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // The returned const char* is only guaranteed to be valid for the lifetime of the FieldHelper. + // If you need it longer, copy it into a std::string. 
+ const char* GetDeclaringClassDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + private: + Handle<mirror::ArtField> field_; + std::string declaring_class_descriptor_; + + DISALLOW_COPY_AND_ASSIGN(FieldHelper); +}; + +} // namespace art + +#endif // ART_RUNTIME_FIELD_HELPER_H_ diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc index 228d1dc668..2686af0529 100644 --- a/runtime/gc/accounting/mod_union_table.cc +++ b/runtime/gc/accounting/mod_union_table.cc @@ -185,7 +185,7 @@ class CheckReferenceVisitor { << from_space->GetGcRetentionPolicy(); LOG(INFO) << "ToSpace " << to_space->GetName() << " type " << to_space->GetGcRetentionPolicy(); - heap->DumpSpaces(); + heap->DumpSpaces(LOG(INFO)); LOG(FATAL) << "FATAL ERROR"; } } diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h index 1e9556a076..fc4213e8c6 100644 --- a/runtime/gc/accounting/space_bitmap-inl.h +++ b/runtime/gc/accounting/space_bitmap-inl.h @@ -23,14 +23,6 @@ #include "atomic.h" #include "base/logging.h" -#include "dex_file-inl.h" -#include "heap_bitmap.h" -#include "mirror/art_field-inl.h" -#include "mirror/class-inl.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "object_utils.h" -#include "space_bitmap-inl.h" #include "utils.h" namespace art { diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc index c0aa43ea41..39d1f9e132 100644 --- a/runtime/gc/accounting/space_bitmap.cc +++ b/runtime/gc/accounting/space_bitmap.cc @@ -16,6 +16,13 @@ #include "space_bitmap-inl.h" +#include "base/stringprintf.h" +#include "mem_map.h" +#include "mirror/object-inl.h" +#include "mirror/class.h" +#include "mirror/art_field.h" +#include "mirror/object_array.h" + namespace art { namespace gc { namespace accounting { @@ -46,6 +53,9 @@ SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, u } template<size_t kAlignment> 
+SpaceBitmap<kAlignment>::~SpaceBitmap() {} + +template<size_t kAlignment> SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create( const std::string& name, byte* heap_begin, size_t heap_capacity) { // Round up since heap_capacity is not necessarily a multiple of kAlignment * kBitsPerWord. diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h index 6d1ba870db..a3073bda07 100644 --- a/runtime/gc/accounting/space_bitmap.h +++ b/runtime/gc/accounting/space_bitmap.h @@ -54,8 +54,7 @@ class SpaceBitmap { static SpaceBitmap* CreateFromMemMap(const std::string& name, MemMap* mem_map, byte* heap_begin, size_t heap_capacity); - ~SpaceBitmap() { - } + ~SpaceBitmap(); // <offset> is the difference from .base to a pointer address. // <index> is the index of .bits that contains the bit representing diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h index 974952d992..104ed36014 100644 --- a/runtime/gc/collector/mark_sweep-inl.h +++ b/runtime/gc/collector/mark_sweep-inl.h @@ -32,10 +32,7 @@ namespace collector { template<typename MarkVisitor, typename ReferenceVisitor> inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor, const ReferenceVisitor& ref_visitor) { - if (kIsDebugBuild && !IsMarked(obj)) { - heap_->DumpSpaces(); - LOG(FATAL) << "Scanning unmarked object " << obj; - } + DCHECK(IsMarked(obj)) << "Scanning unmarked object " << obj << "\n" << heap_->DumpSpaces(); obj->VisitReferences<false>(visitor, ref_visitor); if (kCountScannedTypes) { mirror::Class* klass = obj->GetClass<kVerifyNone>(); diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index 7e97b3b16b..95530be202 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -313,10 +313,8 @@ void MarkSweep::FindDefaultSpaceBitmap() { } } } - if (current_space_bitmap_ == nullptr) { - heap_->DumpSpaces(); - LOG(FATAL) << "Could not find a default 
mark bitmap"; - } + CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n" + << heap_->DumpSpaces(); } void MarkSweep::ExpandMarkStack() { @@ -943,12 +941,9 @@ mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg void MarkSweep::VerifyIsLive(const Object* obj) { if (!heap_->GetLiveBitmap()->Test(obj)) { - if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) == - heap_->allocation_stack_->End()) { - // Object not found! - heap_->DumpSpaces(); - LOG(FATAL) << "Found dead object " << obj; - } + accounting::ObjectStack* allocation_stack = heap_->allocation_stack_.get(); + CHECK(std::find(allocation_stack->Begin(), allocation_stack->End(), obj) != + allocation_stack->End()) << "Found dead object " << obj << "\n" << heap_->DumpSpaces(); } } diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h index 47682cc584..922a71ceb2 100644 --- a/runtime/gc/collector/semi_space-inl.h +++ b/runtime/gc/collector/semi_space-inl.h @@ -64,34 +64,25 @@ inline void SemiSpace::MarkObject( // Verify all the objects have the correct forward pointer installed. obj->AssertReadBarrierPointer(); } - if (!immune_region_.ContainsObject(obj)) { - if (from_space_->HasAddress(obj)) { - mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj); - // If the object has already been moved, return the new forward address. - if (UNLIKELY(forward_address == nullptr)) { - forward_address = MarkNonForwardedObject(obj); - DCHECK(forward_address != nullptr); - // Make sure to only update the forwarding address AFTER you copy the object so that the - // monitor word doesn't Get stomped over. - obj->SetLockWord( - LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false); - // Push the object onto the mark stack for later processing. 
- MarkStackPush(forward_address); - } - obj_ptr->Assign(forward_address); - } else { - BitmapSetSlowPathVisitor visitor(this); - if (kIsDebugBuild && mark_bitmap_->GetContinuousSpaceBitmap(obj) != nullptr) { - // If a bump pointer space only collection, we should not - // reach here as we don't/won't mark the objects in the - // non-moving space (except for the promoted objects.) Note - // the non-moving space is added to the immune space. - DCHECK(!generational_ || whole_heap_collection_); - } - if (!mark_bitmap_->Set(obj, visitor)) { - // This object was not previously marked. - MarkStackPush(obj); - } + if (from_space_->HasAddress(obj)) { + mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj); + // If the object has already been moved, return the new forward address. + if (UNLIKELY(forward_address == nullptr)) { + forward_address = MarkNonForwardedObject(obj); + DCHECK(forward_address != nullptr); + // Make sure to only update the forwarding address AFTER you copy the object so that the + // monitor word doesn't Get stomped over. + obj->SetLockWord( + LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false); + // Push the object onto the mark stack for later processing. + MarkStackPush(forward_address); + } + obj_ptr->Assign(forward_address); + } else if (!collect_from_space_only_ && !immune_region_.ContainsObject(obj)) { + BitmapSetSlowPathVisitor visitor(this); + if (!mark_bitmap_->Set(obj, visitor)) { + // This object was not previously marked. + MarkStackPush(obj); } } } diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc index cabfe2176c..c7c567f2bd 100644 --- a/runtime/gc/collector/semi_space.cc +++ b/runtime/gc/collector/semi_space.cc @@ -63,23 +63,23 @@ void SemiSpace::BindBitmaps() { WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_); // Mark all of the spaces we never collect as immune. 
for (const auto& space : GetHeap()->GetContinuousSpaces()) { - if (space->GetLiveBitmap() != nullptr) { - if (space == to_space_) { - CHECK(to_space_->IsContinuousMemMapAllocSpace()); - to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap(); - } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect - || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect - // Add the main free list space and the non-moving - // space to the immune space if a bump pointer space - // only collection. - || (generational_ && !whole_heap_collection_ && - (space == GetHeap()->GetNonMovingSpace() || - space == GetHeap()->GetPrimaryFreeListSpace()))) { - CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space; + if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect || + space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) { + CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space; + } else if (space->GetLiveBitmap() != nullptr) { + if (space == to_space_ || collect_from_space_only_) { + if (collect_from_space_only_) { + // Bind the main free list space and the non-moving space to the immune space if a bump + // pointer space only collection. + CHECK(space == to_space_ || space == GetHeap()->GetPrimaryFreeListSpace() || + space == GetHeap()->GetNonMovingSpace()); + } + CHECK(space->IsContinuousMemMapAllocSpace()); + space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap(); } } } - if (generational_ && !whole_heap_collection_) { + if (collect_from_space_only_) { // We won't collect the large object space if a bump pointer space only collection. 
is_large_object_space_immune_ = true; } @@ -95,7 +95,7 @@ SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_pref bytes_promoted_(0), bytes_promoted_since_last_whole_heap_collection_(0), large_object_bytes_allocated_at_last_whole_heap_collection_(0), - whole_heap_collection_(true), + collect_from_space_only_(generational), collector_name_(name_), swap_semi_spaces_(true) { } @@ -147,6 +147,10 @@ void SemiSpace::InitializePhase() { ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); mark_bitmap_ = heap_->GetMarkBitmap(); } + if (generational_) { + promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace(); + } + fallback_space_ = GetHeap()->GetNonMovingSpace(); } void SemiSpace::ProcessReferences(Thread* self) { @@ -180,9 +184,9 @@ void SemiSpace::MarkingPhase() { GetCurrentIteration()->GetClearSoftReferences()) { // If an explicit, native allocation-triggered, or last attempt // collection, collect the whole heap. - whole_heap_collection_ = true; + collect_from_space_only_ = false; } - if (whole_heap_collection_) { + if (!collect_from_space_only_) { VLOG(heap) << "Whole heap collection"; name_ = collector_name_ + " whole"; } else { @@ -191,7 +195,7 @@ void SemiSpace::MarkingPhase() { } } - if (!generational_ || whole_heap_collection_) { + if (!collect_from_space_only_) { // If non-generational, always clear soft references. // If generational, clear soft references if a whole heap collection. GetCurrentIteration()->SetClearSoftReferences(true); @@ -227,8 +231,6 @@ void SemiSpace::MarkingPhase() { { WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_); MarkRoots(); - // Mark roots of immune spaces. - UpdateAndMarkModUnion(); // Recursively mark remaining objects. MarkReachableObjects(); } @@ -259,46 +261,6 @@ void SemiSpace::MarkingPhase() { } } -void SemiSpace::UpdateAndMarkModUnion() { - for (auto& space : heap_->GetContinuousSpaces()) { - // If the space is immune then we need to mark the references to other spaces. 
- if (immune_region_.ContainsSpace(space)) { - accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space); - if (table != nullptr) { - // TODO: Improve naming. - TimingLogger::ScopedTiming t( - space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" : - "UpdateAndMarkImageModUnionTable", - GetTimings()); - table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this); - } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) { - DCHECK(kUseRememberedSet); - // If a bump pointer space only collection, the non-moving - // space is added to the immune space. The non-moving space - // doesn't have a mod union table, but has a remembered - // set. Its dirty cards will be scanned later in - // MarkReachableObjects(). - DCHECK(generational_ && !whole_heap_collection_ && - (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())) - << "Space " << space->GetName() << " " - << "generational_=" << generational_ << " " - << "whole_heap_collection_=" << whole_heap_collection_ << " "; - } else { - DCHECK(!kUseRememberedSet); - // If a bump pointer space only collection, the non-moving - // space is added to the immune space. But the non-moving - // space doesn't have a mod union table. Instead, its live - // bitmap will be scanned later in MarkReachableObjects(). 
- DCHECK(generational_ && !whole_heap_collection_ && - (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())) - << "Space " << space->GetName() << " " - << "generational_=" << generational_ << " " - << "whole_heap_collection_=" << whole_heap_collection_ << " "; - } - } - } -} - class SemiSpaceScanObjectVisitor { public: explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {} @@ -355,20 +317,30 @@ void SemiSpace::MarkReachableObjects() { heap_->MarkAllocStackAsLive(live_stack); live_stack->Reset(); } - t.NewTiming("UpdateAndMarkRememberedSets"); for (auto& space : heap_->GetContinuousSpaces()) { - // If the space is immune and has no mod union table (the - // non-moving space when the bump pointer space only collection is - // enabled,) then we need to scan its live bitmap or dirty cards as roots - // (including the objects on the live stack which have just marked - // in the live bitmap above in MarkAllocStackAsLive().) - if (immune_region_.ContainsSpace(space) && - heap_->FindModUnionTableFromSpace(space) == nullptr) { - DCHECK(generational_ && !whole_heap_collection_ && - (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace())); - accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space); - if (kUseRememberedSet) { - DCHECK(rem_set != nullptr); + // If the space is immune then we need to mark the references to other spaces. + accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space); + if (table != nullptr) { + // TODO: Improve naming. + TimingLogger::ScopedTiming t2( + space->IsZygoteSpace() ? 
"UpdateAndMarkZygoteModUnionTable" : + "UpdateAndMarkImageModUnionTable", + GetTimings()); + table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this); + DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr); + } else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) { + // If the space has no mod union table (the non-moving space and main spaces when the bump + // pointer space only collection is enabled,) then we need to scan its live bitmap or dirty + // cards as roots (including the objects on the live stack which have just marked in the live + // bitmap above in MarkAllocStackAsLive().) + DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()) + << "Space " << space->GetName() << " " + << "generational_=" << generational_ << " " + << "collect_from_space_only_=" << collect_from_space_only_; + accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space); + CHECK_EQ(rem_set != nullptr, kUseRememberedSet); + if (rem_set != nullptr) { + TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings()); rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback, from_space_, this); if (kIsDebugBuild) { @@ -383,7 +355,7 @@ void SemiSpace::MarkReachableObjects() { visitor); } } else { - DCHECK(rem_set == nullptr); + TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings()); accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); SemiSpaceScanObjectVisitor visitor(this); live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), @@ -393,9 +365,10 @@ void SemiSpace::MarkReachableObjects() { } } + CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_); if (is_large_object_space_immune_) { TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings()); - DCHECK(generational_ && !whole_heap_collection_); + DCHECK(collect_from_space_only_); // Delay copying the live set to the marked set until here 
from // BindBitmaps() as the large objects on the allocation stack may // be newly added to the live set above in MarkAllocStackAsLive(). @@ -506,19 +479,20 @@ static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size } mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) { - size_t object_size = obj->SizeOf(); + const size_t object_size = obj->SizeOf(); size_t bytes_allocated; mirror::Object* forward_address = nullptr; if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) { // If it's allocated before the last GC (older), move // (pseudo-promote) it to the main free list space (as sort // of an old generation.) - space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace(); - forward_address = promo_dest_space->AllocThreadUnsafe(self_, object_size, &bytes_allocated, - nullptr); + forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, + nullptr); if (UNLIKELY(forward_address == nullptr)) { // If out of space, fall back to the to-space. forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr); + // No logic for marking the bitmap, so it must be null. + DCHECK(to_space_->GetLiveBitmap() == nullptr); } else { bytes_promoted_ += bytes_allocated; // Dirty the card at the destionation as it may contain @@ -526,12 +500,12 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) { // space. GetHeap()->WriteBarrierEveryFieldOf(forward_address); // Handle the bitmaps marking. 
- accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap(); + accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap(); DCHECK(live_bitmap != nullptr); - accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap(); + accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap(); DCHECK(mark_bitmap != nullptr); DCHECK(!live_bitmap->Test(forward_address)); - if (!whole_heap_collection_) { + if (collect_from_space_only_) { // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap. DCHECK_EQ(live_bitmap, mark_bitmap); @@ -559,12 +533,23 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) { mark_bitmap->Set(forward_address); } } - DCHECK(forward_address != nullptr); } else { // If it's allocated after the last GC (younger), copy it to the to-space. forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr); + if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) { + to_space_live_bitmap_->Set(forward_address); + } + } + // If it's still null, attempt to use the fallback space. 
+ if (UNLIKELY(forward_address == nullptr)) { + forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, + nullptr); + CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space."; + accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap(); + if (bitmap != nullptr) { + bitmap->Set(forward_address); + } } - CHECK(forward_address != nullptr) << "Out of memory in the to-space."; ++objects_moved_; bytes_moved_ += bytes_allocated; // Copy over the object and add it to the mark stack since we still need to update its @@ -579,11 +564,10 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) { } forward_address->AssertReadBarrierPointer(); } - if (to_space_live_bitmap_ != nullptr) { - to_space_live_bitmap_->Set(forward_address); - } DCHECK(to_space_->HasAddress(forward_address) || - (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address))); + fallback_space_->HasAddress(forward_address) || + (generational_ && promo_dest_space_->HasAddress(forward_address))) + << forward_address << "\n" << GetHeap()->DumpSpaces(); return forward_address; } @@ -648,7 +632,7 @@ void SemiSpace::SweepSystemWeaks() { } bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const { - return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space); + return space != from_space_ && space != to_space_; } void SemiSpace::Sweep(bool swap_bitmaps) { @@ -714,22 +698,20 @@ void SemiSpace::ScanObject(Object* obj) { // Scan anything that's on the mark stack. 
void SemiSpace::ProcessMarkStack() { TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); - space::MallocSpace* promo_dest_space = nullptr; accounting::ContinuousSpaceBitmap* live_bitmap = nullptr; - if (generational_ && !whole_heap_collection_) { + if (collect_from_space_only_) { // If a bump pointer space only collection (and the promotion is // enabled,) we delay the live-bitmap marking of promoted objects // from MarkObject() until this function. - promo_dest_space = GetHeap()->GetPrimaryFreeListSpace(); - live_bitmap = promo_dest_space->GetLiveBitmap(); + live_bitmap = promo_dest_space_->GetLiveBitmap(); DCHECK(live_bitmap != nullptr); - accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap(); + accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap(); DCHECK(mark_bitmap != nullptr); DCHECK_EQ(live_bitmap, mark_bitmap); } while (!mark_stack_->IsEmpty()) { Object* obj = mark_stack_->PopBack(); - if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) { + if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) { // obj has just been promoted. Mark the live bitmap for it, // which is delayed from MarkObject(). DCHECK(!live_bitmap->Test(obj)); @@ -742,16 +724,12 @@ void SemiSpace::ProcessMarkStack() { inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { // All immune objects are assumed marked. - if (immune_region_.ContainsObject(obj)) { - return obj; - } if (from_space_->HasAddress(obj)) { // Returns either the forwarding address or nullptr. return GetForwardingAddressInFromSpace(obj); - } else if (to_space_->HasAddress(obj)) { - // Should be unlikely. - // Already forwarded, must be marked. - return obj; + } else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) || + to_space_->HasAddress(obj)) { + return obj; // Already forwarded, must be marked. 
} return mark_bitmap_->Test(obj) ? obj : nullptr; } @@ -777,9 +755,9 @@ void SemiSpace::FinishPhase() { if (generational_) { // Decide whether to do a whole heap collection or a bump pointer // only space collection at the next collection by updating - // whole_heap_collection. - if (!whole_heap_collection_) { - // Enable whole_heap_collection if the bytes promoted since the + // collect_from_space_only_. + if (collect_from_space_only_) { + // Disable collect_from_space_only_ if the bytes promoted since the // last whole heap collection or the large object bytes // allocated exceeds a threshold. bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_; @@ -792,14 +770,14 @@ void SemiSpace::FinishPhase() { current_los_bytes_allocated >= last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold; if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) { - whole_heap_collection_ = true; + collect_from_space_only_ = false; } } else { // Reset the counters. bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_; large_object_bytes_allocated_at_last_whole_heap_collection_ = GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated(); - whole_heap_collection_ = false; + collect_from_space_only_ = true; } } // Clear all of the spaces' mark bitmaps. diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h index 7f6d1dc01f..71a83f2624 100644 --- a/runtime/gc/collector/semi_space.h +++ b/runtime/gc/collector/semi_space.h @@ -244,9 +244,14 @@ class SemiSpace : public GarbageCollector { // large objects were allocated at the last whole heap collection. uint64_t large_object_bytes_allocated_at_last_whole_heap_collection_; - // Used for the generational mode. When true, collect the whole - // heap. When false, collect only the bump pointer spaces. - bool whole_heap_collection_; + // Used for generational mode. When true, we only collect the from_space_. 
+ bool collect_from_space_only_; + + // The space which we are promoting into, only used for GSS. + space::ContinuousMemMapAllocSpace* promo_dest_space_; + + // The space which we copy to if the to_space_ is full. + space::ContinuousMemMapAllocSpace* fallback_space_; // How many objects and bytes we moved, used so that we don't need to Get the size of the // to_space_ when calculating how many objects and bytes we freed. diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 4ec9bc2f6a..0934921929 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -58,7 +58,6 @@ #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "mirror/reference-inl.h" -#include "object_utils.h" #include "os.h" #include "reflection.h" #include "runtime.h" @@ -1126,7 +1125,13 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack, return false; } -void Heap::DumpSpaces(std::ostream& stream) { +std::string Heap::DumpSpaces() const { + std::ostringstream oss; + DumpSpaces(oss); + return oss.str(); +} + +void Heap::DumpSpaces(std::ostream& stream) const { for (const auto& space : continuous_spaces_) { accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap(); @@ -1159,10 +1164,7 @@ void Heap::VerifyObjectBody(mirror::Object* obj) { if (verify_object_mode_ > kVerifyObjectModeFast) { // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock. 
- if (!IsLiveObjectLocked(obj)) { - DumpSpaces(); - LOG(FATAL) << "Object is dead: " << obj; - } + CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces(); } } @@ -2354,7 +2356,7 @@ size_t Heap::VerifyHeapReferences(bool verify_referents) { accounting::RememberedSet* remembered_set = table_pair.second; remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": "); } - DumpSpaces(); + DumpSpaces(LOG(ERROR)); } return visitor.GetFailureCount(); } @@ -2471,12 +2473,7 @@ bool Heap::VerifyMissingCardMarks() { visitor(*it); } } - - if (visitor.Failed()) { - DumpSpaces(); - return false; - } - return true; + return !visitor.Failed(); } void Heap::SwapStacks(Thread* self) { @@ -2574,9 +2571,8 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) { ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); SwapStacks(self); // Sort the live stack so that we can quickly binary search it later. - if (!VerifyMissingCardMarks()) { - LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed"; - } + CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName() + << " missing card mark verification failed\n" << DumpSpaces(); SwapStacks(self); } if (verify_mod_union_table_) { diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index b20795369e..0da113f2c6 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -539,7 +539,8 @@ class Heap { } } - void DumpSpaces(std::ostream& stream = LOG(INFO)); + std::string DumpSpaces() const WARN_UNUSED; + void DumpSpaces(std::ostream& stream) const; // Dump object should only be used by the signal handler. 
void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h index fff4df1e0e..71c8eb5a84 100644 --- a/runtime/gc/space/space.h +++ b/runtime/gc/space/space.h @@ -407,11 +407,11 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace { // Clear the space back to an empty space. virtual void Clear() = 0; - accounting::ContinuousSpaceBitmap* GetLiveBitmap() const { + accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE { return live_bitmap_.get(); } - accounting::ContinuousSpaceBitmap* GetMarkBitmap() const { + accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE { return mark_bitmap_.get(); } diff --git a/runtime/handle.h b/runtime/handle.h index 7e13601af9..f70faf40d0 100644 --- a/runtime/handle.h +++ b/runtime/handle.h @@ -28,29 +28,40 @@ class Thread; template<class T> class Handle; +// Handles are memory locations that contain GC roots. As the mirror::Object*s within a handle are +// GC visible then the GC may move the references within them, something that couldn't be done with +// a wrap pointer. Handles are generally allocated within HandleScopes. ConstHandle is a super-class +// of Handle and doesn't support assignment operations. 
template<class T> class ConstHandle { public: ConstHandle() : reference_(nullptr) { } - ConstHandle(const ConstHandle<T>& handle) ALWAYS_INLINE : reference_(handle.reference_) { + + ALWAYS_INLINE ConstHandle(const ConstHandle<T>& handle) : reference_(handle.reference_) { } - ConstHandle<T>& operator=(const ConstHandle<T>& handle) ALWAYS_INLINE { + + ALWAYS_INLINE ConstHandle<T>& operator=(const ConstHandle<T>& handle) { reference_ = handle.reference_; return *this; } - explicit ConstHandle(StackReference<T>* reference) ALWAYS_INLINE : reference_(reference) { + + ALWAYS_INLINE explicit ConstHandle(StackReference<T>* reference) : reference_(reference) { } - T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + + ALWAYS_INLINE T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return *Get(); } - T* operator->() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + + ALWAYS_INLINE T* operator->() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return Get(); } - T* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + + ALWAYS_INLINE T* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return reference_->AsMirrorPtr(); } - jobject ToJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + + ALWAYS_INLINE jobject ToJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(reference_->AsMirrorPtr() == nullptr)) { // Special case so that we work with NullHandles. 
return nullptr; @@ -73,8 +84,8 @@ class ConstHandle { StackReference<T>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { return reference_; } - const StackReference<T>* GetReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - ALWAYS_INLINE { + ALWAYS_INLINE const StackReference<T>* GetReference() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return reference_; } @@ -86,47 +97,54 @@ class ConstHandle { template<size_t kNumReferences> friend class StackHandleScope; }; +// Handles that support assignment. template<class T> class Handle : public ConstHandle<T> { public: Handle() { } - Handle(const Handle<T>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE + + ALWAYS_INLINE Handle(const Handle<T>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : ConstHandle<T>(handle.reference_) { } - Handle<T>& operator=(const Handle<T>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - ALWAYS_INLINE { + + ALWAYS_INLINE Handle<T>& operator=(const Handle<T>& handle) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ConstHandle<T>::operator=(handle); return *this; } - explicit Handle(StackReference<T>* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - ALWAYS_INLINE : ConstHandle<T>(reference) { + + ALWAYS_INLINE explicit Handle(StackReference<T>* reference) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + : ConstHandle<T>(reference) { } - T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + + ALWAYS_INLINE T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { StackReference<T>* ref = ConstHandle<T>::GetReference(); T* const old = ref->AsMirrorPtr(); ref->Assign(reference); return old; } + template<typename S> + explicit Handle(const Handle<S>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + : ConstHandle<T>(handle) { + } + protected: template<typename S> explicit Handle(StackReference<S>* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : 
ConstHandle<T>(reference) { } - template<typename S> - explicit Handle(const Handle<S>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - : ConstHandle<T>(handle) { - } private: friend class BuildGenericJniFrameVisitor; - template<class S> friend class Handle; friend class HandleScope; template<class S> friend class HandleWrapper; template<size_t kNumReferences> friend class StackHandleScope; }; +// A special case of Handle that only holds references to null. template<class T> class NullHandle : public Handle<T> { public: diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h index 62c761475a..7bc811db87 100644 --- a/runtime/handle_scope-inl.h +++ b/runtime/handle_scope-inl.h @@ -17,7 +17,7 @@ #ifndef ART_RUNTIME_HANDLE_SCOPE_INL_H_ #define ART_RUNTIME_HANDLE_SCOPE_INL_H_ -#include "handle_scope-inl.h" +#include "handle_scope.h" #include "handle.h" #include "thread.h" diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h index 2fd42d2350..42ef77927c 100644 --- a/runtime/handle_scope.h +++ b/runtime/handle_scope.h @@ -30,8 +30,9 @@ class Object; class Thread; -// HandleScopes can be allocated within the bridge frame between managed and native code backed by -// stack storage or manually allocated in native. +// HandleScopes are scoped objects containing a number of Handles. They are used to allocate +// handles, for these handles (and the objects contained within them) to be visible/roots for the +// GC. It is most common to stack allocate HandleScopes using StackHandleScope. class PACKED(4) HandleScope { public: ~HandleScope() {} @@ -131,6 +132,7 @@ class PACKED(4) HandleScope { private: template<size_t kNumReferences> friend class StackHandleScope; + DISALLOW_COPY_AND_ASSIGN(HandleScope); }; @@ -153,7 +155,7 @@ class HandleWrapper : public Handle<T> { // Scoped handle storage of a fixed size that is usually stack allocated. 
template<size_t kNumReferences> -class PACKED(4) StackHandleScope : public HandleScope { +class PACKED(4) StackHandleScope FINAL : public HandleScope { public: explicit StackHandleScope(Thread* self); ~StackHandleScope(); @@ -182,20 +184,29 @@ class PACKED(4) StackHandleScope : public HandleScope { template<class T> Handle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { SetReference(pos_, object); - return Handle<T>(GetHandle(pos_++)); + Handle<T> h(GetHandle(pos_)); + pos_++; + return h; } template<class T> HandleWrapper<T> NewHandleWrapper(T** object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { SetReference(pos_, *object); - Handle<T> h(GetHandle(pos_++)); + Handle<T> h(GetHandle(pos_)); + pos_++; return HandleWrapper<T>(object, h); } private: - // references_storage_ needs to be first so that it matches the address of references_. + // References_storage_ needs to be first so that it appears in the same location as + // HandleScope::references_. StackReference<mirror::Object> references_storage_[kNumReferences]; + + // The thread that the stack handle scope is a linked list upon. The stack handle scope will + // push and pop itself from this thread. Thread* const self_; + + // Position new handles will be created. 
size_t pos_; template<size_t kNumRefs> friend class StackHandleScope; diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc index 33339f8f6c..7e3b6bab26 100644 --- a/runtime/hprof/hprof.cc +++ b/runtime/hprof/hprof.cc @@ -52,7 +52,6 @@ #include "mirror/class.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "os.h" #include "safe_map.h" #include "scoped_thread_state_change.h" diff --git a/runtime/implicit_check_options.h b/runtime/implicit_check_options.h deleted file mode 100644 index a6595b88e0..0000000000 --- a/runtime/implicit_check_options.h +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (C) 2014 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_ -#define ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_ - -#include "gc/heap.h" -#include "gc/space/image_space.h" -#include "instruction_set.h" -#include "runtime.h" - -#include <string> - -namespace art { - -class ImplicitCheckOptions { - public: - static constexpr const char* kImplicitChecksOatHeaderKey = "implicit-checks"; - - static std::string Serialize(bool explicit_null_checks, bool explicit_stack_overflow_checks, - bool explicit_suspend_checks) { - char tmp[4]; - tmp[0] = explicit_null_checks ? 'N' : 'n'; - tmp[1] = explicit_stack_overflow_checks ? 'O' : 'o'; - tmp[2] = explicit_suspend_checks ? 
'S' : 's'; - tmp[3] = 0; - return std::string(tmp); - } - - static bool Parse(const char* str, bool* explicit_null_checks, - bool* explicit_stack_overflow_checks, bool* explicit_suspend_checks) { - if (str != nullptr && str[0] != 0 && str[1] != 0 && str[2] != 0 && - (str[0] == 'n' || str[0] == 'N') && - (str[1] == 'o' || str[1] == 'O') && - (str[2] == 's' || str[2] == 'S')) { - *explicit_null_checks = str[0] == 'N'; - *explicit_stack_overflow_checks = str[1] == 'O'; - *explicit_suspend_checks = str[2] == 'S'; - return true; - } else { - return false; - } - } - - // Check whether the given flags are correct with respect to the current runtime and the given - // executable flag. - static bool CheckRuntimeSupport(bool executable, bool explicit_null_checks, - bool explicit_stack_overflow_checks, - bool explicit_suspend_checks, std::string* error_msg) { - if (!executable) { - // Not meant to be run, i.e., either we are compiling or dumping. Just accept. - return true; - } - - Runtime* runtime = Runtime::Current(); - // We really should have a runtime. - DCHECK_NE(static_cast<Runtime*>(nullptr), runtime); - - if (runtime->GetInstrumentation()->IsForcedInterpretOnly()) { - // We are an interpret-only environment. Ignore the check value. - return true; - } - - if (runtime->ExplicitNullChecks() != explicit_null_checks || - runtime->ExplicitStackOverflowChecks() != explicit_stack_overflow_checks || - runtime->ExplicitSuspendChecks() != explicit_suspend_checks) { - if (error_msg != nullptr) { - // Create an error message. 
- - std::ostringstream os; - os << "Explicit check options do not match runtime: "; - os << runtime->ExplicitNullChecks() << " vs " << explicit_null_checks << " | "; - os << runtime->ExplicitStackOverflowChecks() << " vs " << explicit_stack_overflow_checks - << " | "; - os << runtime->ExplicitSuspendChecks() << " vs " << explicit_suspend_checks; - - *error_msg = os.str(); - } - - // Currently we do not create correct images when pre-opting, so the emulator will fail with - // this change. Once the change is in the tree, REMOVE. - if (true) { - // At least try to log it, though. - if (error_msg != nullptr) { - LOG(WARNING) << *error_msg; - } - return true; - } else { - return false; - } - } - - // Accepted. - return true; - } - - // Check (and override) the flags depending on current support in the ISA. - // Right now will reset all flags to explicit except on ARM. - static void CheckISASupport(InstructionSet isa, bool* explicit_null_checks, - bool* explicit_stack_overflow_checks, bool* explicit_suspend_checks) { - switch (isa) { - case kArm: - case kThumb2: - break; // All checks implemented, leave as is. - - default: // No checks implemented, reset all to explicit checks. - *explicit_null_checks = true; - *explicit_stack_overflow_checks = true; - *explicit_suspend_checks = true; - } - } - - static bool CheckForCompiling(InstructionSet host, InstructionSet target, - bool* explicit_null_checks, bool* explicit_stack_overflow_checks, - bool* explicit_suspend_checks) { - // Check the boot image settings. 
- Runtime* runtime = Runtime::Current(); - if (runtime != nullptr) { - gc::space::ImageSpace* ispace = runtime->GetHeap()->GetImageSpace(); - if (ispace != nullptr) { - const OatFile* oat_file = ispace->GetOatFile(); - if (oat_file != nullptr) { - const char* v = oat_file->GetOatHeader().GetStoreValueByKey(kImplicitChecksOatHeaderKey); - if (!Parse(v, explicit_null_checks, explicit_stack_overflow_checks, - explicit_suspend_checks)) { - LOG(FATAL) << "Should have been able to parse boot image implicit check values"; - } - return true; - } - } - } - - // Check the current runtime. - bool cross_compiling = true; - switch (host) { - case kArm: - case kThumb2: - cross_compiling = target != kArm && target != kThumb2; - break; - default: - cross_compiling = host != target; - break; - } - if (!cross_compiling) { - Runtime* runtime = Runtime::Current(); - *explicit_null_checks = runtime->ExplicitNullChecks(); - *explicit_stack_overflow_checks = runtime->ExplicitStackOverflowChecks(); - *explicit_suspend_checks = runtime->ExplicitSuspendChecks(); - return true; - } - - // Give up. 
- return false; - } -}; - -} // namespace art - -#endif // ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_ diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index e3d32bb75e..f4eaa61c1e 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -35,7 +35,6 @@ #if !defined(ART_USE_PORTABLE_COMPILER) #include "entrypoints/quick/quick_entrypoints.h" #endif -#include "object_utils.h" #include "os.h" #include "scoped_thread_state_change.h" #include "thread.h" diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index 5a03601bce..b35da0cf1b 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -15,6 +15,8 @@ */ #include "interpreter_common.h" + +#include "field_helper.h" #include "mirror/array-inl.h" namespace art { @@ -35,7 +37,6 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst CHECK(self->IsExceptionPending()); return false; } - f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self); Object* obj; if (is_static) { obj = f->GetDeclaringClass(); @@ -46,6 +47,7 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst return false; } } + f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self); // Report this field access to instrumentation if needed. 
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); if (UNLIKELY(instrumentation->HasFieldReadListeners())) { @@ -211,7 +213,6 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction CHECK(self->IsExceptionPending()); return false; } - f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self); Object* obj; if (is_static) { obj = f->GetDeclaringClass(); @@ -223,6 +224,7 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction return false; } } + f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self); uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data); // Report this field access to instrumentation if needed. Since we only have the offset of // the field from the base of the object, we need to look for it first. diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index db42eb06f1..1bcd27e2d4 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -30,6 +30,7 @@ #include "entrypoints/entrypoint_utils-inl.h" #include "gc/accounting/card_table-inl.h" #include "handle_scope-inl.h" +#include "method_helper-inl.h" #include "nth_caller_visitor.h" #include "mirror/art_field-inl.h" #include "mirror/art_method.h" @@ -39,7 +40,6 @@ #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "mirror/string-inl.h" -#include "object_utils.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" #include "thread.h" diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index e97512b6cf..f9c7ec692c 100644 --- a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -41,7 +41,6 @@ #include "mirror/object_array-inl.h" #include "mirror/string-inl.h" #include "mirror/throwable.h" -#include "object_utils.h" #include "parsed_options.h" #include "reflection.h" #include "runtime.h" diff --git 
a/runtime/method_helper-inl.h b/runtime/method_helper-inl.h new file mode 100644 index 0000000000..4f95a28e81 --- /dev/null +++ b/runtime/method_helper-inl.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_METHOD_HELPER_INL_H_ +#define ART_RUNTIME_METHOD_HELPER_INL_H_ + +#include "method_helper.h" + +#include "class_linker.h" +#include "mirror/object_array.h" +#include "runtime.h" +#include "thread-inl.h" + +namespace art { + +inline mirror::Class* MethodHelper::GetClassFromTypeIdx(uint16_t type_idx, bool resolve) { + mirror::ArtMethod* method = GetMethod(); + mirror::Class* type = method->GetDexCacheResolvedTypes()->Get(type_idx); + if (type == nullptr && resolve) { + type = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); + CHECK(type != nullptr || Thread::Current()->IsExceptionPending()); + } + return type; +} + +inline mirror::Class* MethodHelper::GetReturnType(bool resolve) { + mirror::ArtMethod* method = GetMethod(); + const DexFile* dex_file = method->GetDexFile(); + const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex()); + const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id); + uint16_t return_type_idx = proto_id.return_type_idx_; + return GetClassFromTypeIdx(return_type_idx, resolve); +} + +inline mirror::String* MethodHelper::ResolveString(uint32_t string_idx) { + 
mirror::ArtMethod* method = GetMethod(); + mirror::String* s = method->GetDexCacheStrings()->Get(string_idx); + if (UNLIKELY(s == nullptr)) { + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache())); + s = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(), string_idx, + dex_cache); + } + return s; +} + +} // namespace art + +#endif // ART_RUNTIME_METHOD_HELPER_INL_H_ diff --git a/runtime/method_helper.cc b/runtime/method_helper.cc new file mode 100644 index 0000000000..1bd2f9020c --- /dev/null +++ b/runtime/method_helper.cc @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "method_helper-inl.h" + +#include "class_linker.h" +#include "dex_file-inl.h" +#include "handle_scope-inl.h" +#include "mirror/art_method-inl.h" +#include "mirror/dex_cache.h" +#include "runtime.h" + +namespace art { + +mirror::String* MethodHelper::GetNameAsString(Thread* self) { + const DexFile* dex_file = method_->GetDexFile(); + mirror::ArtMethod* method = method_->GetInterfaceMethodIfProxy(); + uint32_t dex_method_idx = method->GetDexMethodIndex(); + const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx); + StackHandleScope<1> hs(self); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache())); + return Runtime::Current()->GetClassLinker()->ResolveString(*dex_file, method_id.name_idx_, + dex_cache); +} + +bool MethodHelper::HasSameNameAndSignature(MethodHelper* other) { + const DexFile* dex_file = method_->GetDexFile(); + const DexFile::MethodId& mid = dex_file->GetMethodId(GetMethod()->GetDexMethodIndex()); + if (method_->GetDexCache() == other->method_->GetDexCache()) { + const DexFile::MethodId& other_mid = + dex_file->GetMethodId(other->GetMethod()->GetDexMethodIndex()); + return mid.name_idx_ == other_mid.name_idx_ && mid.proto_idx_ == other_mid.proto_idx_; + } + const DexFile* other_dex_file = other->method_->GetDexFile(); + const DexFile::MethodId& other_mid = + other_dex_file->GetMethodId(other->GetMethod()->GetDexMethodIndex()); + if (!DexFileStringEquals(dex_file, mid.name_idx_, other_dex_file, other_mid.name_idx_)) { + return false; // Name mismatch. 
+ } + return dex_file->GetMethodSignature(mid) == other_dex_file->GetMethodSignature(other_mid); +} + +bool MethodHelper::HasSameSignatureWithDifferentClassLoaders(MethodHelper* other) { + if (UNLIKELY(GetReturnType() != other->GetReturnType())) { + return false; + } + const DexFile::TypeList* types = method_->GetParameterTypeList(); + const DexFile::TypeList* other_types = other->method_->GetParameterTypeList(); + if (types == nullptr) { + return (other_types == nullptr) || (other_types->Size() == 0); + } else if (UNLIKELY(other_types == nullptr)) { + return types->Size() == 0; + } + uint32_t num_types = types->Size(); + if (UNLIKELY(num_types != other_types->Size())) { + return false; + } + for (uint32_t i = 0; i < num_types; ++i) { + mirror::Class* param_type = GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_); + mirror::Class* other_param_type = + other->GetClassFromTypeIdx(other_types->GetTypeItem(i).type_idx_); + if (UNLIKELY(param_type != other_param_type)) { + return false; + } + } + return true; +} + +uint32_t MethodHelper::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtMethod* method = GetMethod(); + const DexFile* dexfile = method->GetDexFile(); + if (dexfile == &other_dexfile) { + return method->GetDexMethodIndex(); + } + const DexFile::MethodId& mid = dexfile->GetMethodId(method->GetDexMethodIndex()); + const char* mid_declaring_class_descriptor = dexfile->StringByTypeIdx(mid.class_idx_); + const DexFile::StringId* other_descriptor = + other_dexfile.FindStringId(mid_declaring_class_descriptor); + if (other_descriptor != nullptr) { + const DexFile::TypeId* other_type_id = + other_dexfile.FindTypeId(other_dexfile.GetIndexForStringId(*other_descriptor)); + if (other_type_id != nullptr) { + const char* mid_name = dexfile->GetMethodName(mid); + const DexFile::StringId* other_name = other_dexfile.FindStringId(mid_name); + if (other_name != nullptr) { + uint16_t 
other_return_type_idx; + std::vector<uint16_t> other_param_type_idxs; + bool success = other_dexfile.CreateTypeList( + dexfile->GetMethodSignature(mid).ToString(), &other_return_type_idx, + &other_param_type_idxs); + if (success) { + const DexFile::ProtoId* other_sig = + other_dexfile.FindProtoId(other_return_type_idx, other_param_type_idxs); + if (other_sig != nullptr) { + const DexFile::MethodId* other_mid = other_dexfile.FindMethodId( + *other_type_id, *other_name, *other_sig); + if (other_mid != nullptr) { + return other_dexfile.GetIndexForMethodId(*other_mid); + } + } + } + } + } + } + return DexFile::kDexNoIndex; +} + +uint32_t MethodHelper::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile, + uint32_t name_and_signature_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtMethod* method = GetMethod(); + const DexFile* dexfile = method->GetDexFile(); + const uint32_t dex_method_idx = method->GetDexMethodIndex(); + const DexFile::MethodId& mid = dexfile->GetMethodId(dex_method_idx); + const DexFile::MethodId& name_and_sig_mid = other_dexfile.GetMethodId(name_and_signature_idx); + DCHECK_STREQ(dexfile->GetMethodName(mid), other_dexfile.GetMethodName(name_and_sig_mid)); + DCHECK_EQ(dexfile->GetMethodSignature(mid), other_dexfile.GetMethodSignature(name_and_sig_mid)); + if (dexfile == &other_dexfile) { + return dex_method_idx; + } + const char* mid_declaring_class_descriptor = dexfile->StringByTypeIdx(mid.class_idx_); + const DexFile::StringId* other_descriptor = + other_dexfile.FindStringId(mid_declaring_class_descriptor); + if (other_descriptor != nullptr) { + const DexFile::TypeId* other_type_id = + other_dexfile.FindTypeId(other_dexfile.GetIndexForStringId(*other_descriptor)); + if (other_type_id != nullptr) { + const DexFile::MethodId* other_mid = other_dexfile.FindMethodId( + *other_type_id, other_dexfile.GetStringId(name_and_sig_mid.name_idx_), + other_dexfile.GetProtoId(name_and_sig_mid.proto_idx_)); + if (other_mid != nullptr) { 
+ return other_dexfile.GetIndexForMethodId(*other_mid); + } + } + } + return DexFile::kDexNoIndex; +} + +} // namespace art diff --git a/runtime/method_helper.h b/runtime/method_helper.h new file mode 100644 index 0000000000..62465be513 --- /dev/null +++ b/runtime/method_helper.h @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_METHOD_HELPER_H_ +#define ART_RUNTIME_METHOD_HELPER_H_ + +#include "base/macros.h" +#include "handle.h" +#include "mirror/art_method.h" +#include "primitive.h" + +namespace art { + +class MethodHelper { + public: + explicit MethodHelper(Handle<mirror::ArtMethod> m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + : method_(m), shorty_(nullptr), shorty_len_(0) { + SetMethod(m.Get()); + } + + void ChangeMethod(mirror::ArtMethod* new_m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(new_m != nullptr); + SetMethod(new_m); + shorty_ = nullptr; + } + + mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return method_->GetInterfaceMethodIfProxy(); + } + + mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const char* result = shorty_; + if (result == nullptr) { + result = method_->GetShorty(&shorty_len_); + shorty_ = result; + } + return result; + } + + uint32_t GetShortyLength() 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (shorty_ == nullptr) { + GetShorty(); + } + return shorty_len_; + } + + // Counts the number of references in the parameter list of the corresponding method. + // Note: This does _not_ include "this" for non-static methods. + uint32_t GetNumberOfReferenceArgsWithoutReceiver() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const char* shorty = GetShorty(); + uint32_t refs = 0; + for (uint32_t i = 1; i < shorty_len_ ; ++i) { + if (shorty[i] == 'L') { + refs++; + } + } + + return refs; + } + + // May cause thread suspension due to GetClassFromTypeIdx calling ResolveType; this caused a large + // number of bugs at call sites. + mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + size_t NumArgs() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // "1 +" because the first in Args is the receiver. + // "- 1" because we don't count the return type. + return (method_->IsStatic() ? 0 : 1) + GetShortyLength() - 1; + } + + // Get the primitive type associated with the given parameter. + Primitive::Type GetParamPrimitiveType(size_t param) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + CHECK_LT(param, NumArgs()); + if (GetMethod()->IsStatic()) { + param++; // 0th argument must skip return value at start of the shorty + } else if (param == 0) { + return Primitive::kPrimNot; + } + return Primitive::GetType(GetShorty()[param]); + } + + // Is the specified parameter a long or double, where parameter 0 is 'this' for instance methods. + bool IsParamALongOrDouble(size_t param) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Primitive::Type type = GetParamPrimitiveType(param); + return type == Primitive::kPrimLong || type == Primitive::kPrimDouble; + } + + // Is the specified parameter a reference, where parameter 0 is 'this' for instance methods. 
+ bool IsParamAReference(size_t param) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetParamPrimitiveType(param) == Primitive::kPrimNot; + } + + bool HasSameNameAndSignature(MethodHelper* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool HasSameSignatureWithDifferentClassLoaders(MethodHelper* other) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + mirror::Class* GetClassFromTypeIdx(uint16_t type_idx, bool resolve = true) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + mirror::String* ResolveString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // The name_and_signature_idx MUST point to a MethodId with the same name and signature in the + // other_dexfile, such as the method index used to resolve this method in the other_dexfile. + uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile, + uint32_t name_and_signature_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + private: + // Set the method_ field, for proxy methods looking up the interface method via the resolved + // methods table. 
+ void SetMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + method_.Assign(method); + } + + Handle<mirror::ArtMethod> method_; + const char* shorty_; + uint32_t shorty_len_; + + DISALLOW_COPY_AND_ASSIGN(MethodHelper); +}; + +} // namespace art + +#endif // ART_RUNTIME_METHOD_HELPER_H_ diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc index f7b573729b..63f9860278 100644 --- a/runtime/mirror/array.cc +++ b/runtime/mirror/array.cc @@ -25,7 +25,6 @@ #include "object-inl.h" #include "object_array.h" #include "object_array-inl.h" -#include "object_utils.h" #include "handle_scope-inl.h" #include "thread.h" #include "utils.h" diff --git a/runtime/mirror/art_field-inl.h b/runtime/mirror/art_field-inl.h index 90247edc76..00bed92cb3 100644 --- a/runtime/mirror/art_field-inl.h +++ b/runtime/mirror/art_field-inl.h @@ -20,10 +20,10 @@ #include "art_field.h" #include "base/logging.h" +#include "dex_cache.h" #include "gc/accounting/card_table-inl.h" #include "jvalue.h" #include "object-inl.h" -#include "object_utils.h" #include "primitive.h" namespace art { diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc index f2729f660e..da21dfef06 100644 --- a/runtime/mirror/art_field.cc +++ b/runtime/mirror/art_field.cc @@ -20,7 +20,6 @@ #include "gc/accounting/card_table-inl.h" #include "object-inl.h" #include "object_array-inl.h" -#include "object_utils.h" #include "runtime.h" #include "scoped_thread_state_change.h" #include "utils.h" diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h index a5b5df6137..01b05a6e6f 100644 --- a/runtime/mirror/art_method-inl.h +++ b/runtime/mirror/art_method-inl.h @@ -19,11 +19,13 @@ #include "art_method.h" +#include "class_linker.h" +#include "dex_cache.h" #include "dex_file.h" #include "entrypoints/entrypoint_utils.h" +#include "method_helper.h" #include "object-inl.h" #include "object_array.h" -#include "object_utils.h" #include "oat.h" #include 
"quick/quick_method_frame_info.h" #include "read_barrier-inl.h" diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc index 86bac243a0..167f848824 100644 --- a/runtime/mirror/art_method.cc +++ b/runtime/mirror/art_method.cc @@ -27,12 +27,12 @@ #include "interpreter/interpreter.h" #include "jni_internal.h" #include "mapping_table.h" -#include "object-inl.h" -#include "object_array.h" +#include "method_helper.h" #include "object_array-inl.h" +#include "object_array.h" +#include "object-inl.h" #include "scoped_thread_state_change.h" #include "string.h" -#include "object_utils.h" #include "well_known_classes.h" namespace art { diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index be05fb8a9b..fadf80ebcf 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -18,17 +18,16 @@ #include "art_field-inl.h" #include "art_method-inl.h" -#include "class-inl.h" #include "class_linker.h" #include "class_loader.h" +#include "class-inl.h" #include "dex_cache.h" #include "dex_file-inl.h" #include "gc/accounting/card_table-inl.h" -#include "object-inl.h" +#include "handle_scope-inl.h" #include "object_array-inl.h" -#include "object_utils.h" +#include "object-inl.h" #include "runtime.h" -#include "handle_scope-inl.h" #include "thread.h" #include "throwable.h" #include "utils.h" diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc index bc5cbcb4f3..961bc64819 100644 --- a/runtime/mirror/object.cc +++ b/runtime/mirror/object.cc @@ -24,13 +24,13 @@ #include "class.h" #include "class-inl.h" #include "class_linker-inl.h" +#include "field_helper.h" #include "gc/accounting/card_table-inl.h" #include "gc/heap.h" #include "iftable-inl.h" #include "monitor.h" #include "object-inl.h" #include "object_array-inl.h" -#include "object_utils.h" #include "runtime.h" #include "handle_scope-inl.h" #include "throwable.h" diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc index 6efc9e2f54..1c3f1ed5bf 100644 --- 
a/runtime/mirror/throwable.cc +++ b/runtime/mirror/throwable.cc @@ -23,7 +23,6 @@ #include "object-inl.h" #include "object_array.h" #include "object_array-inl.h" -#include "object_utils.h" #include "stack_trace_element.h" #include "utils.h" #include "well_known_classes.h" diff --git a/runtime/monitor.cc b/runtime/monitor.cc index 5633a77b6f..4b26edac61 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -28,7 +28,6 @@ #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "object_utils.h" #include "scoped_thread_state_change.h" #include "thread.h" #include "thread_list.h" @@ -746,7 +745,11 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { contention_count++; Runtime* runtime = Runtime::Current(); if (contention_count <= runtime->GetMaxSpinsBeforeThinkLockInflation()) { - NanoSleep(1000); // Sleep for 1us and re-attempt. + // TODO: Consider switching the thread state to kBlocked when we are yielding. + // Use sched_yield instead of NanoSleep since NanoSleep can wait much longer than the + // parameter you pass in. This can cause thread suspension to take excessively long + // and make long pauses. See b/16307460. 
+ sched_yield(); } else { contention_count = 0; InflateThinLocked(self, h_obj, lock_word, 0); diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index f1a987f80a..b0b64aac16 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -30,7 +30,6 @@ #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "runtime.h" #include "scoped_fast_native_object_access.h" #include "scoped_thread_state_change.h" diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index cede1a0899..e577c2c960 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -21,7 +21,6 @@ #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "scoped_thread_state_change.h" #include "scoped_fast_native_object_access.h" #include "ScopedLocalRef.h" diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc index eae4584019..f94e42b260 100644 --- a/runtime/native/java_lang_reflect_Array.cc +++ b/runtime/native/java_lang_reflect_Array.cc @@ -20,7 +20,6 @@ #include "jni_internal.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "scoped_fast_native_object_access.h" #include "handle_scope-inl.h" diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc index 1981bfd5ab..34cb93ae94 100644 --- a/runtime/native/java_lang_reflect_Constructor.cc +++ b/runtime/native/java_lang_reflect_Constructor.cc @@ -20,7 +20,6 @@ #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "reflection.h" #include "scoped_fast_native_object_access.h" #include "well_known_classes.h" diff --git 
a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc index 3564dfdf47..3903ffcd6a 100644 --- a/runtime/native/java_lang_reflect_Field.cc +++ b/runtime/native/java_lang_reflect_Field.cc @@ -18,11 +18,11 @@ #include "class_linker-inl.h" #include "common_throws.h" #include "dex_file-inl.h" +#include "field_helper.h" #include "jni_internal.h" #include "mirror/art_field-inl.h" #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" -#include "object_utils.h" #include "reflection.h" #include "scoped_fast_native_object_access.h" diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc index ac602acb21..f029b16746 100644 --- a/runtime/native/java_lang_reflect_Method.cc +++ b/runtime/native/java_lang_reflect_Method.cc @@ -21,7 +21,6 @@ #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "object_utils.h" #include "reflection.h" #include "scoped_fast_native_object_access.h" #include "well_known_classes.h" diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 9cefcb6b1a..86c1baef27 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -23,7 +23,6 @@ #include "base/stl_util.h" #include "base/unix_file/fd_file.h" #include "elf_file.h" -#include "implicit_check_options.h" #include "oat.h" #include "mirror/art_method.h" #include "mirror/art_method-inl.h" @@ -80,36 +79,7 @@ OatFile* OatFile::Open(const std::string& filename, } ret.reset(OpenElfFile(file.get(), location, requested_base, false, executable, error_msg)); } - - if (ret.get() == nullptr) { - return nullptr; - } - - // Embedded options check. Right now only implicit checks. - // TODO: Refactor to somewhere else? - const char* implicit_checks_value = ret->GetOatHeader(). 
- GetStoreValueByKey(ImplicitCheckOptions::kImplicitChecksOatHeaderKey); - - if (implicit_checks_value == nullptr) { - *error_msg = "Did not find implicit checks value."; - return nullptr; - } - - bool explicit_null_checks, explicit_so_checks, explicit_suspend_checks; - if (ImplicitCheckOptions::Parse(implicit_checks_value, &explicit_null_checks, - &explicit_so_checks, &explicit_suspend_checks)) { - // Check whether the runtime agrees with the recorded checks. - if (ImplicitCheckOptions::CheckRuntimeSupport(executable, explicit_null_checks, - explicit_so_checks, explicit_suspend_checks, - error_msg)) { - return ret.release(); - } else { - return nullptr; - } - } else { - *error_msg = "Failed parsing implicit check options."; - return nullptr; - } + return ret.release(); } OatFile* OatFile::OpenWritable(File* file, const std::string& location, std::string* error_msg) { diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h index 0e6f4d80ae..592deed1a7 100644 --- a/runtime/object_callbacks.h +++ b/runtime/object_callbacks.h @@ -24,6 +24,8 @@ // For size_t. #include <stdlib.h> +#include "base/macros.h" + namespace art { namespace mirror { class Class; @@ -57,8 +59,7 @@ typedef void (RootCallback)(mirror::Object** root, void* arg, uint32_t thread_id // A callback for visiting an object in the heap. typedef void (ObjectCallback)(mirror::Object* obj, void* arg); // A callback used for marking an object, returns the new address of the object if the object moved. -typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) - __attribute__((warn_unused_result)); +typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED; // A callback for verifying roots. 
typedef void (VerifyRootCallback)(const mirror::Object* root, void* arg, size_t vreg, const StackVisitor* visitor, RootType root_type); @@ -68,13 +69,12 @@ typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Refe // A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new // address the object (if the object didn't move, returns the object input parameter). -typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) - __attribute__((warn_unused_result)); +typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED; // Returns true if the object in the heap reference is marked, if it is marked and has moved the // callback updates the heap reference contain the new value. typedef bool (IsHeapReferenceMarkedCallback)(mirror::HeapReference<mirror::Object>* object, - void* arg) __attribute__((warn_unused_result)); + void* arg) WARN_UNUSED; typedef void (ProcessMarkStackCallback)(void* arg); } // namespace art diff --git a/runtime/object_lock.cc b/runtime/object_lock.cc new file mode 100644 index 0000000000..f7accc0f31 --- /dev/null +++ b/runtime/object_lock.cc @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "object_lock.h" + +#include "mirror/object-inl.h" +#include "monitor.h" + +namespace art { + +template <typename T> +ObjectLock<T>::ObjectLock(Thread* self, Handle<T> object) : self_(self), obj_(object) { + CHECK(object.Get() != nullptr); + obj_->MonitorEnter(self_); +} + +template <typename T> +ObjectLock<T>::~ObjectLock() { + obj_->MonitorExit(self_); +} + +template <typename T> +void ObjectLock<T>::WaitIgnoringInterrupts() { + Monitor::Wait(self_, obj_.Get(), 0, 0, false, kWaiting); +} + +template <typename T> +void ObjectLock<T>::Notify() { + obj_->Notify(self_); +} + +template <typename T> +void ObjectLock<T>::NotifyAll() { + obj_->NotifyAll(self_); +} + +template class ObjectLock<mirror::Class>; +template class ObjectLock<mirror::Object>; + +} // namespace art diff --git a/runtime/object_lock.h b/runtime/object_lock.h new file mode 100644 index 0000000000..acddc03e29 --- /dev/null +++ b/runtime/object_lock.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_OBJECT_LOCK_H_ +#define ART_RUNTIME_OBJECT_LOCK_H_ + +#include "base/macros.h" +#include "base/mutex.h" +#include "handle.h" + +namespace art { + +class Thread; + +template <typename T> +class ObjectLock { + public: + ObjectLock(Thread* self, Handle<T> object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void WaitIgnoringInterrupts() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + private: + Thread* const self_; + Handle<T> const obj_; + + DISALLOW_COPY_AND_ASSIGN(ObjectLock); +}; + +} // namespace art + +#endif // ART_RUNTIME_OBJECT_LOCK_H_ diff --git a/runtime/object_utils.h b/runtime/object_utils.h deleted file mode 100644 index 4379b4ad2a..0000000000 --- a/runtime/object_utils.h +++ /dev/null @@ -1,379 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ART_RUNTIME_OBJECT_UTILS_H_ -#define ART_RUNTIME_OBJECT_UTILS_H_ - -#include "class_linker.h" -#include "dex_file.h" -#include "monitor.h" -#include "mirror/art_field.h" -#include "mirror/art_method.h" -#include "mirror/class.h" -#include "mirror/dex_cache.h" -#include "mirror/iftable.h" -#include "mirror/string.h" - -#include "runtime.h" -#include "handle_scope-inl.h" - -#include <string> - -namespace art { - -template <typename T> -class ObjectLock { - public: - ObjectLock(Thread* self, Handle<T> object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - : self_(self), obj_(object) { - CHECK(object.Get() != nullptr); - obj_->MonitorEnter(self_); - } - - ~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - obj_->MonitorExit(self_); - } - - void WaitIgnoringInterrupts() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Monitor::Wait(self_, obj_.Get(), 0, 0, false, kWaiting); - } - - void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - obj_->Notify(self_); - } - - void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - obj_->NotifyAll(self_); - } - - private: - Thread* const self_; - Handle<T> const obj_; - DISALLOW_COPY_AND_ASSIGN(ObjectLock); -}; - -class FieldHelper { - public: - explicit FieldHelper(Handle<mirror::ArtField> f) : field_(f) {} - - void ChangeField(mirror::ArtField* new_f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(new_f != nullptr); - field_.Assign(new_f); - } - - mirror::ArtField* GetField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return field_.Get(); - } - - mirror::Class* GetType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - uint32_t field_index = field_->GetDexFieldIndex(); - if (UNLIKELY(field_->GetDeclaringClass()->IsProxyClass())) { - return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(), - field_->GetTypeDescriptor()); - } - const DexFile* dex_file = field_->GetDexFile(); - const DexFile::FieldId& field_id = 
dex_file->GetFieldId(field_index); - mirror::Class* type = field_->GetDexCache()->GetResolvedType(field_id.type_idx_); - if (resolve && (type == nullptr)) { - type = Runtime::Current()->GetClassLinker()->ResolveType(field_id.type_idx_, field_.Get()); - CHECK(type != nullptr || Thread::Current()->IsExceptionPending()); - } - return type; - } - - // The returned const char* is only guaranteed to be valid for the lifetime of the FieldHelper. - // If you need it longer, copy it into a std::string. - const char* GetDeclaringClassDescriptor() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - uint32_t field_index = field_->GetDexFieldIndex(); - if (UNLIKELY(field_->GetDeclaringClass()->IsProxyClass())) { - DCHECK(field_->IsStatic()); - DCHECK_LT(field_index, 2U); - // 0 == Class[] interfaces; 1 == Class[][] throws; - declaring_class_descriptor_ = field_->GetDeclaringClass()->GetDescriptor(); - return declaring_class_descriptor_.c_str(); - } - const DexFile* dex_file = field_->GetDexFile(); - const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index); - return dex_file->GetFieldDeclaringClassDescriptor(field_id); - } - - private: - Handle<mirror::ArtField> field_; - std::string declaring_class_descriptor_; - - DISALLOW_COPY_AND_ASSIGN(FieldHelper); -}; - -class MethodHelper { - public: - explicit MethodHelper(Handle<mirror::ArtMethod> m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - : method_(m), shorty_(nullptr), shorty_len_(0) { - SetMethod(m.Get()); - } - - void ChangeMethod(mirror::ArtMethod* new_m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(new_m != nullptr); - SetMethod(new_m); - shorty_ = nullptr; - } - - mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return method_->GetInterfaceMethodIfProxy(); - } - - mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - const DexFile* dex_file = method_->GetDexFile(); - mirror::ArtMethod* method = 
method_->GetInterfaceMethodIfProxy(); - uint32_t dex_method_idx = method->GetDexMethodIndex(); - const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx); - StackHandleScope<1> hs(self); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache())); - return Runtime::Current()->GetClassLinker()->ResolveString(*dex_file, method_id.name_idx_, - dex_cache); - } - - const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - const char* result = shorty_; - if (result == nullptr) { - result = method_->GetShorty(&shorty_len_); - shorty_ = result; - } - return result; - } - - uint32_t GetShortyLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (shorty_ == nullptr) { - GetShorty(); - } - return shorty_len_; - } - - // Counts the number of references in the parameter list of the corresponding method. - // Note: Thus does _not_ include "this" for non-static methods. - uint32_t GetNumberOfReferenceArgsWithoutReceiver() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - const char* shorty = GetShorty(); - uint32_t refs = 0; - for (uint32_t i = 1; i < shorty_len_ ; ++i) { - if (shorty[i] == 'L') { - refs++; - } - } - - return refs; - } - - // May cause thread suspension due to GetClassFromTypeIdx calling ResolveType this caused a large - // number of bugs at call sites. - mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = GetMethod(); - const DexFile* dex_file = method->GetDexFile(); - const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex()); - const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id); - uint16_t return_type_idx = proto_id.return_type_idx_; - return GetClassFromTypeIdx(return_type_idx, resolve); - } - - size_t NumArgs() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // "1 +" because the first in Args is the receiver. - // "- 1" because we don't count the return type. 
- return (method_->IsStatic() ? 0 : 1) + GetShortyLength() - 1; - } - - // Get the primitive type associated with the given parameter. - Primitive::Type GetParamPrimitiveType(size_t param) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CHECK_LT(param, NumArgs()); - if (GetMethod()->IsStatic()) { - param++; // 0th argument must skip return value at start of the shorty - } else if (param == 0) { - return Primitive::kPrimNot; - } - return Primitive::GetType(GetShorty()[param]); - } - - // Is the specified parameter a long or double, where parameter 0 is 'this' for instance methods. - bool IsParamALongOrDouble(size_t param) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Primitive::Type type = GetParamPrimitiveType(param); - return type == Primitive::kPrimLong || type == Primitive::kPrimDouble; - } - - // Is the specified parameter a reference, where parameter 0 is 'this' for instance methods. - bool IsParamAReference(size_t param) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetParamPrimitiveType(param) == Primitive::kPrimNot; - } - - bool HasSameNameAndSignature(MethodHelper* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - const DexFile* dex_file = method_->GetDexFile(); - const DexFile::MethodId& mid = dex_file->GetMethodId(GetMethod()->GetDexMethodIndex()); - if (method_->GetDexCache() == other->method_->GetDexCache()) { - const DexFile::MethodId& other_mid = - dex_file->GetMethodId(other->GetMethod()->GetDexMethodIndex()); - return mid.name_idx_ == other_mid.name_idx_ && mid.proto_idx_ == other_mid.proto_idx_; - } - const DexFile* other_dex_file = other->method_->GetDexFile(); - const DexFile::MethodId& other_mid = - other_dex_file->GetMethodId(other->GetMethod()->GetDexMethodIndex()); - if (!DexFileStringEquals(dex_file, mid.name_idx_, other_dex_file, other_mid.name_idx_)) { - return false; // Name mismatch. 
- } - return dex_file->GetMethodSignature(mid) == other_dex_file->GetMethodSignature(other_mid); - } - - bool HasSameSignatureWithDifferentClassLoaders(MethodHelper* other) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (UNLIKELY(GetReturnType() != other->GetReturnType())) { - return false; - } - const DexFile::TypeList* types = method_->GetParameterTypeList(); - const DexFile::TypeList* other_types = other->method_->GetParameterTypeList(); - if (types == nullptr) { - return (other_types == nullptr) || (other_types->Size() == 0); - } else if (UNLIKELY(other_types == nullptr)) { - return types->Size() == 0; - } - uint32_t num_types = types->Size(); - if (UNLIKELY(num_types != other_types->Size())) { - return false; - } - for (uint32_t i = 0; i < num_types; ++i) { - mirror::Class* param_type = GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_); - mirror::Class* other_param_type = - other->GetClassFromTypeIdx(other_types->GetTypeItem(i).type_idx_); - if (UNLIKELY(param_type != other_param_type)) { - return false; - } - } - return true; - } - - mirror::Class* GetClassFromTypeIdx(uint16_t type_idx, bool resolve = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = GetMethod(); - mirror::Class* type = method->GetDexCacheResolvedTypes()->Get(type_idx); - if (type == nullptr && resolve) { - type = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); - CHECK(type != nullptr || Thread::Current()->IsExceptionPending()); - } - return type; - } - - mirror::Class* GetDexCacheResolvedType(uint16_t type_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetMethod()->GetDexCacheResolvedTypes()->Get(type_idx); - } - - mirror::String* ResolveString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = GetMethod(); - mirror::String* s = method->GetDexCacheStrings()->Get(string_idx); - if (UNLIKELY(s == nullptr)) { - StackHandleScope<1> hs(Thread::Current()); - 
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache())); - s = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(), string_idx, - dex_cache); - } - return s; - } - - uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = GetMethod(); - const DexFile* dexfile = method->GetDexFile(); - if (dexfile == &other_dexfile) { - return method->GetDexMethodIndex(); - } - const DexFile::MethodId& mid = dexfile->GetMethodId(method->GetDexMethodIndex()); - const char* mid_declaring_class_descriptor = dexfile->StringByTypeIdx(mid.class_idx_); - const DexFile::StringId* other_descriptor = - other_dexfile.FindStringId(mid_declaring_class_descriptor); - if (other_descriptor != nullptr) { - const DexFile::TypeId* other_type_id = - other_dexfile.FindTypeId(other_dexfile.GetIndexForStringId(*other_descriptor)); - if (other_type_id != nullptr) { - const char* mid_name = dexfile->GetMethodName(mid); - const DexFile::StringId* other_name = other_dexfile.FindStringId(mid_name); - if (other_name != nullptr) { - uint16_t other_return_type_idx; - std::vector<uint16_t> other_param_type_idxs; - bool success = other_dexfile.CreateTypeList( - dexfile->GetMethodSignature(mid).ToString(), &other_return_type_idx, - &other_param_type_idxs); - if (success) { - const DexFile::ProtoId* other_sig = - other_dexfile.FindProtoId(other_return_type_idx, other_param_type_idxs); - if (other_sig != nullptr) { - const DexFile::MethodId* other_mid = other_dexfile.FindMethodId( - *other_type_id, *other_name, *other_sig); - if (other_mid != nullptr) { - return other_dexfile.GetIndexForMethodId(*other_mid); - } - } - } - } - } - } - return DexFile::kDexNoIndex; - } - - // The name_and_signature_idx MUST point to a MethodId with the same name and signature in the - // other_dexfile, such as the method index used to resolve this method in the other_dexfile. 
- uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile, - uint32_t name_and_signature_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = GetMethod(); - const DexFile* dexfile = method->GetDexFile(); - const uint32_t dex_method_idx = method->GetDexMethodIndex(); - const DexFile::MethodId& mid = dexfile->GetMethodId(dex_method_idx); - const DexFile::MethodId& name_and_sig_mid = other_dexfile.GetMethodId(name_and_signature_idx); - DCHECK_STREQ(dexfile->GetMethodName(mid), other_dexfile.GetMethodName(name_and_sig_mid)); - DCHECK_EQ(dexfile->GetMethodSignature(mid), other_dexfile.GetMethodSignature(name_and_sig_mid)); - if (dexfile == &other_dexfile) { - return dex_method_idx; - } - const char* mid_declaring_class_descriptor = dexfile->StringByTypeIdx(mid.class_idx_); - const DexFile::StringId* other_descriptor = - other_dexfile.FindStringId(mid_declaring_class_descriptor); - if (other_descriptor != nullptr) { - const DexFile::TypeId* other_type_id = - other_dexfile.FindTypeId(other_dexfile.GetIndexForStringId(*other_descriptor)); - if (other_type_id != nullptr) { - const DexFile::MethodId* other_mid = other_dexfile.FindMethodId( - *other_type_id, other_dexfile.GetStringId(name_and_sig_mid.name_idx_), - other_dexfile.GetProtoId(name_and_sig_mid.proto_idx_)); - if (other_mid != nullptr) { - return other_dexfile.GetIndexForMethodId(*other_mid); - } - } - } - return DexFile::kDexNoIndex; - } - - private: - // Set the method_ field, for proxy methods looking up the interface method via the resolved - // methods table. 
- void SetMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - method_.Assign(method); - } - - Handle<mirror::ArtMethod> method_; - const char* shorty_; - uint32_t shorty_len_; - - DISALLOW_COPY_AND_ASSIGN(MethodHelper); -}; - -} // namespace art - -#endif // ART_RUNTIME_OBJECT_UTILS_H_ diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc index 577691c90b..9a1d0f71aa 100644 --- a/runtime/parsed_options.cc +++ b/runtime/parsed_options.cc @@ -265,38 +265,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize verify_ = true; image_isa_ = kRuntimeISA; - // Default to explicit checks. Switch off with -implicit-checks:. - // or setprop dalvik.vm.implicit_checks check1,check2,... -#ifdef HAVE_ANDROID_OS - { - char buf[PROP_VALUE_MAX]; - property_get("dalvik.vm.implicit_checks", buf, "null,stack"); - std::string checks(buf); - std::vector<std::string> checkvec; - Split(checks, ',', checkvec); - explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck | - kExplicitStackOverflowCheck; - for (auto& str : checkvec) { - std::string val = Trim(str); - if (val == "none") { - explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck | - kExplicitStackOverflowCheck; - } else if (val == "null") { - explicit_checks_ &= ~kExplicitNullCheck; - } else if (val == "suspend") { - explicit_checks_ &= ~kExplicitSuspendCheck; - } else if (val == "stack") { - explicit_checks_ &= ~kExplicitStackOverflowCheck; - } else if (val == "all") { - explicit_checks_ = 0; - } - } - } -#else - explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck | - kExplicitStackOverflowCheck; -#endif - for (size_t i = 0; i < options.size(); ++i) { if (true && options[0].first == "-Xzygote") { LOG(INFO) << "option[" << i << "]=" << options[i].first; @@ -312,6 +280,7 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize Exit(0); } else if (StartsWith(option, "-Xbootclasspath:")) { 
boot_class_path_string_ = option.substr(strlen("-Xbootclasspath:")).data(); + LOG(INFO) << "setting boot class path to " << boot_class_path_string_; } else if (option == "-classpath" || option == "-cp") { // TODO: support -Djava.class.path i++; @@ -589,54 +558,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize if (!ParseUnsignedInteger(option, ':', &profiler_options_.max_stack_depth_)) { return false; } - } else if (StartsWith(option, "-implicit-checks:")) { - std::string checks; - if (!ParseStringAfterChar(option, ':', &checks)) { - return false; - } - std::vector<std::string> checkvec; - Split(checks, ',', checkvec); - for (auto& str : checkvec) { - std::string val = Trim(str); - if (val == "none") { - explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck | - kExplicitStackOverflowCheck; - } else if (val == "null") { - explicit_checks_ &= ~kExplicitNullCheck; - } else if (val == "suspend") { - explicit_checks_ &= ~kExplicitSuspendCheck; - } else if (val == "stack") { - explicit_checks_ &= ~kExplicitStackOverflowCheck; - } else if (val == "all") { - explicit_checks_ = 0; - } else { - return false; - } - } - } else if (StartsWith(option, "-explicit-checks:")) { - std::string checks; - if (!ParseStringAfterChar(option, ':', &checks)) { - return false; - } - std::vector<std::string> checkvec; - Split(checks, ',', checkvec); - for (auto& str : checkvec) { - std::string val = Trim(str); - if (val == "none") { - explicit_checks_ = 0; - } else if (val == "null") { - explicit_checks_ |= kExplicitNullCheck; - } else if (val == "suspend") { - explicit_checks_ |= kExplicitSuspendCheck; - } else if (val == "stack") { - explicit_checks_ |= kExplicitStackOverflowCheck; - } else if (val == "all") { - explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck | - kExplicitStackOverflowCheck; - } else { - return false; - } - } } else if (StartsWith(option, "-Xcompiler:")) { if (!ParseStringAfterChar(option, ':', &compiler_executable_)) 
{ return false; diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h index b1de62a54f..23f2bcfa62 100644 --- a/runtime/parsed_options.h +++ b/runtime/parsed_options.h @@ -93,10 +93,6 @@ class ParsedOptions { bool verify_; InstructionSet image_isa_; - static constexpr uint32_t kExplicitNullCheck = 1; - static constexpr uint32_t kExplicitSuspendCheck = 2; - static constexpr uint32_t kExplicitStackOverflowCheck = 4; - uint32_t explicit_checks_; // Whether or not we use homogeneous space compaction to avoid OOM errors. If enabled, // the heap will attempt to create an extra space which enables compacting from a malloc space to // another malloc space when we are about to throw OOM. diff --git a/runtime/profiler.cc b/runtime/profiler.cc index 7a7a92a7c0..951444812b 100644 --- a/runtime/profiler.cc +++ b/runtime/profiler.cc @@ -32,7 +32,6 @@ #include "mirror/dex_cache.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "os.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc index 57eb3f79b6..bd6656dda1 100644 --- a/runtime/proxy_test.cc +++ b/runtime/proxy_test.cc @@ -18,6 +18,7 @@ #include <vector> #include "common_compiler_test.h" +#include "field_helper.h" #include "mirror/art_field-inl.h" #include "scoped_thread_state_change.h" diff --git a/runtime/reflection.cc b/runtime/reflection.cc index 41421bc10f..758c1bbd1f 100644 --- a/runtime/reflection.cc +++ b/runtime/reflection.cc @@ -20,14 +20,14 @@ #include "common_throws.h" #include "dex_file-inl.h" #include "jni_internal.h" +#include "method_helper-inl.h" #include "mirror/art_field-inl.h" #include "mirror/art_method-inl.h" -#include "mirror/class.h" #include "mirror/class-inl.h" -#include "mirror/object_array.h" +#include "mirror/class.h" #include "mirror/object_array-inl.h" +#include "mirror/object_array.h" #include "nth_caller_visitor.h" -#include "object_utils.h" #include 
"scoped_thread_state_change.h" #include "stack.h" #include "well_known_classes.h" diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 0ddd2aed4a..aca2607bfc 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -141,7 +141,10 @@ Runtime::Runtime() suspend_handler_(nullptr), stack_overflow_handler_(nullptr), verify_(false), - target_sdk_version_(0) { + target_sdk_version_(0), + implicit_null_checks_(false), + implicit_so_checks_(false), + implicit_suspend_checks_(false) { for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) { callee_save_methods_[i] = nullptr; } @@ -581,41 +584,6 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) GetInstrumentation()->ForceInterpretOnly(); } - bool implicit_checks_supported = false; - switch (kRuntimeISA) { - case kArm: - case kThumb2: - implicit_checks_supported = true; - break; - default: - break; - } - - if (!options->interpreter_only_ && implicit_checks_supported && - (options->explicit_checks_ != (ParsedOptions::kExplicitSuspendCheck | - ParsedOptions::kExplicitNullCheck | - ParsedOptions::kExplicitStackOverflowCheck) || kEnableJavaStackTraceHandler)) { - fault_manager.Init(); - - // These need to be in a specific order. The null point check handler must be - // after the suspend check and stack overflow check handlers. 
- if ((options->explicit_checks_ & ParsedOptions::kExplicitSuspendCheck) == 0) { - suspend_handler_ = new SuspensionHandler(&fault_manager); - } - - if ((options->explicit_checks_ & ParsedOptions::kExplicitStackOverflowCheck) == 0) { - stack_overflow_handler_ = new StackOverflowHandler(&fault_manager); - } - - if ((options->explicit_checks_ & ParsedOptions::kExplicitNullCheck) == 0) { - null_pointer_handler_ = new NullPointerHandler(&fault_manager); - } - - if (kEnableJavaStackTraceHandler) { - new JavaStackTraceHandler(&fault_manager); - } - } - heap_ = new gc::Heap(options->heap_initial_size_, options->heap_growth_limit_, options->heap_min_free_, @@ -648,6 +616,42 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) BlockSignals(); InitPlatformSignalHandlers(); + // Change the implicit checks flags based on runtime architecture. + switch (kRuntimeISA) { + case kArm: + case kThumb2: + case kX86: + implicit_null_checks_ = true; + implicit_so_checks_ = true; + break; + default: + // Keep the defaults. + break; + } + + if (!options->interpreter_only_ && + (implicit_null_checks_ || implicit_so_checks_ || implicit_suspend_checks_)) { + fault_manager.Init(); + + // These need to be in a specific order. The null point check handler must be + // after the suspend check and stack overflow check handlers. 
+ if (implicit_suspend_checks_) { + suspend_handler_ = new SuspensionHandler(&fault_manager); + } + + if (implicit_so_checks_) { + stack_overflow_handler_ = new StackOverflowHandler(&fault_manager); + } + + if (implicit_null_checks_) { + null_pointer_handler_ = new NullPointerHandler(&fault_manager); + } + + if (kEnableJavaStackTraceHandler) { + new JavaStackTraceHandler(&fault_manager); + } + } + java_vm_ = new JavaVMExt(this, options.get()); Thread::Startup(); @@ -1222,37 +1226,6 @@ void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::strin argv->push_back("--compiler-filter=interpret-only"); } - argv->push_back("--runtime-arg"); - std::string checkstr = "-implicit-checks"; - - int nchecks = 0; - char checksep = ':'; - - if (!ExplicitNullChecks()) { - checkstr += checksep; - checksep = ','; - checkstr += "null"; - ++nchecks; - } - if (!ExplicitSuspendChecks()) { - checkstr += checksep; - checksep = ','; - checkstr += "suspend"; - ++nchecks; - } - - if (!ExplicitStackOverflowChecks()) { - checkstr += checksep; - checksep = ','; - checkstr += "stack"; - ++nchecks; - } - - if (nchecks == 0) { - checkstr += ":none"; - } - argv->push_back(checkstr); - // Make the dex2oat instruction set match that of the launching runtime. If we have multiple // architecture support, dex2oat may be compiled as a different instruction-set than that // currently being executed. diff --git a/runtime/runtime.h b/runtime/runtime.h index fccccbdfd7..284e4ffe30 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -589,6 +589,11 @@ class Runtime { // Specifies target SDK version to allow workarounds for certain API levels. int32_t target_sdk_version_; + // Implicit checks flags. + bool implicit_null_checks_; // NullPointer checks are implicit. + bool implicit_so_checks_; // StackOverflow checks are implicit. + bool implicit_suspend_checks_; // Thread suspension checks are implicit. 
+ DISALLOW_COPY_AND_ASSIGN(Runtime); }; diff --git a/runtime/stack.cc b/runtime/stack.cc index a6cf24e503..71e566efc3 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -23,7 +23,6 @@ #include "mirror/object.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "object_utils.h" #include "quick/quick_method_frame_info.h" #include "runtime.h" #include "thread.h" diff --git a/runtime/thread.cc b/runtime/thread.cc index dd55ed1654..f888029af1 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -32,8 +32,8 @@ #include "arch/context.h" #include "base/mutex.h" -#include "class_linker.h" #include "class_linker-inl.h" +#include "class_linker.h" #include "debugger.h" #include "dex_file-inl.h" #include "entrypoints/entrypoint_utils.h" @@ -43,17 +43,18 @@ #include "gc/allocator/rosalloc.h" #include "gc/heap.h" #include "gc/space/space.h" +#include "handle_scope-inl.h" #include "handle_scope.h" #include "indirect_reference_table-inl.h" #include "jni_internal.h" #include "mirror/art_field-inl.h" #include "mirror/art_method-inl.h" -#include "mirror/class-inl.h" #include "mirror/class_loader.h" +#include "mirror/class-inl.h" #include "mirror/object_array-inl.h" #include "mirror/stack_trace_element.h" #include "monitor.h" -#include "object_utils.h" +#include "object_lock.h" #include "quick_exception_handler.h" #include "quick/quick_method_frame_info.h" #include "reflection.h" @@ -61,10 +62,9 @@ #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" -#include "handle_scope-inl.h" #include "stack.h" -#include "thread-inl.h" #include "thread_list.h" +#include "thread-inl.h" #include "utils.h" #include "verifier/dex_gc_map.h" #include "verify_object-inl.h" @@ -232,47 +232,95 @@ static size_t FixStackSize(size_t stack_size) { return stack_size; } +// Global variable to prevent the compiler optimizing away the page reads for the stack. +byte dont_optimize_this; + // Install a protected region in the stack. 
This is used to trigger a SIGSEGV if a stack // overflow is detected. It is located right below the stack_end_. Just below that // is the StackOverflow reserved region used when creating the StackOverflow // exception. +// +// There is a little complexity here that deserves a special mention. When running on the +// host (glibc), the process's main thread's stack is allocated with a special flag +// to prevent memory being allocated when it's not needed. This flag makes the +// kernel only allocate memory for the stack by growing down in memory. Because we +// want to put an mprotected region far away from that at the stack top, we need +// to make sure the pages for the stack are mapped in before we call mprotect. We do +// this by reading every page from the stack bottom (highest address) to the stack top. +// We then madvise this away. void Thread::InstallImplicitProtection(bool is_main_stack) { byte* pregion = tlsPtr_.stack_end; + byte* stack_lowmem = tlsPtr_.stack_begin; + byte* stack_top = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(&pregion) & + ~(kPageSize - 1)); // Page containing current top of stack. + + const bool running_on_intel = (kRuntimeISA == kX86) || (kRuntimeISA == kX86_64); + + if (running_on_intel) { + // On Intel, we need to map in the main stack. This must be done by reading from the + // current stack pointer downwards as the stack is mapped using VM_GROWSDOWN + // in the kernel. Any access more than a page below the current SP will cause + // a segv. + if (is_main_stack) { + // First we need to unprotect the protected region because this may + // be called more than once for a particular stack and we will crash + // if we try to read the protected page. + mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_READ); + + // Read every page from the high address to the low. 
+ for (byte* p = stack_top; p > stack_lowmem; p -= kPageSize) { + dont_optimize_this = *p; + } + } + } + // Check and place a marker word at the lowest usable address in the stack. This + // is used to prevent a double protection. constexpr uint32_t kMarker = 0xdadadada; uintptr_t *marker = reinterpret_cast<uintptr_t*>(pregion); if (*marker == kMarker) { - // The region has already been set up. + // The region has already been set up. But on the main stack on the host we have + // removed the protected region in order to read the stack memory. We need to put + // this back again. + if (is_main_stack && running_on_intel) { + mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_NONE); + madvise(stack_lowmem, stack_top - stack_lowmem, MADV_DONTNEED); + } return; } // Add marker so that we can detect a second attempt to do this. *marker = kMarker; - pregion -= kStackOverflowProtectedSize; - - // Touch the pages in the region to map them in. Otherwise mprotect fails. Only - // need to do this on the main stack. We only need to touch one byte per page. - if (is_main_stack) { - byte* start = pregion; - byte* end = pregion + kStackOverflowProtectedSize; - while (start < end) { - *start = static_cast<byte>(0); - start += kPageSize; + if (!running_on_intel) { + // Running on !Intel, stacks are mapped cleanly. The protected region for the + // main stack just needs to be mapped in. We do this by writing one byte per page. + for (byte* p = pregion - kStackOverflowProtectedSize; p < pregion; p += kPageSize) { + *p = 0; } } + pregion -= kStackOverflowProtectedSize; + VLOG(threads) << "installing stack protected region at " << std::hex << static_cast<void*>(pregion) << " to " << static_cast<void*>(pregion + kStackOverflowProtectedSize - 1); + if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) { LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. 
Reason:" << strerror(errno); } // Tell the kernel that we won't be needing these pages any more. + // NB. madvise will probably write zeroes into the memory (on linux it does). if (is_main_stack) { - madvise(pregion, kStackOverflowProtectedSize, MADV_DONTNEED); + if (running_on_intel) { + // On the host, it's the whole stack (minus a page to prevent overwrite of stack top). + madvise(stack_lowmem, stack_top - stack_lowmem - kPageSize, MADV_DONTNEED); + } else { + // On Android, just the protected region. + madvise(pregion, kStackOverflowProtectedSize, MADV_DONTNEED); + } } } @@ -533,13 +581,17 @@ void Thread::InitStackHwm() { // Install the protected region if we are doing implicit overflow checks. if (implicit_stack_check) { if (is_main_thread) { - // The main thread has a 16K protected region at the bottom. We need + size_t guardsize; + pthread_attr_t attributes; + CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), "guard size query"); + CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, &guardsize), "guard size query"); + CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), "guard size query"); + // The main thread might have protected region at the bottom. We need // to install our own region so we need to move the limits // of the stack to make room for it. 
- constexpr uint32_t kDelta = 16 * KB; - tlsPtr_.stack_begin += kDelta; - tlsPtr_.stack_end += kDelta; - tlsPtr_.stack_size -= kDelta; + tlsPtr_.stack_begin += guardsize; + tlsPtr_.stack_end += guardsize; + tlsPtr_.stack_size -= guardsize; } InstallImplicitProtection(is_main_thread); } diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index 54732fae04..b649b626ca 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -39,6 +39,8 @@ namespace art { +static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5); + ThreadList::ThreadList() : suspend_all_count_(0), debug_suspend_all_count_(0), thread_exit_cond_("thread exit condition variable", *Locks::thread_list_lock_) { @@ -304,8 +306,8 @@ void ThreadList::SuspendAll() { DCHECK(self != nullptr); VLOG(threads) << *self << " SuspendAll starting..."; - ATRACE_BEGIN("Suspending mutator threads"); + uint64_t start_time = NanoTime(); Locks::mutator_lock_->AssertNotHeld(self); Locks::thread_list_lock_->AssertNotHeld(self); @@ -338,6 +340,11 @@ void ThreadList::SuspendAll() { Locks::mutator_lock_->ExclusiveLock(self); #endif + uint64_t end_time = NanoTime(); + if (end_time - start_time > kLongThreadSuspendThreshold) { + LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(end_time - start_time); + } + if (kDebugLocking) { // Debug check that all threads are suspended. 
AssertThreadsAreSuspended(self, self); diff --git a/runtime/throw_location.cc b/runtime/throw_location.cc index a1347a49bb..04abe64453 100644 --- a/runtime/throw_location.cc +++ b/runtime/throw_location.cc @@ -19,7 +19,6 @@ #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "utils.h" namespace art { diff --git a/runtime/trace.cc b/runtime/trace.cc index 1eb5cf8f87..f51b8c435a 100644 --- a/runtime/trace.cc +++ b/runtime/trace.cc @@ -30,7 +30,6 @@ #include "mirror/dex_cache.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "os.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" diff --git a/runtime/utils.cc b/runtime/utils.cc index b56bdd0c29..8b1ad39edc 100644 --- a/runtime/utils.cc +++ b/runtime/utils.cc @@ -28,6 +28,7 @@ #include "base/stl_util.h" #include "base/unix_file/fd_file.h" #include "dex_file-inl.h" +#include "field_helper.h" #include "mirror/art_field-inl.h" #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" @@ -35,7 +36,6 @@ #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "mirror/string.h" -#include "object_utils.h" #include "os.h" #include "scoped_thread_state_change.h" #include "utf-inl.h" diff --git a/runtime/utils.h b/runtime/utils.h index 2cb3af7aec..c920050234 100644 --- a/runtime/utils.h +++ b/runtime/utils.h @@ -167,8 +167,7 @@ struct TypeIdentity { // For rounding integers. 
template<typename T> -static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) - __attribute__((warn_unused_result)); +static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) WARN_UNUSED; template<typename T> static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) { @@ -178,8 +177,7 @@ static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) { } template<typename T> -static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) - __attribute__((warn_unused_result)); +static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) WARN_UNUSED; template<typename T> static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) { @@ -188,7 +186,7 @@ static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) { // For aligning pointers. template<typename T> -static inline T* AlignDown(T* x, uintptr_t n) __attribute__((warn_unused_result)); +static inline T* AlignDown(T* x, uintptr_t n) WARN_UNUSED; template<typename T> static inline T* AlignDown(T* x, uintptr_t n) { @@ -196,7 +194,7 @@ static inline T* AlignDown(T* x, uintptr_t n) { } template<typename T> -static inline T* AlignUp(T* x, uintptr_t n) __attribute__((warn_unused_result)); +static inline T* AlignUp(T* x, uintptr_t n) WARN_UNUSED; template<typename T> static inline T* AlignUp(T* x, uintptr_t n) { diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index eabb993879..f1b5afd235 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -25,10 +25,12 @@ #include "dex_file-inl.h" #include "dex_instruction-inl.h" #include "dex_instruction_visitor.h" +#include "field_helper.h" #include "gc/accounting/card_table-inl.h" #include "indenter.h" #include "intern_table.h" #include "leb128.h" +#include "method_helper-inl.h" #include "mirror/art_field-inl.h" #include "mirror/art_method-inl.h" #include "mirror/class.h" @@ -36,7 +38,6 @@ #include "mirror/dex_cache-inl.h" #include 
"mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "object_utils.h" #include "register_line-inl.h" #include "runtime.h" #include "scoped_thread_state_change.h" diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc index e24c92091c..f0729e4271 100644 --- a/runtime/verifier/reg_type.cc +++ b/runtime/verifier/reg_type.cc @@ -24,7 +24,6 @@ #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "object_utils.h" #include "reg_type_cache-inl.h" #include "scoped_thread_state_change.h" diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc index ff9edbbeaf..91fba4d2bb 100644 --- a/runtime/verifier/reg_type_cache.cc +++ b/runtime/verifier/reg_type_cache.cc @@ -21,7 +21,6 @@ #include "dex_file-inl.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" namespace art { namespace verifier { diff --git a/runtime/verify_object.h b/runtime/verify_object.h index 6640e0dd4a..8e1653ddb9 100644 --- a/runtime/verify_object.h +++ b/runtime/verify_object.h @@ -52,10 +52,10 @@ static constexpr VerifyObjectFlags kDefaultVerifyFlags = kVerifyNone; static constexpr VerifyObjectMode kVerifyObjectSupport = kDefaultVerifyFlags != 0 ? kVerifyObjectModeFast : kVerifyObjectModeDisabled; -void VerifyObject(mirror::Object* obj) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS; +ALWAYS_INLINE void VerifyObject(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; // Check that c.getClass() == c.getClass().getClass(). 
-bool VerifyClassClass(mirror::Class* c) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS; +ALWAYS_INLINE bool VerifyClassClass(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS; } // namespace art diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk index 8e25339d9e..d86735d120 100644 --- a/sigchainlib/Android.mk +++ b/sigchainlib/Android.mk @@ -23,8 +23,23 @@ LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION) LOCAL_MODULE_TAGS := optional LOCAL_CFLAGS += $(ART_TARGET_CFLAGS) LOCAL_SRC_FILES := sigchain.cc +LOCAL_CLANG = $(ART_TARGET_CLANG) LOCAL_MODULE:= libsigchain LOCAL_SHARED_LIBRARIES := liblog libdl LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common_build.mk include $(BUILD_SHARED_LIBRARY) + +# Build host library. +include $(CLEAR_VARS) +LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION) +LOCAL_MODULE_TAGS := optional +LOCAL_IS_HOST_MODULE := true +LOCAL_CFLAGS += $(ART_HOST_CFLAGS) +LOCAL_CLANG = $(ART_HOST_CLANG) +LOCAL_SRC_FILES := sigchain.cc +LOCAL_MODULE:= libsigchain +LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk +LOCAL_LDLIBS = -ldl +LOCAL_MULTILIB := both +include $(BUILD_HOST_SHARED_LIBRARY) diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc index 5a5805fe4f..6f93083832 100644 --- a/sigchainlib/sigchain.cc +++ b/sigchainlib/sigchain.cc @@ -14,12 +14,22 @@ * limitations under the License. */ +#ifdef HAVE_ANDROID_OS #include <android/log.h> +#else +#include <stdarg.h> +#include <iostream> +#endif + #include <dlfcn.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> +#if defined(__APPLE__) +#define _NSIG NSIG +#endif + namespace art { class SignalAction { @@ -67,7 +77,11 @@ static void log(const char* format, ...) 
{ va_list ap; va_start(ap, format); vsnprintf(buf, sizeof(buf), format, ap); +#ifdef HAVE_ANDROID_OS __android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf); +#else + std::cout << buf << "\n"; +#endif va_end(ap); } @@ -104,10 +118,16 @@ void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) { if ((action.sa_flags & SA_SIGINFO) == 0) { if (action.sa_handler != NULL) { action.sa_handler(sig); + } else { + signal(sig, SIG_DFL); + raise(sig); } } else { if (action.sa_sigaction != NULL) { action.sa_sigaction(sig, info, context); + } else { + signal(sig, SIG_DFL); + raise(sig); } } } diff --git a/sigchainlib/sigchain.h b/sigchainlib/sigchain.h index f6f2253d72..a4ce81ce4c 100644 --- a/sigchainlib/sigchain.h +++ b/sigchainlib/sigchain.h @@ -18,10 +18,13 @@ #define ART_SIGCHAINLIB_SIGCHAIN_H_ #include <signal.h> + namespace art { void ClaimSignalChain(int signal, struct sigaction* oldaction); + void UnclaimSignalChain(int signal); + void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context); } // namespace art diff --git a/test/Android.oat.mk b/test/Android.oat.mk index 16300bba54..2b142db890 100644 --- a/test/Android.oat.mk +++ b/test/Android.oat.mk @@ -203,6 +203,7 @@ $(3): $$(ART_TEST_HOST_OAT_$(1)_DEX) $(ART_TEST_HOST_OAT_DEPENDENCIES) ANDROID_ROOT=$(HOST_OUT) \ ANDROID_LOG_TAGS='*:d' \ LD_LIBRARY_PATH=$$($(2)ART_HOST_OUT_SHARED_LIBRARIES) \ + LD_PRELOAD=libsigchain$$(ART_HOST_SHLIB_EXTENSION) \ $(HOST_OUT_EXECUTABLES)/dalvikvm$$($(2)ART_PHONY_TEST_HOST_SUFFIX) $(DALVIKVM_FLAGS) $(5) \ -XXlib:libartd$(HOST_SHLIB_SUFFIX) -Ximage:$$(HOST_CORE_IMG_LOCATION) \ -classpath $(ART_HOST_TEST_DIR)/android-data-$$@/oat-test-dex-$(1).jar \ diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index 25bcf0a790..78312d1245 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -21,26 +21,63 @@ include art/build/Android.common_test.mk TEST_ART_RUN_TESTS := $(wildcard $(LOCAL_PATH)/[0-9]*) TEST_ART_RUN_TESTS := $(subst 
$(LOCAL_PATH)/,, $(TEST_ART_RUN_TESTS)) +# List all the test names for host and target excluding the -trace suffix +# $(1): test name, e.g. 003-omnibus-opcodes +# $(2): undefined or -trace +define all-run-test-names + test-art-host-run-test$(2)-default-$(1)32 \ + test-art-host-run-test$(2)-optimizing-$(1)32 \ + test-art-host-run-test$(2)-interpreter-$(1)32 \ + test-art-host-run-test$(2)-default-$(1)64 \ + test-art-host-run-test$(2)-optimizing-$(1)64 \ + test-art-host-run-test$(2)-interpreter-$(1)64 \ + test-art-target-run-test$(2)-default-$(1)32 \ + test-art-target-run-test$(2)-optimizing-$(1)32 \ + test-art-target-run-test$(2)-interpreter-$(1)32 \ + test-art-target-run-test$(2)-default-$(1)64 \ + test-art-target-run-test$(2)-optimizing-$(1)64 \ + test-art-target-run-test$(2)-interpreter-$(1)64 +endef # all-run-test-names + # Tests that are timing sensitive and flaky on heavily loaded systems. TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \ - test-art-host-run-test-default-053-wait-some32 \ - test-art-host-run-test-default-053-wait-some64 \ - test-art-host-run-test-interpreter-053-wait-some32 \ - test-art-host-run-test-interpreter-053-wait-some64 \ - test-art-host-run-test-optimizing-053-wait-some32 \ - test-art-host-run-test-optimizing-053-wait-some64 \ - test-art-host-run-test-default-055-enum-performance32 \ - test-art-host-run-test-default-055-enum-performance64 \ - test-art-host-run-test-interpreter-055-enum-performance32 \ - test-art-host-run-test-interpreter-055-enum-performance64 \ - test-art-host-run-test-optimizing-055-enum-performance32 \ - test-art-host-run-test-optimizing-055-enum-performance64 + 053-wait-some \ + 055-enum-performance # disable timing sensitive tests on "dist" builds. 
ifdef dist_goal - ART_TEST_KNOWN_BROKEN += $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS) + ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),)) + ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace)) endif +# Tests that are broken in --trace mode. +TEST_ART_BROKEN_TRACE_RUN_TESTS := \ + 003-omnibus-opcodes \ + 004-annotations \ + 018-stack-overflow \ + 023-many-interfaces \ + 031-class-attributes \ + 037-inherit \ + 044-proxy \ + 046-reflect \ + 051-thread \ + 055-enum-performance \ + 064-field-access \ + 078-polymorphic-virtual \ + 080-oom-throw \ + 082-inline-execute \ + 083-compiler-regressions \ + 093-serialization \ + 097-duplicate-method \ + 100-reflect2 \ + 102-concurrent-gc \ + 103-string-append \ + 107-int-math2 \ + 112-double-math \ + 701-easy-div-rem + +ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace)) + # The path where build only targets will be output, e.g. 
# out/target/product/generic_x86_64/obj/PACKAGING/art-run-tests_intermediates/DATA art_run_tests_dir := $(call intermediates-dir-for,PACKAGING,art-run-tests)/DATA @@ -96,9 +133,11 @@ ART_TEST_HOST_RUN_TEST_ALL_RULES := ART_TEST_HOST_RUN_TEST_DEFAULT_RULES := ART_TEST_HOST_RUN_TEST_INTERPRETER_RULES := ART_TEST_HOST_RUN_TEST_OPTIMIZING_RULES := +ART_TEST_HOST_RUN_TEST_ALL$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_DEFAULT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := +ART_TEST_HOST_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := @@ -124,8 +163,10 @@ endif # $(2): host or target # $(3): default, optimizing or interpreter # $(4): 32 or 64 +# $(5): run tests with tracing enabled or not: trace or undefined define define-test-art-run-test run_test_options := $(addprefix --runtime-option ,$(DALVIKVM_FLAGS)) + run_test_rule_name := test-art-$(2)-run-test-$(3)-$(1)$(4) uc_host_or_target := prereq_rule := ifeq ($(2),host) @@ -163,7 +204,14 @@ define define-test-art-run-test $$(error found $(4) expected 32 or 64) endif endif - run_test_rule_name := test-art-$(2)-run-test-$(3)-$(1)$(4) + ifeq ($(5),trace) + run_test_options += --trace + run_test_rule_name := test-art-$(2)-run-test-trace-$(3)-$(1)$(4) + else + ifneq (,$(5)) + $$(error found $(5) expected undefined or -trace) + endif + endif run_test_options := --output-path $(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \ $$(run_test_options) $$(run_test_rule_name): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options) @@ -222,9 +270,13 @@ define define-test-art-run-test-group 
ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_INTERPRETER_$(1)_RULES := ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_OPTIMIZING_$(1)_RULES := ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)_RULES := - $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX))) - $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX))) - $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX))) + $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),)) + $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),)) + $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),)) + ifeq ($(2),host) + # For now just test tracing on the host with default. + $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace)) + endif do_second := false ifeq ($(2),host) ifneq ($$(HOST_PREFER_32_BIT),true) @@ -236,9 +288,13 @@ define define-test-art-run-test-group endif endif ifeq (true,$$(do_second)) - $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX))) - $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX))) - $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX))) + $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),)) + $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),)) + $$(eval $$(call 
define-test-art-run-test,$(1),$(2),optimizing,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),)) + ifeq ($(2),host) + # For now just test tracing on the host with default. + $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace)) + endif endif $$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-default-$(1), \ @@ -319,6 +375,7 @@ endif define-test-art-run-test := define-test-art-run-test-group-rule := define-test-art-run-test-group := +all-run-test-names := ART_TEST_TARGET_RUN_TEST_ALL_RULES := ART_TEST_TARGET_RUN_TEST_DEFAULT_RULES := ART_TEST_TARGET_RUN_TEST_INTERPRETER_RULES := @@ -335,9 +392,11 @@ ART_TEST_HOST_RUN_TEST_ALL_RULES := ART_TEST_HOST_RUN_TEST_DEFAULT_RULES := ART_TEST_HOST_RUN_TEST_INTERPRETER_RULES := ART_TEST_HOST_RUN_TEST_OPTIMIZING_RULES := +ART_TEST_HOST_RUN_TEST_ALL$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_DEFAULT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := +ART_TEST_HOST_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc index 87187ed19d..e5a1786102 100644 --- a/test/ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/ReferenceMap/stack_walk_refmap_jni.cc @@ -20,12 +20,10 @@ #include "class_linker.h" #include "dex_file-inl.h" #include "gc_map.h" -#include "mirror/art_method.h" #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include 
"scoped_thread_state_change.h" #include "thread.h" #include "jni.h" diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc index c849c54bbe..e404f6ab7d 100644 --- a/test/StackWalk/stack_walk_jni.cc +++ b/test/StackWalk/stack_walk_jni.cc @@ -19,12 +19,10 @@ #include "class_linker.h" #include "gc_map.h" -#include "mirror/art_method.h" #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" -#include "object_utils.h" #include "jni.h" #include "scoped_thread_state_change.h" |