| Mode | File | Changed lines |
| -rw-r--r-- | Android.mk | 45 |
| -rw-r--r-- | build/Android.gtest.mk | 1 |
| -rw-r--r-- | compiler/dex/quick/mir_to_lir-inl.h | 2 |
| -rw-r--r-- | compiler/dex/quick/x86/assemble_x86.cc | 31 |
| -rw-r--r-- | compiler/optimizing/nodes.cc | 29 |
| -rw-r--r-- | compiler/optimizing/nodes.h | 2 |
| -rw-r--r-- | compiler/optimizing/nodes_test.cc | 66 |
| -rw-r--r-- | compiler/optimizing/register_allocator.cc | 4 |
| -rw-r--r-- | runtime/debugger.cc | 650 |
| -rw-r--r-- | runtime/debugger.h | 17 |
| -rw-r--r-- | runtime/dex_instruction_list.h | 2 |
| -rw-r--r-- | runtime/jdwp/jdwp_event.cc | 13 |
| -rw-r--r-- | runtime/jdwp/jdwp_handler.cc | 40 |
| -rw-r--r-- | runtime/thread.cc | 7 |
| -rw-r--r-- | test/408-move-bug/expected.txt | 0 |
| -rw-r--r-- | test/408-move-bug/info.txt | 2 |
| -rw-r--r-- | test/408-move-bug/src/Main.java | 69 |
17 files changed, 595 insertions(+), 385 deletions(-)
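The bulk of the change sits in compiler/optimizing (removing an instruction now also unlinks it from environment use lists, plus a parallel-move insertion fix covered by the new test 408-move-bug) and in runtime/debugger.cc (the JDWP StackFrame GetValues/SetValues handlers move into Dbg::GetLocalValues/SetLocalValues). As context for the nodes.cc hunk below, here is a minimal standalone sketch of the singly linked use-list removal it factors into a shared RemoveFromUseList helper; HUser and UseNode are simplified stand-ins, not ART's HInstruction/HEnvironment/HUseListNode, and the sketch stops at the first match rather than claiming the exact original semantics.

```cpp
// Minimal sketch (simplified stand-in types, not ART's real classes) of the
// use-list removal that nodes.cc shares between RemoveUser and
// RemoveEnvironmentUser.
#include <cassert>
#include <cstddef>

struct HUser {};  // stand-in for HInstruction / HEnvironment

struct UseNode {  // stand-in for HUseListNode<T>
  HUser* user;
  size_t index;
  UseNode* tail;
};

// Unlink the node recording (user, input_index) from the singly linked list.
static void RemoveFromUseList(HUser* user, size_t input_index, UseNode** list) {
  UseNode* previous = nullptr;
  UseNode* current = *list;
  while (current != nullptr) {
    if (current->user == user && current->index == input_index) {
      if (previous == nullptr) {
        *list = current->tail;           // removing the head node
      } else {
        previous->tail = current->tail;  // splice around the match
      }
      return;  // sketch stops at the first match
    }
    previous = current;
    current = current->tail;
  }
}

int main() {
  HUser a, b;
  UseNode second{&b, 0, nullptr};
  UseNode first{&a, 1, &second};
  UseNode* head = &first;
  RemoveFromUseList(&a, 1, &head);  // removes the head
  assert(head == &second);
  RemoveFromUseList(&b, 0, &head);  // removes the remaining node
  assert(head == nullptr);
  return 0;
}
```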
diff --git a/Android.mk b/Android.mk index 3cd6b5f273..f8d46a4e3a 100644 --- a/Android.mk +++ b/Android.mk @@ -77,7 +77,8 @@ endif .PHONY: clean-oat-target clean-oat-target: - adb remount + adb root + adb wait-for-device remount adb shell rm -rf $(ART_TARGET_NATIVETEST_DIR) adb shell rm -rf $(ART_TARGET_TEST_DIR) adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*/* @@ -140,7 +141,8 @@ include $(art_path)/test/Android.run-test.mk # Sync test files to the target, depends upon all things that must be pushed to the target. .PHONY: test-art-target-sync test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS) - adb remount + adb root + adb wait-for-device remount adb sync # Undefine variable now its served its purpose. @@ -348,7 +350,8 @@ oat-target: $(ART_TARGET_DEPENDENCIES) $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE) $(O .PHONY: oat-target-sync oat-target-sync: oat-target - adb remount + adb root + adb wait-for-device remount adb sync ######################################################################## @@ -367,29 +370,29 @@ build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TAR .PHONY: use-art use-art: - adb root && sleep 3 - adb shell stop + adb root + adb wait-for-device shell stop adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so adb shell start .PHONY: use-artd use-artd: - adb root && sleep 3 - adb shell stop + adb root + adb wait-for-device shell stop adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so adb shell start .PHONY: use-dalvik use-dalvik: - adb root && sleep 3 - adb shell stop + adb root + adb wait-for-device shell stop adb shell setprop persist.sys.dalvik.vm.lib.2 libdvm.so adb shell start .PHONY: use-art-full use-art-full: - adb root && sleep 3 - adb shell stop + adb root + adb wait-for-device shell stop adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* adb shell setprop dalvik.vm.dex2oat-filter "" adb shell setprop dalvik.vm.image-dex2oat-filter "" @@ -398,8 +401,8 @@ use-art-full: .PHONY: use-artd-full use-artd-full: - adb root && sleep 3 - adb shell stop + adb root + adb wait-for-device shell stop adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* adb shell setprop dalvik.vm.dex2oat-filter "" adb shell setprop dalvik.vm.image-dex2oat-filter "" @@ -408,8 +411,8 @@ use-artd-full: .PHONY: use-art-smart use-art-smart: - adb root && sleep 3 - adb shell stop + adb root + adb wait-for-device shell stop adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* adb shell setprop dalvik.vm.dex2oat-filter "interpret-only" adb shell setprop dalvik.vm.image-dex2oat-filter "" @@ -418,8 +421,8 @@ use-art-smart: .PHONY: use-art-interpret-only use-art-interpret-only: - adb root && sleep 3 - adb shell stop + adb root + adb wait-for-device shell stop adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* adb shell setprop dalvik.vm.dex2oat-filter "interpret-only" adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only" @@ -428,8 +431,8 @@ use-art-interpret-only: .PHONY: use-artd-interpret-only use-artd-interpret-only: - adb root && sleep 3 - adb shell stop + adb root + adb wait-for-device shell stop adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* adb shell setprop dalvik.vm.dex2oat-filter "interpret-only" adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only" @@ -438,8 +441,8 @@ use-artd-interpret-only: .PHONY: use-art-verify-none use-art-verify-none: - adb root && sleep 3 - adb shell stop + adb root + adb wait-for-device shell stop adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* adb shell setprop dalvik.vm.dex2oat-filter "verify-none" adb shell setprop 
dalvik.vm.image-dex2oat-filter "verify-none" diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index d93d6dc10f..af43a3c6d6 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -151,6 +151,7 @@ COMPILER_GTEST_COMMON_SRC_FILES := \ compiler/optimizing/liveness_test.cc \ compiler/optimizing/live_interval_test.cc \ compiler/optimizing/live_ranges_test.cc \ + compiler/optimizing/nodes_test.cc \ compiler/optimizing/parallel_move_test.cc \ compiler/optimizing/pretty_printer_test.cc \ compiler/optimizing/register_allocator_test.cc \ diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h index 2e4e2921bd..22588f3e8c 100644 --- a/compiler/dex/quick/mir_to_lir-inl.h +++ b/compiler/dex/quick/mir_to_lir-inl.h @@ -97,7 +97,7 @@ inline LIR* Mir2Lir::NewLIR2(int opcode, int dest, int src1) { } inline LIR* Mir2Lir::NewLIR2NoDest(int opcode, int src, int info) { - DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP)) + DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP)) << GetTargetInstName(opcode) << " " << opcode << " " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " " << current_dalvik_offset_; diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc index 9935a22991..a9a02523e2 100644 --- a/compiler/dex/quick/x86/assemble_x86.cc +++ b/compiler/dex/quick/x86/assemble_x86.cc @@ -192,7 +192,7 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0, { kX86Movnti32MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0x0F, 0, 0xC3, 0, 0, 0, 0, 0, false }, "Movnti32MR", "[!0r+!1d],!2r" }, { kX86Movnti32AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0x0F, 0, 0xC3, 0, 0, 0, 0, 0, false }, "Movnti32AR", "[!0r+!1r<<!2d+!3d],!4r" }, { kX86Mov32TR, kThreadReg, IS_STORE | IS_BINARY_OP | REG_USE1, { THREAD_PREFIX, 0, 0x89, 0, 0, 0, 0, 0, false }, "Mov32TR", "fs:[!0d],!1r" }, - { kX86Mov32RR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0, 0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov32RR", "!0r,!1r" }, + { kX86Mov32RR, kRegReg, IS_MOVE | IS_BINARY_OP | REG_DEF0_USE1, { 0, 0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov32RR", "!0r,!1r" }, { kX86Mov32RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE1, { 0, 0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov32RM", "!0r,[!1r+!2d]" }, { kX86Mov32RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, { kX86Mov32RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, 0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov32RT", "!0r,fs:[!1d]" }, @@ -201,15 +201,15 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0, { kX86Mov32AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0, 0, 0xC7, 0, 0, 0, 0, 4, false }, "Mov32AI", "[!0r+!1r<<!2d+!3d],!4d" }, { kX86Mov32TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0, 0xC7, 0, 0, 0, 0, 4, false }, "Mov32TI", "fs:[!0d],!1d" }, - { kX86Lea32RM, kRegMem, IS_TERTIARY_OP | IS_LOAD | REG_DEF0_USE1, { 0, 0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea32RM", "!0r,[!1r+!2d]" }, - { kX86Lea32RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, + { kX86Lea32RM, kRegMem, IS_TERTIARY_OP | REG_DEF0_USE1, { 0, 0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea32RM", "!0r,[!1r+!2d]" }, + { kX86Lea32RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, { kX86Mov64MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { REX_W, 0, 0x89, 0, 0, 0, 0, 0, false }, "Mov64MR", 
"[!0r+!1d],!2r" }, { kX86Mov64AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { REX_W, 0, 0x89, 0, 0, 0, 0, 0, false }, "Mov64AR", "[!0r+!1r<<!2d+!3d],!4r" }, { kX86Movnti64MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0x0F, 0, 0xC3, 0, 0, 0, 0, 0, false }, "Movnti64MR", "[!0r+!1d],!2r" }, { kX86Movnti64AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0x0F, 0, 0xC3, 0, 0, 0, 0, 0, false }, "Movnti64AR", "[!0r+!1r<<!2d+!3d],!4r" }, { kX86Mov64TR, kThreadReg, IS_STORE | IS_BINARY_OP | REG_USE1, { THREAD_PREFIX, REX_W, 0x89, 0, 0, 0, 0, 0, false }, "Mov64TR", "fs:[!0d],!1r" }, - { kX86Mov64RR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { REX_W, 0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov64RR", "!0r,!1r" }, + { kX86Mov64RR, kRegReg, IS_MOVE | IS_BINARY_OP | REG_DEF0_USE1, { REX_W, 0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov64RR", "!0r,!1r" }, { kX86Mov64RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE1, { REX_W, 0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov64RM", "!0r,[!1r+!2d]" }, { kX86Mov64RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { REX_W, 0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov64RA", "!0r,[!1r+!2r<<!3d+!4d]" }, { kX86Mov64RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, REX_W, 0x8B, 0, 0, 0, 0, 0, false }, "Mov64RT", "!0r,fs:[!1d]" }, @@ -219,8 +219,8 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0, { kX86Mov64AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { REX_W, 0, 0xC7, 0, 0, 0, 0, 4, false }, "Mov64AI", "[!0r+!1r<<!2d+!3d],!4d" }, { kX86Mov64TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, REX_W, 0xC7, 0, 0, 0, 0, 4, false }, "Mov64TI", "fs:[!0d],!1d" }, - { kX86Lea64RM, kRegMem, IS_TERTIARY_OP | IS_LOAD | REG_DEF0_USE1, { REX_W, 0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea64RM", "!0r,[!1r+!2d]" }, - { kX86Lea64RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12, { REX_W, 0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea64RA", "!0r,[!1r+!2r<<!3d+!4d]" }, + { kX86Lea64RM, kRegMem, IS_TERTIARY_OP | REG_DEF0_USE1, { REX_W, 0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea64RM", "!0r,[!1r+!2d]" }, + { kX86Lea64RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12, { REX_W, 0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea64RA", "!0r,[!1r+!2r<<!3d+!4d]" }, { kX86Cmov32RRC, kRegRegCond, IS_TERTIARY_OP | REG_DEF0_USE01 | USES_CCODES, { 0, 0, 0x0F, 0x40, 0, 0, 0, 0, false }, "Cmovcc32RR", "!2c !0r,!1r" }, { kX86Cmov64RRC, kRegRegCond, IS_TERTIARY_OP | REG_DEF0_USE01 | USES_CCODES, { REX_W, 0, 0x0F, 0x40, 0, 0, 0, 0, false }, "Cmovcc64RR", "!2c !0r,!1r" }, @@ -444,14 +444,14 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0, { kX86PslldRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x72, 0, 6, 0, 1, false }, "PslldRI", "!0r,!1d" }, { kX86PsllqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 6, 0, 1, false }, "PsllqRI", "!0r,!1d" }, - { kX86Fild32M, kMem, IS_LOAD | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDB, 0x00, 0, 0, 0, 0, false }, "Fild32M", "[!0r,!1d]" }, - { kX86Fild64M, kMem, IS_LOAD | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDF, 0x00, 0, 5, 0, 0, false }, "Fild64M", "[!0r,!1d]" }, - { kX86Fld32M, kMem, IS_LOAD | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xD9, 0x00, 0, 0, 0, 0, false }, "Fld32M", "[!0r,!1d]" }, - { kX86Fld64M, kMem, IS_LOAD | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDD, 0x00, 0, 0, 0, 0, false }, "Fld64M", "[!0r,!1d]" }, - { kX86Fstp32M, kMem, IS_STORE | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xD9, 0x00, 0, 3, 0, 0, false }, "Fstps32M", "[!0r,!1d]" }, - { kX86Fstp64M, kMem, IS_STORE | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDD, 0x00, 
0, 3, 0, 0, false }, "Fstpd64M", "[!0r,!1d]" }, - { kX86Fst32M, kMem, IS_STORE | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xD9, 0x00, 0, 2, 0, 0, false }, "Fsts32M", "[!0r,!1d]" }, - { kX86Fst64M, kMem, IS_STORE | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDD, 0x00, 0, 2, 0, 0, false }, "Fstd64M", "[!0r,!1d]" }, + { kX86Fild32M, kMem, IS_LOAD | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDB, 0x00, 0, 0, 0, 0, false }, "Fild32M", "[!0r,!1d]" }, + { kX86Fild64M, kMem, IS_LOAD | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDF, 0x00, 0, 5, 0, 0, false }, "Fild64M", "[!0r,!1d]" }, + { kX86Fld32M, kMem, IS_LOAD | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xD9, 0x00, 0, 0, 0, 0, false }, "Fld32M", "[!0r,!1d]" }, + { kX86Fld64M, kMem, IS_LOAD | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDD, 0x00, 0, 0, 0, 0, false }, "Fld64M", "[!0r,!1d]" }, + { kX86Fstp32M, kMem, IS_STORE | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xD9, 0x00, 0, 3, 0, 0, false }, "Fstps32M", "[!0r,!1d]" }, + { kX86Fstp64M, kMem, IS_STORE | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDD, 0x00, 0, 3, 0, 0, false }, "Fstpd64M", "[!0r,!1d]" }, + { kX86Fst32M, kMem, IS_STORE | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xD9, 0x00, 0, 2, 0, 0, false }, "Fsts32M", "[!0r,!1d]" }, + { kX86Fst64M, kMem, IS_STORE | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDD, 0x00, 0, 2, 0, 0, false }, "Fstd64M", "[!0r,!1d]" }, { kX86Fprem, kNullary, NO_OPERAND | USE_FP_STACK, { 0xD9, 0, 0xF8, 0, 0, 0, 0, 0, false }, "Fprem64", "" }, { kX86Fucompp, kNullary, NO_OPERAND | USE_FP_STACK, { 0xDA, 0, 0xE9, 0, 0, 0, 0, 0, false }, "Fucompp", "" }, { kX86Fstsw16R, kNullary, NO_OPERAND | REG_DEFA | USE_FP_STACK, { 0x9B, 0xDF, 0xE0, 0, 0, 0, 0, 0, false }, "Fstsw16R", "ax" }, @@ -680,7 +680,8 @@ size_t X86Mir2Lir::ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int } if (displacement != 0 || LowRegisterBits(raw_base) == rs_rBP.GetRegNum()) { // BP requires an explicit displacement, even when it's 0. - if (entry->opcode != kX86Lea32RA && entry->opcode != kX86Lea64RA) { + if (entry->opcode != kX86Lea32RA && entry->opcode != kX86Lea64RA && + entry->opcode != kX86Lea32RM && entry->opcode != kX86Lea64RM) { DCHECK_NE(entry->flags & (IS_LOAD | IS_STORE), UINT64_C(0)) << entry->name; } size += IS_SIMM8(displacement) ? 
1 : 4; diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index 1a24677261..72c5834a8c 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -351,6 +351,16 @@ static void Remove(HInstructionList* instruction_list, for (size_t i = 0; i < instruction->InputCount(); i++) { instruction->InputAt(i)->RemoveUser(instruction, i); } + + HEnvironment* environment = instruction->GetEnvironment(); + if (environment != nullptr) { + for (size_t i = 0, e = environment->Size(); i < e; ++i) { + HInstruction* vreg = environment->GetInstructionAt(i); + if (vreg != nullptr) { + vreg->RemoveEnvironmentUser(environment, i); + } + } + } } void HBasicBlock::RemoveInstruction(HInstruction* instruction) { @@ -361,13 +371,16 @@ void HBasicBlock::RemovePhi(HPhi* phi) { Remove(&phis_, this, phi); } -void HInstruction::RemoveUser(HInstruction* user, size_t input_index) { - HUseListNode<HInstruction>* previous = nullptr; - HUseListNode<HInstruction>* current = uses_; +template <typename T> +static void RemoveFromUseList(T* user, + size_t input_index, + HUseListNode<T>** list) { + HUseListNode<T>* previous = nullptr; + HUseListNode<T>* current = *list; while (current != nullptr) { if (current->GetUser() == user && current->GetIndex() == input_index) { if (previous == NULL) { - uses_ = current->GetTail(); + *list = current->GetTail(); } else { previous->SetTail(current->GetTail()); } @@ -377,6 +390,14 @@ void HInstruction::RemoveUser(HInstruction* user, size_t input_index) { } } +void HInstruction::RemoveUser(HInstruction* user, size_t input_index) { + RemoveFromUseList(user, input_index, &uses_); +} + +void HInstruction::RemoveEnvironmentUser(HEnvironment* user, size_t input_index) { + RemoveFromUseList(user, input_index, &env_uses_); +} + void HInstructionList::AddInstruction(HInstruction* instruction) { if (first_instruction_ == nullptr) { DCHECK(last_instruction_ == nullptr); diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index af173c8087..47c8eda8d4 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -624,11 +624,13 @@ class HInstruction : public ArenaObject { } void AddEnvUseAt(HEnvironment* user, size_t index) { + DCHECK(user != nullptr); env_uses_ = new (block_->GetGraph()->GetArena()) HUseListNode<HEnvironment>( user, index, env_uses_); } void RemoveUser(HInstruction* user, size_t index); + void RemoveEnvironmentUser(HEnvironment* user, size_t index); HUseListNode<HInstruction>* GetUses() const { return uses_; } HUseListNode<HEnvironment>* GetEnvUses() const { return env_uses_; } diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc new file mode 100644 index 0000000000..b75bacb6ea --- /dev/null +++ b/compiler/optimizing/nodes_test.cc @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nodes.h" +#include "utils/arena_allocator.h" + +#include "gtest/gtest.h" + +namespace art { + +/** + * Test that removing instruction from the graph removes itself from user lists + * and environment lists. + */ +TEST(Node, RemoveInstruction) { + ArenaPool pool; + ArenaAllocator allocator(&pool); + + HGraph* graph = new (&allocator) HGraph(&allocator); + HBasicBlock* entry = new (&allocator) HBasicBlock(graph); + graph->AddBlock(entry); + graph->SetEntryBlock(entry); + HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot); + entry->AddInstruction(parameter); + entry->AddInstruction(new (&allocator) HGoto()); + + HBasicBlock* first_block = new (&allocator) HBasicBlock(graph); + graph->AddBlock(first_block); + entry->AddSuccessor(first_block); + HInstruction* null_check = new (&allocator) HNullCheck(parameter, 0); + first_block->AddInstruction(null_check); + first_block->AddInstruction(new (&allocator) HReturnVoid()); + + HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph); + graph->AddBlock(exit_block); + first_block->AddSuccessor(exit_block); + exit_block->AddInstruction(new (&allocator) HExit()); + + HEnvironment* environment = new (&allocator) HEnvironment(&allocator, 1); + null_check->SetEnvironment(environment); + environment->SetRawEnvAt(0, parameter); + parameter->AddEnvUseAt(null_check->GetEnvironment(), 0); + + ASSERT_TRUE(parameter->HasEnvironmentUses()); + ASSERT_TRUE(parameter->HasUses()); + + first_block->RemoveInstruction(null_check); + + ASSERT_FALSE(parameter->HasEnvironmentUses()); + ASSERT_FALSE(parameter->HasUses()); +} + +} // namespace art diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc index 786261121b..9ba75b8da4 100644 --- a/compiler/optimizing/register_allocator.cc +++ b/compiler/optimizing/register_allocator.cc @@ -796,6 +796,10 @@ void RegisterAllocator::InsertParallelMoveAt(size_t position, // This is a parallel move for connecting siblings in a same block. We need to // differentiate it with moves for connecting blocks, and input moves. if (previous->GetLifetimePosition() != position) { + // If the previous instruction of the previous instruction is not a parallel + // move, we have to insert the new parallel move before the input or connecting + // block moves. 
+ at = previous; previous = previous->GetPrevious(); } } diff --git a/runtime/debugger.cc b/runtime/debugger.cc index e57133d016..a9c4b4a514 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -845,7 +845,9 @@ std::string Dbg::GetClassName(JDWP::RefTypeId class_id) { } std::string Dbg::GetClassName(mirror::Class* klass) { - DCHECK(klass != nullptr); + if (klass == nullptr) { + return "NULL"; + } std::string temp; return DescriptorToName(klass->GetDescriptor(&temp)); } @@ -1466,6 +1468,9 @@ bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location, } bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) { + if (event_class == nullptr) { + return false; + } JDWP::JdwpError error; mirror::Class* expected_class = DecodeClass(class_id, &error); CHECK(expected_class != nullptr); @@ -1490,7 +1495,7 @@ bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* eve void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (m == nullptr) { - memset(&location, 0, sizeof(*location)); + memset(location, 0, sizeof(*location)); } else { mirror::Class* c = m->GetDeclaringClass(); location->type_tag = GetTypeTag(c); @@ -1502,11 +1507,18 @@ void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, ui std::string Dbg::GetMethodName(JDWP::MethodId method_id) { mirror::ArtMethod* m = FromMethodId(method_id); + if (m == nullptr) { + return "NULL"; + } return m->GetName(); } std::string Dbg::GetFieldName(JDWP::FieldId field_id) { - return FromFieldId(field_id)->GetName(); + mirror::ArtField* f = FromFieldId(field_id); + if (f == nullptr) { + return "NULL"; + } + return f->GetName(); } /* @@ -2474,300 +2486,329 @@ JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame return JDWP::ERR_NONE; } -JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot, - JDWP::JdwpTag tag, uint8_t* buf, size_t width) { - struct GetLocalVisitor : public StackVisitor { - GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context, - JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag), - buf_(buf), width_(width), error_(JDWP::ERR_NONE) {} - - // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses - // annotalysis. - bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { - if (GetFrameId() != frame_id_) { - return true; // Not our frame, carry on. - } - // TODO: check that the tag is compatible with the actual type of the slot! - // TODO: check slot is valid for this method or return INVALID_SLOT error. - mirror::ArtMethod* m = GetMethod(); - if (m->IsNative()) { - // We can't read local value from native method. 
- error_ = JDWP::ERR_OPAQUE_FRAME; - return false; - } - uint16_t reg = DemangleSlot(slot_, m); - constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION; - switch (tag_) { - case JDWP::JT_BOOLEAN: { - CHECK_EQ(width_, 1U); - uint32_t intVal; - if (GetVReg(m, reg, kIntVReg, &intVal)) { - VLOG(jdwp) << "get boolean local " << reg << " = " << intVal; - JDWP::Set1(buf_+1, intVal != 0); - } else { - VLOG(jdwp) << "failed to get boolean local " << reg; - error_ = kFailureErrorCode; - } - break; - } - case JDWP::JT_BYTE: { - CHECK_EQ(width_, 1U); - uint32_t intVal; - if (GetVReg(m, reg, kIntVReg, &intVal)) { - VLOG(jdwp) << "get byte local " << reg << " = " << intVal; - JDWP::Set1(buf_+1, intVal); - } else { - VLOG(jdwp) << "failed to get byte local " << reg; - error_ = kFailureErrorCode; - } - break; - } - case JDWP::JT_SHORT: - case JDWP::JT_CHAR: { - CHECK_EQ(width_, 2U); - uint32_t intVal; - if (GetVReg(m, reg, kIntVReg, &intVal)) { - VLOG(jdwp) << "get short/char local " << reg << " = " << intVal; - JDWP::Set2BE(buf_+1, intVal); - } else { - VLOG(jdwp) << "failed to get short/char local " << reg; - error_ = kFailureErrorCode; - } - break; - } - case JDWP::JT_INT: { - CHECK_EQ(width_, 4U); - uint32_t intVal; - if (GetVReg(m, reg, kIntVReg, &intVal)) { - VLOG(jdwp) << "get int local " << reg << " = " << intVal; - JDWP::Set4BE(buf_+1, intVal); - } else { - VLOG(jdwp) << "failed to get int local " << reg; - error_ = kFailureErrorCode; - } - break; - } - case JDWP::JT_FLOAT: { - CHECK_EQ(width_, 4U); - uint32_t intVal; - if (GetVReg(m, reg, kFloatVReg, &intVal)) { - VLOG(jdwp) << "get float local " << reg << " = " << intVal; - JDWP::Set4BE(buf_+1, intVal); - } else { - VLOG(jdwp) << "failed to get float local " << reg; - error_ = kFailureErrorCode; - } - break; - } - case JDWP::JT_ARRAY: - case JDWP::JT_CLASS_LOADER: - case JDWP::JT_CLASS_OBJECT: - case JDWP::JT_OBJECT: - case JDWP::JT_STRING: - case JDWP::JT_THREAD: - case JDWP::JT_THREAD_GROUP: { - CHECK_EQ(width_, sizeof(JDWP::ObjectId)); - uint32_t intVal; - if (GetVReg(m, reg, kReferenceVReg, &intVal)) { - mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal); - VLOG(jdwp) << "get " << tag_ << " object local " << reg << " = " << o; - if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) { - LOG(FATAL) << "Register " << reg << " expected to hold " << tag_ << " object: " << o; - } - tag_ = TagFromObject(soa_, o); - JDWP::SetObjectId(buf_+1, gRegistry->Add(o)); - } else { - VLOG(jdwp) << "failed to get " << tag_ << " object local " << reg; - error_ = kFailureErrorCode; - } - break; - } - case JDWP::JT_DOUBLE: { - CHECK_EQ(width_, 8U); - uint64_t longVal; - if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) { - VLOG(jdwp) << "get double local " << reg << " = " << longVal; - JDWP::Set8BE(buf_+1, longVal); - } else { - VLOG(jdwp) << "failed to get double local " << reg; - error_ = kFailureErrorCode; - } - break; - } - case JDWP::JT_LONG: { - CHECK_EQ(width_, 8U); - uint64_t longVal; - if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) { - VLOG(jdwp) << "get long local " << reg << " = " << longVal; - JDWP::Set8BE(buf_+1, longVal); - } else { - VLOG(jdwp) << "failed to get long local " << reg; - error_ = kFailureErrorCode; - } - break; - } - default: - LOG(FATAL) << "Unknown tag " << tag_; - break; - } +// Walks the stack until we find the frame with the given FrameId. 
+class FindFrameVisitor FINAL : public StackVisitor { + public: + FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + : StackVisitor(thread, context), frame_id_(frame_id), error_(JDWP::ERR_INVALID_FRAMEID) {} - // Prepend tag, which may have been updated. - JDWP::Set1(buf_, tag_); - return false; + // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses + // annotalysis. + bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { + if (GetFrameId() != frame_id_) { + return true; // Not our frame, carry on. } - const ScopedObjectAccessUnchecked& soa_; - const JDWP::FrameId frame_id_; - const int slot_; - JDWP::JdwpTag tag_; - uint8_t* const buf_; - const size_t width_; - JDWP::JdwpError error_; - }; + mirror::ArtMethod* m = GetMethod(); + if (m->IsNative()) { + // We can't read/write local value from/into native method. + error_ = JDWP::ERR_OPAQUE_FRAME; + } else { + // We found our frame. + error_ = JDWP::ERR_NONE; + } + return false; + } + + JDWP::JdwpError GetError() const { + return error_; + } + + private: + const JDWP::FrameId frame_id_; + JDWP::JdwpError error_; +}; + +JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) { + JDWP::ObjectId thread_id = request->ReadThreadId(); + JDWP::FrameId frame_id = request->ReadFrameId(); ScopedObjectAccessUnchecked soa(Thread::Current()); - MutexLock mu(soa.Self(), *Locks::thread_list_lock_); - JDWP::JdwpError error; - Thread* thread = DecodeThread(soa, thread_id, &error); - if (error != JDWP::ERR_NONE) { - return error; + Thread* thread; + { + MutexLock mu(soa.Self(), *Locks::thread_list_lock_); + JDWP::JdwpError error; + thread = DecodeThread(soa, thread_id, &error); + if (error != JDWP::ERR_NONE) { + return error; + } } - // TODO check thread is suspended by the debugger ? + // Find the frame with the given frame_id. std::unique_ptr<Context> context(Context::Create()); - GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width); + FindFrameVisitor visitor(thread, context.get(), frame_id); visitor.WalkStack(); - return visitor.error_; -} + if (visitor.GetError() != JDWP::ERR_NONE) { + return visitor.GetError(); + } -JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot, - JDWP::JdwpTag tag, uint64_t value, size_t width) { - struct SetLocalVisitor : public StackVisitor { - SetLocalVisitor(Thread* thread, Context* context, - JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value, - size_t width) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - : StackVisitor(thread, context), - frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width), - error_(JDWP::ERR_NONE) {} + // Read the values from visitor's context. + int32_t slot_count = request->ReadSigned32("slot count"); + expandBufAdd4BE(pReply, slot_count); /* "int values" */ + for (int32_t i = 0; i < slot_count; ++i) { + uint32_t slot = request->ReadUnsigned32("slot"); + JDWP::JdwpTag reqSigByte = request->ReadTag(); - // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses - // annotalysis. - bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { - if (GetFrameId() != frame_id_) { - return true; // Not our frame, carry on. 
+ VLOG(jdwp) << " --> slot " << slot << " " << reqSigByte; + + size_t width = Dbg::GetTagWidth(reqSigByte); + uint8_t* ptr = expandBufAddSpace(pReply, width+1); + JDWP::JdwpError error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width); + if (error != JDWP::ERR_NONE) { + return error; + } + } + return JDWP::ERR_NONE; +} + +JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa, + int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) { + mirror::ArtMethod* m = visitor.GetMethod(); + uint16_t reg = DemangleSlot(slot, m); + // TODO: check that the tag is compatible with the actual type of the slot! + // TODO: check slot is valid for this method or return INVALID_SLOT error. + constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION; + switch (tag) { + case JDWP::JT_BOOLEAN: { + CHECK_EQ(width, 1U); + uint32_t intVal; + if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) { + VLOG(jdwp) << "get boolean local " << reg << " = " << intVal; + JDWP::Set1(buf + 1, intVal != 0); + } else { + VLOG(jdwp) << "failed to get boolean local " << reg; + return kFailureErrorCode; } - // TODO: check that the tag is compatible with the actual type of the slot! - // TODO: check slot is valid for this method or return INVALID_SLOT error. - mirror::ArtMethod* m = GetMethod(); - if (m->IsNative()) { - // We can't read local value from native method. - error_ = JDWP::ERR_OPAQUE_FRAME; - return false; + break; + } + case JDWP::JT_BYTE: { + CHECK_EQ(width, 1U); + uint32_t intVal; + if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) { + VLOG(jdwp) << "get byte local " << reg << " = " << intVal; + JDWP::Set1(buf + 1, intVal); + } else { + VLOG(jdwp) << "failed to get byte local " << reg; + return kFailureErrorCode; } - uint16_t reg = DemangleSlot(slot_, m); - constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION; - switch (tag_) { - case JDWP::JT_BOOLEAN: - case JDWP::JT_BYTE: - CHECK_EQ(width_, 1U); - if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) { - VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = " - << static_cast<uint32_t>(value_); - error_ = kFailureErrorCode; - } - break; - case JDWP::JT_SHORT: - case JDWP::JT_CHAR: - CHECK_EQ(width_, 2U); - if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) { - VLOG(jdwp) << "failed to set short/char local " << reg << " = " - << static_cast<uint32_t>(value_); - error_ = kFailureErrorCode; - } - break; - case JDWP::JT_INT: - CHECK_EQ(width_, 4U); - if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) { - VLOG(jdwp) << "failed to set int local " << reg << " = " - << static_cast<uint32_t>(value_); - error_ = kFailureErrorCode; - } - break; - case JDWP::JT_FLOAT: - CHECK_EQ(width_, 4U); - if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg)) { - VLOG(jdwp) << "failed to set float local " << reg << " = " - << static_cast<uint32_t>(value_); - error_ = kFailureErrorCode; - } - break; - case JDWP::JT_ARRAY: - case JDWP::JT_CLASS_LOADER: - case JDWP::JT_CLASS_OBJECT: - case JDWP::JT_OBJECT: - case JDWP::JT_STRING: - case JDWP::JT_THREAD: - case JDWP::JT_THREAD_GROUP: { - CHECK_EQ(width_, sizeof(JDWP::ObjectId)); - JDWP::JdwpError error; - mirror::Object* o = - gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_), &error); - if (error != JDWP::ERR_NONE) { - VLOG(jdwp) << tag_ << " object " << value_ << " is an invalid object"; - error_ = JDWP::ERR_INVALID_OBJECT; - } else if (!SetVReg(m, reg, 
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)), - kReferenceVReg)) { - VLOG(jdwp) << "failed to set " << tag_ << " object local " << reg << " = " << o; - error_ = kFailureErrorCode; - } - break; - } - case JDWP::JT_DOUBLE: { - CHECK_EQ(width_, 8U); - bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg); - if (!success) { - VLOG(jdwp) << "failed to set double local " << reg << " = " << value_; - error_ = kFailureErrorCode; - } - break; - } - case JDWP::JT_LONG: { - CHECK_EQ(width_, 8U); - bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg); - if (!success) { - VLOG(jdwp) << "failed to set double local " << reg << " = " << value_; - error_ = kFailureErrorCode; - } - break; + break; + } + case JDWP::JT_SHORT: + case JDWP::JT_CHAR: { + CHECK_EQ(width, 2U); + uint32_t intVal; + if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) { + VLOG(jdwp) << "get short/char local " << reg << " = " << intVal; + JDWP::Set2BE(buf + 1, intVal); + } else { + VLOG(jdwp) << "failed to get short/char local " << reg; + return kFailureErrorCode; + } + break; + } + case JDWP::JT_INT: { + CHECK_EQ(width, 4U); + uint32_t intVal; + if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) { + VLOG(jdwp) << "get int local " << reg << " = " << intVal; + JDWP::Set4BE(buf + 1, intVal); + } else { + VLOG(jdwp) << "failed to get int local " << reg; + return kFailureErrorCode; + } + break; + } + case JDWP::JT_FLOAT: { + CHECK_EQ(width, 4U); + uint32_t intVal; + if (visitor.GetVReg(m, reg, kFloatVReg, &intVal)) { + VLOG(jdwp) << "get float local " << reg << " = " << intVal; + JDWP::Set4BE(buf + 1, intVal); + } else { + VLOG(jdwp) << "failed to get float local " << reg; + return kFailureErrorCode; + } + break; + } + case JDWP::JT_ARRAY: + case JDWP::JT_CLASS_LOADER: + case JDWP::JT_CLASS_OBJECT: + case JDWP::JT_OBJECT: + case JDWP::JT_STRING: + case JDWP::JT_THREAD: + case JDWP::JT_THREAD_GROUP: { + CHECK_EQ(width, sizeof(JDWP::ObjectId)); + uint32_t intVal; + if (visitor.GetVReg(m, reg, kReferenceVReg, &intVal)) { + mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal); + VLOG(jdwp) << "get " << tag << " object local " << reg << " = " << o; + if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) { + LOG(FATAL) << "Register " << reg << " expected to hold " << tag << " object: " << o; } - default: - LOG(FATAL) << "Unknown tag " << tag_; - break; + tag = TagFromObject(soa, o); + JDWP::SetObjectId(buf + 1, gRegistry->Add(o)); + } else { + VLOG(jdwp) << "failed to get " << tag << " object local " << reg; + return kFailureErrorCode; } - return false; + break; + } + case JDWP::JT_DOUBLE: { + CHECK_EQ(width, 8U); + uint64_t longVal; + if (visitor.GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) { + VLOG(jdwp) << "get double local " << reg << " = " << longVal; + JDWP::Set8BE(buf + 1, longVal); + } else { + VLOG(jdwp) << "failed to get double local " << reg; + return kFailureErrorCode; + } + break; + } + case JDWP::JT_LONG: { + CHECK_EQ(width, 8U); + uint64_t longVal; + if (visitor.GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) { + VLOG(jdwp) << "get long local " << reg << " = " << longVal; + JDWP::Set8BE(buf + 1, longVal); + } else { + VLOG(jdwp) << "failed to get long local " << reg; + return kFailureErrorCode; + } + break; } + default: + LOG(FATAL) << "Unknown tag " << tag; + break; + } - const JDWP::FrameId frame_id_; - const int slot_; - const JDWP::JdwpTag tag_; - const uint64_t value_; - const size_t width_; - JDWP::JdwpError error_; - }; + // Prepend tag, which 
may have been updated. + JDWP::Set1(buf, tag); + return JDWP::ERR_NONE; +} + +JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) { + JDWP::ObjectId thread_id = request->ReadThreadId(); + JDWP::FrameId frame_id = request->ReadFrameId(); ScopedObjectAccessUnchecked soa(Thread::Current()); - MutexLock mu(soa.Self(), *Locks::thread_list_lock_); - JDWP::JdwpError error; - Thread* thread = DecodeThread(soa, thread_id, &error); - if (error != JDWP::ERR_NONE) { - return error; + Thread* thread; + { + MutexLock mu(soa.Self(), *Locks::thread_list_lock_); + JDWP::JdwpError error; + thread = DecodeThread(soa, thread_id, &error); + if (error != JDWP::ERR_NONE) { + return error; + } } - // TODO check thread is suspended by the debugger ? + // Find the frame with the given frame_id. std::unique_ptr<Context> context(Context::Create()); - SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width); + FindFrameVisitor visitor(thread, context.get(), frame_id); visitor.WalkStack(); - return visitor.error_; + if (visitor.GetError() != JDWP::ERR_NONE) { + return visitor.GetError(); + } + + // Writes the values into visitor's context. + int32_t slot_count = request->ReadSigned32("slot count"); + for (int32_t i = 0; i < slot_count; ++i) { + uint32_t slot = request->ReadUnsigned32("slot"); + JDWP::JdwpTag sigByte = request->ReadTag(); + size_t width = Dbg::GetTagWidth(sigByte); + uint64_t value = request->ReadValue(width); + + VLOG(jdwp) << " --> slot " << slot << " " << sigByte << " " << value; + JDWP::JdwpError error = Dbg::SetLocalValue(visitor, slot, sigByte, value, width); + if (error != JDWP::ERR_NONE) { + return error; + } + } + return JDWP::ERR_NONE; +} + +JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag, + uint64_t value, size_t width) { + mirror::ArtMethod* m = visitor.GetMethod(); + uint16_t reg = DemangleSlot(slot, m); + // TODO: check that the tag is compatible with the actual type of the slot! + // TODO: check slot is valid for this method or return INVALID_SLOT error. 
+ constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION; + switch (tag) { + case JDWP::JT_BOOLEAN: + case JDWP::JT_BYTE: + CHECK_EQ(width, 1U); + if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) { + VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = " + << static_cast<uint32_t>(value); + return kFailureErrorCode; + } + break; + case JDWP::JT_SHORT: + case JDWP::JT_CHAR: + CHECK_EQ(width, 2U); + if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) { + VLOG(jdwp) << "failed to set short/char local " << reg << " = " + << static_cast<uint32_t>(value); + return kFailureErrorCode; + } + break; + case JDWP::JT_INT: + CHECK_EQ(width, 4U); + if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) { + VLOG(jdwp) << "failed to set int local " << reg << " = " + << static_cast<uint32_t>(value); + return kFailureErrorCode; + } + break; + case JDWP::JT_FLOAT: + CHECK_EQ(width, 4U); + if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kFloatVReg)) { + VLOG(jdwp) << "failed to set float local " << reg << " = " + << static_cast<uint32_t>(value); + return kFailureErrorCode; + } + break; + case JDWP::JT_ARRAY: + case JDWP::JT_CLASS_LOADER: + case JDWP::JT_CLASS_OBJECT: + case JDWP::JT_OBJECT: + case JDWP::JT_STRING: + case JDWP::JT_THREAD: + case JDWP::JT_THREAD_GROUP: { + CHECK_EQ(width, sizeof(JDWP::ObjectId)); + JDWP::JdwpError error; + mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value), + &error); + if (error != JDWP::ERR_NONE) { + VLOG(jdwp) << tag << " object " << o << " is an invalid object"; + return JDWP::ERR_INVALID_OBJECT; + } else if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)), + kReferenceVReg)) { + VLOG(jdwp) << "failed to set " << tag << " object local " << reg << " = " << o; + return kFailureErrorCode; + } + break; + } + case JDWP::JT_DOUBLE: { + CHECK_EQ(width, 8U); + if (!visitor.SetVRegPair(m, reg, value, kDoubleLoVReg, kDoubleHiVReg)) { + VLOG(jdwp) << "failed to set double local " << reg << " = " << value; + return kFailureErrorCode; + } + break; + } + case JDWP::JT_LONG: { + CHECK_EQ(width, 8U); + if (!visitor.SetVRegPair(m, reg, value, kLongLoVReg, kLongHiVReg)) { + VLOG(jdwp) << "failed to set double local " << reg << " = " << value; + return kFailureErrorCode; + } + break; + } + default: + LOG(FATAL) << "Unknown tag " << tag; + break; + } + return JDWP::ERR_NONE; } static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m, uint32_t dex_pc) @@ -3124,6 +3165,8 @@ static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m) // should never be null. We could just check we never encounter this case. return false; } + // Note: method verifier may cause thread suspension. + self->AssertThreadSuspensionIsAllowable(); StackHandleScope<3> hs(self); mirror::Class* declaring_class = m->GetDeclaringClass(); Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache())); @@ -3149,20 +3192,18 @@ static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m) // Sanity checks all existing breakpoints on the same method. 
static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) { - if (kIsDebugBuild) { - for (const Breakpoint& breakpoint : gBreakpoints) { - CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization()); - } - if (need_full_deoptimization) { - // We should have deoptimized everything but not "selectively" deoptimized this method. - CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized()); - CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); - } else { - // We should have "selectively" deoptimized this method. - // Note: while we have not deoptimized everything for this method, we may have done it for - // another event. - CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); - } + for (const Breakpoint& breakpoint : gBreakpoints) { + CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization()); + } + if (need_full_deoptimization) { + // We should have deoptimized everything but not "selectively" deoptimized this method. + CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized()); + CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); + } else { + // We should have "selectively" deoptimized this method. + // Note: while we have not deoptimized everything for this method, we may have done it for + // another event. + CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); } } @@ -3173,12 +3214,17 @@ void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationReques mirror::ArtMethod* m = FromMethodId(location->method_id); DCHECK(m != nullptr) << "No method for method id " << location->method_id; - WriterMutexLock mu(self, *Locks::breakpoint_lock_); - const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m); + const Breakpoint* existing_breakpoint; + { + ReaderMutexLock mu(self, *Locks::breakpoint_lock_); + existing_breakpoint = FindFirstBreakpointForMethod(m); + } bool need_full_deoptimization; if (existing_breakpoint == nullptr) { // There is no breakpoint on this method yet: we need to deoptimize. If this method may be // inlined, we deoptimize everything; otherwise we deoptimize only this method. + // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension. + // Therefore we must not hold any lock when we call it. need_full_deoptimization = IsMethodPossiblyInlined(self, m); if (need_full_deoptimization) { req->SetKind(DeoptimizationRequest::kFullDeoptimization); @@ -3193,12 +3239,18 @@ void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationReques req->SetMethod(nullptr); need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization(); - SanityCheckExistingBreakpoints(m, need_full_deoptimization); + if (kIsDebugBuild) { + ReaderMutexLock mu(self, *Locks::breakpoint_lock_); + SanityCheckExistingBreakpoints(m, need_full_deoptimization); + } } - gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization)); - VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " - << gBreakpoints[gBreakpoints.size() - 1]; + { + WriterMutexLock mu(self, *Locks::breakpoint_lock_); + gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization)); + VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " + << gBreakpoints[gBreakpoints.size() - 1]; + } } // Uninstalls a breakpoint at the specified location. 
Also indicates through the deoptimization @@ -3233,7 +3285,9 @@ void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequ // There is at least one breakpoint for this method: we don't need to undeoptimize. req->SetKind(DeoptimizationRequest::kNothing); req->SetMethod(nullptr); - SanityCheckExistingBreakpoints(m, need_full_deoptimization); + if (kIsDebugBuild) { + SanityCheckExistingBreakpoints(m, need_full_deoptimization); + } } } diff --git a/runtime/debugger.h b/runtime/debugger.h index 97985ec649..cb7adae47a 100644 --- a/runtime/debugger.h +++ b/runtime/debugger.h @@ -45,6 +45,7 @@ class Throwable; class AllocRecord; class ObjectRegistry; class ScopedObjectAccessUnchecked; +class StackVisitor; class Thread; class ThrowLocation; @@ -475,12 +476,10 @@ class Dbg { LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static JDWP::JdwpError GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot, - JDWP::JdwpTag tag, uint8_t* buf, size_t expectedLen) + static JDWP::JdwpError GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) LOCKS_EXCLUDED(Locks::thread_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static JDWP::JdwpError SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot, - JDWP::JdwpTag tag, uint64_t value, size_t width) + static JDWP::JdwpError SetLocalValues(JDWP::Request* request) LOCKS_EXCLUDED(Locks::thread_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -641,6 +640,16 @@ class Dbg { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: + static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor, + ScopedObjectAccessUnchecked& soa, int slot, + JDWP::JdwpTag tag, uint8_t* buf, size_t width) + LOCKS_EXCLUDED(Locks::thread_list_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static JDWP::JdwpError SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag, + uint64_t value, size_t width) + LOCKS_EXCLUDED(Locks::thread_list_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void PostThreadStartOrStop(Thread*, uint32_t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h index 6a9976acb3..05214a4600 100644 --- a/runtime/dex_instruction_list.h +++ b/runtime/dex_instruction_list.h @@ -122,7 +122,7 @@ V(0x65, SGET_CHAR, "sget-char", k21c, true, kFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \ V(0x66, SGET_SHORT, "sget-short", k21c, true, kFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \ V(0x67, SPUT, "sput", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \ - V(0x68, SPUT_WIDE, "sput-wide", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \ + V(0x68, SPUT_WIDE, "sput-wide", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegAWide | kVerifyRegBField) \ V(0x69, SPUT_OBJECT, "sput-object", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \ V(0x6A, SPUT_BOOLEAN, "sput-boolean", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \ V(0x6B, SPUT_BYTE, "sput-byte", k21c, false, kFieldRef, 
kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \ diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc index d61660bca7..7c8c63ce46 100644 --- a/runtime/jdwp/jdwp_event.cc +++ b/runtime/jdwp/jdwp_event.cc @@ -1125,12 +1125,19 @@ bool JdwpState::PostException(const EventLocation* pThrowLoc, mirror::Throwable* DCHECK(exception_object != nullptr); DCHECK(pThrowLoc != nullptr); DCHECK(pCatchLoc != nullptr); - DCHECK(pThrowLoc->method != nullptr); - DCHECK_EQ(pThrowLoc->method->IsStatic(), thisPtr == nullptr); + if (pThrowLoc->method != nullptr) { + DCHECK_EQ(pThrowLoc->method->IsStatic(), thisPtr == nullptr); + } else { + VLOG(jdwp) << "Unexpected: exception event with empty throw location"; + } ModBasket basket; basket.pLoc = pThrowLoc; - basket.locationClass = pThrowLoc->method->GetDeclaringClass(); + if (pThrowLoc->method != nullptr) { + basket.locationClass = pThrowLoc->method->GetDeclaringClass(); + } else { + basket.locationClass = nullptr; + } basket.thread = Thread::Current(); basket.className = Dbg::GetClassName(basket.locationClass); basket.exceptionClass = exception_object->GetClass(); diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc index e0a83f607b..16a774fdf0 100644 --- a/runtime/jdwp/jdwp_handler.cc +++ b/runtime/jdwp/jdwp_handler.cc @@ -1385,26 +1385,7 @@ static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*) */ static JdwpError SF_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ObjectId thread_id = request->ReadThreadId(); - FrameId frame_id = request->ReadFrameId(); - int32_t slot_count = request->ReadSigned32("slot count"); - - expandBufAdd4BE(pReply, slot_count); /* "int values" */ - for (int32_t i = 0; i < slot_count; ++i) { - uint32_t slot = request->ReadUnsigned32("slot"); - JDWP::JdwpTag reqSigByte = request->ReadTag(); - - VLOG(jdwp) << " --> slot " << slot << " " << reqSigByte; - - size_t width = Dbg::GetTagWidth(reqSigByte); - uint8_t* ptr = expandBufAddSpace(pReply, width+1); - JdwpError error = Dbg::GetLocalValue(thread_id, frame_id, slot, reqSigByte, ptr, width); - if (error != ERR_NONE) { - return error; - } - } - - return ERR_NONE; + return Dbg::GetLocalValues(request, pReply); } /* @@ -1412,24 +1393,7 @@ static JdwpError SF_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) */ static JdwpError SF_SetValues(JdwpState*, Request* request, ExpandBuf*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ObjectId thread_id = request->ReadThreadId(); - FrameId frame_id = request->ReadFrameId(); - int32_t slot_count = request->ReadSigned32("slot count"); - - for (int32_t i = 0; i < slot_count; ++i) { - uint32_t slot = request->ReadUnsigned32("slot"); - JDWP::JdwpTag sigByte = request->ReadTag(); - size_t width = Dbg::GetTagWidth(sigByte); - uint64_t value = request->ReadValue(width); - - VLOG(jdwp) << " --> slot " << slot << " " << sigByte << " " << value; - JdwpError error = Dbg::SetLocalValue(thread_id, frame_id, slot, sigByte, value, width); - if (error != ERR_NONE) { - return error; - } - } - - return ERR_NONE; + return Dbg::SetLocalValues(request); } static JdwpError SF_ThisObject(JdwpState*, Request* request, ExpandBuf* reply) diff --git a/runtime/thread.cc b/runtime/thread.cc index 650b0f95d4..ae89c90dc4 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -931,6 +931,13 @@ static bool ShouldShowNativeStack(const Thread* thread) return false; } + // Threads with no managed stack frames should be shown. 
+ const ManagedStack* managed_stack = thread->GetManagedStack(); + if (managed_stack == NULL || (managed_stack->GetTopQuickFrame() == NULL && + managed_stack->GetTopShadowFrame() == NULL)) { + return true; + } + // In some other native method? That's interesting. // We don't just check kNative because native methods will be in state kSuspended if they're // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the diff --git a/test/408-move-bug/expected.txt b/test/408-move-bug/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/408-move-bug/expected.txt diff --git a/test/408-move-bug/info.txt b/test/408-move-bug/info.txt new file mode 100644 index 0000000000..27a3dbc1f5 --- /dev/null +++ b/test/408-move-bug/info.txt @@ -0,0 +1,2 @@ +Regression test for the register allocator in the optimizing +compiler. Input moves where being overridden by sibling moves. diff --git a/test/408-move-bug/src/Main.java b/test/408-move-bug/src/Main.java new file mode 100644 index 0000000000..420298b8c6 --- /dev/null +++ b/test/408-move-bug/src/Main.java @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + + public static void main(String[] args) { + crash(); + npe(); + } + + static void crash() { + boolean b = baz(); + // Create many objects to starve registers. + Main foo1 = create(); + Main foo2 = create(); + Main foo3 = create(); + Main foo4 = create(); + foo1.otherField = null; + // On X86, we would force b to be in a byte register, which + // would generate moves. This code exposed a bug in the + // register allocator, where an input move was not just before + // the instruction itself, and its destination was overridden + // by another value. + foo1.field = b; + foo2.field = b; + foo3.field = b; + foo4.field = b; + foo1.lastField = b; + } + + // Similar to `crash` but generated an NPE. + static void npe() { + boolean b = baz(); + Main foo1 = create(); + Main foo2 = create(); + Main foo3 = create(); + Main foo4 = create(); + foo1.field = b; + foo2.field = b; + foo3.field = b; + foo4.field = b; + foo1.lastField = b; + } + + static Main create() { + return new Main(); + } + + static boolean baz() { + return false; + } + + boolean field; + Object otherField; + boolean lastField; +} |
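A closing note on the runtime/debugger.cc and runtime/jdwp/jdwp_handler.cc hunks above: SF_GetValues and SF_SetValues now delegate to Dbg::GetLocalValues/SetLocalValues, which first locate the requested frame with a FindFrameVisitor and only then read or write each (slot, tag) pair. The sketch below mirrors just that frame-lookup step under assumed toy types; Frame, JdwpError and FindFrame here are illustrative stand-ins, not the JDWP/ART API.

```cpp
// Toy sketch of the "find the frame, remember an error until then" pattern
// used by FindFrameVisitor; all names here are illustrative stand-ins.
#include <cstdint>
#include <iostream>
#include <vector>

enum class JdwpError { kNone, kInvalidFrameId, kOpaqueFrame };

struct Frame {
  uint64_t id;
  bool is_native;
};

// Walk the stack top-down; stop at the requested frame id.
static JdwpError FindFrame(const std::vector<Frame>& stack, uint64_t frame_id,
                           const Frame** out) {
  for (const Frame& frame : stack) {
    if (frame.id != frame_id) {
      continue;  // not our frame, carry on
    }
    if (frame.is_native) {
      return JdwpError::kOpaqueFrame;  // can't read/write locals in a native frame
    }
    *out = &frame;
    return JdwpError::kNone;
  }
  return JdwpError::kInvalidFrameId;  // walked the whole stack without a match
}

int main() {
  std::vector<Frame> stack = {{1, false}, {2, true}, {3, false}};
  const Frame* frame = nullptr;
  std::cout << "frame 3: " << static_cast<int>(FindFrame(stack, 3, &frame)) << "\n";
  std::cout << "frame 2: " << static_cast<int>(FindFrame(stack, 2, &frame)) << "\n";
  std::cout << "frame 9: " << static_cast<int>(FindFrame(stack, 9, &frame)) << "\n";
  return 0;
}
```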