author      2016-02-09 14:30:11 +0000
committer   2016-02-24 10:21:57 +0000
commit      c7098ff991bb4e00a800d315d1c36f52a9cb0149 (patch)
tree        3a150e927bc7f4894f5b148ec6f5a2b796cdd80d /compiler
parent      5322e55ab9a15996a197456ca39d9c77488cd5c1 (diff)
Remove HNativeDebugInfo from start of basic blocks.
We do not require a full environment at the start of a basic block.
The dex pc contained in the basic block is sufficient for line mapping.
Change-Id: I5ba9e5f5acbc4a783ad544769f9a73bb33e2bafa
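
The refactoring pattern behind this change: the shared CodeGenerator now owns the "emit a nop if a stack map already exists at this pc, then record the pc" logic in a single MaybeRecordNativeDebugInfo helper, and each backend only overrides a GenerateNop() hook. Below is a minimal, self-contained C++ sketch of that pattern for illustration only; the pc bookkeeping and fields here are simplified placeholders, not the actual ART types (the real implementation appears in the diff that follows).

// Simplified sketch of the new structure; not ART code.
#include <cstdint>
#include <iostream>

constexpr uint32_t kNoDexPc = UINT32_MAX;

class CodeGenerator {
 public:
  virtual ~CodeGenerator() = default;

  // Record an extra native-debug stack map, preceded by a nop if a stack map
  // already exists at the current native pc (mirrors MaybeRecordNativeDebugInfo).
  void MaybeRecordNativeDebugInfo(uint32_t dex_pc) {
    if (native_debuggable_ && dex_pc != kNoDexPc) {
      if (HasStackMapAtCurrentPc()) {
        GenerateNop();  // avoid two stack maps at the same native pc
      }
      RecordPcInfo(dex_pc);
    }
  }

 protected:
  virtual void GenerateNop() = 0;  // backend-specific nop encoding
  bool HasStackMapAtCurrentPc() const { return last_map_pc_ == native_pc_; }
  void RecordPcInfo(uint32_t dex_pc) {
    last_map_pc_ = native_pc_;
    std::cout << "stack map: dex_pc=" << dex_pc << " native_pc=" << native_pc_ << "\n";
  }

  bool native_debuggable_ = true;   // stands in for GetCompilerOptions().GetNativeDebuggable()
  uint32_t native_pc_ = 0;          // stands in for GetAssembler()->CodeSize()
  uint32_t last_map_pc_ = UINT32_MAX;
};

class CodeGeneratorX86 : public CodeGenerator {
 protected:
  void GenerateNop() override {
    ++native_pc_;  // a real backend would emit an actual nop instruction here
    std::cout << "nop\n";
  }
};

int main() {
  CodeGeneratorX86 gen;
  gen.MaybeRecordNativeDebugInfo(10);  // e.g. at the start of a basic block
  gen.MaybeRecordNativeDebugInfo(11);  // collides with the previous map, so a nop is emitted first
}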
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/builder.cc                | 17
-rw-r--r--  compiler/optimizing/code_generator.cc         | 17
-rw-r--r--  compiler/optimizing/code_generator.h          |  4
-rw-r--r--  compiler/optimizing/code_generator_arm.cc     | 10
-rw-r--r--  compiler/optimizing/code_generator_arm.h      |  2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc   | 10
-rw-r--r--  compiler/optimizing/code_generator_arm64.h    |  2
-rw-r--r--  compiler/optimizing/code_generator_mips.cc    | 10
-rw-r--r--  compiler/optimizing/code_generator_mips.h     |  2
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  | 10
-rw-r--r--  compiler/optimizing/code_generator_mips64.h   |  2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc     | 10
-rw-r--r--  compiler/optimizing/code_generator_x86.h      |  1
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  | 10
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h   |  2
15 files changed, 65 insertions, 44 deletions
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 05e1356ed8..35ec7d41ff 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -368,7 +368,6 @@ GraphAnalysisResult HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item
   if (native_debuggable) {
     const uint32_t num_instructions = code_item.insns_size_in_code_units_;
     native_debug_info_locations = new (arena_) ArenaBitVector (arena_, num_instructions, false);
-    native_debug_info_locations->ClearAllBits();
     FindNativeDebugInfoLocations(code_item, native_debug_info_locations);
   }

@@ -443,23 +442,15 @@ void HGraphBuilder::FindNativeDebugInfoLocations(const DexFile::CodeItem& code_i
     }
   };
   dex_file_->DecodeDebugPositionInfo(&code_item, Callback::Position, locations);
-  // Add native debug info at the start of every basic block.
-  for (uint32_t pc = 0; pc < code_item.insns_size_in_code_units_; pc++) {
-    if (FindBlockStartingAt(pc) != nullptr) {
-      locations->SetBit(pc);
-    }
-  }
   // Instruction-specific tweaks.
   const Instruction* const begin = Instruction::At(code_item.insns_);
   const Instruction* const end = begin->RelativeAt(code_item.insns_size_in_code_units_);
   for (const Instruction* inst = begin; inst < end; inst = inst->Next()) {
     switch (inst->Opcode()) {
-      case Instruction::MOVE_EXCEPTION:
-      case Instruction::MOVE_RESULT:
-      case Instruction::MOVE_RESULT_WIDE:
-      case Instruction::MOVE_RESULT_OBJECT: {
-        // The compiler checks that there are no instructions before those.
-        // So generate HNativeDebugInfo after them instead.
+      case Instruction::MOVE_EXCEPTION: {
+        // Stop in native debugger after the exception has been moved.
+        // The compiler also expects the move at the start of basic block so
+        // we do not want to interfere by inserting native-debug-info before it.
         locations->ClearBit(inst->GetDexPc(code_item.insns_));
         const Instruction* next = inst->Next();
         if (next < end) {
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c2c8ccfc56..c67efc06c1 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -226,6 +226,10 @@ void CodeGenerator::Compile(CodeAllocator* allocator) {
     // errors where we reference that label.
     if (block->IsSingleJump()) continue;
     Bind(block);
+    // This ensures that we have correct native line mapping for all native instructions.
+    // It is necessary to make stepping over a statement work. Otherwise, any initial
+    // instructions (e.g. moves) would be assumed to be the start of next statement.
+    MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc());
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* current = it.Current();
       DisassemblyScope disassembly_scope(current, *this);
@@ -733,7 +737,8 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
   uint32_t native_pc = GetAssembler()->CodeSize();

   if (instruction == nullptr) {
-    // For stack overflow checks.
+    // For stack overflow checks and native-debug-info entries without dex register
+    // mapping (i.e. start of basic block or start of slow path).
     stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
     stack_map_stream_.EndStackMapEntry();
     return;
@@ -808,6 +813,16 @@ bool CodeGenerator::HasStackMapAtCurrentPc() {
   return count > 0 && stack_map_stream_.GetStackMap(count - 1).native_pc_offset == pc;
 }

+void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction, uint32_t dex_pc) {
+  if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
+    if (HasStackMapAtCurrentPc()) {
+      // Ensure that we do not collide with the stack map of the previous instruction.
+      GenerateNop();
+    }
+    RecordPcInfo(instruction, dex_pc);
+  }
+}
+
 void CodeGenerator::RecordCatchBlockInfo() {
   ArenaAllocator* arena = graph_->GetArena();

diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 49c193e7bf..789bf4019f 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -267,6 +267,8 @@ class CodeGenerator {
   void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
   // Check whether we have already recorded mapping at this PC.
   bool HasStackMapAtCurrentPc();
+  // Record extra stack maps if we support native debugging.
+  void MaybeRecordNativeDebugInfo(HInstruction* instruction, uint32_t dex_pc);

   bool CanMoveNullCheckToUser(HNullCheck* null_check);
   void MaybeRecordImplicitNullCheck(HInstruction* instruction);
@@ -440,6 +442,8 @@ class CodeGenerator {
   // Copy the result of a call into the given target.
   virtual void MoveFromReturnRegister(Location trg, Primitive::Type type) = 0;

+  virtual void GenerateNop() = 0;
+
  protected:
   // Method patch info used for recording locations of required linker patches and
   // target methods. The target method can be used for various purposes, whether for
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 87f52c6f21..f60c5e9d86 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1557,11 +1557,11 @@ void LocationsBuilderARM::VisitNativeDebugInfo(HNativeDebugInfo* info) {
 }

 void InstructionCodeGeneratorARM::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorARM::GenerateNop() {
+  __ nop();
 }

 void LocationsBuilderARM::HandleCondition(HCondition* cond) {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index cfd7a3bc14..2e4dc1e014 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -510,6 +510,8 @@ class CodeGeneratorARM : public CodeGenerator {
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

+  void GenerateNop();
+
  private:
   // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 435ae5e954..0c2e9cf373 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3057,11 +3057,11 @@ void LocationsBuilderARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
 }

 void InstructionCodeGeneratorARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ Nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorARM64::GenerateNop() {
+  __ Nop();
 }

 void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 360488eb4a..fea87ab6dc 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -532,6 +532,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

+  void GenerateNop();
+
  private:
   // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 3eda8639c1..23ca703b89 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3406,11 +3406,11 @@ void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
 }

 void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ Nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorMIPS::GenerateNop() {
+  __ Nop();
 }

 void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 12964b0b6a..752bf3b986 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -363,6 +363,8 @@ class CodeGeneratorMIPS : public CodeGenerator {
     UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
   }

+  void GenerateNop();
+
  private:
   // Labels for each block that will be compiled.
   MipsLabel* block_labels_;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 119084e026..db85dbeba6 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2733,11 +2733,11 @@ void LocationsBuilderMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
 }

 void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ Nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorMIPS64::GenerateNop() {
+  __ Nop();
 }

 void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 1161253792..1ba44dfc54 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -352,6 +352,8 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
     UNIMPLEMENTED(FATAL);
   }

+  void GenerateNop();
+
  private:
   // Labels for each block that will be compiled.
   Mips64Label* block_labels_;  // Indexed by block id.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 07edd97c1f..143dad8085 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1536,11 +1536,11 @@ void LocationsBuilderX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
 }

 void InstructionCodeGeneratorX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorX86::GenerateNop() {
+  __ nop();
 }

 void LocationsBuilderX86::VisitLocal(HLocal* local) {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 2fb6d60ad5..858fe17e72 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -539,6 +539,7 @@ class CodeGeneratorX86 : public CodeGenerator {
     }
   }

+  void GenerateNop();

  private:
   // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a53a6be3de..e79c1fb227 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1632,11 +1632,11 @@ void LocationsBuilderX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
 }

 void InstructionCodeGeneratorX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  if (codegen_->HasStackMapAtCurrentPc()) {
-    // Ensure that we do not collide with the stack map of the previous instruction.
-    __ nop();
-  }
-  codegen_->RecordPcInfo(info, info->GetDexPc());
+  codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorX86_64::GenerateNop() {
+  __ nop();
 }

 void LocationsBuilderX86_64::VisitLocal(HLocal* local) {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 97f6f84236..b3d27e194a 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -513,6 +513,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
     }
   }

+  void GenerateNop();
+
  private:
   // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.