Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_mips.cc     100
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc    40
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc   26
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc            3
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc          6
-rw-r--r--  compiler/optimizing/nodes.h                     12
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc      38
-rw-r--r--  compiler/optimizing/register_allocator.cc        7
8 files changed, 169 insertions, 63 deletions
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 8d3e863d82..75f5fb3bab 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1508,7 +1508,7 @@ void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction)
}
void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
Primitive::Type type = instr->GetResultType();
@@ -1531,7 +1531,7 @@ void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = instr->GetLocations();
Primitive::Type type = instr->GetType();
@@ -1541,28 +1541,49 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
uint32_t shift_mask = (type == Primitive::kPrimInt) ? kMaxIntShiftValue : kMaxLongShiftValue;
uint32_t shift_value = rhs_imm & shift_mask;
- // Is the INS (Insert Bit Field) instruction supported?
- bool has_ins = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
+ // Are the INS (Insert Bit Field) and ROTR instructions supported?
+ bool has_ins_rotr = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
switch (type) {
case Primitive::kPrimInt: {
Register dst = locations->Out().AsRegister<Register>();
Register lhs = locations->InAt(0).AsRegister<Register>();
if (use_imm) {
- if (instr->IsShl()) {
+ if (shift_value == 0) {
+ if (dst != lhs) {
+ __ Move(dst, lhs);
+ }
+ } else if (instr->IsShl()) {
__ Sll(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Sra(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Srl(dst, lhs, shift_value);
+ } else {
+ if (has_ins_rotr) {
+ __ Rotr(dst, lhs, shift_value);
+ } else {
+ __ Sll(TMP, lhs, (kMipsBitsPerWord - shift_value) & shift_mask);
+ __ Srl(dst, lhs, shift_value);
+ __ Or(dst, dst, TMP);
+ }
}
} else {
if (instr->IsShl()) {
__ Sllv(dst, lhs, rhs_reg);
} else if (instr->IsShr()) {
__ Srav(dst, lhs, rhs_reg);
- } else {
+ } else if (instr->IsUShr()) {
__ Srlv(dst, lhs, rhs_reg);
+ } else {
+ if (has_ins_rotr) {
+ __ Rotrv(dst, lhs, rhs_reg);
+ } else {
+ __ Subu(TMP, ZERO, rhs_reg);
+ __ Sllv(TMP, lhs, TMP);
+ __ Srlv(dst, lhs, rhs_reg);
+ __ Or(dst, dst, TMP);
+ }
}
}
break;
@@ -1577,7 +1598,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
if (shift_value == 0) {
codegen_->Move64(locations->Out(), locations->InAt(0));
} else if (shift_value < kMipsBitsPerWord) {
- if (has_ins) {
+ if (has_ins_rotr) {
if (instr->IsShl()) {
__ Srl(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
__ Ins(dst_high, lhs_high, shift_value, kMipsBitsPerWord - shift_value);
@@ -1586,10 +1607,15 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Srl(dst_low, lhs_low, shift_value);
__ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
__ Sra(dst_high, lhs_high, shift_value);
+ } else if (instr->IsUShr()) {
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
+ __ Srl(dst_high, lhs_high, shift_value);
} else {
__ Srl(dst_low, lhs_low, shift_value);
__ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
__ Srl(dst_high, lhs_high, shift_value);
+ __ Ins(dst_high, lhs_low, kMipsBitsPerWord - shift_value, shift_value);
}
} else {
if (instr->IsShl()) {
@@ -1602,11 +1628,18 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
__ Srl(dst_low, lhs_low, shift_value);
__ Or(dst_low, dst_low, TMP);
- } else {
+ } else if (instr->IsUShr()) {
__ Srl(dst_high, lhs_high, shift_value);
__ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
__ Srl(dst_low, lhs_low, shift_value);
__ Or(dst_low, dst_low, TMP);
+ } else {
+ __ Srl(TMP, lhs_low, shift_value);
+ __ Sll(dst_low, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ __ Srl(TMP, lhs_high, shift_value);
+ __ Sll(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
+ __ Or(dst_high, dst_high, TMP);
}
}
} else {
@@ -1617,9 +1650,29 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
} else if (instr->IsShr()) {
__ Sra(dst_low, lhs_high, shift_value);
__ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
- } else {
+ } else if (instr->IsUShr()) {
__ Srl(dst_low, lhs_high, shift_value);
__ Move(dst_high, ZERO);
+ } else {
+ if (shift_value == 0) {
+ // 64-bit rotation by 32 is just a swap.
+ __ Move(dst_low, lhs_high);
+ __ Move(dst_high, lhs_low);
+ } else {
+ if (has_ins_rotr) {
+ __ Srl(dst_low, lhs_high, shift_value);
+ __ Ins(dst_low, lhs_low, kMipsBitsPerWord - shift_value, shift_value);
+ __ Srl(dst_high, lhs_low, shift_value);
+ __ Ins(dst_high, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
+ } else {
+ __ Sll(TMP, lhs_low, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_low, lhs_high, shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_high, lhs_low, shift_value);
+ __ Or(dst_high, dst_high, TMP);
+ }
+ }
}
}
} else {
@@ -1646,7 +1699,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Beqz(TMP, &done);
__ Move(dst_low, dst_high);
__ Sra(dst_high, dst_high, 31);
- } else {
+ } else if (instr->IsUShr()) {
__ Srlv(dst_high, lhs_high, rhs_reg);
__ Nor(AT, ZERO, rhs_reg);
__ Sll(TMP, lhs_high, 1);
@@ -1657,6 +1710,21 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Beqz(TMP, &done);
__ Move(dst_low, dst_high);
__ Move(dst_high, ZERO);
+ } else {
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Srlv(TMP, lhs_low, rhs_reg);
+ __ Sll(dst_low, lhs_high, 1);
+ __ Sllv(dst_low, dst_low, AT);
+ __ Or(dst_low, dst_low, TMP);
+ __ Srlv(TMP, lhs_high, rhs_reg);
+ __ Sll(dst_high, lhs_low, 1);
+ __ Sllv(dst_high, dst_high, AT);
+ __ Or(dst_high, dst_high, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(TMP, dst_high);
+ __ Move(dst_high, dst_low);
+ __ Move(dst_low, TMP);
}
__ Bind(&done);
}
@@ -4536,14 +4604,12 @@ void InstructionCodeGeneratorMIPS::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UN
codegen_->GenerateFrameExit();
}
-void LocationsBuilderMIPS::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void LocationsBuilderMIPS::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
-void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
void LocationsBuilderMIPS::VisitShl(HShl* shl) {
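The MIPS32 lowering above falls back to shift/or sequences when the R2 ROTR/ROTRV instructions are unavailable. A minimal C++ sketch of the identities those sequences implement, assuming a non-zero rotate amount as in the generated paths (helper names are illustrative, not from the patch):

    #include <cstdint>

    // 32-bit rotate right: the Srl/Sll/Or fallback used when has_ins_rotr is false.
    uint32_t Rotr32(uint32_t x, uint32_t s) {
      s &= 31;
      if (s == 0) return x;  // the code generator handles this case with a plain Move
      return (x >> s) | (x << (32 - s));
    }

    // 64-bit rotate right on a register pair, mirroring the constant-shift path:
    // each output half combines bits from both input halves, and a rotation by
    // exactly 32 degenerates into a swap of the two halves.
    uint64_t Rotr64(uint32_t lo, uint32_t hi, uint32_t s) {
      s &= 63;
      uint64_t x = (static_cast<uint64_t>(hi) << 32) | lo;
      if (s == 0) return x;
      return (x >> s) | (x << (64 - s));
    }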
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 8c4f0c6c0d..abfaae4b50 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1247,7 +1247,7 @@ void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instructio
}
void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
Primitive::Type type = instr->GetResultType();
@@ -1265,7 +1265,7 @@ void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
}
void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = instr->GetLocations();
Primitive::Type type = instr->GetType();
@@ -1290,13 +1290,19 @@ void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
: static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);
- if (type == Primitive::kPrimInt) {
+ if (shift_value == 0) {
+ if (dst != lhs) {
+ __ Move(dst, lhs);
+ }
+ } else if (type == Primitive::kPrimInt) {
if (instr->IsShl()) {
__ Sll(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Sra(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Srl(dst, lhs, shift_value);
+ } else {
+ __ Rotr(dst, lhs, shift_value);
}
} else {
if (shift_value < 32) {
@@ -1304,8 +1310,10 @@ void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
__ Dsll(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Dsra(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Dsrl(dst, lhs, shift_value);
+ } else {
+ __ Drotr(dst, lhs, shift_value);
}
} else {
shift_value -= 32;
@@ -1313,8 +1321,10 @@ void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
__ Dsll32(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Dsra32(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Dsrl32(dst, lhs, shift_value);
+ } else {
+ __ Drotr32(dst, lhs, shift_value);
}
}
}
@@ -1324,16 +1334,20 @@ void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
__ Sllv(dst, lhs, rhs_reg);
} else if (instr->IsShr()) {
__ Srav(dst, lhs, rhs_reg);
- } else {
+ } else if (instr->IsUShr()) {
__ Srlv(dst, lhs, rhs_reg);
+ } else {
+ __ Rotrv(dst, lhs, rhs_reg);
}
} else {
if (instr->IsShl()) {
__ Dsllv(dst, lhs, rhs_reg);
} else if (instr->IsShr()) {
__ Dsrav(dst, lhs, rhs_reg);
- } else {
+ } else if (instr->IsUShr()) {
__ Dsrlv(dst, lhs, rhs_reg);
+ } else {
+ __ Drotrv(dst, lhs, rhs_reg);
}
}
}
@@ -3724,14 +3738,12 @@ void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_
codegen_->GenerateFrameExit();
}
-void LocationsBuilderMIPS64::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void LocationsBuilderMIPS64::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
-void InstructionCodeGeneratorMIPS64::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void InstructionCodeGeneratorMIPS64::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index c504ded54c..b90afb1d73 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -211,19 +211,6 @@ bool InstructionSimplifierVisitor::ReplaceRotateWithRor(HBinaryOperation* op,
// Try to replace a binary operation flanked by one UShr and one Shl with a bitfield rotation.
bool InstructionSimplifierVisitor::TryReplaceWithRotate(HBinaryOperation* op) {
- // This simplification is currently supported on x86, x86_64, ARM and ARM64.
- // TODO: Implement it for MIPS/64.
- const InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- switch (instruction_set) {
- case kArm:
- case kArm64:
- case kThumb2:
- case kX86:
- case kX86_64:
- break;
- default:
- return false;
- }
DCHECK(op->IsAdd() || op->IsXor() || op->IsOr());
HInstruction* left = op->GetLeft();
HInstruction* right = op->GetRight();
@@ -1261,19 +1248,6 @@ void InstructionSimplifierVisitor::SimplifyStringEquals(HInvoke* instruction) {
void InstructionSimplifierVisitor::SimplifyRotate(HInvoke* invoke, bool is_left) {
DCHECK(invoke->IsInvokeStaticOrDirect());
DCHECK_EQ(invoke->GetOriginalInvokeType(), InvokeType::kStatic);
- // This simplification is currently supported on x86, x86_64, ARM and ARM64.
- // TODO: Implement it for MIPS/64.
- const InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- switch (instruction_set) {
- case kArm:
- case kArm64:
- case kThumb2:
- case kX86:
- case kX86_64:
- break;
- default:
- return;
- }
HInstruction* value = invoke->InputAt(0);
HInstruction* distance = invoke->InputAt(1);
// Replace the invoke with an HRor.
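With the back-end restrictions removed, TryReplaceWithRotate (which matches an Or/Xor/Add combining a UShr and a Shl whose distances add up to the operand width) and SimplifyRotate (which rewrites the rotate intrinsic invokes) now apply on every instruction set, including the MIPS and MIPS64 back ends extended above. A sketch of the shift/or idiom that ends up as a single HRor, written as ordinary C++ for illustration rather than taken from the compiler:

    #include <cstdint>

    // The UShr + Shl + Or pattern the simplifier collapses into one rotate.
    uint32_t ManualRotr(uint32_t x, uint32_t d) {
      d &= 31;
      return (x >> d) | (x << ((32 - d) & 31));
    }

    // A left rotation can be normalized onto the same rotate-right node by
    // negating the distance: rotl(x, d) == rotr(x, (32 - d) mod 32).
    uint32_t ManualRotl(uint32_t x, uint32_t d) {
      return ManualRotr(x, (32u - d) & 31);
    }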
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 1e6b3a1fb3..b1fbf28204 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -847,6 +847,9 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
}
// Prevent reordering with prior memory operations.
+ // Emit a DMB ISH instruction instead of a DMB ISHST one, as the
+ // latter allows a preceding load to be delayed past the STXR
+ // instruction below.
__ dmb(ISH);
__ add(tmp_ptr, base, ShifterOperand(offset));
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index f723940444..81cab86c83 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1035,7 +1035,11 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
__ Stlxr(tmp_32, value, MemOperand(tmp_ptr));
__ Cbnz(tmp_32, &loop_head);
} else {
- __ Dmb(InnerShareable, BarrierWrites);
+ // Emit a `Dmb(InnerShareable, BarrierAll)` (DMB ISH) instruction
+ // instead of a `Dmb(InnerShareable, BarrierWrites)` (DMB ISHST)
+ // one, as the latter allows a preceding load to be delayed past
+ // the STXR instruction below.
+ __ Dmb(InnerShareable, BarrierAll);
__ Bind(&loop_head);
// TODO: When `type == Primitive::kPrimNot`, add a read barrier for
// the reference stored in the object before attempting the CAS,
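To visualize the barrier placement the two comments above describe, here is a rough C++ approximation of the CAS shape; C++ has no direct analogue of the store-only DMB ISHST, so a full fence stands in for DMB ISH (the real intrinsics emit LDREX/STREX or LDXR/STLXR directly, this is only an illustration):

    #include <atomic>
    #include <cstdint>

    bool CasInt(std::atomic<int32_t>* addr, int32_t expected, int32_t desired) {
      // Full barrier before the exclusive store: orders preceding loads as well
      // as stores. A store-store barrier here would let a prior load be observed
      // after the STXR, which is exactly the reordering the patch rules out.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      return addr->compare_exchange_strong(expected, desired,
                                           std::memory_order_relaxed);
    }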
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 9a7dfd8abf..b65d0f5750 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2491,8 +2491,10 @@ class HTryBoundary : public HTemplateInstruction<0> {
// Deoptimize to interpreter, upon checking a condition.
class HDeoptimize : public HTemplateInstruction<1> {
public:
+ // We set CanTriggerGC to prevent any intermediate address from being live
+ // at the point of the `HDeoptimize`.
HDeoptimize(HInstruction* cond, uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::None(), dex_pc) {
+ : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, cond);
}
@@ -4530,8 +4532,10 @@ class HPhi : public HInstruction {
class HNullCheck : public HExpression<1> {
public:
+ // `HNullCheck` can trigger GC, as it may call the `NullPointerException`
+ // constructor.
HNullCheck(HInstruction* value, uint32_t dex_pc)
- : HExpression(value->GetType(), SideEffects::None(), dex_pc) {
+ : HExpression(value->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, value);
}
@@ -4852,8 +4856,10 @@ class HArrayLength : public HExpression<1> {
class HBoundsCheck : public HExpression<2> {
public:
+ // `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
+ // constructor.
HBoundsCheck(HInstruction* index, HInstruction* length, uint32_t dex_pc)
- : HExpression(index->GetType(), SideEffects::None(), dex_pc) {
+ : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
DCHECK(index->GetType() == Primitive::kPrimInt);
SetRawInputAt(0, index);
SetRawInputAt(1, length);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 3eb72744ee..988e32bc1a 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -17,6 +17,7 @@
#include "optimizing_compiler.h"
#include <fstream>
+#include <memory>
#include <stdint.h>
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -52,6 +53,8 @@
#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
+#include "dwarf/method_debug_info.h"
+#include "elf_writer_debug.h"
#include "elf_writer_quick.h"
#include "graph_checker.h"
#include "graph_visualizer.h"
@@ -60,6 +63,7 @@
#include "inliner.h"
#include "instruction_simplifier.h"
#include "intrinsics.h"
+#include "jit/debugger_interface.h"
#include "jit/jit_code_cache.h"
#include "licm.h"
#include "jni/quick/jni_compiler.h"
@@ -68,6 +72,7 @@
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
+#include "oat_quick_method_header.h"
#include "sharpening.h"
#include "side_effects_analysis.h"
#include "ssa_builder.h"
@@ -968,6 +973,39 @@ bool OptimizingCompiler::JitCompile(Thread* self,
return false;
}
+ if (GetCompilerDriver()->GetCompilerOptions().GetGenerateDebugInfo()) {
+ const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
+ const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
+ CompiledMethod compiled_method(
+ GetCompilerDriver(),
+ codegen->GetInstructionSet(),
+ ArrayRef<const uint8_t>(code_allocator.GetMemory()),
+ codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
+ codegen->GetCoreSpillMask(),
+ codegen->GetFpuSpillMask(),
+ ArrayRef<const SrcMapElem>(),
+ ArrayRef<const uint8_t>(), // mapping_table.
+ ArrayRef<const uint8_t>(stack_map_data, stack_map_size),
+ ArrayRef<const uint8_t>(), // native_gc_map.
+ ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
+ ArrayRef<const LinkerPatch>());
+ dwarf::MethodDebugInfo method_debug_info {
+ dex_file,
+ class_def_idx,
+ method_idx,
+ access_flags,
+ code_item,
+ false, // deduped.
+ code_address,
+ code_address + code_allocator.GetSize(),
+ &compiled_method
+ };
+ ArrayRef<const uint8_t> elf_file = dwarf::WriteDebugElfFileForMethod(method_debug_info);
+ CreateJITCodeEntryForAddress(code_address,
+ std::unique_ptr<const uint8_t[]>(elf_file.data()),
+ elf_file.size());
+ }
+
return true;
}
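The new block above builds a per-method in-memory ELF file containing DWARF (WriteDebugElfFileForMethod) and registers it for the JIT-compiled code range via CreateJITCodeEntryForAddress, so a native debugger can symbolize JIT code. That registration is presumably built on the standard GDB JIT compilation interface; a minimal sketch of that interface, as documented by GDB rather than taken from this patch:

    #include <cstdint>

    typedef enum { JIT_NOACTION = 0, JIT_REGISTER_FN, JIT_UNREGISTER_FN } jit_actions_t;

    struct jit_code_entry {
      jit_code_entry* next_entry;
      jit_code_entry* prev_entry;
      const char* symfile_addr;   // the in-memory ELF image for one method
      uint64_t symfile_size;
    };

    struct jit_descriptor {
      uint32_t version;           // must be 1
      uint32_t action_flag;       // a jit_actions_t value
      jit_code_entry* relevant_entry;
      jit_code_entry* first_entry;
    };

    // The debugger places a breakpoint on this function; the JIT calls it after
    // linking a new entry into __jit_debug_descriptor.first_entry.
    extern "C" void __attribute__((noinline)) __jit_debug_register_code() {
      asm volatile("");  // GCC/Clang no-op to keep the call from being optimized away
    }

    extern "C" jit_descriptor __jit_debug_descriptor = {1, JIT_NOACTION, nullptr, nullptr};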
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 9a06d9be41..eb0419b6e0 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1677,7 +1677,9 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
LocationSummary* locations = safepoint_position->GetLocations();
if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
- DCHECK(interval->GetDefinedBy()->IsActualObject()) << interval->GetDefinedBy()->DebugName();
+ DCHECK(interval->GetDefinedBy()->IsActualObject())
+ << interval->GetDefinedBy()->DebugName()
+ << "@" << safepoint_position->GetInstruction()->DebugName();
locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
}
@@ -1691,7 +1693,8 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
}
if (current->GetType() == Primitive::kPrimNot) {
DCHECK(interval->GetDefinedBy()->IsActualObject())
- << interval->GetDefinedBy()->DebugName();
+ << interval->GetDefinedBy()->DebugName()
+ << "@" << safepoint_position->GetInstruction()->DebugName();
locations->SetRegisterBit(source.reg());
}
break;