Update the recorded locations of registers after slow-path spilling.
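
When a slow path saves live registers before calling into the runtime, the
stack map recorded for that call must describe dex registers held in those
registers by their spill slots rather than by the (now clobbered) registers.
This change passes the SlowPathCode through InvokeRuntime and RecordPcInfo,
has SaveLiveRegisters remember the stack offset of each saved register, and
uses those offsets when recording dex register locations.

A minimal standalone sketch of the new bookkeeping (not ART code: the names
FakeSlowPath and DescribeLocation are invented for illustration; the real
pieces are SlowPathCode and CodeGenerator::RecordPcInfo):

    #include <cstdint>
    #include <cstdio>

    constexpr int kMaxRegisters = 32;
    constexpr uint32_t kNotSaved = static_cast<uint32_t>(-1);

    struct FakeSlowPath {
      uint32_t saved_core_stack_offsets[kMaxRegisters];

      FakeSlowPath() {
        for (int i = 0; i < kMaxRegisters; ++i) {
          saved_core_stack_offsets[i] = kNotSaved;
        }
      }

      // Recorded while spilling live registers before the runtime call.
      void SaveCoreRegister(int reg, uint32_t stack_offset) {
        saved_core_stack_offsets[reg] = stack_offset;
      }

      bool IsCoreRegisterSaved(int reg) const {
        return saved_core_stack_offsets[reg] != kNotSaved;
      }

      uint32_t GetStackOffsetOfCoreRegister(int reg) const {
        return saved_core_stack_offsets[reg];
      }
    };

    // When emitting the stack map for the runtime call, a register the slow
    // path spilled is described by its stack slot, not the clobbered register.
    void DescribeLocation(const FakeSlowPath* slow_path, int reg) {
      if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(reg)) {
        std::printf("in stack, offset %u\n", slow_path->GetStackOffsetOfCoreRegister(reg));
      } else {
        std::printf("in core register %d\n", reg);
      }
    }

    int main() {
      FakeSlowPath slow_path;
      slow_path.SaveCoreRegister(/* reg= */ 1, /* stack_offset= */ 8);
      DescribeLocation(&slow_path, 1);  // prints: in stack, offset 8
      DescribeLocation(&slow_path, 2);  // prints: in core register 2
      return 0;
    }
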
Change-Id: Id9aafcc13c1a085c17ce65d704c67b73f9de695d
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c4cef09..561dcb7 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -547,7 +547,9 @@
stack_map_stream_.FillIn(region);
}
-void CodeGenerator::RecordPcInfo(HInstruction* instruction, uint32_t dex_pc) {
+void CodeGenerator::RecordPcInfo(HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
if (instruction != nullptr) {
// The code generated for some type conversions may call the
// runtime, thus normally requiring a subsequent call to this
@@ -578,41 +580,177 @@
pc_infos_.Add(pc_info);
uint32_t inlining_depth = 0;
+
if (instruction == nullptr) {
// For stack overflow checks.
- stack_map_stream_.RecordEnvironment(
- /* environment */ nullptr,
- /* environment_size */ 0,
- /* locations */ nullptr,
- dex_pc,
- pc_info.native_pc,
- /* register_mask */ 0,
- inlining_depth);
- } else {
- LocationSummary* locations = instruction->GetLocations();
- HEnvironment* environment = instruction->GetEnvironment();
- size_t environment_size = instruction->EnvironmentSize();
+ stack_map_stream_.AddStackMapEntry(dex_pc, pc_info.native_pc, 0, 0, 0, inlining_depth);
+ return;
+ }
+ LocationSummary* locations = instruction->GetLocations();
+ HEnvironment* environment = instruction->GetEnvironment();
+ size_t environment_size = instruction->EnvironmentSize();
- uint32_t register_mask = locations->GetRegisterMask();
- if (locations->OnlyCallsOnSlowPath()) {
- // In case of slow path, we currently set the location of caller-save registers
- // to register (instead of their stack location when pushed before the slow-path
- // call). Therefore register_mask contains both callee-save and caller-save
- // registers that hold objects. We must remove the caller-save from the mask, since
- // they will be overwritten by the callee.
- register_mask &= core_callee_save_mask_;
+ uint32_t register_mask = locations->GetRegisterMask();
+ if (locations->OnlyCallsOnSlowPath()) {
+ // In the case of a slow path, we currently set the location of caller-save registers
+ // to the register itself (instead of the stack location where they are pushed before
+ // the slow-path call). Therefore register_mask contains both callee-save and
+ // caller-save registers that hold objects. We must remove the caller-saves from the
+ // mask, since they will be overwritten by the callee.
+ register_mask &= core_callee_save_mask_;
+ }
+ // The register mask must be a subset of callee-save registers.
+ DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
+ stack_map_stream_.AddStackMapEntry(dex_pc,
+ pc_info.native_pc,
+ register_mask,
+ locations->GetStackMask(),
+ environment_size,
+ inlining_depth);
+
+ // Walk over the environment, and record the location of dex registers.
+ for (size_t i = 0; i < environment_size; ++i) {
+ HInstruction* current = environment->GetInstructionAt(i);
+ if (current == nullptr) {
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kNone, 0);
+ continue;
}
- // The register mask must be a subset of callee-save registers.
- DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
- // Populate stack map information.
- stack_map_stream_.RecordEnvironment(environment,
- environment_size,
- locations,
- dex_pc,
- pc_info.native_pc,
- register_mask,
- inlining_depth);
+ Location location = locations->GetEnvironmentAt(i);
+ switch (location.GetKind()) {
+ case Location::kConstant: {
+ DCHECK_EQ(current, location.GetConstant());
+ if (current->IsLongConstant()) {
+ int64_t value = current->AsLongConstant()->GetValue();
+ stack_map_stream_.AddDexRegisterEntry(
+ i, DexRegisterLocation::Kind::kConstant, Low32Bits(value));
+ stack_map_stream_.AddDexRegisterEntry(
+ ++i, DexRegisterLocation::Kind::kConstant, High32Bits(value));
+ DCHECK_LT(i, environment_size);
+ } else if (current->IsDoubleConstant()) {
+ int64_t value = bit_cast<double, int64_t>(current->AsDoubleConstant()->GetValue());
+ stack_map_stream_.AddDexRegisterEntry(
+ i, DexRegisterLocation::Kind::kConstant, Low32Bits(value));
+ stack_map_stream_.AddDexRegisterEntry(
+ ++i, DexRegisterLocation::Kind::kConstant, High32Bits(value));
+ DCHECK_LT(i, environment_size);
+ } else if (current->IsIntConstant()) {
+ int32_t value = current->AsIntConstant()->GetValue();
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, value);
+ } else if (current->IsNullConstant()) {
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, 0);
+ } else {
+ DCHECK(current->IsFloatConstant()) << current->DebugName();
+ int32_t value = bit_cast<float, int32_t>(current->AsFloatConstant()->GetValue());
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, value);
+ }
+ break;
+ }
+
+ case Location::kStackSlot: {
+ stack_map_stream_.AddDexRegisterEntry(
+ i, DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
+ break;
+ }
+
+ case Location::kDoubleStackSlot: {
+ stack_map_stream_.AddDexRegisterEntry(
+ i, DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
+ stack_map_stream_.AddDexRegisterEntry(
+ ++i, DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
+ DCHECK_LT(i, environment_size);
+ break;
+ }
+
+ case Location::kRegister : {
+ int id = location.reg();
+ if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
+ uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
+ if (current->GetType() == Primitive::kPrimLong) {
+ stack_map_stream_.AddDexRegisterEntry(
+ ++i, DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
+ DCHECK_LT(i, environment_size);
+ }
+ } else {
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInRegister, id);
+ if (current->GetType() == Primitive::kPrimLong) {
+ stack_map_stream_.AddDexRegisterEntry(++i, DexRegisterLocation::Kind::kInRegister, id);
+ DCHECK_LT(i, environment_size);
+ }
+ }
+ break;
+ }
+
+ case Location::kFpuRegister : {
+ int id = location.reg();
+ if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
+ uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
+ if (current->GetType() == Primitive::kPrimDouble) {
+ stack_map_stream_.AddDexRegisterEntry(
+ ++i, DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
+ DCHECK_LT(i, environment_size);
+ }
+ } else {
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInFpuRegister, id);
+ if (current->GetType() == Primitive::kPrimDouble) {
+ stack_map_stream_.AddDexRegisterEntry(
+ ++i, DexRegisterLocation::Kind::kInFpuRegister, id);
+ DCHECK_LT(i, environment_size);
+ }
+ }
+ break;
+ }
+
+ case Location::kFpuRegisterPair : {
+ int low = location.low();
+ int high = location.high();
+ if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
+ uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
+ } else {
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInFpuRegister, low);
+ }
+ if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
+ uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
+ stack_map_stream_.AddDexRegisterEntry(++i, DexRegisterLocation::Kind::kInStack, offset);
+ } else {
+ stack_map_stream_.AddDexRegisterEntry(
+ ++i, DexRegisterLocation::Kind::kInFpuRegister, high);
+ }
+ DCHECK_LT(i, environment_size);
+ break;
+ }
+
+ case Location::kRegisterPair : {
+ int low = location.low();
+ int high = location.high();
+ if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
+ uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
+ } else {
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInRegister, low);
+ }
+ if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
+ uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
+ stack_map_stream_.AddDexRegisterEntry(++i, DexRegisterLocation::Kind::kInStack, offset);
+ } else {
+ stack_map_stream_.AddDexRegisterEntry(
+ ++i, DexRegisterLocation::Kind::kInRegister, high);
+ }
+ DCHECK_LT(i, environment_size);
+ break;
+ }
+
+ case Location::kInvalid: {
+ stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kNone, 0);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected kind " << location.GetKind();
+ }
}
}
@@ -677,7 +815,7 @@
}
void SlowPathCode::RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc) {
- codegen->RecordPcInfo(instruction, dex_pc);
+ codegen->RecordPcInfo(instruction, dex_pc, this);
}
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
@@ -691,6 +829,8 @@
locations->SetStackBit(stack_offset / kVRegSize);
}
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_core_stack_offsets_[i] = stack_offset;
stack_offset += codegen->SaveCoreRegister(stack_offset, i);
}
}
@@ -700,6 +840,8 @@
if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
if (register_set->ContainsFloatingPointRegister(i)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_fpu_stack_offsets_[i] = stack_offset;
stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
}
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 81efc03..667f686 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -66,7 +66,13 @@
class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
public:
- SlowPathCode() {}
+ SlowPathCode() {
+ for (size_t i = 0; i < kMaximumNumberOfExpectedRegisters; ++i) {
+ saved_core_stack_offsets_[i] = kRegisterNotSaved;
+ saved_fpu_stack_offsets_[i] = kRegisterNotSaved;
+ }
+ }
+
virtual ~SlowPathCode() {}
virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
@@ -75,7 +81,27 @@
void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
void RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc);
+ bool IsCoreRegisterSaved(int reg) const {
+ return saved_core_stack_offsets_[reg] != kRegisterNotSaved;
+ }
+
+ bool IsFpuRegisterSaved(int reg) const {
+ return saved_fpu_stack_offsets_[reg] != kRegisterNotSaved;
+ }
+
+ uint32_t GetStackOffsetOfCoreRegister(int reg) const {
+ return saved_core_stack_offsets_[reg];
+ }
+
+ uint32_t GetStackOffsetOfFpuRegister(int reg) const {
+ return saved_fpu_stack_offsets_[reg];
+ }
+
private:
+ static constexpr size_t kMaximumNumberOfExpectedRegisters = 32;
+ static constexpr uint32_t kRegisterNotSaved = -1;
+ uint32_t saved_core_stack_offsets_[kMaximumNumberOfExpectedRegisters];
+ uint32_t saved_fpu_stack_offsets_[kMaximumNumberOfExpectedRegisters];
DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
@@ -171,7 +197,7 @@
return (fpu_callee_save_mask_ & (1 << reg)) != 0;
}
- void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc);
+ void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
bool CanMoveNullCheckToUser(HNullCheck* null_check);
void MaybeRecordImplicitNullCheck(HInstruction* instruction);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index aed8c06..123f690 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -82,7 +82,7 @@
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc());
+ QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
}
private:
@@ -98,7 +98,7 @@
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
+ QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
}
private:
@@ -116,7 +116,7 @@
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
+ QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ b(GetReturnLabel());
@@ -162,7 +162,7 @@
length_location_,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
+ QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
}
private:
@@ -196,7 +196,7 @@
int32_t entry_point_offset = do_clinit_
? QUICK_ENTRY_POINT(pInitializeStaticStorage)
: QUICK_ENTRY_POINT(pInitializeType);
- arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
+ arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
// Move the class to the desired location.
Location out = locations->Out();
@@ -241,7 +241,7 @@
arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
__ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
+ QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
RestoreLiveRegisters(codegen, locations);
@@ -284,11 +284,12 @@
Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
if (instruction_->IsInstanceOf()) {
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
} else {
DCHECK(instruction_->IsCheckCast());
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
}
RestoreLiveRegisters(codegen, locations);
@@ -857,10 +858,11 @@
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
HInstruction* instruction,
- uint32_t dex_pc) {
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
__ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
__ blx(LR);
- RecordPcInfo(instruction, dex_pc);
+ RecordPcInfo(instruction, dex_pc, slow_path);
DCHECK(instruction->IsSuspendCheck()
|| instruction->IsBoundsCheck()
|| instruction->IsNullCheck()
@@ -1674,14 +1676,16 @@
// Processing a Dex `float-to-long' instruction.
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
conversion,
- conversion->GetDexPc());
+ conversion->GetDexPc(),
+ nullptr);
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-long' instruction.
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
conversion,
- conversion->GetDexPc());
+ conversion->GetDexPc(),
+ nullptr);
break;
default:
@@ -2135,7 +2139,7 @@
DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
break;
}
@@ -2229,17 +2233,17 @@
}
case Primitive::kPrimLong: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
break;
}
case Primitive::kPrimFloat: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
break;
}
case Primitive::kPrimDouble: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
break;
}
@@ -2429,7 +2433,8 @@
__ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
- instruction->GetDexPc());
+ instruction->GetDexPc(),
+ nullptr);
}
void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
@@ -2448,7 +2453,8 @@
__ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
- instruction->GetDexPc());
+ instruction->GetDexPc(),
+ nullptr);
}
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
@@ -3178,7 +3184,8 @@
DCHECK_EQ(value_type, Primitive::kPrimNot);
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
instruction,
- instruction->GetDexPc());
+ instruction->GetDexPc(),
+ nullptr);
}
break;
}
@@ -3665,7 +3672,7 @@
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
codegen_->InvokeRuntime(
- QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
+ QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}
void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
@@ -3758,7 +3765,8 @@
codegen_->InvokeRuntime(instruction->IsEnter()
? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
instruction,
- instruction->GetDexPc());
+ instruction->GetDexPc(),
+ nullptr);
}
void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index f1a3729..57e1d2f 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -246,7 +246,8 @@
void LoadCurrentMethod(Register reg);
// Generate code to invoke a runtime entry point.
- void InvokeRuntime(int32_t offset, HInstruction* instruction, uint32_t dex_pc);
+ void InvokeRuntime(
+ int32_t offset, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path);
// Emit a write barrier.
void MarkGCCard(Register temp, Register card, Register object, Register value);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 93c4ce5..c48cab4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -147,7 +147,7 @@
index_location_, LocationFrom(calling_convention.GetRegisterAt(0)),
length_location_, LocationFrom(calling_convention.GetRegisterAt(1)));
arm64_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
+ QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -167,7 +167,7 @@
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
arm64_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
+ QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -198,7 +198,7 @@
arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
: QUICK_ENTRY_POINT(pInitializeType);
- arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
+ arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t, mirror::ArtMethod*>();
} else {
@@ -250,7 +250,7 @@
arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
__ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
arm64_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
+ QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t, mirror::ArtMethod*>();
Primitive::Type type = instruction_->GetType();
arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
@@ -273,7 +273,7 @@
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
arm64_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc());
+ QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
@@ -294,7 +294,7 @@
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
arm64_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
+ QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
@@ -348,7 +348,8 @@
object_class_, LocationFrom(calling_convention.GetRegisterAt(1)));
if (instruction_->IsInstanceOf()) {
- arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
+ arm64_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
@@ -356,7 +357,7 @@
const mirror::Class*, const mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
- arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
+ arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
@@ -984,11 +985,12 @@
void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
HInstruction* instruction,
- uint32_t dex_pc) {
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
__ Ldr(lr, MemOperand(tr, entry_point_offset));
__ Blr(lr);
if (instruction != nullptr) {
- RecordPcInfo(instruction, dex_pc);
+ RecordPcInfo(instruction, dex_pc, slow_path);
DCHECK(instruction->IsSuspendCheck()
|| instruction->IsBoundsCheck()
|| instruction->IsNullCheck()
@@ -1298,7 +1300,8 @@
void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
Primitive::Type value_type = instruction->GetComponentType();
if (value_type == Primitive::kPrimNot) {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(
+ QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc(), nullptr);
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
} else {
LocationSummary* locations = instruction->GetLocations();
@@ -2066,7 +2069,8 @@
codegen_->InvokeRuntime(instruction->IsEnter()
? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
instruction,
- instruction->GetDexPc());
+ instruction->GetDexPc(),
+ nullptr);
CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
}
@@ -2172,7 +2176,8 @@
codegen_->InvokeRuntime(
GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
- instruction->GetDexPc());
+ instruction->GetDexPc(),
+ nullptr);
CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
void*, uint32_t, int32_t, mirror::ArtMethod*>();
}
@@ -2198,7 +2203,8 @@
codegen_->InvokeRuntime(
GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
- instruction->GetDexPc());
+ instruction->GetDexPc(),
+ nullptr);
CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
}
@@ -2352,7 +2358,7 @@
case Primitive::kPrimDouble: {
int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
: QUICK_ENTRY_POINT(pFmod);
- codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc());
+ codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc(), nullptr);
break;
}
@@ -2527,7 +2533,7 @@
void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
codegen_->InvokeRuntime(
- QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
+ QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 48961d6..cbb2e5c 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -300,7 +300,10 @@
void StoreRelease(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
// Generate code to invoke a runtime entry point.
- void InvokeRuntime(int32_t offset, HInstruction* instruction, uint32_t dex_pc);
+ void InvokeRuntime(int32_t offset,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path);
ParallelMoveResolverARM64* GetMoveResolver() { return &move_resolver_; }
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 3168801..63a0286 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -68,8 +68,7 @@
uint32_t register_mask,
BitVector* sp_mask,
uint32_t num_dex_registers,
- uint8_t inlining_depth,
- BitVector* live_dex_registers_mask) {
+ uint8_t inlining_depth) {
StackMapEntry entry;
entry.dex_pc = dex_pc;
entry.native_pc_offset = native_pc_offset;
@@ -79,7 +78,12 @@
entry.inlining_depth = inlining_depth;
entry.dex_register_locations_start_index = dex_register_locations_.Size();
entry.inline_infos_start_index = inline_infos_.Size();
- entry.live_dex_registers_mask = live_dex_registers_mask;
+ if (num_dex_registers != 0) {
+ entry.live_dex_registers_mask =
+ new (allocator_) ArenaBitVector(allocator_, num_dex_registers, true);
+ } else {
+ entry.live_dex_registers_mask = nullptr;
+ }
stack_maps_.Add(entry);
if (sp_mask != nullptr) {
@@ -90,148 +94,6 @@
}
}
- void RecordEnvironment(HEnvironment* environment,
- size_t environment_size,
- LocationSummary* locations,
- uint32_t dex_pc,
- uint32_t native_pc,
- uint32_t register_mask,
- uint32_t inlining_depth) {
- if (environment == nullptr) {
- // For stack overflow checks.
- AddStackMapEntry(dex_pc, native_pc, 0, 0, 0, inlining_depth, nullptr);
- return;
- }
-
- BitVector* live_dex_registers_mask = new (allocator_) ArenaBitVector(allocator_, 0, true);
-
- AddStackMapEntry(
- dex_pc, native_pc, register_mask,
- locations->GetStackMask(), environment_size, inlining_depth, live_dex_registers_mask);
-
- // Walk over the environment, and record the location of dex registers.
- for (size_t i = 0; i < environment_size; ++i) {
- HInstruction* current = environment->GetInstructionAt(i);
- if (current == nullptr) {
- // No need to store anything, the `live_dex_registers_mask` will hold the
- // information that this register is not live.
- continue;
- }
-
- Location location = locations->GetEnvironmentAt(i);
- switch (location.GetKind()) {
- case Location::kConstant: {
- DCHECK_EQ(current, location.GetConstant());
- if (current->IsLongConstant()) {
- // TODO: Consider moving setting the bit in AddDexRegisterEntry to avoid
- // doing it manually here.
- live_dex_registers_mask->SetBit(i);
- live_dex_registers_mask->SetBit(i + 1);
- int64_t value = current->AsLongConstant()->GetValue();
- AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, Low32Bits(value));
- AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, High32Bits(value));
- ++i;
- DCHECK_LT(i, environment_size);
- } else if (current->IsDoubleConstant()) {
- live_dex_registers_mask->SetBit(i);
- live_dex_registers_mask->SetBit(i + 1);
- int64_t value = bit_cast<double, int64_t>(current->AsDoubleConstant()->GetValue());
- AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, Low32Bits(value));
- AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, High32Bits(value));
- ++i;
- DCHECK_LT(i, environment_size);
- } else if (current->IsIntConstant()) {
- live_dex_registers_mask->SetBit(i);
- int32_t value = current->AsIntConstant()->GetValue();
- AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
- } else if (current->IsNullConstant()) {
- live_dex_registers_mask->SetBit(i);
- AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
- } else {
- DCHECK(current->IsFloatConstant()) << current->DebugName();
- live_dex_registers_mask->SetBit(i);
- int32_t value = bit_cast<float, int32_t>(current->AsFloatConstant()->GetValue());
- AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
- }
- break;
- }
-
- case Location::kStackSlot: {
- live_dex_registers_mask->SetBit(i);
- AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
- location.GetStackIndex());
- break;
- }
-
- case Location::kDoubleStackSlot: {
- live_dex_registers_mask->SetBit(i);
- live_dex_registers_mask->SetBit(i + 1);
- AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
- AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
- location.GetHighStackIndex(kVRegSize));
- ++i;
- DCHECK_LT(i, environment_size);
- break;
- }
-
- case Location::kRegister : {
- live_dex_registers_mask->SetBit(i);
- int id = location.reg();
- AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
- if (current->GetType() == Primitive::kPrimLong) {
- live_dex_registers_mask->SetBit(i + 1);
- AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
- ++i;
- DCHECK_LT(i, environment_size);
- }
- break;
- }
-
- case Location::kFpuRegister : {
- live_dex_registers_mask->SetBit(i);
- int id = location.reg();
- AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
- if (current->GetType() == Primitive::kPrimDouble) {
- live_dex_registers_mask->SetBit(i + 1);
- AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
- ++i;
- DCHECK_LT(i, environment_size);
- }
- break;
- }
-
- case Location::kFpuRegisterPair : {
- live_dex_registers_mask->SetBit(i);
- live_dex_registers_mask->SetBit(i + 1);
- AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, location.low());
- AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, location.high());
- ++i;
- DCHECK_LT(i, environment_size);
- break;
- }
-
- case Location::kRegisterPair : {
- live_dex_registers_mask->SetBit(i);
- live_dex_registers_mask->SetBit(i + 1);
- AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, location.low());
- AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, location.high());
- ++i;
- DCHECK_LT(i, environment_size);
- break;
- }
-
- case Location::kInvalid: {
- // No need to store anything, the `live_dex_registers_mask` will hold the
- // information that this register is not live.
- break;
- }
-
- default:
- LOG(FATAL) << "Unexpected kind " << location.GetKind();
- }
- }
- }
-
void AddInlineInfoEntry(uint32_t method_index) {
InlineInfoEntry entry;
entry.method_index = method_index;
@@ -384,14 +246,17 @@
}
}
- private:
- void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value) {
- // Ensure we only use non-compressed location kind at this stage.
- DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
- << DexRegisterLocation::PrettyDescriptor(kind);
- dex_register_locations_.Add(DexRegisterLocation(kind, value));
+ void AddDexRegisterEntry(uint16_t dex_register, DexRegisterLocation::Kind kind, int32_t value) {
+ if (kind != DexRegisterLocation::Kind::kNone) {
+ // Ensure we only use non-compressed location kind at this stage.
+ DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
+ << DexRegisterLocation::PrettyDescriptor(kind);
+ dex_register_locations_.Add(DexRegisterLocation(kind, value));
+ stack_maps_.Get(stack_maps_.Size() - 1).live_dex_registers_mask->SetBit(dex_register);
+ }
}
+ private:
ArenaAllocator* allocator_;
GrowableArray<StackMapEntry> stack_maps_;
GrowableArray<DexRegisterLocation> dex_register_locations_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 4606bd6..87ac2e7 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -37,13 +37,10 @@
StackMapStream stream(&arena);
ArenaBitVector sp_mask(&arena, 0, false);
- ArenaBitVector live_registers_mask(&arena, 0, true);
- live_registers_mask.SetBit(0);
- live_registers_mask.SetBit(1);
size_t number_of_dex_registers = 2;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0, &live_registers_mask);
- stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, 0);
- stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInStack, 0);
+ stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -88,24 +85,18 @@
sp_mask1.SetBit(2);
sp_mask1.SetBit(4);
size_t number_of_dex_registers = 2;
- ArenaBitVector live_registers_mask1(&arena, 0, true);
- live_registers_mask1.SetBit(0);
- live_registers_mask1.SetBit(1);
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2, &live_registers_mask1);
- stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, 0);
- stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
+ stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInStack, 0);
+ stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
stream.AddInlineInfoEntry(42);
stream.AddInlineInfoEntry(82);
ArenaBitVector sp_mask2(&arena, 0, true);
sp_mask2.SetBit(3);
sp_mask1.SetBit(8);
- ArenaBitVector live_registers_mask2(&arena, 0, true);
- live_registers_mask2.SetBit(0);
- live_registers_mask2.SetBit(1);
- stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0, &live_registers_mask2);
- stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, 18);
- stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, 3);
+ stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInRegister, 18);
+ stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kInFpuRegister, 3);
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -187,11 +178,10 @@
StackMapStream stream(&arena);
ArenaBitVector sp_mask(&arena, 0, false);
- ArenaBitVector live_registers_mask(&arena, 0, true);
- live_registers_mask.SetBit(1);
uint32_t number_of_dex_registers = 2;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0, &live_registers_mask);
- stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kNone, 0);
+ stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);