author     Nicolas Geoffray <ngeoffray@google.com>  2024-01-11 12:59:18 +0000
committer  Treehugger Robot <android-test-infra-autosubmit@system.gserviceaccount.com>  2024-01-12 08:25:31 +0000
commit     fdb76aa305a7be84231760a4b11154b90db0377d (patch)
tree       508087b0caaf74bc1fab01d853cff2eaf4aa1249 /compiler
parent     2cc7845451ca10f291461641310564458f69481f (diff)
Revert "Go back to incrementing hotness in backedges of baseline code."
This reverts commit 9bbb9f368274de998317cee1ebd300266b957e6d.

Reason for revert: Fixing merge conflicts with 2906377

Change-Id: I7ed4ca7c125befb73a828fb6d12bf3125a44f86e
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     29
-rw-r--r--  compiler/optimizing/code_generator_arm64.h        2
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc   24
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h     2
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc    27
-rw-r--r--  compiler/optimizing/code_generator_riscv64.h      2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc        25
-rw-r--r--  compiler/optimizing/code_generator_x86.h          2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc     27
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h       2
10 files changed, 62 insertions, 80 deletions
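
The patch applies the same interface change to all five backends below: MaybeIncrementHotness() loses its HSuspendCheck* parameter, and the baseline hotness check is emitted only on frame entry, because the CompileOptimized slow path no longer saves or restores SIMD registers that could be live on a loop back edge. The standalone C++ analogue that follows is only a hedged sketch of that policy, not ART code and not part of the patch; the ProfilingInfo struct, the threshold value, and the main() driver are illustrative stand-ins, while the MaybeIncrementHotness name and the baseline hotness counter come from the diff.

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for ART's ProfilingInfo: only the baseline hotness
// counter matters for this sketch.
struct ProfilingInfo {
  uint16_t baseline_hotness_count;
};

// Illustrative threshold; the real value comes from
// ProfilingInfo::GetOptimizeThreshold().
constexpr uint16_t kOptimizeThreshold = 3;

// Models the policy this revert restores: the counter is touched only at
// frame entry, never on loop back edges, so the recompilation path never
// has to preserve vector registers that might be live inside a loop.
// Returns true when the method should be handed to the optimizing compiler.
bool MaybeIncrementHotness(ProfilingInfo* info, bool is_frame_entry) {
  if (!is_frame_entry) {
    return false;  // back edges no longer update the baseline counter
  }
  if (info->baseline_hotness_count == 0) {
    // Mirrors the slow path: reset the counter and request compilation.
    info->baseline_hotness_count = kOptimizeThreshold;
    return true;
  }
  --info->baseline_hotness_count;
  return false;
}

int main() {
  ProfilingInfo info{kOptimizeThreshold};
  for (int call = 1; call <= 6; ++call) {
    bool compile = MaybeIncrementHotness(&info, /* is_frame_entry= */ true);
    std::printf("call %d -> counter %u, compile_optimized=%d\n",
                call,
                static_cast<unsigned>(info.baseline_hotness_count),
                compile);
  }
  return 0;
}

In the actual change, each backend inlines the counter check in its code generator, and the reset plus the call to the kQuickCompileOptimized entrypoint live in the per-architecture CompileOptimized slow path shown in the hunks below.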
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f12df196e5..b90974ab80 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -847,8 +847,8 @@ class MethodEntryExitHooksSlowPathARM64 : public SlowPathCodeARM64 {
class CompileOptimizedSlowPathARM64 : public SlowPathCodeARM64 {
public:
- CompileOptimizedSlowPathARM64(HSuspendCheck* check, Register profiling_info)
- : SlowPathCodeARM64(check),
+ explicit CompileOptimizedSlowPathARM64(Register profiling_info)
+ : SlowPathCodeARM64(/* instruction= */ nullptr),
profiling_info_(profiling_info) {}
void EmitNativeCode(CodeGenerator* codegen) override {
@@ -861,18 +861,10 @@ class CompileOptimizedSlowPathARM64 : public SlowPathCodeARM64 {
__ Mov(counter, ProfilingInfo::GetOptimizeThreshold());
__ Strh(counter,
MemOperand(profiling_info_, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
- if (instruction_ != nullptr) {
- // Only saves live vector regs for SIMD.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
__ Ldr(lr, MemOperand(tr, entrypoint_offset));
// Note: we don't record the call here (and therefore don't generate a stack
// map), as the entrypoint should never be suspended.
__ Blr(lr);
- if (instruction_ != nullptr) {
- // Only restores live vector regs for SIMD.
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
- }
__ B(GetExitLabel());
}
@@ -1288,7 +1280,7 @@ void InstructionCodeGeneratorARM64::VisitMethodEntryHook(HMethodEntryHook* instr
GenerateMethodEntryExitHook(instruction);
}
-void CodeGeneratorARM64::MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry) {
+void CodeGeneratorARM64::MaybeIncrementHotness(bool is_frame_entry) {
MacroAssembler* masm = GetVIXLAssembler();
if (GetCompilerOptions().CountHotnessInCompiledCode()) {
UseScratchRegisterScope temps(masm);
@@ -1306,15 +1298,20 @@ void CodeGeneratorARM64::MaybeIncrementHotness(HSuspendCheck* suspend_check, boo
__ Bind(&done);
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ is_frame_entry &&
+ !Runtime::Current()->IsAotCompiler()) {
+ // Note the slow path doesn't save SIMD registers, so if we were to
+ // call it on loop back edge, we would need to fix this.
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
uint64_t address = reinterpret_cast64<uint64_t>(info);
+ vixl::aarch64::Label done;
UseScratchRegisterScope temps(masm);
Register counter = temps.AcquireW();
- SlowPathCodeARM64* slow_path = new (GetScopedAllocator()) CompileOptimizedSlowPathARM64(
- suspend_check, /* profiling_info= */ lr);
+ SlowPathCodeARM64* slow_path =
+ new (GetScopedAllocator()) CompileOptimizedSlowPathARM64(/* profiling_info= */ lr);
AddSlowPath(slow_path);
__ Ldr(lr, jit_patches_.DeduplicateUint64Literal(address));
__ Ldrh(counter, MemOperand(lr, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
@@ -1438,7 +1435,7 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
__ Str(wzr, MemOperand(sp, GetStackOffsetOfShouldDeoptimizeFlag()));
}
}
- MaybeIncrementHotness(/* suspend_check= */ nullptr, /* is_frame_entry= */ true);
+ MaybeIncrementHotness(/* is_frame_entry= */ true);
MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
@@ -3699,7 +3696,7 @@ void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* s
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
- codegen_->MaybeIncrementHotness(info->GetSuspendCheck(), /* is_frame_entry= */ false);
+ codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false);
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return; // `GenerateSuspendCheck()` emitted the jump.
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 9c7c42f4c7..7ff08f55cb 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -1017,7 +1017,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
}
void MaybeGenerateInlineCacheCheck(HInstruction* instruction, vixl::aarch64::Register klass);
- void MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry);
+ void MaybeIncrementHotness(bool is_frame_entry);
bool CanUseImplicitSuspendCheck() const;
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 0f9dcb4a5a..3e01e2fd17 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -971,9 +971,8 @@ class MethodEntryExitHooksSlowPathARMVIXL : public SlowPathCodeARMVIXL {
class CompileOptimizedSlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
- CompileOptimizedSlowPathARMVIXL(HSuspendCheck* suspend_check,
- vixl32::Register profiling_info)
- : SlowPathCodeARMVIXL(suspend_check),
+ explicit CompileOptimizedSlowPathARMVIXL(vixl32::Register profiling_info)
+ : SlowPathCodeARMVIXL(/* instruction= */ nullptr),
profiling_info_(profiling_info) {}
void EmitNativeCode(CodeGenerator* codegen) override {
@@ -2276,8 +2275,7 @@ void InstructionCodeGeneratorARMVIXL::VisitMethodEntryHook(HMethodEntryHook* ins
GenerateMethodEntryExitHook(instruction);
}
-void CodeGeneratorARMVIXL::MaybeIncrementHotness(HSuspendCheck* suspend_check,
- bool is_frame_entry) {
+void CodeGeneratorARMVIXL::MaybeIncrementHotness(bool is_frame_entry) {
if (GetCompilerOptions().CountHotnessInCompiledCode()) {
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
@@ -2301,15 +2299,19 @@ void CodeGeneratorARMVIXL::MaybeIncrementHotness(HSuspendCheck* suspend_check,
}
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ is_frame_entry &&
+ !Runtime::Current()->IsAotCompiler()) {
+ // Note the slow path doesn't save SIMD registers, so if we were to
+ // call it on loop back edge, we would need to fix this.
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
uint32_t address = reinterpret_cast32<uint32_t>(info);
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register tmp = temps.Acquire();
- SlowPathCodeARMVIXL* slow_path = new (GetScopedAllocator()) CompileOptimizedSlowPathARMVIXL(
- suspend_check, /* profiling_info= */ lr);
+ SlowPathCodeARMVIXL* slow_path =
+ new (GetScopedAllocator()) CompileOptimizedSlowPathARMVIXL(/* profiling_info= */ lr);
AddSlowPath(slow_path);
__ Mov(lr, address);
__ Ldrh(tmp, MemOperand(lr, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
@@ -2384,7 +2386,7 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
if (HasEmptyFrame()) {
// Ensure that the CFI opcode list is not empty.
GetAssembler()->cfi().Nop();
- MaybeIncrementHotness(/* suspend_check= */ nullptr, /* is_frame_entry= */ true);
+ MaybeIncrementHotness(/* is_frame_entry= */ true);
return;
}
@@ -2484,7 +2486,7 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
GetAssembler()->StoreToOffset(kStoreWord, temp, sp, GetStackOffsetOfShouldDeoptimizeFlag());
}
- MaybeIncrementHotness(/* suspend_check= */ nullptr, /* is_frame_entry= */ true);
+ MaybeIncrementHotness(/* is_frame_entry= */ true);
MaybeGenerateMarkingRegisterCheck(/* code= */ 1);
}
@@ -2829,7 +2831,7 @@ void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock*
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
- codegen_->MaybeIncrementHotness(info->GetSuspendCheck(), /* is_frame_entry= */ false);
+ codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false);
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 11b60b3fd2..00e0bfa399 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -885,7 +885,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
}
void MaybeGenerateInlineCacheCheck(HInstruction* instruction, vixl32::Register klass);
- void MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry);
+ void MaybeIncrementHotness(bool is_frame_entry);
private:
// Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 108c948345..6b1b9e30eb 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -266,8 +266,8 @@ void LocationsBuilderRISCV64::HandleInvoke(HInvoke* instruction) {
class CompileOptimizedSlowPathRISCV64 : public SlowPathCodeRISCV64 {
public:
- CompileOptimizedSlowPathRISCV64(HSuspendCheck* suspend_check, XRegister base, int32_t imm12)
- : SlowPathCodeRISCV64(suspend_check),
+ CompileOptimizedSlowPathRISCV64(XRegister base, int32_t imm12)
+ : SlowPathCodeRISCV64(/*instruction=*/ nullptr),
base_(base),
imm12_(imm12) {}
@@ -280,18 +280,10 @@ class CompileOptimizedSlowPathRISCV64 : public SlowPathCodeRISCV64 {
XRegister counter = srs.AllocateXRegister();
__ LoadConst32(counter, ProfilingInfo::GetOptimizeThreshold());
__ Sh(counter, base_, imm12_);
- if (instruction_ != nullptr) {
- // Only saves live vector regs for SIMD.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
__ Loadd(RA, TR, entrypoint_offset);
// Note: we don't record the call here (and therefore don't generate a stack
// map), as the entrypoint should never be suspended.
__ Jalr(RA);
- if (instruction_ != nullptr) {
- // Only restores live vector regs for SIMD.
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
- }
__ J(GetExitLabel());
}
@@ -2017,7 +2009,7 @@ void InstructionCodeGeneratorRISCV64::HandleGoto(HInstruction* instruction,
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
- codegen_->MaybeIncrementHotness(info->GetSuspendCheck(), /*is_frame_entry=*/ false);
+ codegen_->MaybeIncrementHotness(/*is_frame_entry=*/ false);
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return; // `GenerateSuspendCheck()` emitted the jump.
}
@@ -5702,8 +5694,7 @@ CodeGeneratorRISCV64::CodeGeneratorRISCV64(HGraph* graph,
AddAllocatedRegister(Location::RegisterLocation(RA));
}
-void CodeGeneratorRISCV64::MaybeIncrementHotness(HSuspendCheck* suspend_check,
- bool is_frame_entry) {
+void CodeGeneratorRISCV64::MaybeIncrementHotness(bool is_frame_entry) {
if (GetCompilerOptions().CountHotnessInCompiledCode()) {
ScratchRegisterScope srs(GetAssembler());
XRegister method = is_frame_entry ? kArtMethodRegister : srs.AllocateXRegister();
@@ -5723,7 +5714,11 @@ void CodeGeneratorRISCV64::MaybeIncrementHotness(HSuspendCheck* suspend_check,
__ Bind(&done);
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ is_frame_entry &&
+ !Runtime::Current()->IsAotCompiler()) {
+ // Note the slow path doesn't save SIMD registers, so if we were to
+ // call it on loop back edge, we would need to fix this.
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
@@ -5735,7 +5730,7 @@ void CodeGeneratorRISCV64::MaybeIncrementHotness(HSuspendCheck* suspend_check,
XRegister tmp = RA;
__ LoadConst64(tmp, base_address);
SlowPathCodeRISCV64* slow_path =
- new (GetScopedAllocator()) CompileOptimizedSlowPathRISCV64(suspend_check, tmp, imm12);
+ new (GetScopedAllocator()) CompileOptimizedSlowPathRISCV64(tmp, imm12);
AddSlowPath(slow_path);
__ Lhu(counter, tmp, imm12);
__ Beqz(counter, slow_path->GetEntryLabel()); // Can clobber `TMP` if taken.
@@ -5880,7 +5875,7 @@ void CodeGeneratorRISCV64::GenerateFrameEntry() {
__ Storew(Zero, SP, GetStackOffsetOfShouldDeoptimizeFlag());
}
}
- MaybeIncrementHotness(/* suspend_check= */ nullptr, /*is_frame_entry=*/ true);
+ MaybeIncrementHotness(/*is_frame_entry=*/ true);
}
void CodeGeneratorRISCV64::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_riscv64.h b/compiler/optimizing/code_generator_riscv64.h
index 29ccf9dc0b..1e0eb51258 100644
--- a/compiler/optimizing/code_generator_riscv64.h
+++ b/compiler/optimizing/code_generator_riscv64.h
@@ -642,7 +642,7 @@ class CodeGeneratorRISCV64 : public CodeGenerator {
void GenerateMemoryBarrier(MemBarrierKind kind);
- void MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry);
+ void MaybeIncrementHotness(bool is_frame_entry);
bool CanUseImplicitSuspendCheck() const;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ed40673ad1..649b422f6d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -990,24 +990,16 @@ class MethodEntryExitHooksSlowPathX86 : public SlowPathCode {
class CompileOptimizedSlowPathX86 : public SlowPathCode {
public:
- CompileOptimizedSlowPathX86(HSuspendCheck* suspend_check, uint32_t counter_address)
- : SlowPathCode(suspend_check),
+ explicit CompileOptimizedSlowPathX86(uint32_t counter_address)
+ : SlowPathCode(/* instruction= */ nullptr),
counter_address_(counter_address) {}
void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
__ movw(Address::Absolute(counter_address_), Immediate(ProfilingInfo::GetOptimizeThreshold()));
- if (instruction_ != nullptr) {
- // Only saves full width XMM for SIMD.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
x86_codegen->GenerateInvokeRuntime(
GetThreadOffset<kX86PointerSize>(kQuickCompileOptimized).Int32Value());
- if (instruction_ != nullptr) {
- // Only restores full width XMM for SIMD.
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
- }
__ jmp(GetExitLabel());
}
@@ -1334,7 +1326,7 @@ void InstructionCodeGeneratorX86::VisitMethodEntryHook(HMethodEntryHook* instruc
GenerateMethodEntryExitHook(instruction);
}
-void CodeGeneratorX86::MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry) {
+void CodeGeneratorX86::MaybeIncrementHotness(bool is_frame_entry) {
if (GetCompilerOptions().CountHotnessInCompiledCode()) {
Register reg = EAX;
if (is_frame_entry) {
@@ -1356,7 +1348,9 @@ void CodeGeneratorX86::MaybeIncrementHotness(HSuspendCheck* suspend_check, bool
}
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ is_frame_entry &&
+ !Runtime::Current()->IsAotCompiler()) {
// Note the slow path doesn't save SIMD registers, so if we were to
// call it on loop back edge, we would need to fix this.
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
@@ -1364,8 +1358,7 @@ void CodeGeneratorX86::MaybeIncrementHotness(HSuspendCheck* suspend_check, bool
uint32_t address = reinterpret_cast32<uint32_t>(info) +
ProfilingInfo::BaselineHotnessCountOffset().Int32Value();
DCHECK(!HasEmptyFrame());
- SlowPathCode* slow_path =
- new (GetScopedAllocator()) CompileOptimizedSlowPathX86(suspend_check, address);
+ SlowPathCode* slow_path = new (GetScopedAllocator()) CompileOptimizedSlowPathX86(address);
AddSlowPath(slow_path);
// With multiple threads, this can overflow. This is OK, we will eventually get to see
// it reaching 0. Also, at this point we have no register available to look
@@ -1452,7 +1445,7 @@ void CodeGeneratorX86::GenerateFrameEntry() {
}
}
- MaybeIncrementHotness(/* suspend_check= */ nullptr, /* is_frame_entry= */ true);
+ MaybeIncrementHotness(/* is_frame_entry= */ true);
}
void CodeGeneratorX86::GenerateFrameExit() {
@@ -1903,7 +1896,7 @@ void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* suc
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
- codegen_->MaybeIncrementHotness(info->GetSuspendCheck(), /* is_frame_entry= */ false);
+ codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false);
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index f455812bc7..5b59bfc7e3 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -728,7 +728,7 @@ class CodeGeneratorX86 : public CodeGenerator {
void GenerateExplicitNullCheck(HNullCheck* instruction) override;
void MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass);
- void MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry);
+ void MaybeIncrementHotness(bool is_frame_entry);
// When we don't know the proper offset for the value, we use kPlaceholder32BitOffset.
// The correct value will be inserted when processing Assembler fixups.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 05b1de9f0a..b29ba67cb1 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1043,8 +1043,8 @@ class MethodEntryExitHooksSlowPathX86_64 : public SlowPathCode {
class CompileOptimizedSlowPathX86_64 : public SlowPathCode {
public:
- CompileOptimizedSlowPathX86_64(HSuspendCheck* suspend_check, uint64_t counter_address)
- : SlowPathCode(suspend_check),
+ explicit CompileOptimizedSlowPathX86_64(uint64_t counter_address)
+ : SlowPathCode(/* instruction= */ nullptr),
counter_address_(counter_address) {}
void EmitNativeCode(CodeGenerator* codegen) override {
@@ -1052,16 +1052,8 @@ class CompileOptimizedSlowPathX86_64 : public SlowPathCode {
__ Bind(GetEntryLabel());
__ movq(CpuRegister(TMP), Immediate(counter_address_));
__ movw(Address(CpuRegister(TMP), 0), Immediate(ProfilingInfo::GetOptimizeThreshold()));
- if (instruction_ != nullptr) {
- // Only saves full width XMM for SIMD.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
x86_64_codegen->GenerateInvokeRuntime(
GetThreadOffset<kX86_64PointerSize>(kQuickCompileOptimized).Int32Value());
- if (instruction_ != nullptr) {
- // Only restores full width XMM for SIMD.
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
- }
__ jmp(GetExitLabel());
}
@@ -1771,7 +1763,7 @@ void InstructionCodeGeneratorX86_64::VisitMethodExitHook(HMethodExitHook* instru
GenerateMethodEntryExitHook(instruction);
}
-void CodeGeneratorX86_64::MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry) {
+void CodeGeneratorX86_64::MaybeIncrementHotness(bool is_frame_entry) {
if (GetCompilerOptions().CountHotnessInCompiledCode()) {
NearLabel overflow;
Register method = kMethodRegisterArgument;
@@ -1788,14 +1780,17 @@ void CodeGeneratorX86_64::MaybeIncrementHotness(HSuspendCheck* suspend_check, bo
__ Bind(&overflow);
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ is_frame_entry &&
+ !Runtime::Current()->IsAotCompiler()) {
+ // Note the slow path doesn't save SIMD registers, so if we were to
+ // call it on loop back edge, we would need to fix this.
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
CHECK(!HasEmptyFrame());
uint64_t address = reinterpret_cast64<uint64_t>(info) +
ProfilingInfo::BaselineHotnessCountOffset().Int32Value();
- SlowPathCode* slow_path =
- new (GetScopedAllocator()) CompileOptimizedSlowPathX86_64(suspend_check, address);
+ SlowPathCode* slow_path = new (GetScopedAllocator()) CompileOptimizedSlowPathX86_64(address);
AddSlowPath(slow_path);
// Note: if the address was in the 32bit range, we could use
// Address::Absolute and avoid this movq.
@@ -1900,7 +1895,7 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
}
}
- MaybeIncrementHotness(/* suspend_check= */ nullptr, /* is_frame_entry= */ true);
+ MaybeIncrementHotness(/* is_frame_entry= */ true);
}
void CodeGeneratorX86_64::GenerateFrameExit() {
@@ -2087,7 +2082,7 @@ void InstructionCodeGeneratorX86_64::HandleGoto(HInstruction* got, HBasicBlock*
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
- codegen_->MaybeIncrementHotness(info->GetSuspendCheck(), /* is_frame_entry= */ false);
+ codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false);
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 1698c2634a..e4d3eac6bc 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -695,7 +695,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void GenerateExplicitNullCheck(HNullCheck* instruction) override;
void MaybeGenerateInlineCacheCheck(HInstruction* instruction, CpuRegister cls);
- void MaybeIncrementHotness(HSuspendCheck* suspend_check, bool is_frame_entry);
+ void MaybeIncrementHotness(bool is_frame_entry);
static void BlockNonVolatileXmmRegisters(LocationSummary* locations);