Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc      17
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc   18
-rw-r--r--  compiler/optimizing/code_generator_mips.cc        4
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc      4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc        11
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc     11
-rw-r--r--  compiler/optimizing/inliner.cc                     9
7 files changed, 70 insertions, 4 deletions
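
Note on the code-generator hunks below: when the CountHotnessInCompiledCode() compiler option is set, each backend emits code that bumps the method's 16-bit hotness counter in the ArtMethod on frame entry and on every loop back edge that carries a suspend check (the MIPS and MIPS64 backends only log a warning for now). On back edges the ArtMethod* is reloaded from the bottom of the frame at [SP, #0], or from one word higher after a register has been pushed to free it, as in the x86 and Thumb-2 paths. A minimal C++ sketch of the semantic effect follows; the struct and field name are stand-ins, only the 16-bit counter width is taken from the Ldrh/Strh and addw instructions in the diff.

    #include <cstdint>

    // Stand-in for art::ArtMethod, not the real class; only the counter
    // width (16 bits, per the Ldrh/Strh and addw in the hunks) is from
    // the diff, the field name is illustrative.
    struct MethodLike {
      uint16_t hotness_count_;
    };

    // Semantic effect of the instructions emitted at a frame entry or at
    // a loop back edge: load the counter, add one, store it back. The
    // unsigned 16-bit value simply wraps on overflow.
    inline void BumpHotness(MethodLike* method) {
      method->hotness_count_ = static_cast<uint16_t>(method->hotness_count_ + 1);
    }
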
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 13bbffa1e3..1380596ab2 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1488,6 +1488,14 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
MacroAssembler* masm = GetVIXLAssembler();
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireX();
+ __ Ldrh(temp, MemOperand(kArtMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp, temp, 1);
+ __ Strh(temp, MemOperand(kArtMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ }
+
bool do_overflow_check =
FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm64) || !IsLeafMethod();
if (do_overflow_check) {
@@ -3501,6 +3509,15 @@ void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* s
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp1 = temps.AcquireX();
+ Register temp2 = temps.AcquireX();
+ __ Ldr(temp1, MemOperand(sp, 0));
+ __ Ldrh(temp2, MemOperand(temp1, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp2, temp2, 1);
+ __ Strh(temp2, MemOperand(temp1, ArtMethod::HotnessCountOffset().Int32Value()));
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 577fe00dcd..18e7d1cc46 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2485,6 +2485,14 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Ldrh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp, temp, 1);
+ __ Strh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ }
+
if (HasEmptyFrame()) {
return;
}
@@ -2786,6 +2794,16 @@ void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock*
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Push(vixl32::Register(kMethodRegister));
+ GetAssembler()->LoadFromOffset(kLoadWord, kMethodRegister, sp, kArmWordSize);
+ __ Ldrh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp, temp, 1);
+ __ Strh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Pop(vixl32::Register(kMethodRegister));
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 5c8e46ed19..51fb4dacfd 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1276,6 +1276,10 @@ static dwarf::Reg DWARFReg(Register reg) {
void CodeGeneratorMIPS::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ LOG(WARNING) << "Unimplemented hotness update in mips backend";
+ }
+
bool do_overflow_check =
FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips) || !IsLeafMethod();
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index bcfe051c90..480b9178d2 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1079,6 +1079,10 @@ static dwarf::Reg DWARFReg(FpuRegister reg) {
void CodeGeneratorMIPS64::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ LOG(WARNING) << "Unimplemented hotness update in mips64 backend";
+ }
+
bool do_overflow_check =
FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips64) || !IsLeafMethod();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index cbe9e0a35c..c52c7ff7f1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1061,6 +1061,11 @@ void CodeGeneratorX86::GenerateFrameEntry() {
IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ addw(Address(kMethodRegisterArgument, ArtMethod::HotnessCountOffset().Int32Value()),
+ Immediate(1));
+ }
+
if (!skip_overflow_check) {
size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86);
__ testl(EAX, Address(ESP, -static_cast<int32_t>(reserved_bytes)));
@@ -1357,6 +1362,12 @@ void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* suc
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ pushl(EAX);
+ __ movl(EAX, Address(ESP, kX86WordSize));
+ __ addw(Address(EAX, ArtMethod::HotnessCountOffset().Int32Value()), Immediate(1));
+ __ popl(EAX);
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 510eec4f30..ee5918de71 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1268,6 +1268,12 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
&& !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64);
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ addw(Address(CpuRegister(kMethodRegisterArgument),
+ ArtMethod::HotnessCountOffset().Int32Value()),
+ Immediate(1));
+ }
+
if (!skip_overflow_check) {
size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86_64);
__ testq(CpuRegister(RAX), Address(CpuRegister(RSP), -static_cast<int32_t>(reserved_bytes)));
@@ -1459,6 +1465,11 @@ void InstructionCodeGeneratorX86_64::HandleGoto(HInstruction* got, HBasicBlock*
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ movq(CpuRegister(TMP), Address(CpuRegister(RSP), 0));
+ __ addw(Address(CpuRegister(TMP), ArtMethod::HotnessCountOffset().Int32Value()),
+ Immediate(1));
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
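
Note on the inliner.cc hunks below: the code-item accessors are no longer constructed straight from the ArtMethod but from method->DexInstructionData() (or DexInstructionDebugInfo() for the debug-info accessor), and AlwaysThrows() gains the REQUIRES_SHARED(Locks::mutator_lock_) annotation those calls need. A rough sketch of the reshaped call-site pattern, using illustrative stand-in types rather than the real ART classes:

    #include <cstddef>
    #include <cstdint>

    // Illustrative stand-ins only; the real types are art::ArtMethod,
    // art::CodeItemDataAccessor, and friends.
    struct CodeItemData {
      const uint16_t* insns = nullptr;
      size_t tries_size = 0;
    };

    struct MethodLike {
      CodeItemData data;
      const CodeItemData& DexInstructionData() const { return data; }
    };

    class DataAccessor {
     public:
      explicit DataAccessor(const CodeItemData& d) : data_(d) {}
      bool HasCodeItem() const { return data_.insns != nullptr; }
      size_t TriesSize() const { return data_.tries_size; }
     private:
      const CodeItemData& data_;
    };

    // Mirrors the new call-site shape: the accessor is built from the
    // method's instruction data, not from the method object itself.
    bool LooksInlinable(const MethodLike& method) {
      DataAccessor accessor(method.DexInstructionData());
      return accessor.HasCodeItem() && accessor.TriesSize() == 0;
    }
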
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 452be6feae..035e5ce3e1 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -392,8 +392,9 @@ ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
return single_impl;
}
-static bool AlwaysThrows(ArtMethod* method) {
- CodeItemDataAccessor accessor(method);
+static bool AlwaysThrows(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CodeItemDataAccessor accessor(method->DexInstructionData());
// Skip native methods, methods with try blocks, and methods that are too large.
if (!accessor.HasCodeItem() ||
accessor.TriesSize() != 0 ||
@@ -1418,7 +1419,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
bool same_dex_file = IsSameDexFile(*outer_compilation_unit_.GetDexFile(), *method->GetDexFile());
- CodeItemDataAccessor accessor(method);
+ CodeItemDataAccessor accessor(method->DexInstructionData());
if (!accessor.HasCodeItem()) {
LOG_FAIL_NO_STAT()
@@ -1697,7 +1698,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
const DexFile& callee_dex_file = *resolved_method->GetDexFile();
uint32_t method_index = resolved_method->GetDexMethodIndex();
- CodeItemDebugInfoAccessor code_item_accessor(resolved_method);
+ CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
Handle<mirror::DexCache> dex_cache = NewHandleIfDifferent(resolved_method->GetDexCache(),
caller_compilation_unit_.GetDexCache(),