Diffstat (limited to 'compiler/optimizing')
 compiler/optimizing/builder.cc               | 61
 compiler/optimizing/code_generator_arm.cc    | 89
 compiler/optimizing/code_generator_arm.h     |  2
 compiler/optimizing/code_generator_x86.cc    | 70
 compiler/optimizing/code_generator_x86.h     |  2
 compiler/optimizing/code_generator_x86_64.cc | 72
 compiler/optimizing/code_generator_x86_64.h  |  2
 compiler/optimizing/nodes.h                  | 21
 compiler/optimizing/optimizing_compiler.cc   | 35
9 files changed, 276 insertions, 78 deletions
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index a03588f4fd..33b00d2ac9 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -331,18 +331,61 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
                                 bool is_range,
                                 uint32_t* args,
                                 uint32_t register_index) {
+  Instruction::Code opcode = instruction.Opcode();
+  InvokeType invoke_type;
+  switch (opcode) {
+    case Instruction::INVOKE_STATIC:
+    case Instruction::INVOKE_STATIC_RANGE:
+      invoke_type = kStatic;
+      break;
+    case Instruction::INVOKE_DIRECT:
+    case Instruction::INVOKE_DIRECT_RANGE:
+      invoke_type = kDirect;
+      break;
+    case Instruction::INVOKE_VIRTUAL:
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+      invoke_type = kVirtual;
+      break;
+    case Instruction::INVOKE_INTERFACE:
+    case Instruction::INVOKE_INTERFACE_RANGE:
+      invoke_type = kInterface;
+      break;
+    case Instruction::INVOKE_SUPER_RANGE:
+    case Instruction::INVOKE_SUPER:
+      invoke_type = kSuper;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected invoke op: " << opcode;
+      return false;
+  }
+
   const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
   const DexFile::ProtoId& proto_id = dex_file_->GetProtoId(method_id.proto_idx_);
   const char* descriptor = dex_file_->StringDataByIdx(proto_id.shorty_idx_);
   Primitive::Type return_type = Primitive::GetType(descriptor[0]);
-  bool is_instance_call =
-      instruction.Opcode() != Instruction::INVOKE_STATIC
-      && instruction.Opcode() != Instruction::INVOKE_STATIC_RANGE;
+  bool is_instance_call = invoke_type != kStatic;
   const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
 
-  // Treat invoke-direct like static calls for now.
-  HInvoke* invoke = new (arena_) HInvokeStatic(
-      arena_, number_of_arguments, return_type, dex_offset, method_idx);
+  HInvoke* invoke = nullptr;
+  if (invoke_type == kVirtual) {
+    MethodReference target_method(dex_file_, method_idx);
+    uintptr_t direct_code;
+    uintptr_t direct_method;
+    int vtable_index;
+    // TODO: Add devirtualization support.
+    compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
+                                        &invoke_type, &target_method, &vtable_index,
+                                        &direct_code, &direct_method);
+    if (vtable_index == -1) {
+      return false;
+    }
+    invoke = new (arena_) HInvokeVirtual(
+        arena_, number_of_arguments, return_type, dex_offset, vtable_index);
+  } else {
+    // Treat invoke-direct like static calls for now.
+    invoke = new (arena_) HInvokeStatic(
+        arena_, number_of_arguments, return_type, dex_offset, method_idx);
+  }
 
   size_t start_index = 0;
   Temporaries temps(graph_, is_instance_call ? 1 : 0);
@@ -620,7 +663,8 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
     }
 
     case Instruction::INVOKE_STATIC:
-    case Instruction::INVOKE_DIRECT: {
+    case Instruction::INVOKE_DIRECT:
+    case Instruction::INVOKE_VIRTUAL: {
       uint32_t method_idx = instruction.VRegB_35c();
       uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
       uint32_t args[5];
@@ -632,7 +676,8 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
     }
 
     case Instruction::INVOKE_STATIC_RANGE:
-    case Instruction::INVOKE_DIRECT_RANGE: {
+    case Instruction::INVOKE_DIRECT_RANGE:
+    case Instruction::INVOKE_VIRTUAL_RANGE: {
       uint32_t method_idx = instruction.VRegB_3rc();
       uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
       uint32_t register_index = instruction.VRegC();
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 99030922a7..ad622798a6 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -20,6 +20,7 @@
 #include "gc/accounting/card_table.h"
 #include "mirror/array.h"
 #include "mirror/art_method.h"
+#include "mirror/class.h"
 #include "thread.h"
 #include "utils/assembler.h"
 #include "utils/arm/assembler_arm.h"
@@ -818,6 +819,47 @@ void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
 }
 
 void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+  HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
+  __ ldr(reg, Address(SP, kCurrentMethodStackOffset));
+}
+
+void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+  Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
+  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
+      invoke->GetIndexInDexCache() * kArmWordSize;
+
+  // TODO: Implement all kinds of calls:
+  // 1) boot -> boot
+  // 2) app -> boot
+  // 3) app -> app
+  //
+  // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+  // temp = method;
+  LoadCurrentMethod(temp);
+  // temp = temp->dex_cache_resolved_methods_;
+  __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+  // temp = temp[index_in_cache]
+  __ ldr(temp, Address(temp, index_in_cache));
+  // LR = temp[offset_of_quick_compiled_code]
+  __ ldr(LR, Address(temp,
+                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+  // LR()
+  __ blx(LR);
+
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  DCHECK(!codegen_->IsLeafMethod());
+}
+
+void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
   locations->AddTemp(ArmCoreLocation(R0));
@@ -852,37 +894,30 @@ void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
   }
 }
 
-void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
-  __ ldr(reg, Address(SP, kCurrentMethodStackOffset));
-}
-
-void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
-  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
-  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
-      invoke->GetIndexInDexCache() * kArmWordSize;
-
-  // TODO: Implement all kinds of calls:
-  // 1) boot -> boot
-  // 2) app -> boot
-  // 3) app -> app
-  //
-  // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
-  // temp = method;
-  LoadCurrentMethod(temp);
-  // temp = temp->dex_cache_resolved_methods_;
-  __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
-  // temp = temp[index_in_cache]
-  __ ldr(temp, Address(temp, index_in_cache));
-  // LR = temp[offset_of_quick_compiled_code]
-  __ ldr(LR, Address(temp,
-                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
-  // LR()
+  uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
+      invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+  LocationSummary* locations = invoke->GetLocations();
+  Location receiver = locations->InAt(0);
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  // temp = object->GetClass();
+  if (receiver.IsStackSlot()) {
+    __ ldr(temp, Address(SP, receiver.GetStackIndex()));
+    __ ldr(temp, Address(temp, class_offset));
+  } else {
+    __ ldr(temp, Address(receiver.AsArm().AsCoreRegister(), class_offset));
+  }
+  // temp = temp->GetMethodAt(method_offset);
+  uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+  __ ldr(temp, Address(temp, method_offset));
+  // LR = temp->GetEntryPoint();
+  __ ldr(LR, Address(temp, entry_point));
+  // LR();
   __ blx(LR);
-
-  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
   DCHECK(!codegen_->IsLeafMethod());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void LocationsBuilderARM::VisitAdd(HAdd* add) {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 660294b147..2480960f32 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -93,6 +93,8 @@ class LocationsBuilderARM : public HGraphVisitor {
 
 #undef DECLARE_VISIT_INSTRUCTION
 
+  void HandleInvoke(HInvoke* invoke);
+
  private:
   CodeGeneratorARM* const codegen_;
   InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3dd9b37158..3383cb2117 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -20,6 +20,7 @@
 #include "gc/accounting/card_table.h"
 #include "mirror/array.h"
 #include "mirror/art_method.h"
+#include "mirror/class.h"
 #include "thread.h"
 #include "utils/assembler.h"
 #include "utils/stack_checks.h"
@@ -763,6 +764,40 @@ void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
 }
 
 void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+  HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+  Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
+  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
+      invoke->GetIndexInDexCache() * kX86WordSize;
+
+  // TODO: Implement all kinds of calls:
+  // 1) boot -> boot
+  // 2) app -> boot
+  // 3) app -> app
+  //
+  // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+  // temp = method;
+  LoadCurrentMethod(temp);
+  // temp = temp->dex_cache_resolved_methods_;
+  __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+  // temp = temp[index_in_cache]
+  __ movl(temp, Address(temp, index_in_cache));
+  // (temp + offset_of_quick_compiled_code)()
+  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+
+  DCHECK(!codegen_->IsLeafMethod());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  HandleInvoke(invoke);
+}
+
+void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
   locations->AddTemp(X86CpuLocation(EAX));
@@ -799,26 +834,23 @@ void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
   invoke->SetLocations(locations);
 }
 
-void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
-  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
-  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
-      invoke->GetIndexInDexCache() * kX86WordSize;
-
-  // TODO: Implement all kinds of calls:
-  // 1) boot -> boot
-  // 2) app -> boot
-  // 3) app -> app
-  //
-  // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
-  // temp = method;
-  LoadCurrentMethod(temp);
-  // temp = temp->dex_cache_resolved_methods_;
-  __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
-  // temp = temp[index_in_cache]
-  __ movl(temp, Address(temp, index_in_cache));
-  // (temp + offset_of_quick_compiled_code)()
+  uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
+      invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+  LocationSummary* locations = invoke->GetLocations();
+  Location receiver = locations->InAt(0);
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  // temp = object->GetClass();
+  if (receiver.IsStackSlot()) {
+    __ movl(temp, Address(ESP, receiver.GetStackIndex()));
+    __ movl(temp, Address(temp, class_offset));
+  } else {
+    __ movl(temp, Address(receiver.AsX86().AsCpuRegister(), class_offset));
+  }
+  // temp = temp->GetMethodAt(method_offset);
+  __ movl(temp, Address(temp, method_offset));
+  // call temp->GetEntryPoint();
   __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
 
   DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 7c502049d8..f1be0ad5b7 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -94,6 +94,8 @@ class LocationsBuilderX86 : public HGraphVisitor {
 
 #undef DECLARE_VISIT_INSTRUCTION
 
+  void HandleInvoke(HInvoke* invoke);
+
  private:
   CodeGeneratorX86* const codegen_;
   InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 2f352e0838..ca03af8e9f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -20,6 +20,7 @@
 #include "gc/accounting/card_table.h"
 #include "mirror/array.h"
 #include "mirror/art_method.h"
+#include "mirror/class.h"
 #include "mirror/object_reference.h"
 #include "thread.h"
 #include "utils/assembler.h"
@@ -709,12 +710,46 @@ Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type
 }
 
 void LocationsBuilderX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+  HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+  CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
+  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
+      invoke->GetIndexInDexCache() * heap_reference_size;
+
+  // TODO: Implement all kinds of calls:
+  // 1) boot -> boot
+  // 2) app -> boot
+  // 3) app -> app
+  //
+  // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+  // temp = method;
+  LoadCurrentMethod(temp);
+  // temp = temp->dex_cache_resolved_methods_;
+  __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+  // temp = temp[index_in_cache]
+  __ movl(temp, Address(temp, index_in_cache));
+  // (temp + offset_of_quick_compiled_code)()
+  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+
+  DCHECK(!codegen_->IsLeafMethod());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  HandleInvoke(invoke);
+}
+
+void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
   locations->AddTemp(X86_64CpuLocation(RDI));
 
   InvokeDexCallingConventionVisitor calling_convention_visitor;
-  for (size_t i = 0; i < invoke->InputCount(); ++i) {
+  for (size_t i = 0; i < invoke->InputCount(); i++) {
     HInstruction* input = invoke->InputAt(i);
     locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
   }
@@ -740,26 +775,23 @@ void LocationsBuilderX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
   }
 }
 
-void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
-  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
-  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
-      invoke->GetIndexInDexCache() * heap_reference_size;
-
-  // TODO: Implement all kinds of calls:
-  // 1) boot -> boot
-  // 2) app -> boot
-  // 3) app -> app
-  //
-  // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
-  // temp = method;
-  LoadCurrentMethod(temp);
-  // temp = temp->dex_cache_resolved_methods_;
-  __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
-  // temp = temp[index_in_cache]
-  __ movl(temp, Address(temp, index_in_cache));
-  // (temp + offset_of_quick_compiled_code)()
+  size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
+      invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+  LocationSummary* locations = invoke->GetLocations();
+  Location receiver = locations->InAt(0);
+  size_t class_offset = mirror::Object::ClassOffset().SizeValue();
+  // temp = object->GetClass();
+  if (receiver.IsStackSlot()) {
+    __ movq(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
+    __ movq(temp, Address(temp, class_offset));
+  } else {
+    __ movq(temp, Address(receiver.AsX86_64().AsCpuRegister(), class_offset));
+  }
+  // temp = temp->GetMethodAt(method_offset);
+  __ movl(temp, Address(temp, method_offset));
+  // call temp->GetEntryPoint();
   __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
 
   DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 44552ea465..78b60fe93c 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -91,6 +91,8 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
 
 #undef DECLARE_VISIT_INSTRUCTION
 
+  void HandleInvoke(HInvoke* invoke);
+
  private:
   CodeGeneratorX86_64* const codegen_;
   InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index ed6dd939de..d6dfeaede8 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -422,6 +422,7 @@ class HBasicBlock : public ArenaObject {
   M(If)                                                     \
   M(IntConstant)                                            \
   M(InvokeStatic)                                           \
+  M(InvokeVirtual)                                          \
   M(LoadLocal)                                              \
   M(Local)                                                  \
   M(LongConstant)                                           \
@@ -1272,6 +1273,26 @@ class HInvokeStatic : public HInvoke {
   DISALLOW_COPY_AND_ASSIGN(HInvokeStatic);
 };
 
+class HInvokeVirtual : public HInvoke {
+ public:
+  HInvokeVirtual(ArenaAllocator* arena,
+                 uint32_t number_of_arguments,
+                 Primitive::Type return_type,
+                 uint32_t dex_pc,
+                 uint32_t vtable_index)
+      : HInvoke(arena, number_of_arguments, return_type, dex_pc),
+        vtable_index_(vtable_index) {}
+
+  uint32_t GetVTableIndex() const { return vtable_index_; }
+
+  DECLARE_INSTRUCTION(InvokeVirtual);
+
+ private:
+  const uint32_t vtable_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual);
+};
+
 class HNewInstance : public HExpression<0> {
  public:
   HNewInstance(uint32_t dex_pc, uint16_t type_index)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 75f41557e2..a53919268f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -38,7 +38,7 @@ namespace art {
  */
 class CodeVectorAllocator FINAL : public CodeAllocator {
  public:
-  CodeVectorAllocator() { }
+  CodeVectorAllocator() {}
 
   virtual uint8_t* Allocate(size_t size) {
     size_ = size;
@@ -70,6 +70,7 @@ static const char* kStringFilter = "";
 class OptimizingCompiler FINAL : public Compiler {
  public:
   explicit OptimizingCompiler(CompilerDriver* driver);
+  ~OptimizingCompiler();
 
   bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
       OVERRIDE;
@@ -113,6 +114,13 @@ class OptimizingCompiler FINAL : public Compiler {
   void UnInit() const OVERRIDE;
 
  private:
+  // Whether we should run any optimization or register allocation. If false, will
+  // just run the code generation after the graph was built.
+  const bool run_optimizations_;
+  mutable AtomicInteger total_compiled_methods_;
+  mutable AtomicInteger unoptimized_compiled_methods_;
+  mutable AtomicInteger optimized_compiled_methods_;
+
   std::unique_ptr<std::ostream> visualizer_output_;
 
   // Delegate to another compiler in case the optimizing compiler cannot compile a method.
@@ -122,8 +130,16 @@ class OptimizingCompiler FINAL : public Compiler {
   DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
 };
 
-OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver) : Compiler(driver, 100),
-    delegate_(Create(driver, Compiler::Kind::kQuick)) {
+static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */
+
+OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
+    : Compiler(driver, kMaximumCompilationTimeBeforeWarning),
+      run_optimizations_(
+          driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime),
+      total_compiled_methods_(0),
+      unoptimized_compiled_methods_(0),
+      optimized_compiled_methods_(0),
+      delegate_(Create(driver, Compiler::Kind::kQuick)) {
   if (kIsVisualizerEnabled) {
     visualizer_output_.reset(new std::ofstream("art.cfg"));
   }
@@ -137,6 +153,14 @@ void OptimizingCompiler::UnInit() const {
   delegate_->UnInit();
 }
 
+OptimizingCompiler::~OptimizingCompiler() {
+  size_t unoptimized_percent = (unoptimized_compiled_methods_ * 100 / total_compiled_methods_);
+  size_t optimized_percent = (optimized_compiled_methods_ * 100 / total_compiled_methods_);
+  LOG(INFO) << "Compiled " << total_compiled_methods_ << " methods: "
+            << unoptimized_percent << "% (" << unoptimized_compiled_methods_ << ") unoptimized, "
+            << optimized_percent << "% (" << optimized_compiled_methods_ << ") optimized.";
+}
+
 bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
                                           CompilationUnit* cu) const {
   return delegate_->CanCompileMethod(method_idx, dex_file, cu);
@@ -173,6 +197,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
                                                uint32_t method_idx,
                                                jobject class_loader,
                                                const DexFile& dex_file) const {
+  total_compiled_methods_++;
   InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
   // Always use the thumb2 assembler: some runtime functionality (like implicit stack
   // overflow checks) assume thumb2.
@@ -222,7 +247,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
 
   CodeVectorAllocator allocator;
 
-  if (RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+  if (run_optimizations_ && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+    optimized_compiled_methods_++;
     graph->BuildDominatorTree();
     graph->TransformToSSA();
     visualizer.DumpGraph("ssa");
@@ -262,6 +288,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
     LOG(FATAL) << "Could not allocate registers in optimizing compiler";
     return nullptr;
   } else {
+    unoptimized_compiled_methods_++;
    codegen->CompileBaseline(&allocator);
     // Run these phases to get some test coverage.
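Note: all three backends emit the same shape of code for HInvokeVirtual; only the assembler mnemonics differ. The following is a minimal C++ sketch of that dispatch sequence. The *Stub types, field names, and the flat embedded_vtable_ array are illustrative stand-ins, not the actual mirror::Object / mirror::Class / mirror::ArtMethod declarations; the real offsets come from ClassOffset(), EmbeddedVTableOffset(), and EntryPointFromQuickCompiledCodeOffset() as used in the diff above.

#include <cstdint>

// Hypothetical stand-ins for the runtime object layout referenced in the diff.
struct ArtMethodStub {
  void (*entry_point_from_quick_compiled_code_)();  // quick code entry point
};

struct ClassStub {
  ArtMethodStub* embedded_vtable_[1];  // stands in for the embedded vtable
};

struct ObjectStub {
  ClassStub* klass_;  // every object starts with a pointer to its class
};

// The sequence each VisitInvokeVirtual emits: two dependent loads off the
// receiver, one load of the entry point, then an indirect branch (blx LR on
// ARM, call on x86/x86-64).
inline void DispatchVirtual(ObjectStub* receiver, uint32_t vtable_index) {
  ClassStub* klass = receiver->klass_;                            // temp = object->GetClass();
  ArtMethodStub* method = klass->embedded_vtable_[vtable_index];  // temp = temp->GetMethodAt(method_offset);
  method->entry_point_from_quick_compiled_code_();                // call temp->GetEntryPoint();
}

Because the builder fixes vtable_index at compile time via ComputeInvokeInfo (bailing out of BuildInvoke with return false when it is -1), the generated call performs no per-invocation method resolution beyond these loads.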