diff options
Diffstat (limited to 'compiler')
30 files changed, 987 insertions, 573 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk index c6662c2181..27bc3a32cc 100644 --- a/compiler/Android.mk +++ b/compiler/Android.mk @@ -90,6 +90,7 @@ LIBART_COMPILER_SRC_FILES := \ utils/x86/assembler_x86.cc \ utils/x86/managed_register_x86.cc \ buffered_output_stream.cc \ + compiler_backend.cc \ elf_fixup.cc \ elf_stripper.cc \ elf_writer.cc \ diff --git a/compiler/compiler_backend.cc b/compiler/compiler_backend.cc new file mode 100644 index 0000000000..b8f21a9bf5 --- /dev/null +++ b/compiler/compiler_backend.cc @@ -0,0 +1,293 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "compiler_backend.h" +#include "elf_writer_quick.h" +#include "dex/quick/mir_to_lir.h" +#include "dex/mir_graph.h" +#include "driver/compiler_driver.h" +#include "mirror/art_method-inl.h" + +#ifdef ART_USE_PORTABLE_COMPILER +#include "dex/portable/mir_to_gbc.h" +#include "elf_writer_mclinker.h" +#endif + +namespace art { + +#ifdef ART_SEA_IR_MODE +extern "C" art::CompiledMethod* SeaIrCompileMethod(art::CompilerDriver& compiler, + const art::DexFile::CodeItem* code_item, + uint32_t access_flags, + art::InvokeType invoke_type, + uint16_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const art::DexFile& dex_file); +#endif + +extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& driver); +extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& driver); +extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver& compiler, + const art::DexFile::CodeItem* code_item, + uint32_t access_flags, + art::InvokeType invoke_type, + uint16_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const art::DexFile& dex_file); + +extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver& compiler, + uint32_t access_flags, uint32_t method_idx, + const art::DexFile& dex_file); + + +static CompiledMethod* TryCompileWithSeaIR(art::CompilerDriver& compiler, + const art::DexFile::CodeItem* code_item, + uint32_t access_flags, + art::InvokeType invoke_type, + uint16_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const art::DexFile& dex_file) { +#ifdef ART_SEA_IR_MODE + bool use_sea = Runtime::Current()->IsSeaIRMode(); + use_sea = use_sea && + (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci")); + if (use_sea) { + LOG(INFO) << "Using SEA IR to compile..." 
<< std::endl; + return SeaIrCompileMethod(compiler, + code_item, + access_flags, + invoke_type, + class_def_idx, + method_idx, + class_loader, + dex_file); + } +#endif + return nullptr; +} + + +class QuickBackend : public CompilerBackend { + public: + QuickBackend() : CompilerBackend(100) {} + + void Init(CompilerDriver& driver) const { + ArtInitQuickCompilerContext(driver); + } + + void UnInit(CompilerDriver& driver) const { + ArtUnInitQuickCompilerContext(driver); + } + + CompiledMethod* Compile(CompilerDriver& compiler, + const DexFile::CodeItem* code_item, + uint32_t access_flags, + InvokeType invoke_type, + uint16_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const DexFile& dex_file) const { + CompiledMethod* method = TryCompileWithSeaIR(compiler, + code_item, + access_flags, + invoke_type, + class_def_idx, + method_idx, + class_loader, + dex_file); + if (method != nullptr) return method; + + return ArtQuickCompileMethod(compiler, + code_item, + access_flags, + invoke_type, + class_def_idx, + method_idx, + class_loader, + dex_file); + } + + CompiledMethod* JniCompile(CompilerDriver& driver, + uint32_t access_flags, + uint32_t method_idx, + const DexFile& dex_file) const { + return ArtQuickJniCompileMethod(driver, access_flags, method_idx, dex_file); + } + + uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const { + return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()); + } + + bool WriteElf(art::File* file, + OatWriter& oat_writer, + const std::vector<const art::DexFile*>& dex_files, + const std::string& android_root, + bool is_host, const CompilerDriver& driver) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host, driver); + } + + Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const { + Mir2Lir* mir_to_lir = nullptr; + switch (cu->instruction_set) { + case kThumb2: + mir_to_lir = 
ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena); + break; + case kMips: + mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena); + break; + case kX86: + mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena); + break; + default: + LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set; + } + + /* The number of compiler temporaries depends on backend so set it up now if possible */ + if (mir_to_lir) { + size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps(); + bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps); + CHECK(set_max); + } + return mir_to_lir;; + } + + void InitCompilationUnit(CompilationUnit& cu) const {} + + private: + DISALLOW_COPY_AND_ASSIGN(QuickBackend); +}; + +#ifdef ART_USE_PORTABLE_COMPILER + +extern "C" void ArtInitCompilerContext(art::CompilerDriver& driver); +extern "C" void ArtUnInitCompilerContext(art::CompilerDriver& driver); +extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver& driver, + const art::DexFile::CodeItem* code_item, + uint32_t access_flags, + art::InvokeType invoke_type, + uint16_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const art::DexFile& dex_file); +extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver& driver, + uint32_t access_flags, uint32_t method_idx, + const art::DexFile& dex_file); + + +class LLVMBackend : public CompilerBackend { + public: + LLVMBackend() : CompilerBackend(1000) {} + + void Init(CompilerDriver& driver) const { + ArtInitCompilerContext(driver); + } + + void UnInit(CompilerDriver& driver) const { + ArtUnInitCompilerContext(driver); + } + + CompiledMethod* Compile(CompilerDriver& compiler, + const DexFile::CodeItem* code_item, + uint32_t access_flags, + InvokeType invoke_type, + uint16_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const DexFile& dex_file) const { + CompiledMethod* method = TryCompileWithSeaIR(compiler, + code_item, + 
access_flags, + invoke_type, + class_def_idx, + method_idx, + class_loader, + dex_file); + if (method != nullptr) return method; + + return ArtCompileMethod(compiler, + code_item, + access_flags, + invoke_type, + class_def_idx, + method_idx, + class_loader, + dex_file); + } + + CompiledMethod* JniCompile(CompilerDriver& driver, + uint32_t access_flags, + uint32_t method_idx, + const DexFile& dex_file) const { + return ArtLLVMJniCompileMethod(driver, access_flags, method_idx, dex_file); + } + + uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const { + return reinterpret_cast<uintptr_t>(method->GetEntryPointFromPortableCompiledCode()); + } + + bool WriteElf(art::File* file, + OatWriter& oat_writer, + const std::vector<const art::DexFile*>& dex_files, + const std::string& android_root, + bool is_host, const CompilerDriver& driver) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return art::ElfWriterMclinker::Create( + file, oat_writer, dex_files, android_root, is_host, driver); + } + + Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const { + return PortableCodeGenerator( + cu, cu->mir_graph.get(), &cu->arena, + reinterpret_cast<art::llvm::LlvmCompilationUnit*>(compilation_unit)); + } + + void InitCompilationUnit(CompilationUnit& cu) const { + // Fused long branches not currently useful in bitcode. 
+ cu.disable_opt |= + (1 << kBranchFusing) | + (1 << kSuppressExceptionEdges); + } + + bool IsPortable() const { return true; } + + private: + DISALLOW_COPY_AND_ASSIGN(LLVMBackend); +}; +#endif + +CompilerBackend* CompilerBackend::Create(CompilerBackend::Kind kind) { + switch (kind) { + case kQuick: + return new QuickBackend(); + break; + case kPortable: +#ifdef ART_USE_PORTABLE_COMPILER + return new LLVMBackend(); +#else + LOG(FATAL) << "Portable compiler not compiled"; +#endif + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } + return nullptr; +} + +} // namespace art diff --git a/compiler/compiler_backend.h b/compiler/compiler_backend.h new file mode 100644 index 0000000000..b20c125f0a --- /dev/null +++ b/compiler/compiler_backend.h @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_COMPILER_BACKEND_H_ +#define ART_COMPILER_COMPILER_BACKEND_H_ + +#include "dex_file.h" +#include "os.h" + +namespace art { + +class Backend; +class CompilationUnit; +class CompilerDriver; +class CompiledMethod; +class MIRGraph; +class OatWriter; + +namespace mirror { + class ArtMethod; +} + +class CompilerBackend { + public: + enum Kind { + kQuick, + kPortable + }; + + explicit CompilerBackend(int warning) + : maximum_compilation_time_before_warning_(warning) {} + + static CompilerBackend* Create(Kind kind); + + virtual void Init(CompilerDriver& driver) const = 0; + + virtual void UnInit(CompilerDriver& driver) const = 0; + + virtual CompiledMethod* Compile(CompilerDriver& compiler, + const DexFile::CodeItem* code_item, + uint32_t access_flags, + InvokeType invoke_type, + uint16_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const DexFile& dex_file) const = 0; + + virtual CompiledMethod* JniCompile(CompilerDriver& driver, + uint32_t access_flags, + uint32_t method_idx, + const DexFile& dex_file) const = 0; + + virtual uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const = 0; + + virtual bool WriteElf(art::File* file, + OatWriter& oat_writer, + const std::vector<const art::DexFile*>& dex_files, + const std::string& android_root, + bool is_host, const CompilerDriver& driver) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + + virtual Backend* GetCodeGenerator(CompilationUnit* cu, + void* compilation_unit) const = 0; + + uint64_t GetMaximumCompilationTimeBeforeWarning() const { + return maximum_compilation_time_before_warning_; + } + + virtual bool IsPortable() const { return false; } + + virtual void InitCompilationUnit(CompilationUnit& cu) const = 0; + + virtual ~CompilerBackend() {} + + private: + uint64_t maximum_compilation_time_before_warning_; + + DISALLOW_COPY_AND_ASSIGN(CompilerBackend); +}; + +} // namespace art + +#endif // ART_COMPILER_COMPILER_BACKEND_H_ diff --git a/compiler/dex/compiler_ir.h 
b/compiler/dex/compiler_ir.h index 32fd79bab8..ea8eb1cfa2 100644 --- a/compiler/dex/compiler_ir.h +++ b/compiler/dex/compiler_ir.h @@ -68,7 +68,7 @@ struct CompilationUnit { uint32_t disable_opt; // opt_control_vector flags. uint32_t enable_debug; // debugControlVector flags. bool verbose; - CompilerBackend compiler_backend; + const CompilerBackend* compiler_backend; InstructionSet instruction_set; const InstructionSetFeatures& GetInstructionSetFeatures() { diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc index 8f83cd039d..591d92a73d 100644 --- a/compiler/dex/frontend.cc +++ b/compiler/dex/frontend.cc @@ -14,8 +14,7 @@ * limitations under the License. */ -#include <llvm/Support/Threading.h> - +#include "compiler_backend.h" #include "compiler_internals.h" #include "driver/compiler_driver.h" #include "dataflow_iterator-inl.h" @@ -27,42 +26,9 @@ #include "base/logging.h" #include "base/timing_logger.h" -#if defined(ART_USE_PORTABLE_COMPILER) -#include "dex/portable/mir_to_gbc.h" -#include "llvm/llvm_compilation_unit.h" -#endif - #include "dex/quick/dex_file_to_method_inliner_map.h" -namespace { -#if !defined(ART_USE_PORTABLE_COMPILER) - pthread_once_t llvm_multi_init = PTHREAD_ONCE_INIT; -#endif - void InitializeLLVMForQuick() { - ::llvm::llvm_start_multithreaded(); - } -} - namespace art { -namespace llvm { -::llvm::Module* makeLLVMModuleContents(::llvm::Module* module); -} - -LLVMInfo::LLVMInfo() { -#if !defined(ART_USE_PORTABLE_COMPILER) - pthread_once(&llvm_multi_init, InitializeLLVMForQuick); -#endif - // Create context, module, intrinsic helper & ir builder - llvm_context_.reset(new ::llvm::LLVMContext()); - llvm_module_ = new ::llvm::Module("art", *llvm_context_); - ::llvm::StructType::create(*llvm_context_, "JavaObject"); - art::llvm::makeLLVMModuleContents(llvm_module_); - intrinsic_helper_.reset(new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_)); - ir_builder_.reset(new art::llvm::IRBuilder(*llvm_context_, *llvm_module_, 
*intrinsic_helper_)); -} - -LLVMInfo::~LLVMInfo() { -} extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& driver) { CHECK(driver.GetCompilerContext() == NULL); @@ -123,7 +89,7 @@ CompilationUnit::CompilationUnit(ArenaPool* pool) disable_opt(0), enable_debug(0), verbose(false), - compiler_backend(kNoBackend), + compiler_backend(NULL), instruction_set(kNone), num_dalvik_registers(0), insns(NULL), @@ -163,15 +129,12 @@ void CompilationUnit::EndTiming() { } static CompiledMethod* CompileMethod(CompilerDriver& compiler, - const CompilerBackend compiler_backend, + CompilerBackend* compiler_backend, const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, - jobject class_loader, const DexFile& dex_file -#if defined(ART_USE_PORTABLE_COMPILER) - , llvm::LlvmCompilationUnit* llvm_compilation_unit -#endif -) { + jobject class_loader, const DexFile& dex_file, + void* llvm_compilation_unit) { VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "..."; if (code_item->insns_size_in_code_units_ >= 0x10000) { LOG(INFO) << "Method size exceeds compiler limits: " << code_item->insns_size_in_code_units_ @@ -211,12 +174,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler, * MIR and backend flags? Need command-line setting as well. */ - if (compiler_backend == kPortable) { - // Fused long branches not currently useful in bitcode. - cu.disable_opt |= - (1 << kBranchFusing) | - (1 << kSuppressExceptionEdges); - } + compiler_backend->InitCompilationUnit(cu); if (cu.instruction_set == kMips) { // Disable some optimizations for mips for now @@ -241,37 +199,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler, * The reason we do this is that optimizations on the MIR graph may need to get information * that is only available if a CG exists. 
*/ -#if defined(ART_USE_PORTABLE_COMPILER) - if (compiler_backend == kPortable) { - cu.cg.reset(PortableCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena, llvm_compilation_unit)); - } else { -#endif - Mir2Lir* mir_to_lir = nullptr; - switch (compiler.GetInstructionSet()) { - case kThumb2: - mir_to_lir = ArmCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena); - break; - case kMips: - mir_to_lir = MipsCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena); - break; - case kX86: - mir_to_lir = X86CodeGenerator(&cu, cu.mir_graph.get(), &cu.arena); - break; - default: - LOG(FATAL) << "Unexpected instruction set: " << compiler.GetInstructionSet(); - } - - cu.cg.reset(mir_to_lir); - - /* The number of compiler temporaries depends on backend so set it up now if possible */ - if (mir_to_lir) { - size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps(); - bool set_max = cu.mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps); - CHECK(set_max); - } -#if defined(ART_USE_PORTABLE_COMPILER) - } -#endif + cu.cg.reset(compiler_backend->GetCodeGenerator(&cu, llvm_compilation_unit)); /* Gathering opcode stats? 
*/ if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) { @@ -283,11 +211,9 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler, class_loader, dex_file); cu.NewTimingSplit("MIROpt:CheckFilters"); -#if !defined(ART_USE_PORTABLE_COMPILER) if (cu.mir_graph->SkipCompilation(Runtime::Current()->GetCompilerFilter())) { return NULL; } -#endif /* Create the pass driver and launch it */ PassDriver driver(&cu); @@ -338,7 +264,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler, } CompiledMethod* CompileOneMethod(CompilerDriver& compiler, - const CompilerBackend backend, + CompilerBackend* backend, const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, @@ -346,13 +272,9 @@ CompiledMethod* CompileOneMethod(CompilerDriver& compiler, uint32_t method_idx, jobject class_loader, const DexFile& dex_file, - llvm::LlvmCompilationUnit* llvm_compilation_unit) { + void* compilation_unit) { return CompileMethod(compiler, backend, code_item, access_flags, invoke_type, class_def_idx, - method_idx, class_loader, dex_file -#if defined(ART_USE_PORTABLE_COMPILER) - , llvm_compilation_unit -#endif - ); // NOLINT(whitespace/parens) + method_idx, class_loader, dex_file, compilation_unit); } } // namespace art @@ -364,7 +286,7 @@ extern "C" art::CompiledMethod* uint16_t class_def_idx, uint32_t method_idx, jobject class_loader, const art::DexFile& dex_file) { // TODO: check method fingerprint here to determine appropriate backend type. 
Until then, use build default - art::CompilerBackend backend = compiler.GetCompilerBackend(); + art::CompilerBackend* backend = compiler.GetCompilerBackend(); return art::CompileOneMethod(compiler, backend, code_item, access_flags, invoke_type, class_def_idx, method_idx, class_loader, dex_file, NULL /* use thread llvm_info */); diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 10bcdb9771..dfd8e6338d 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -881,6 +881,23 @@ void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) { } } +MIR* MIRGraph::GetNextUnconditionalMir(BasicBlock* bb, MIR* current) { + MIR* next_mir = nullptr; + + if (current != nullptr) { + next_mir = current->next; + } + + if (next_mir == nullptr) { + // Only look for next MIR that follows unconditionally. + if ((bb->taken == NullBasicBlockId) && (bb->fall_through != NullBasicBlockId)) { + next_mir = GetBasicBlock(bb->fall_through)->first_mir_insn; + } + } + + return next_mir; +} + char* MIRGraph::GetDalvikDisassembly(const MIR* mir) { DecodedInstruction insn = mir->dalvikInsn; std::string str; diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h index f8706c49b6..e866612d35 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -721,6 +721,17 @@ class MIRGraph { void AppendMIR(BasicBlock* bb, MIR* mir); void PrependMIR(BasicBlock* bb, MIR* mir); void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir); + + /** + * @brief Used to obtain the next MIR that follows unconditionally. + * @details The implementation does not guarantee that a MIR does not + * follow even if this method returns nullptr. + * @param bb The basic block of "current" MIR. + * @param current The MIR for which to find an unconditional follower. + * @return Returns the following MIR if one can be found. 
+ */ + MIR* GetNextUnconditionalMir(BasicBlock* bb, MIR* current); + char* GetDalvikDisassembly(const MIR* mir); void ReplaceSpecialChars(std::string& str); std::string GetSSAName(int ssa_reg); diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 209ed3dca8..b91ef28680 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -406,7 +406,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) { // Is this the select pattern? // TODO: flesh out support for Mips. NOTE: llvm's select op doesn't quite work here. // TUNING: expand to support IF_xx compare & branches - if ((cu_->compiler_backend != kPortable) && + if (!cu_->compiler_backend->IsPortable() && (cu_->instruction_set == kThumb2 || cu_->instruction_set == kX86) && ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) || (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) { diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc index e6cc2de80a..3187fbb28c 100644 --- a/compiler/dex/portable/mir_to_gbc.cc +++ b/compiler/dex/portable/mir_to_gbc.cc @@ -41,6 +41,22 @@ const char kNormalBlock = 'L'; const char kCatchBlock = 'C'; namespace art { +namespace llvm { +::llvm::Module* makeLLVMModuleContents(::llvm::Module* module); +} + +LLVMInfo::LLVMInfo() { + // Create context, module, intrinsic helper & ir builder + llvm_context_.reset(new ::llvm::LLVMContext()); + llvm_module_ = new ::llvm::Module("art", *llvm_context_); + ::llvm::StructType::create(*llvm_context_, "JavaObject"); + art::llvm::makeLLVMModuleContents(llvm_module_); + intrinsic_helper_.reset(new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_)); + ir_builder_.reset(new art::llvm::IRBuilder(*llvm_context_, *llvm_module_, *intrinsic_helper_)); +} + +LLVMInfo::~LLVMInfo() { +} ::llvm::BasicBlock* MirConverter::GetLLVMBlock(int id) { return id_to_block_map_.Get(id); diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index 
a30e80a575..b36dde98b2 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -18,225 +18,11 @@ #include "arm_lir.h" #include "codegen_arm.h" -#include "dex/quick/dex_file_method_inliner.h" #include "dex/quick/mir_to_lir-inl.h" #include "entrypoints/quick/quick_entrypoints.h" namespace art { -// TODO: generalize & move to RegUtil.cc -// The number of dalvik registers passed in core registers. -constexpr int kInArgsInCoreRegs = 3; -// The core register corresponding to the first (index 0) input argument. -constexpr int kInArg0CoreReg = r1; // r0 is Method*. -// Offset, in words, for getting args from stack (even core reg args have space on stack). -constexpr int kInArgToStackOffset = 1; - -/* Lock argument if it's in register. */ -void ArmMir2Lir::LockArg(int in_position, bool wide) { - if (in_position < kInArgsInCoreRegs) { - LockTemp(kInArg0CoreReg + in_position); - } - if (wide && in_position + 1 < kInArgsInCoreRegs) { - LockTemp(kInArg0CoreReg + in_position + 1); - } -} - -/* Load argument into register. LockArg(in_position, wide) must have been previously called. */ -int ArmMir2Lir::LoadArg(int in_position, bool wide) { - if (in_position < kInArgsInCoreRegs) { - int low_reg = kInArg0CoreReg + in_position; - if (!wide) { - return low_reg; - } - int high_reg = (in_position != kInArgsInCoreRegs - 1) ? 
low_reg + 1 : LoadArg(in_position + 1); - return (low_reg & 0xff) | ((high_reg & 0xff) << 8); - } - int low_reg = AllocTemp(); - int offset = (in_position + kInArgToStackOffset) * sizeof(uint32_t); - if (!wide) { - LoadWordDisp(rARM_SP, offset, low_reg); - return low_reg; - } - int high_reg = AllocTemp(); - LoadBaseDispWide(rARM_SP, offset, low_reg, high_reg, INVALID_SREG); - return (low_reg & 0xff) | ((high_reg & 0xff) << 8); -} - -void ArmMir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) { - int reg = kInArg0CoreReg + in_position; - int offset = (in_position + kInArgToStackOffset) * sizeof(uint32_t); - if (!rl_dest.wide) { - if (in_position < kInArgsInCoreRegs) { - OpRegCopy(rl_dest.low_reg, reg); - } else { - LoadWordDisp(rARM_SP, offset, rl_dest.low_reg); - } - } else { - if (in_position < kInArgsInCoreRegs - 1) { - OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg, reg, reg + 1); - } else if (in_position == kInArgsInCoreRegs - 1) { - OpRegCopy(rl_dest.low_reg, reg); - LoadWordDisp(rARM_SP, offset + sizeof(uint32_t), rl_dest.high_reg); - } else { - LoadBaseDispWide(rARM_SP, offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG); - } - } -} - -/* Find the next MIR, which may be in a following basic block */ -// TODO: make this a utility in mir_graph. 
-MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) { - BasicBlock* bb = *p_bb; - MIR* orig_mir = mir; - while (bb != NULL) { - if (mir != NULL) { - mir = mir->next; - } - if (mir != NULL) { - return mir; - } else { - bb = mir_graph_->GetBasicBlock(bb->fall_through); - *p_bb = bb; - if (bb) { - mir = bb->first_mir_insn; - if (mir != NULL) { - return mir; - } - } - } - } - return orig_mir; -} - -/* Used for the "verbose" listing */ -// TODO: move to common code -void ArmMir2Lir::GenPrintLabel(MIR* mir) { - /* Mark the beginning of a Dalvik instruction for line tracking */ - if (cu_->verbose) { - char* inst_str = mir_graph_->GetDalvikDisassembly(mir); - MarkBoundary(mir->offset, inst_str); - } -} - -MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir, const InlineMethod& special) { - // FastInstance() already checked by DexFileMethodInliner. - const InlineIGetIPutData& data = special.d.ifield_data; - if (data.method_is_static || data.object_arg != 0) { - return NULL; // The object is not "this" and has to be null-checked. - } - - DCHECK_NE(data.op_size, kDouble); // The inliner doesn't distinguish kDouble, uses kLong. - bool wide = (data.op_size == kLong); - - // Point of no return - no aborts after this - ArmMir2Lir::GenPrintLabel(mir); - LockArg(data.object_arg); - RegLocation rl_dest = wide ? GetReturnWide(false) : GetReturn(false); - int reg_obj = LoadArg(data.object_arg); - if (wide) { - LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG); - } else { - LoadBaseDisp(reg_obj, data.field_offset, rl_dest.low_reg, kWord, INVALID_SREG); - } - if (data.is_volatile) { - GenMemBarrier(kLoadLoad); - } - return GetNextMir(bb, mir); -} - -MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir, const InlineMethod& special) { - // FastInstance() already checked by DexFileMethodInliner. 
- const InlineIGetIPutData& data = special.d.ifield_data; - if (data.method_is_static || data.object_arg != 0) { - return NULL; // The object is not "this" and has to be null-checked. - } - - DCHECK_NE(data.op_size, kDouble); // The inliner doesn't distinguish kDouble, uses kLong. - bool wide = (data.op_size == kLong); - - // Point of no return - no aborts after this - ArmMir2Lir::GenPrintLabel(mir); - LockArg(data.object_arg); - LockArg(data.src_arg, wide); - int reg_obj = LoadArg(data.object_arg); - int reg_src = LoadArg(data.src_arg, wide); - if (data.is_volatile) { - GenMemBarrier(kStoreStore); - } - if (wide) { - StoreBaseDispWide(reg_obj, data.field_offset, reg_src & 0xff, reg_src >> 8); - } else { - StoreBaseDisp(reg_obj, data.field_offset, reg_src, kWord); - } - if (data.is_volatile) { - GenMemBarrier(kLoadLoad); - } - if (data.is_object) { - MarkGCCard(reg_src, reg_obj); - } - return GetNextMir(bb, mir); -} - -MIR* ArmMir2Lir::SpecialIdentity(MIR* mir, const InlineMethod& special) { - const InlineReturnArgData& data = special.d.return_data; - DCHECK_NE(data.op_size, kDouble); // The inliner doesn't distinguish kDouble, uses kLong. - bool wide = (data.op_size == kLong); - - // Point of no return - no aborts after this - ArmMir2Lir::GenPrintLabel(mir); - LockArg(data.arg, wide); - RegLocation rl_dest = wide ? GetReturnWide(false) : GetReturn(false); - LoadArgDirect(data.arg, rl_dest); - return mir; -} - -/* - * Special-case code genration for simple non-throwing leaf methods. 
- */ -void ArmMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, - const InlineMethod& special) { - DCHECK(special.flags & kInlineSpecial); - current_dalvik_offset_ = mir->offset; - MIR* next_mir = NULL; - switch (special.opcode) { - case kInlineOpNop: - DCHECK(mir->dalvikInsn.opcode == Instruction::RETURN_VOID); - next_mir = mir; - break; - case kInlineOpConst: - ArmMir2Lir::GenPrintLabel(mir); - LoadConstant(rARM_RET0, static_cast<int>(special.d.data)); - next_mir = GetNextMir(&bb, mir); - break; - case kInlineOpIGet: - next_mir = SpecialIGet(&bb, mir, special); - break; - case kInlineOpIPut: - next_mir = SpecialIPut(&bb, mir, special); - break; - case kInlineOpReturnArg: - next_mir = SpecialIdentity(mir, special); - break; - default: - return; - } - if (next_mir != NULL) { - current_dalvik_offset_ = next_mir->offset; - if (special.opcode != kInlineOpReturnArg) { - ArmMir2Lir::GenPrintLabel(next_mir); - } - NewLIR1(kThumbBx, rARM_LR); - core_spill_mask_ = 0; - num_core_spills_ = 0; - fp_spill_mask_ = 0; - num_fp_spills_ = 0; - frame_size_ = 0; - core_vmap_table_.clear(); - fp_vmap_table_.clear(); - } -} - /* * The sparse table in the literal pool is an array of <key,displacement> * pairs. For each set, we'll load them as a pair using ldmia. 
@@ -610,4 +396,8 @@ void ArmMir2Lir::GenExitSequence() { } } +void ArmMir2Lir::GenSpecialExitSequence() { + NewLIR1(kThumbBx, rARM_LR); +} + } // namespace art diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h index 7ee241c54f..65dee807a1 100644 --- a/compiler/dex/quick/arm/codegen_arm.h +++ b/compiler/dex/quick/arm/codegen_arm.h @@ -52,6 +52,7 @@ class ArmMir2Lir : public Mir2Lir { int AllocTypedTempPair(bool fp_hint, int reg_class); int S2d(int low_reg, int high_reg); int TargetReg(SpecialTargetRegister reg); + int GetArgMappingToPhysicalReg(int arg_num); RegLocation GetReturnAlt(); RegLocation GetReturnWideAlt(); RegLocation LocCReturn(); @@ -122,6 +123,7 @@ class ArmMir2Lir : public Mir2Lir { void GenDivZeroCheck(int reg_lo, int reg_hi); void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method); void GenExitSequence(); + void GenSpecialExitSequence(); void GenFillArrayData(DexOffset table_offset, RegLocation rl_src); void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double); void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir); @@ -136,7 +138,6 @@ class ArmMir2Lir : public Mir2Lir { void GenNegFloat(RegLocation rl_dest, RegLocation rl_src); void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src); void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src); - void GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special); // Required for target - single operation generators. 
LIR* OpUnconditionalBranch(LIR* target); @@ -170,7 +171,6 @@ class ArmMir2Lir : public Mir2Lir { LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size, int s_reg); LIR* StoreBaseDispBody(int rBase, int displacement, int r_src, int r_src_hi, OpSize size); - void GenPrintLabel(MIR* mir); LIR* OpRegRegRegShift(OpKind op, int r_dest, int r_src1, int r_src2, int shift); LIR* OpRegRegShift(OpKind op, int r_dest_src1, int r_src2, int shift); static const ArmEncodingMap EncodingMap[kArmLast]; @@ -185,13 +185,6 @@ class ArmMir2Lir : public Mir2Lir { private: void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val, ConditionCode ccode); - void LockArg(int in_position, bool wide = false); - int LoadArg(int in_position, bool wide = false); - void LoadArgDirect(int in_position, RegLocation rl_dest); - MIR* GetNextMir(BasicBlock** p_bb, MIR* mir); - MIR* SpecialIGet(BasicBlock** bb, MIR* mir, const InlineMethod& special); - MIR* SpecialIPut(BasicBlock** bb, MIR* mir, const InlineMethod& special); - MIR* SpecialIdentity(MIR* mir, const InlineMethod& special); LIR* LoadFPConstantValue(int r_dest, int value); void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir); void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir); diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc index ceec7d50ce..83431ad235 100644 --- a/compiler/dex/quick/arm/target_arm.cc +++ b/compiler/dex/quick/arm/target_arm.cc @@ -83,6 +83,19 @@ int ArmMir2Lir::TargetReg(SpecialTargetRegister reg) { return res; } +int ArmMir2Lir::GetArgMappingToPhysicalReg(int arg_num) { + // For the 32-bit internal ABI, the first 3 arguments are passed in registers. + switch (arg_num) { + case 0: + return rARM_ARG1; + case 1: + return rARM_ARG2; + case 2: + return rARM_ARG3; + default: + return INVALID_REG; + } +} // Create a double from a pair of singles. 
int ArmMir2Lir::S2d(int low_reg, int high_reg) { diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 05eb360a6b..c5dccda228 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -1017,19 +1017,13 @@ void Mir2Lir::Materialize() { /* Allocate Registers using simple local allocation scheme */ SimpleRegAlloc(); - /* - * Custom codegen for special cases. If for any reason the - * special codegen doesn't succeed, first_lir_insn_ will be - * set to NULL; - */ - // TODO: Clean up GenSpecial() and return true only if special implementation is emitted. - // Currently, GenSpecial() returns IsSpecial() but doesn't check after SpecialMIR2LIR(). + /* First try the custom light codegen for special cases. */ DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr); - cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file) + bool special_worked = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file) ->GenSpecial(this, cu_->method_idx); - /* Convert MIR to LIR, etc. */ - if (first_lir_insn_ == NULL) { + /* Take normal path for converting MIR to LIR only if the special codegen did not succeed. 
*/ + if (special_worked == false) { MethodMIR2LIR(); } diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc index 389dd9ac35..cb424d9169 100644 --- a/compiler/dex/quick/dex_file_method_inliner.cc +++ b/compiler/dex/quick/dex_file_method_inliner.cc @@ -271,6 +271,13 @@ DexFileMethodInliner::~DexFileMethodInliner() { } bool DexFileMethodInliner::AnalyseMethodCode(verifier::MethodVerifier* verifier) { + InlineMethod method; + bool success = AnalyseMethodCode(verifier, &method); + return success && AddInlineMethod(verifier->GetMethodReference().dex_method_index, method); +} + +bool DexFileMethodInliner::AnalyseMethodCode(verifier::MethodVerifier* verifier, + InlineMethod* method) { // We currently support only plain return or 2-instruction methods. const DexFile::CodeItem* code_item = verifier->CodeItem(); @@ -278,27 +285,22 @@ bool DexFileMethodInliner::AnalyseMethodCode(verifier::MethodVerifier* verifier) const Instruction* instruction = Instruction::At(code_item->insns_); Instruction::Code opcode = instruction->Opcode(); - InlineMethod method; - bool success; switch (opcode) { case Instruction::RETURN_VOID: - method.opcode = kInlineOpNop; - method.flags = kInlineSpecial; - method.d.data = 0u; - success = true; - break; + method->opcode = kInlineOpNop; + method->flags = kInlineSpecial; + method->d.data = 0u; + return true; case Instruction::RETURN: case Instruction::RETURN_OBJECT: case Instruction::RETURN_WIDE: - success = AnalyseReturnMethod(code_item, &method); - break; + return AnalyseReturnMethod(code_item, method); case Instruction::CONST: case Instruction::CONST_4: case Instruction::CONST_16: case Instruction::CONST_HIGH16: // TODO: Support wide constants (RETURN_WIDE). 
- success = AnalyseConstMethod(code_item, &method); - break; + return AnalyseConstMethod(code_item, method); case Instruction::IGET: case Instruction::IGET_OBJECT: case Instruction::IGET_BOOLEAN: @@ -306,8 +308,7 @@ bool DexFileMethodInliner::AnalyseMethodCode(verifier::MethodVerifier* verifier) case Instruction::IGET_CHAR: case Instruction::IGET_SHORT: case Instruction::IGET_WIDE: - success = AnalyseIGetMethod(verifier, &method); - break; + return AnalyseIGetMethod(verifier, method); case Instruction::IPUT: case Instruction::IPUT_OBJECT: case Instruction::IPUT_BOOLEAN: @@ -315,13 +316,10 @@ bool DexFileMethodInliner::AnalyseMethodCode(verifier::MethodVerifier* verifier) case Instruction::IPUT_CHAR: case Instruction::IPUT_SHORT: case Instruction::IPUT_WIDE: - success = AnalyseIPutMethod(verifier, &method); - break; + return AnalyseIPutMethod(verifier, method); default: - success = false; - break; + return false; } - return success && AddInlineMethod(verifier->GetMethodReference().dex_method_index, method); } bool DexFileMethodInliner::IsIntrinsic(uint32_t method_index) { @@ -407,9 +405,7 @@ bool DexFileMethodInliner::GenSpecial(Mir2Lir* backend, uint32_t method_idx) { } special = it->second; } - // TODO: Return true only if special implementation is emitted. - backend->SpecialMIR2LIR(special); - return true; + return backend->SpecialMIR2LIR(special); } uint32_t DexFileMethodInliner::FindClassIndex(const DexFile* dex_file, IndexCache* cache, @@ -596,7 +592,7 @@ bool DexFileMethodInliner::AnalyseConstMethod(const DexFile::CodeItem* code_item if (return_opcode == Instruction::RETURN_OBJECT && vB != 0) { return false; // Returning non-null reference constant? 
} - result->opcode = kInlineOpConst; + result->opcode = kInlineOpNonWideConst; result->flags = kInlineSpecial; result->d.data = static_cast<uint64_t>(vB); return true; @@ -633,6 +629,11 @@ bool DexFileMethodInliner::AnalyseIGetMethod(verifier::MethodVerifier* verifier, return false; // Not returning the value retrieved by IGET? } + if ((verifier->GetAccessFlags() & kAccStatic) != 0 || object_reg != arg_start) { + // TODO: Support inlining IGET on other register than "this". + return false; + } + if (!CompilerDriver::ComputeSpecialAccessorInfo(field_idx, false, verifier, &result->d.ifield_data)) { return false; @@ -645,6 +646,7 @@ bool DexFileMethodInliner::AnalyseIGetMethod(verifier::MethodVerifier* verifier, data->is_object = (opcode == Instruction::IGET_OBJECT) ? 1u : 0u; data->object_arg = object_reg - arg_start; // Allow IGET on any register, not just "this". data->src_arg = 0; + data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0; data->reserved = 0; return true; } @@ -676,6 +678,11 @@ bool DexFileMethodInliner::AnalyseIPutMethod(verifier::MethodVerifier* verifier, DCHECK_GE(src_reg, arg_start); DCHECK_LT(size == kLong ? src_reg + 1 : src_reg, code_item->registers_size_); + if ((verifier->GetAccessFlags() & kAccStatic) != 0 || object_reg != arg_start) { + // TODO: Support inlining IPUT on other register than "this". + return false; + } + if (!CompilerDriver::ComputeSpecialAccessorInfo(field_idx, true, verifier, &result->d.ifield_data)) { return false; @@ -688,6 +695,7 @@ bool DexFileMethodInliner::AnalyseIPutMethod(verifier::MethodVerifier* verifier, data->is_object = (opcode == Instruction::IPUT_OBJECT) ? 1u : 0u; data->object_arg = object_reg - arg_start; // Allow IPUT on any register, not just "this". 
data->src_arg = src_reg - arg_start; + data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0; data->reserved = 0; return true; } diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h index fb7528e8b1..3dcb964fab 100644 --- a/compiler/dex/quick/dex_file_method_inliner.h +++ b/compiler/dex/quick/dex_file_method_inliner.h @@ -57,7 +57,7 @@ enum InlineMethodOpcode : uint16_t { kInlineOpNop, kInlineOpReturnArg, - kInlineOpConst, + kInlineOpNonWideConst, kInlineOpIGet, kInlineOpIPut, }; @@ -150,13 +150,24 @@ class DexFileMethodInliner { * Analyse method code to determine if the method is a candidate for inlining. * If it is, record its data for later. * - * @param method_idx the index of the inlining candidate. - * @param code_item a previously verified code item of the method. + * @param verifier the method verifier holding data about the method to analyse. + * @return true if the method is a candidate for inlining, false otherwise. */ bool AnalyseMethodCode(verifier::MethodVerifier* verifier) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); /** + * Analyse method code to determine if the method is a candidate for inlining. + * If it is, record the inlining data. + * + * @param verifier the method verifier holding data about the method to analyse. + * @param method placeholder for the inline method data. + * @return true if the method is a candidate for inlining, false otherwise. + */ + bool AnalyseMethodCode(verifier::MethodVerifier* verifier, InlineMethod* method) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + + /** * Check whether a particular method index corresponds to an intrinsic function. 
*/ bool IsIntrinsic(uint32_t method_index) LOCKS_EXCLUDED(lock_); diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index ee61c8b795..5fa4596055 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -293,10 +293,10 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0)); } - if (cu_->num_ins == 0) + if (cu_->num_ins == 0) { return; - const int num_arg_regs = 3; - static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3}; + } + int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; /* * Copy incoming arguments to their proper home locations. @@ -312,15 +312,17 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { */ for (int i = 0; i < cu_->num_ins; i++) { PromotionMap* v_map = &promotion_map_[start_vreg + i]; - if (i < num_arg_regs) { + int reg = GetArgMappingToPhysicalReg(i); + + if (reg != INVALID_REG) { // If arriving in register bool need_flush = true; RegLocation* t_loc = &ArgLocs[i]; if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) { - OpRegCopy(v_map->core_reg, TargetReg(arg_regs[i])); + OpRegCopy(v_map->core_reg, reg); need_flush = false; } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) { - OpRegCopy(v_map->FpReg, TargetReg(arg_regs[i])); + OpRegCopy(v_map->FpReg, reg); need_flush = false; } else { need_flush = true; @@ -350,8 +352,7 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { } } if (need_flush) { - StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), - TargetReg(arg_regs[i]), kWord); + StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kWord); } } else { // If arriving in frame & promoted diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index 2e385a380a..a663519b82 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -23,9 +23,10 @@ namespace art { 
-void MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, +bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) { - // TODO + // TODO + return false; } /* @@ -345,4 +346,8 @@ void MipsMir2Lir::GenExitSequence() { OpReg(kOpBx, r_RA); } +void MipsMir2Lir::GenSpecialExitSequence() { + OpReg(kOpBx, r_RA); +} + } // namespace art diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h index 11b8f83058..dad8a3b492 100644 --- a/compiler/dex/quick/mips/codegen_mips.h +++ b/compiler/dex/quick/mips/codegen_mips.h @@ -52,6 +52,7 @@ class MipsMir2Lir : public Mir2Lir { int AllocTypedTempPair(bool fp_hint, int reg_class); int S2d(int low_reg, int high_reg); int TargetReg(SpecialTargetRegister reg); + int GetArgMappingToPhysicalReg(int arg_num); RegLocation GetReturnAlt(); RegLocation GetReturnWideAlt(); RegLocation LocCReturn(); @@ -121,6 +122,7 @@ class MipsMir2Lir : public Mir2Lir { void GenDivZeroCheck(int reg_lo, int reg_hi); void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method); void GenExitSequence(); + void GenSpecialExitSequence(); void GenFillArrayData(uint32_t table_offset, RegLocation rl_src); void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double); void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir); @@ -133,7 +135,7 @@ class MipsMir2Lir : public Mir2Lir { void GenNegFloat(RegLocation rl_dest, RegLocation rl_src); void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src); void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src); - void GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special); + bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special); // Required for target - single operation generators. 
LIR* OpUnconditionalBranch(LIR* target); diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc index b744adcd97..224e8f21f2 100644 --- a/compiler/dex/quick/mips/target_mips.cc +++ b/compiler/dex/quick/mips/target_mips.cc @@ -86,6 +86,20 @@ int MipsMir2Lir::TargetReg(SpecialTargetRegister reg) { return res; } +int MipsMir2Lir::GetArgMappingToPhysicalReg(int arg_num) { + // For the 32-bit internal ABI, the first 3 arguments are passed in registers. + switch (arg_num) { + case 0: + return rMIPS_ARG1; + case 1: + return rMIPS_ARG2; + case 2: + return rMIPS_ARG3; + default: + return INVALID_REG; + } +} + // Create a double from a pair of singles. int MipsMir2Lir::S2d(int low_reg, int high_reg) { return MIPS_S2D(low_reg, high_reg); diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index ae54fb8287..8c2ed3667b 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -16,12 +16,244 @@ #include "dex/compiler_internals.h" #include "dex/dataflow_iterator-inl.h" +#include "dex/quick/dex_file_method_inliner.h" #include "mir_to_lir-inl.h" #include "object_utils.h" #include "thread-inl.h" namespace art { +void Mir2Lir::LockArg(int in_position, bool wide) { + int reg_arg_low = GetArgMappingToPhysicalReg(in_position); + int reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) : INVALID_REG; + + if (reg_arg_low != INVALID_REG) { + LockTemp(reg_arg_low); + } + if (reg_arg_high != INVALID_REG && reg_arg_low != reg_arg_high) { + LockTemp(reg_arg_high); + } +} + +int Mir2Lir::LoadArg(int in_position, bool wide) { + int reg_arg_low = GetArgMappingToPhysicalReg(in_position); + int reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) : INVALID_REG; + + int offset = StackVisitor::GetOutVROffset(in_position); + if (cu_->instruction_set == kX86) { + /* + * When doing a call for x86, it moves the stack pointer in order to push return. 
+ * Thus, we add another 4 bytes to figure out the out of caller (in of callee). + * TODO: This needs revisited for 64-bit. + */ + offset += sizeof(uint32_t); + } + + // If the VR is wide and there is no register for high part, we need to load it. + if (wide && reg_arg_high == INVALID_REG) { + // If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg. + if (reg_arg_low == INVALID_REG) { + int new_regs = AllocTypedTempPair(false, kAnyReg); + DECODE_REG_PAIR(new_regs, reg_arg_low, reg_arg_high); + LoadBaseDispWide(TargetReg(kSp), offset, reg_arg_low, reg_arg_high, INVALID_SREG); + } else { + reg_arg_high = AllocTemp(); + int offset_high = offset + sizeof(uint32_t); + LoadWordDisp(TargetReg(kSp), offset_high, reg_arg_high); + } + } + + // If the low part is not in a register yet, we need to load it. + if (reg_arg_low == INVALID_REG) { + reg_arg_low = AllocTemp(); + LoadWordDisp(TargetReg(kSp), offset, reg_arg_low); + } + + if (wide) { + return ENCODE_REG_PAIR(reg_arg_low, reg_arg_high); + } else { + return reg_arg_low; + } +} + +void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) { + int offset = StackVisitor::GetOutVROffset(in_position); + if (cu_->instruction_set == kX86) { + /* + * When doing a call for x86, it moves the stack pointer in order to push return. + * Thus, we add another 4 bytes to figure out the out of caller (in of callee). + * TODO: This needs revisited for 64-bit. 
+ */ + offset += sizeof(uint32_t); + } + + if (!rl_dest.wide) { + int reg = GetArgMappingToPhysicalReg(in_position); + if (reg != INVALID_REG) { + OpRegCopy(rl_dest.low_reg, reg); + } else { + LoadWordDisp(TargetReg(kSp), offset, rl_dest.low_reg); + } + } else { + int reg_arg_low = GetArgMappingToPhysicalReg(in_position); + int reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1); + + if (reg_arg_low != INVALID_REG && reg_arg_high != INVALID_REG) { + OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg, reg_arg_low, reg_arg_high); + } else if (reg_arg_low != INVALID_REG && reg_arg_high == INVALID_REG) { + OpRegCopy(rl_dest.low_reg, reg_arg_low); + int offset_high = offset + sizeof(uint32_t); + LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.high_reg); + } else if (reg_arg_low == INVALID_REG && reg_arg_high != INVALID_REG) { + OpRegCopy(rl_dest.high_reg, reg_arg_high); + LoadWordDisp(TargetReg(kSp), offset, rl_dest.low_reg); + } else { + LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG); + } + } +} + +bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) { + // FastInstance() already checked by DexFileMethodInliner. + const InlineIGetIPutData& data = special.d.ifield_data; + if (data.method_is_static || data.object_arg != 0) { + // The object is not "this" and has to be null-checked. + return false; + } + + DCHECK_NE(data.op_size, kDouble); // The inliner doesn't distinguish kDouble, uses kLong. + bool wide = (data.op_size == kLong); + bool double_or_float = cu_->shorty[0] == 'F' || cu_->shorty[0] == 'D'; + + // Point of no return - no aborts after this + GenPrintLabel(mir); + LockArg(data.object_arg); + RegLocation rl_dest = wide ? 
GetReturnWide(double_or_float) : GetReturn(double_or_float); + int reg_obj = LoadArg(data.object_arg); + if (wide) { + LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG); + } else { + LoadBaseDisp(reg_obj, data.field_offset, rl_dest.low_reg, kWord, INVALID_SREG); + } + if (data.is_volatile) { + GenMemBarrier(kLoadLoad); + } + return true; +} + +bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) { + // FastInstance() already checked by DexFileMethodInliner. + const InlineIGetIPutData& data = special.d.ifield_data; + if (data.method_is_static || data.object_arg != 0) { + // The object is not "this" and has to be null-checked. + return false; + } + + DCHECK_NE(data.op_size, kDouble); // The inliner doesn't distinguish kDouble, uses kLong. + bool wide = (data.op_size == kLong); + + // Point of no return - no aborts after this + GenPrintLabel(mir); + LockArg(data.object_arg); + LockArg(data.src_arg, wide); + int reg_obj = LoadArg(data.object_arg); + int reg_src = LoadArg(data.src_arg, wide); + if (data.is_volatile) { + GenMemBarrier(kStoreStore); + } + if (wide) { + int low_reg, high_reg; + DECODE_REG_PAIR(reg_src, low_reg, high_reg); + StoreBaseDispWide(reg_obj, data.field_offset, low_reg, high_reg); + } else { + StoreBaseDisp(reg_obj, data.field_offset, reg_src, kWord); + } + if (data.is_volatile) { + GenMemBarrier(kLoadLoad); + } + if (data.is_object) { + MarkGCCard(reg_src, reg_obj); + } + return true; +} + +bool Mir2Lir::GenSpecialIdentity(MIR* mir, const InlineMethod& special) { + const InlineReturnArgData& data = special.d.return_data; + DCHECK_NE(data.op_size, kDouble); // The inliner doesn't distinguish kDouble, uses kLong. + bool wide = (data.op_size == kLong); + bool double_or_float = cu_->shorty[0] == 'F' || cu_->shorty[0] == 'D'; + + // Point of no return - no aborts after this + GenPrintLabel(mir); + LockArg(data.arg, wide); + RegLocation rl_dest = wide ? 
GetReturnWide(double_or_float) : GetReturn(double_or_float); + LoadArgDirect(data.arg, rl_dest); + return true; +} + +/* + * Special-case code generation for simple non-throwing leaf methods. + */ +bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) { + DCHECK(special.flags & kInlineSpecial); + current_dalvik_offset_ = mir->offset; + MIR* return_mir = nullptr; + bool successful = false; + + switch (special.opcode) { + case kInlineOpNop: + successful = true; + DCHECK_EQ(mir->dalvikInsn.opcode, Instruction::RETURN_VOID); + return_mir = mir; + break; + case kInlineOpNonWideConst: { + successful = true; + RegLocation rl_dest = GetReturn(cu_->shorty[0] == 'F'); + GenPrintLabel(mir); + LoadConstant(rl_dest.low_reg, static_cast<int>(special.d.data)); + return_mir = mir_graph_->GetNextUnconditionalMir(bb, mir); + break; + } + case kInlineOpReturnArg: + successful = GenSpecialIdentity(mir, special); + return_mir = mir; + break; + case kInlineOpIGet: + successful = GenSpecialIGet(mir, special); + return_mir = mir_graph_->GetNextUnconditionalMir(bb, mir); + break; + case kInlineOpIPut: + successful = GenSpecialIPut(mir, special); + return_mir = mir_graph_->GetNextUnconditionalMir(bb, mir); + break; + default: + break; + } + + if (successful) { + // Handle verbosity for return MIR. + if (return_mir != nullptr) { + current_dalvik_offset_ = return_mir->offset; + // Not handling special identity case because it already generated code as part + // of the return. The label should have been added before any code was generated. + if (special.opcode != kInlineOpReturnArg) { + GenPrintLabel(return_mir); + } + } + GenSpecialExitSequence(); + + core_spill_mask_ = 0; + num_core_spills_ = 0; + fp_spill_mask_ = 0; + num_fp_spills_ = 0; + frame_size_ = 0; + core_vmap_table_.clear(); + fp_vmap_table_.clear(); + } + + return successful; +} + /* * Target-independent code generation. 
Use only high-level * load/store utilities here, or target-dependent genXX() handlers @@ -693,6 +925,14 @@ void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) { } } +void Mir2Lir::GenPrintLabel(MIR* mir) { + // Mark the beginning of a Dalvik instruction for line tracking. + if (cu_->verbose) { + char* inst_str = mir_graph_->GetDalvikDisassembly(mir); + MarkBoundary(mir->offset, inst_str); + } +} + // Handle the content in each basic block. bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) { if (bb->block_type == kDead) return false; @@ -745,11 +985,8 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) { current_dalvik_offset_ = mir->offset; int opcode = mir->dalvikInsn.opcode; - // Mark the beginning of a Dalvik instruction for line tracking. - if (cu_->verbose) { - char* inst_str = mir_graph_->GetDalvikDisassembly(mir); - MarkBoundary(mir->offset, inst_str); - } + GenPrintLabel(mir); + // Remember the first LIR for this block. if (head_lir == NULL) { head_lir = &block_label_list_[bb->id]; @@ -786,7 +1023,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) { return false; } -void Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) { +bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) { cu_->NewTimingSplit("SpecialMIR2LIR"); // Find the first DalvikByteCode block. 
int num_reachable_blocks = mir_graph_->GetNumReachableBlocks(); @@ -800,7 +1037,7 @@ void Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) { } } if (bb == NULL) { - return; + return false; } DCHECK_EQ(bb->start_offset, 0); DCHECK(bb->first_mir_insn != NULL); @@ -813,7 +1050,7 @@ void Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) { ResetDefTracking(); ClobberAllRegs(); - GenSpecialCase(bb, mir, special); + return GenSpecialCase(bb, mir, special); } void Mir2Lir::MethodMIR2LIR() { diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index 5d4439faac..729aaeee46 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -185,6 +185,13 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, #define ENCODE_MEM (ENCODE_DALVIK_REG | ENCODE_LITERAL | \ ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS) +#define ENCODE_REG_PAIR(low_reg, high_reg) ((low_reg & 0xff) | ((high_reg & 0xff) << 8)) +#define DECODE_REG_PAIR(both_regs, low_reg, high_reg) \ + do { \ + low_reg = both_regs & 0xff; \ + high_reg = (both_regs >> 8) & 0xff; \ + } while (false) + // Mask to denote sreg as the start of a double. Must not interfere with low 16 bits. 
#define STARTING_DOUBLE_SREG 0x10000 @@ -738,7 +745,7 @@ class Mir2Lir : public Backend { void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list); void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir); bool MethodBlockCodeGen(BasicBlock* bb); - void SpecialMIR2LIR(const InlineMethod& special); + bool SpecialMIR2LIR(const InlineMethod& special); void MethodMIR2LIR(); /* @@ -809,6 +816,7 @@ class Mir2Lir : public Backend { virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0; virtual int S2d(int low_reg, int high_reg) = 0; virtual int TargetReg(SpecialTargetRegister reg) = 0; + virtual int GetArgMappingToPhysicalReg(int arg_num) = 0; virtual RegLocation GetReturnAlt() = 0; virtual RegLocation GetReturnWideAlt() = 0; virtual RegLocation LocCReturn() = 0; @@ -949,8 +957,6 @@ class Mir2Lir : public Backend { RegLocation rl_src) = 0; virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0; - virtual void GenSpecialCase(BasicBlock* bb, MIR* mir, - const InlineMethod& special) = 0; virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index, RegLocation rl_dest, int scale) = 0; virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, @@ -1084,6 +1090,30 @@ class Mir2Lir : public Backend { uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src); + /** + * @brief Used to insert marker that can be used to associate MIR with LIR. + * @details Only inserts marker if verbosity is enabled. + * @param mir The mir that is currently being generated. + */ + void GenPrintLabel(MIR* mir); + + /** + * @brief Used to generate return sequence when there is no frame. + * @details Assumes that the return registers have already been populated. + */ + virtual void GenSpecialExitSequence() = 0; + + /** + * @brief Used to generate code for special methods that are known to be + * small enough to work in frameless mode. + * @param bb The basic block of the first MIR. 
+ * @param mir The first MIR of the special method. + * @param special Information about the special method. + * @return Returns whether or not this was handled successfully. Returns false + * if caller should punt to normal MIR2LIR conversion. + */ + virtual bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special); + private: void ClobberBody(RegisterInfo* p); void ResetDefBody(RegisterInfo* p) { @@ -1095,6 +1125,55 @@ class Mir2Lir : public Backend { current_dalvik_offset_ = dexpc; } + /** + * @brief Used to lock register if argument at in_position was passed that way. + * @details Does nothing if the argument is passed via stack. + * @param in_position The argument number whose register to lock. + * @param wide Whether the argument is wide. + */ + void LockArg(int in_position, bool wide = false); + + /** + * @brief Used to load VR argument to a physical register. + * @details The load is only done if the argument is not already in physical register. + * LockArg must have been previously called. + * @param in_position The argument number to load. + * @param wide Whether the argument is 64-bit or not. + * @return Returns the register (or register pair) for the loaded argument. + */ + int LoadArg(int in_position, bool wide = false); + + /** + * @brief Used to load a VR argument directly to a specified register location. + * @param in_position The argument number to place in register. + * @param rl_dest The register location where to place argument. + */ + void LoadArgDirect(int in_position, RegLocation rl_dest); + + /** + * @brief Used to generate LIR for special getter method. + * @param mir The mir that represents the iget. + * @param special Information about the special getter method. + * @return Returns whether LIR was successfully generated. + */ + bool GenSpecialIGet(MIR* mir, const InlineMethod& special); + + /** + * @brief Used to generate LIR for special setter method. + * @param mir The mir that represents the iput. 
+ * @param special Information about the special setter method. + * @return Returns whether LIR was successfully generated. + */ + bool GenSpecialIPut(MIR* mir, const InlineMethod& special); + + /** + * @brief Used to generate LIR for special return-args method. + * @param mir The mir that represents the return of argument. + * @param special Information about the special return-args method. + * @return Returns whether LIR was successfully generated. + */ + bool GenSpecialIdentity(MIR* mir, const InlineMethod& special); + public: // TODO: add accessors for these. diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc index 7f646e0d25..0613cdff7a 100644 --- a/compiler/dex/quick/x86/call_x86.cc +++ b/compiler/dex/quick/x86/call_x86.cc @@ -22,11 +22,6 @@ namespace art { -void X86Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, - const InlineMethod& special) { - // TODO -} - /* * The sparse table in the literal pool is an array of <key,displacement> * pairs. @@ -255,4 +250,8 @@ void X86Mir2Lir::GenExitSequence() { NewLIR0(kX86Ret); } +void X86Mir2Lir::GenSpecialExitSequence() { + NewLIR0(kX86Ret); +} + } // namespace art diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h index 70263d8d07..6100a1d158 100644 --- a/compiler/dex/quick/x86/codegen_x86.h +++ b/compiler/dex/quick/x86/codegen_x86.h @@ -52,6 +52,7 @@ class X86Mir2Lir : public Mir2Lir { int AllocTypedTempPair(bool fp_hint, int reg_class); int S2d(int low_reg, int high_reg); int TargetReg(SpecialTargetRegister reg); + int GetArgMappingToPhysicalReg(int arg_num); RegLocation GetReturnAlt(); RegLocation GetReturnWideAlt(); RegLocation LocCReturn(); @@ -123,6 +124,7 @@ class X86Mir2Lir : public Mir2Lir { void GenDivZeroCheck(int reg_lo, int reg_hi); void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method); void GenExitSequence(); + void GenSpecialExitSequence(); void GenFillArrayData(DexOffset table_offset, RegLocation rl_src); void 
GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double); void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir); @@ -135,7 +137,7 @@ class X86Mir2Lir : public Mir2Lir { void GenNegFloat(RegLocation rl_dest, RegLocation rl_src); void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src); void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src); - void GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special); + /* * @brief Generate a two address long operation with a constant value * @param rl_dest location of result diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index f4ae18fe47..5f04b7d152 100644 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -167,7 +167,14 @@ void X86Mir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, NewLIR2(kX86MovdrxRR, dest_hi, src_lo); } else { // Handle overlap - if (src_hi == dest_lo) { + if (src_hi == dest_lo && src_lo == dest_hi) { + // Deal with cycles. + int temp_reg = AllocTemp(); + OpRegCopy(temp_reg, dest_hi); + OpRegCopy(dest_hi, dest_lo); + OpRegCopy(dest_lo, temp_reg); + FreeTemp(temp_reg); + } else if (src_hi == dest_lo) { OpRegCopy(dest_hi, src_hi); OpRegCopy(dest_lo, src_lo); } else { @@ -978,6 +985,10 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation } // Nope. Do it the hard way + // Check for V*V. We can eliminate a multiply in that case, as 2L*1H == 2H*1L. + bool is_square = mir_graph_->SRegToVReg(rl_src1.s_reg_low) == + mir_graph_->SRegToVReg(rl_src2.s_reg_low); + FlushAllRegs(); LockCallTemps(); // Prepare for explicit register usage. 
rl_src1 = UpdateLocWide(rl_src1); @@ -995,36 +1006,52 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation kWord, GetSRegHi(rl_src1.s_reg_low)); } - // EAX <- 2H - if (src2_in_reg) { - NewLIR2(kX86Mov32RR, r0, rl_src2.high_reg); - } else { - LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, r0, - kWord, GetSRegHi(rl_src2.s_reg_low)); - } + if (is_square) { + // Take advantage of the fact that the values are the same. + // ECX <- ECX * 2L (1H * 2L) + if (src2_in_reg) { + NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg); + } else { + int displacement = SRegOffset(rl_src2.s_reg_low); + LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET); + AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2, + true /* is_load */, true /* is_64bit */); + } - // EAX <- EAX * 1L (2H * 1L) - if (src1_in_reg) { - NewLIR2(kX86Imul32RR, r0, rl_src1.low_reg); + // ECX <- 2*ECX (2H * 1L) + (1H * 2L) + NewLIR2(kX86Add32RR, r1, r1); } else { - int displacement = SRegOffset(rl_src1.s_reg_low); - LIR *m = NewLIR3(kX86Imul32RM, r0, rX86_SP, displacement + LOWORD_OFFSET); - AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2, - true /* is_load */, true /* is_64bit */); - } + // EAX <- 2H + if (src2_in_reg) { + NewLIR2(kX86Mov32RR, r0, rl_src2.high_reg); + } else { + LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, r0, + kWord, GetSRegHi(rl_src2.s_reg_low)); + } - // ECX <- ECX * 2L (1H * 2L) - if (src2_in_reg) { - NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg); - } else { - int displacement = SRegOffset(rl_src2.s_reg_low); - LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET); - AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2, - true /* is_load */, true /* is_64bit */); - } + // EAX <- EAX * 1L (2H * 1L) + if (src1_in_reg) { + NewLIR2(kX86Imul32RR, r0, rl_src1.low_reg); + } else { + int displacement = SRegOffset(rl_src1.s_reg_low); + LIR *m = 
NewLIR3(kX86Imul32RM, r0, rX86_SP, displacement + LOWORD_OFFSET); + AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2, + true /* is_load */, true /* is_64bit */); + } - // ECX <- ECX + EAX (2H * 1L) + (1H * 2L) - NewLIR2(kX86Add32RR, r1, r0); + // ECX <- ECX * 2L (1H * 2L) + if (src2_in_reg) { + NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg); + } else { + int displacement = SRegOffset(rl_src2.s_reg_low); + LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET); + AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2, + true /* is_load */, true /* is_64bit */); + } + + // ECX <- ECX + EAX (2H * 1L) + (1H * 2L) + NewLIR2(kX86Add32RR, r1, r0); + } // EAX <- 2L if (src2_in_reg) { diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc index 1893ffc044..8e04e64a42 100644 --- a/compiler/dex/quick/x86/target_x86.cc +++ b/compiler/dex/quick/x86/target_x86.cc @@ -92,6 +92,21 @@ int X86Mir2Lir::TargetReg(SpecialTargetRegister reg) { return res; } +int X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) { + // For the 32-bit internal ABI, the first 3 arguments are passed in registers. + // TODO: This is not 64-bit compliant and depends on new internal ABI. + switch (arg_num) { + case 0: + return rX86_ARG1; + case 1: + return rX86_ARG2; + case 2: + return rX86_ARG3; + default: + return INVALID_REG; + } +} + // Create a double from a pair of singles. 
int X86Mir2Lir::S2d(int low_reg, int high_reg) { return X86_S2D(low_reg, high_reg); diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 8678ad9294..b46ae2b85d 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -25,6 +25,7 @@ #include "base/stl_util.h" #include "base/timing_logger.h" #include "class_linker.h" +#include "compiler_backend.h" #include "dex_compilation_unit.h" #include "dex_file-inl.h" #include "dex/verification_results.h" @@ -53,12 +54,6 @@ #include "verifier/method_verifier.h" #include "verifier/method_verifier-inl.h" -#if defined(ART_USE_PORTABLE_COMPILER) -#include "elf_writer_mclinker.h" -#else -#include "elf_writer_quick.h" -#endif - namespace art { static double Percentage(size_t x, size_t y) { @@ -288,28 +283,6 @@ class AOTCompilationStats { DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats); }; -extern "C" void ArtInitCompilerContext(art::CompilerDriver& driver); -extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& driver); - -extern "C" void ArtUnInitCompilerContext(art::CompilerDriver& driver); -extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& driver); - -extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver& driver, - const art::DexFile::CodeItem* code_item, - uint32_t access_flags, - art::InvokeType invoke_type, - uint16_t class_def_idx, - uint32_t method_idx, - jobject class_loader, - const art::DexFile& dex_file); -extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver& compiler, - const art::DexFile::CodeItem* code_item, - uint32_t access_flags, - art::InvokeType invoke_type, - uint16_t class_def_idx, - uint32_t method_idx, - jobject class_loader, - const art::DexFile& dex_file); extern "C" art::CompiledMethod* ArtCompileDEX(art::CompilerDriver& compiler, const art::DexFile::CodeItem* code_item, @@ -319,36 +292,20 @@ extern "C" art::CompiledMethod* ArtCompileDEX(art::CompilerDriver& compiler, 
uint32_t method_idx, jobject class_loader, const art::DexFile& dex_file); -#ifdef ART_SEA_IR_MODE -extern "C" art::CompiledMethod* SeaIrCompileMethod(art::CompilerDriver& compiler, - const art::DexFile::CodeItem* code_item, - uint32_t access_flags, - art::InvokeType invoke_type, - uint16_t class_def_idx, - uint32_t method_idx, - jobject class_loader, - const art::DexFile& dex_file); -#endif -extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver& driver, - uint32_t access_flags, uint32_t method_idx, - const art::DexFile& dex_file); - -extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver& compiler, - uint32_t access_flags, uint32_t method_idx, - const art::DexFile& dex_file); extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver, std::string const& filename); CompilerDriver::CompilerDriver(VerificationResults* verification_results, DexFileToMethodInlinerMap* method_inliner_map, - CompilerBackend compiler_backend, InstructionSet instruction_set, + CompilerBackend::Kind compiler_backend_kind, + InstructionSet instruction_set, InstructionSetFeatures instruction_set_features, bool image, DescriptorSet* image_classes, size_t thread_count, bool dump_stats, bool dump_passes, CumulativeLogger* timer) : verification_results_(verification_results), method_inliner_map_(method_inliner_map), - compiler_backend_(compiler_backend), + compiler_backend_(CompilerBackend::Create(compiler_backend_kind)), instruction_set_(instruction_set), instruction_set_features_(instruction_set_features), freezing_constructor_lock_("freezing constructor lock"), @@ -363,9 +320,7 @@ CompilerDriver::CompilerDriver(VerificationResults* verification_results, dump_passes_(dump_passes), timings_logger_(timer), compiler_library_(NULL), - compiler_(NULL), compiler_context_(NULL), - jni_compiler_(NULL), compiler_enable_auto_elf_loading_(NULL), compiler_get_method_code_addr_(NULL), support_boot_image_fixup_(instruction_set != kMips), @@ -376,34 
+331,9 @@ CompilerDriver::CompilerDriver(VerificationResults* verification_results, CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, NULL), "compiler tls key"); - // TODO: more work needed to combine initializations and allow per-method backend selection - typedef void (*InitCompilerContextFn)(CompilerDriver&); - InitCompilerContextFn init_compiler_context; - if (compiler_backend_ == kPortable) { - // Initialize compiler_context_ - init_compiler_context = reinterpret_cast<void (*)(CompilerDriver&)>(ArtInitCompilerContext); - compiler_ = reinterpret_cast<CompilerFn>(ArtCompileMethod); - } else { - init_compiler_context = reinterpret_cast<void (*)(CompilerDriver&)>(ArtInitQuickCompilerContext); - compiler_ = reinterpret_cast<CompilerFn>(ArtQuickCompileMethod); - } - dex_to_dex_compiler_ = reinterpret_cast<DexToDexCompilerFn>(ArtCompileDEX); -#ifdef ART_SEA_IR_MODE - sea_ir_compiler_ = NULL; - if (Runtime::Current()->IsSeaIRMode()) { - sea_ir_compiler_ = reinterpret_cast<CompilerFn>(SeaIrCompileMethod); - } -#endif - - init_compiler_context(*this); - - if (compiler_backend_ == kPortable) { - jni_compiler_ = reinterpret_cast<JniCompilerFn>(ArtLLVMJniCompileMethod); - } else { - jni_compiler_ = reinterpret_cast<JniCompilerFn>(ArtQuickJniCompileMethod); - } + compiler_backend_->Init(*this); CHECK(!Runtime::Current()->IsStarted()); if (!image_) { @@ -450,16 +380,7 @@ CompilerDriver::~CompilerDriver() { STLDeleteElements(&classes_to_patch_); } CHECK_PTHREAD_CALL(pthread_key_delete, (tls_key_), "delete tls key"); - typedef void (*UninitCompilerContextFn)(CompilerDriver&); - UninitCompilerContextFn uninit_compiler_context; - // Uninitialize compiler_context_ - // TODO: rework to combine initialization/uninitialization - if (compiler_backend_ == kPortable) { - uninit_compiler_context = reinterpret_cast<void (*)(CompilerDriver&)>(ArtUnInitCompilerContext); - } else { - uninit_compiler_context = reinterpret_cast<void (*)(CompilerDriver&)>(ArtUnInitQuickCompilerContext); - } - 
uninit_compiler_context(*this); + compiler_backend_->UnInit(*this); } CompilerTls* CompilerDriver::GetTls() { @@ -1017,7 +938,6 @@ bool CompilerDriver::ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put, return false; } DCHECK_GE(field->GetOffset().Int32Value(), 0); - result->method_is_static = method->IsStatic(); result->field_idx = field_idx; result->field_offset = field->GetOffset().Int32Value(); result->is_volatile = field->IsVolatile(); @@ -1154,7 +1074,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType *direct_method = 0; bool use_dex_cache = false; const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot(); - if (compiler_backend_ == kPortable) { + if (compiler_backend_->IsPortable()) { if (sharp_type != kStatic && sharp_type != kDirect) { return; } @@ -1231,23 +1151,13 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType CHECK(!method->IsAbstract()); *type = sharp_type; *direct_method = reinterpret_cast<uintptr_t>(method); - if (compiler_backend_ == kQuick) { - *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()); - } else { - CHECK_EQ(compiler_backend_, kPortable); - *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromPortableCompiledCode()); - } + *direct_code = compiler_backend_->GetEntryPointOf(method); target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile(); target_method->dex_method_index = method->GetDexMethodIndex(); } else if (!must_use_direct_pointers) { // Set the code and rely on the dex cache for the method. 
*type = sharp_type; - if (compiler_backend_ == kQuick) { - *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()); - } else { - CHECK_EQ(compiler_backend_, kPortable); - *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromPortableCompiledCode()); - } + *direct_code = compiler_backend_->GetEntryPointOf(method); } else { // Direct pointers were required but none were available. VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method); @@ -2017,7 +1927,7 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t uint64_t start_ns = NanoTime(); if ((access_flags & kAccNative) != 0) { - compiled_method = (*jni_compiler_)(*this, access_flags, method_idx, dex_file); + compiled_method = compiler_backend_->JniCompile(*this, access_flags, method_idx, dex_file); CHECK(compiled_method != NULL); } else if ((access_flags & kAccAbstract) != 0) { } else { @@ -2025,19 +1935,10 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t bool compile = VerificationResults::IsCandidateForCompilation(method_ref, access_flags); if (compile) { - CompilerFn compiler = compiler_; -#ifdef ART_SEA_IR_MODE - bool use_sea = Runtime::Current()->IsSeaIRMode(); - use_sea = use_sea && - (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci")); - if (use_sea) { - compiler = sea_ir_compiler_; - LOG(INFO) << "Using SEA IR to compile..." << std::endl; - } -#endif // NOTE: if compiler declines to compile this method, it will return NULL. - compiled_method = (*compiler)(*this, code_item, access_flags, invoke_type, class_def_idx, - method_idx, class_loader, dex_file); + compiled_method = compiler_backend_->Compile( + *this, code_item, access_flags, invoke_type, class_def_idx, + method_idx, class_loader, dex_file); } else if (dex_to_dex_compilation_level != kDontDexToDexCompile) { // TODO: add a mode to disable DEX-to-DEX compilation ? 
(*dex_to_dex_compiler_)(*this, code_item, access_flags, @@ -2047,12 +1948,7 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t } } uint64_t duration_ns = NanoTime() - start_ns; -#ifdef ART_USE_PORTABLE_COMPILER - const uint64_t kWarnMilliSeconds = 1000; -#else - const uint64_t kWarnMilliSeconds = 100; -#endif - if (duration_ns > MsToNs(kWarnMilliSeconds)) { + if (duration_ns > MsToNs(compiler_backend_->GetMaximumCompilationTimeBeforeWarning())) { LOG(WARNING) << "Compilation of " << PrettyMethod(method_idx, dex_file) << " took " << PrettyDuration(duration_ns); } @@ -2149,11 +2045,7 @@ bool CompilerDriver::WriteElf(const std::string& android_root, OatWriter& oat_writer, art::File* file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { -#if defined(ART_USE_PORTABLE_COMPILER) - return art::ElfWriterMclinker::Create(file, oat_writer, dex_files, android_root, is_host, *this); -#else - return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host, *this); -#endif + return compiler_backend_->WriteElf(file, oat_writer, dex_files, android_root, is_host, *this); } void CompilerDriver::InstructionSetToLLVMTarget(InstructionSet instruction_set, std::string& target_triple, diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index c4ac9db777..a9e029d510 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -26,6 +26,7 @@ #include "class_reference.h" #include "compiled_class.h" #include "compiled_method.h" +#include "compiler_backend.h" #include "dex_file.h" #include "dex/arena_allocator.h" #include "instruction_set.h" @@ -44,21 +45,15 @@ class MethodVerifier; } // namespace verifier class AOTCompilationStats; -class ParallelCompilationManager; class DexCompilationUnit; class DexFileToMethodInlinerMap; class InlineIGetIPutData; class OatWriter; +class ParallelCompilationManager; class TimingLogger; class VerificationResults; class VerifiedMethod; -enum 
CompilerBackend { - kQuick, - kPortable, - kNoBackend -}; - enum EntryPointCallingConvention { // ABI of invocations to a method's interpreter entry point. kInterpreterAbi, @@ -101,7 +96,8 @@ class CompilerDriver { // classes. explicit CompilerDriver(VerificationResults* verification_results, DexFileToMethodInlinerMap* method_inliner_map, - CompilerBackend compiler_backend, InstructionSet instruction_set, + CompilerBackend::Kind compiler_backend_kind, + InstructionSet instruction_set, InstructionSetFeatures instruction_set_features, bool image, DescriptorSet* image_classes, size_t thread_count, bool dump_stats, bool dump_passes, @@ -133,8 +129,8 @@ class CompilerDriver { return instruction_set_features_; } - CompilerBackend GetCompilerBackend() const { - return compiler_backend_; + CompilerBackend* GetCompilerBackend() const { + return compiler_backend_.get(); } // Are we compiling and creating an image file? @@ -560,7 +556,7 @@ class CompilerDriver { VerificationResults* verification_results_; DexFileToMethodInlinerMap* method_inliner_map_; - CompilerBackend compiler_backend_; + UniquePtr<CompilerBackend> compiler_backend_; const InstructionSet instruction_set_; const InstructionSetFeatures instruction_set_features_; @@ -601,32 +597,16 @@ class CompilerDriver { void* compiler_library_; - typedef CompiledMethod* (*CompilerFn)(CompilerDriver& driver, - const DexFile::CodeItem* code_item, - uint32_t access_flags, InvokeType invoke_type, - uint32_t class_dex_idx, uint32_t method_idx, - jobject class_loader, const DexFile& dex_file); - typedef void (*DexToDexCompilerFn)(CompilerDriver& driver, const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint32_t class_dex_idx, uint32_t method_idx, jobject class_loader, const DexFile& dex_file, DexToDexCompilationLevel dex_to_dex_compilation_level); - CompilerFn compiler_; -#ifdef ART_SEA_IR_MODE - CompilerFn sea_ir_compiler_; -#endif - DexToDexCompilerFn dex_to_dex_compiler_; void* 
compiler_context_; - typedef CompiledMethod* (*JniCompilerFn)(CompilerDriver& driver, - uint32_t access_flags, uint32_t method_idx, - const DexFile& dex_file); - JniCompilerFn jni_compiler_; - pthread_key_t tls_key_; // Arena pool used by the compiler. diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc index 6563eb5475..a5acd2a332 100644 --- a/compiler/llvm/compiler_llvm.cc +++ b/compiler/llvm/compiler_llvm.cc @@ -39,12 +39,12 @@ namespace art { void CompileOneMethod(CompilerDriver& driver, - const CompilerBackend compilerBackend, + CompilerBackend* compilerBackend, const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, jobject class_loader, const DexFile& dex_file, - llvm::LlvmCompilationUnit* llvm_info); + void* llvm_info); } namespace llvm { @@ -142,7 +142,7 @@ CompileDexMethod(DexCompilationUnit* dex_compilation_unit, InvokeType invoke_typ cunit->SetCompilerDriver(compiler_driver_); // TODO: consolidate ArtCompileMethods CompileOneMethod(*compiler_driver_, - kPortable, + compiler_driver_->GetCompilerBackend(), dex_compilation_unit->GetCodeItem(), dex_compilation_unit->GetAccessFlags(), invoke_type, diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index f6b511c4a4..10d2c5c10f 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -15,6 +15,7 @@ */ #include "compiler/oat_writer.h" +#include "compiler/compiler_backend.h" #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" @@ -84,12 +85,14 @@ TEST_F(OatTest, WriteRead) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); // TODO: make selectable. - CompilerBackend compiler_backend = kUsePortableCompiler ? kPortable : kQuick; + CompilerBackend::Kind compiler_backend = kUsePortableCompiler + ? CompilerBackend::kPortable + : CompilerBackend::kQuick; InstructionSet insn_set = kIsTargetBuild ? 
kThumb2 : kX86; InstructionSetFeatures insn_features; verification_results_.reset(new VerificationResults); - method_inliner_map_.reset(compiler_backend == kQuick ? new DexFileToMethodInlinerMap : nullptr); + method_inliner_map_.reset(new DexFileToMethodInlinerMap); callbacks_.Reset(verification_results_.get(), method_inliner_map_.get()); timer_.reset(new CumulativeLogger("Compilation times")); compiler_driver_.reset(new CompilerDriver(verification_results_.get(), diff --git a/compiler/sea_ir/frontend.cc b/compiler/sea_ir/frontend.cc index 6c779c8d10..b57007bbb6 100644 --- a/compiler/sea_ir/frontend.cc +++ b/compiler/sea_ir/frontend.cc @@ -38,15 +38,12 @@ namespace art { static CompiledMethod* CompileMethodWithSeaIr(CompilerDriver& compiler, - const CompilerBackend compiler_backend, + CompilerBackend* compiler_backend, const DexFile::CodeItem* code_item, uint32_t method_access_flags, InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, - jobject class_loader, const DexFile& dex_file -#if defined(ART_USE_PORTABLE_COMPILER) - , llvm::LlvmCompilationUnit* llvm_compilation_unit -#endif -) { + jobject class_loader, const DexFile& dex_file, + void* llvm_compilation_unit) { LOG(INFO) << "Compiling " << PrettyMethod(method_idx, dex_file) << "."; sea_ir::SeaGraph* ir_graph = sea_ir::SeaGraph::GetGraph(dex_file); std::string symbol = "dex_" + MangleForJni(PrettyMethod(method_idx, dex_file)); @@ -65,7 +62,7 @@ static CompiledMethod* CompileMethodWithSeaIr(CompilerDriver& compiler, } CompiledMethod* SeaIrCompileOneMethod(CompilerDriver& compiler, - const CompilerBackend backend, + CompilerBackend* backend, const DexFile::CodeItem* code_item, uint32_t method_access_flags, InvokeType invoke_type, @@ -73,13 +70,9 @@ CompiledMethod* SeaIrCompileOneMethod(CompilerDriver& compiler, uint32_t method_idx, jobject class_loader, const DexFile& dex_file, - llvm::LlvmCompilationUnit* llvm_compilation_unit) { + void* llvm_compilation_unit) { return 
CompileMethodWithSeaIr(compiler, backend, code_item, method_access_flags, invoke_type, - class_def_idx, method_idx, class_loader, dex_file -#if defined(ART_USE_PORTABLE_COMPILER) - , llvm_compilation_unit -#endif - ); // NOLINT + class_def_idx, method_idx, class_loader, dex_file, llvm_compilation_unit); } extern "C" art::CompiledMethod* @@ -90,7 +83,7 @@ extern "C" art::CompiledMethod* const art::DexFile& dex_file) { // TODO: Check method fingerprint here to determine appropriate backend type. // Until then, use build default - art::CompilerBackend backend = compiler.GetCompilerBackend(); + art::CompilerBackend* backend = compiler.GetCompilerBackend(); return art::SeaIrCompileOneMethod(compiler, backend, code_item, method_access_flags, invoke_type, class_def_idx, method_idx, class_loader, dex_file, NULL /* use thread llvm_info */); |