| Field | Value |
|---|---|
| author | 2016-02-05 15:47:16 +0000 |
| committer | 2016-02-05 15:47:16 +0000 |
| commit | 11aa3f5c5efd5ce8e761dfd76378a91d9f8dc42b |
| tree | 746ab81bbc05e03fb918ca32cc8d5b296d083113 /compiler |
| parent | 7ac9aeb501520089334c7b9e0deca1e089945bc8 |
| parent | bd89a5c556324062b7d841843b039392e84cfaf4 |
Merge "Revert "Implement on-stack replacement for arm/arm64/x86/x86_64.""
Diffstat (limited to 'compiler')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | compiler/compiler.h | 3 |
| -rw-r--r-- | compiler/jit/jit_compiler.cc | 11 |
| -rw-r--r-- | compiler/jit/jit_compiler.h | 2 |
| -rw-r--r-- | compiler/optimizing/builder.cc | 68 |
| -rw-r--r-- | compiler/optimizing/builder.h | 1 |
| -rw-r--r-- | compiler/optimizing/code_generator.cc | 106 |
| -rw-r--r-- | compiler/optimizing/code_generator.h | 2 |
| -rw-r--r-- | compiler/optimizing/inliner.cc | 1 |
| -rw-r--r-- | compiler/optimizing/nodes.cc | 4 |
| -rw-r--r-- | compiler/optimizing/nodes.h | 79 |
| -rw-r--r-- | compiler/optimizing/optimizing_compiler.cc | 42 |
11 files changed, 93 insertions, 226 deletions
```diff
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 97c60de8c0..3a9ce1bc0e 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -64,8 +64,7 @@ class Compiler {
   virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED,
                           jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
-                          ArtMethod* method ATTRIBUTE_UNUSED,
-                          bool osr ATTRIBUTE_UNUSED)
+                          ArtMethod* method ATTRIBUTE_UNUSED)
       SHARED_REQUIRES(Locks::mutator_lock_) {
     return false;
   }
```

```diff
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 68f4783741..67747586c4 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -60,12 +60,11 @@ extern "C" void jit_unload(void* handle) {
   delete reinterpret_cast<JitCompiler*>(handle);
 }
 
-extern "C" bool jit_compile_method(
-    void* handle, ArtMethod* method, Thread* self, bool osr)
+extern "C" bool jit_compile_method(void* handle, ArtMethod* method, Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
   DCHECK(jit_compiler != nullptr);
-  return jit_compiler->CompileMethod(self, method, osr);
+  return jit_compiler->CompileMethod(self, method);
 }
 
 extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
@@ -202,7 +201,7 @@ JitCompiler::~JitCompiler() {
   }
 }
 
-bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
+bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) {
   TimingLogger logger("JIT compiler timing logger", true, VLOG_IS_ON(jit));
   const uint64_t start_time = NanoTime();
   StackHandleScope<2> hs(self);
@@ -224,8 +223,8 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
   // of that proxy method, as the compiler does not expect a proxy method.
   ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*));
   JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
-  success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method_to_compile, osr);
-  if (success && (perf_file_ != nullptr)) {
+  success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method_to_compile);
+  if (success && perf_file_ != nullptr) {
     const void* ptr = method_to_compile->GetEntryPointFromQuickCompiledCode();
     std::ostringstream stream;
     stream << std::hex
```

```diff
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 5294d0ee35..037a18ac7a 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -37,7 +37,7 @@ class JitCompiler {
  public:
   static JitCompiler* Create();
   virtual ~JitCompiler();
-  bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
+  bool CompileMethod(Thread* self, ArtMethod* method)
       SHARED_REQUIRES(Locks::mutator_lock_);
   CompilerCallbacks* GetCompilerCallbacks() const;
   size_t GetTotalCompileTime() const {
```

```diff
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 8d77daf183..c7430e7eb6 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -72,6 +72,74 @@ class Temporaries : public ValueObject {
   size_t index_;
 };
 
+class SwitchTable : public ValueObject {
+ public:
+  SwitchTable(const Instruction& instruction, uint32_t dex_pc, bool sparse)
+      : instruction_(instruction), dex_pc_(dex_pc), sparse_(sparse) {
+    int32_t table_offset = instruction.VRegB_31t();
+    const uint16_t* table = reinterpret_cast<const uint16_t*>(&instruction) + table_offset;
+    if (sparse) {
+      CHECK_EQ(table[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
+    } else {
+      CHECK_EQ(table[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
+    }
+    num_entries_ = table[1];
+    values_ = reinterpret_cast<const int32_t*>(&table[2]);
+  }
+
+  uint16_t GetNumEntries() const {
+    return num_entries_;
+  }
+
+  void CheckIndex(size_t index) const {
+    if (sparse_) {
+      // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
+      DCHECK_LT(index, 2 * static_cast<size_t>(num_entries_));
+    } else {
+      // In a packed table, we have the starting key and num_entries_ values.
+      DCHECK_LT(index, 1 + static_cast<size_t>(num_entries_));
+    }
+  }
+
+  int32_t GetEntryAt(size_t index) const {
+    CheckIndex(index);
+    return values_[index];
+  }
+
+  uint32_t GetDexPcForIndex(size_t index) const {
+    CheckIndex(index);
+    return dex_pc_ +
+        (reinterpret_cast<const int16_t*>(values_ + index) -
+         reinterpret_cast<const int16_t*>(&instruction_));
+  }
+
+  // Index of the first value in the table.
+  size_t GetFirstValueIndex() const {
+    if (sparse_) {
+      // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
+      return num_entries_;
+    } else {
+      // In a packed table, we have the starting key and num_entries_ values.
+      return 1;
+    }
+  }
+
+ private:
+  const Instruction& instruction_;
+  const uint32_t dex_pc_;
+
+  // Whether this is a sparse-switch table (or a packed-switch one).
+  const bool sparse_;
+
+  // This can't be const as it needs to be computed off of the given instruction, and complicated
+  // expressions in the initializer list seemed very ugly.
+  uint16_t num_entries_;
+
+  const int32_t* values_;
+
+  DISALLOW_COPY_AND_ASSIGN(SwitchTable);
+};
+
 void HGraphBuilder::InitializeLocals(uint16_t count) {
   graph_->SetNumberOfVRegs(count);
   locals_.resize(count);
```
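For readers unfamiliar with the payload that `SwitchTable` (reinstated in `builder.cc` above) walks: a dex `packed-switch`/`sparse-switch` instruction's `VRegB_31t()` operand points at a payload of 16-bit code units, and the class reads the signature, entry count, and 32-bit values out of that buffer. The snippet below is a minimal standalone sketch of that layout and of the `GetFirstValueIndex()` rule, not ART code; the payload contents are made up, and the signature constants (0x0100 packed, 0x0200 sparse) mirror the dex format.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Reads the index-th 32-bit value from a switch payload. Values start at
// table[2] and each occupies two consecutive 16-bit code units.
static int32_t ReadInt32(const uint16_t* table, size_t index) {
  int32_t value;
  std::memcpy(&value, &table[2 + 2 * index], sizeof(value));
  return value;
}

int main() {
  // Hypothetical packed-switch payload:
  //   [signature, num_entries, first_key (2 units), target0 (2 units), target1, target2]
  // Signature 0x0100 = packed-switch, 0x0200 = sparse-switch.
  std::vector<uint16_t> payload = {0x0100, 3, 10, 0, 0x20, 0, 0x28, 0, 0x30, 0};

  bool sparse = (payload[0] == 0x0200);
  uint16_t num_entries = payload[1];
  // Same rule as SwitchTable::GetFirstValueIndex(): a sparse table stores
  // num_entries keys before its targets, a packed table stores one start key.
  size_t first_value_index = sparse ? num_entries : 1;

  int32_t first_key = ReadInt32(payload.data(), 0);
  for (size_t i = 0; i < num_entries; ++i) {
    std::printf("case %d -> branch offset 0x%x\n",
                static_cast<int>(first_key) + static_cast<int>(i),
                static_cast<unsigned>(ReadInt32(payload.data(), first_value_index + i)));
  }
  return 0;
}
```

For a sparse payload the same loop would read the key at index `i` and the target at index `num_entries + i`, which is why `CheckIndex()` above allows indexes up to `2 * num_entries_` in the sparse case.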
```diff
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 93e17d6422..1d604e7135 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -30,6 +30,7 @@
 namespace art {
 
 class Instruction;
+class SwitchTable;
 
 class HGraphBuilder : public ValueObject {
  public:
```

```diff
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ffec382547..a3bbfdbd27 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -629,72 +629,8 @@ size_t CodeGenerator::ComputeStackMapsSize() {
   return stack_map_stream_.PrepareForFillIn();
 }
 
-static void CheckCovers(uint32_t dex_pc,
-                        const HGraph& graph,
-                        const CodeInfo& code_info,
-                        const ArenaVector<HSuspendCheck*>& loop_headers,
-                        ArenaVector<size_t>* covered) {
-  StackMapEncoding encoding = code_info.ExtractEncoding();
-  for (size_t i = 0; i < loop_headers.size(); ++i) {
-    if (loop_headers[i]->GetDexPc() == dex_pc) {
-      if (graph.IsCompilingOsr()) {
-        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc, encoding).IsValid());
-      }
-      ++(*covered)[i];
-    }
-  }
-}
-
-// Debug helper to ensure loop entries in compiled code are matched by
-// dex branch instructions.
-static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
-                                            const CodeInfo& code_info,
-                                            const DexFile::CodeItem& code_item) {
-  ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc));
-  for (HReversePostOrderIterator it(graph); !it.Done(); it.Advance()) {
-    if (it.Current()->IsLoopHeader()) {
-      HSuspendCheck* suspend_check = it.Current()->GetLoopInformation()->GetSuspendCheck();
-      if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
-        loop_headers.push_back(suspend_check);
-      }
-    }
-  }
-  ArenaVector<size_t> covered(loop_headers.size(), 0, graph.GetArena()->Adapter(kArenaAllocMisc));
-  const uint16_t* code_ptr = code_item.insns_;
-  const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
-
-  size_t dex_pc = 0;
-  while (code_ptr < code_end) {
-    const Instruction& instruction = *Instruction::At(code_ptr);
-    if (instruction.IsBranch()) {
-      uint32_t target = dex_pc + instruction.GetTargetOffset();
-      CheckCovers(target, graph, code_info, loop_headers, &covered);
-    } else if (instruction.IsSwitch()) {
-      SwitchTable table(instruction, dex_pc, instruction.Opcode() == Instruction::SPARSE_SWITCH);
-      uint16_t num_entries = table.GetNumEntries();
-      size_t offset = table.GetFirstValueIndex();
-
-      // Use a larger loop counter type to avoid overflow issues.
-      for (size_t i = 0; i < num_entries; ++i) {
-        // The target of the case.
-        uint32_t target = dex_pc + table.GetEntryAt(i + offset);
-        CheckCovers(target, graph, code_info, loop_headers, &covered);
-      }
-    }
-    dex_pc += instruction.SizeInCodeUnits();
-    code_ptr += instruction.SizeInCodeUnits();
-  }
-
-  for (size_t i = 0; i < covered.size(); ++i) {
-    DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
-  }
-}
-
-void CodeGenerator::BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item) {
+void CodeGenerator::BuildStackMaps(MemoryRegion region) {
   stack_map_stream_.FillIn(region);
-  if (kIsDebugBuild) {
-    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(region), code_item);
-  }
 }
 
 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
@@ -769,46 +705,6 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
 
   EmitEnvironment(instruction->GetEnvironment(), slow_path);
   stack_map_stream_.EndStackMapEntry();
-
-  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
-  if (instruction->IsSuspendCheck() &&
-      (info != nullptr) &&
-      graph_->IsCompilingOsr() &&
-      (inlining_depth == 0)) {
-    DCHECK_EQ(info->GetSuspendCheck(), instruction);
-    // We duplicate the stack map as a marker that this stack map can be an OSR entry.
-    // Duplicating it avoids having the runtime recognize and skip an OSR stack map.
-    DCHECK(info->IsIrreducible());
-    stack_map_stream_.BeginStackMapEntry(
-        dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0);
-    EmitEnvironment(instruction->GetEnvironment(), slow_path);
-    stack_map_stream_.EndStackMapEntry();
-    if (kIsDebugBuild) {
-      HEnvironment* environment = instruction->GetEnvironment();
-      for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
-        HInstruction* in_environment = environment->GetInstructionAt(i);
-        if (in_environment != nullptr) {
-          DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
-          Location location = environment->GetLocationAt(i);
-          DCHECK(location.IsStackSlot() ||
-                 location.IsDoubleStackSlot() ||
-                 location.IsConstant() ||
-                 location.IsInvalid());
-          if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
-            DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
-          }
-        }
-      }
-    }
-  } else if (kIsDebugBuild) {
-    // Ensure stack maps are unique, by checking that the native pc in the stack map
-    // last emitted is different than the native pc of the stack map just emitted.
-    size_t number_of_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
-    if (number_of_stack_maps > 1) {
-      DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_offset,
-                stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_offset);
-    }
-  }
 }
 
 bool CodeGenerator::HasStackMapAtCurrentPc() {
```
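The deleted `CheckLoopEntriesCanBeUsedForOsr()` above enforced a debug-only invariant: every loop-header dex pc must appear as the target of some dex branch or switch entry, since those targets are where an on-stack-replacement transition could enter compiled code. A stripped-down restatement of that coverage check, with made-up inputs and no ART types, might look like:

```cpp
#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>

int main() {
  // Made-up inputs: dex pcs of loop headers found in the graph, and the set of
  // targets collected from branch instructions and switch-table entries.
  std::vector<uint32_t> loop_header_pcs = {0x10, 0x24};
  std::set<uint32_t> dex_branch_targets = {0x08, 0x10, 0x24, 0x30};

  for (uint32_t pc : loop_header_pcs) {
    if (dex_branch_targets.count(pc) == 0) {
      // Mirrors the spirit of the removed DCHECK message.
      std::printf("loop at dex pc 0x%x has no dex branch equivalent\n",
                  static_cast<unsigned>(pc));
      return 1;
    }
  }
  std::printf("all loop headers are covered by dex branch targets\n");
  return 0;
}
```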
```diff
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 0a688cf649..4f8f146753 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -288,7 +288,7 @@ class CodeGenerator {
     slow_paths_.push_back(slow_path);
   }
 
-  void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
+  void BuildStackMaps(MemoryRegion region);
   size_t ComputeStackMapsSize();
 
   bool IsLeafMethod() const {
```

```diff
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index a8841d31c5..9b91b53813 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -758,7 +758,6 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
       compiler_driver_->GetInstructionSet(),
       invoke_type,
       graph_->IsDebuggable(),
-      /* osr */ false,
       graph_->GetCurrentInstructionId());
   callee_graph->SetArtMethod(resolved_method);
```

```diff
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index f269885907..3dda8501d2 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -647,10 +647,6 @@ void HLoopInformation::Populate() {
     header_->GetGraph()->SetHasIrreducibleLoops(true);
     PopulateIrreducibleRecursive(back_edge);
   } else {
-    if (header_->GetGraph()->IsCompilingOsr()) {
-      irreducible_ = true;
-      header_->GetGraph()->SetHasIrreducibleLoops(true);
-    }
     PopulateRecursive(back_edge);
   }
 }
```

```diff
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index daec096f3e..6c63af71a3 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -274,7 +274,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
          InstructionSet instruction_set,
          InvokeType invoke_type = kInvalidInvokeType,
          bool debuggable = false,
-         bool osr = false,
          int start_instruction_id = 0)
       : arena_(arena),
         blocks_(arena->Adapter(kArenaAllocBlockList)),
@@ -303,8 +302,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
         cached_long_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
         cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
         cached_current_method_(nullptr),
-        inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
-        osr_(osr) {
+        inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()) {
     blocks_.reserve(kDefaultNumberOfBlocks);
   }
 
@@ -480,8 +478,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
     return instruction_set_;
   }
 
-  bool IsCompilingOsr() const { return osr_; }
-
   bool HasTryCatch() const { return has_try_catch_; }
   void SetHasTryCatch(bool value) { has_try_catch_ = value; }
 
@@ -610,11 +606,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
   // collection pointer to passes which may create NullConstant.
   ReferenceTypeInfo inexact_object_rti_;
 
-  // Whether we are compiling this graph for on stack replacement: this will
-  // make all loops seen as irreducible and emit special stack maps to mark
-  // compiled code entries which the interpreter can directly jump to.
-  const bool osr_;
-
   friend class SsaBuilder;           // For caching constants.
   friend class SsaLivenessAnalysis;  // For the linear order.
   ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
@@ -6050,74 +6041,6 @@ inline bool IsSameDexFile(const DexFile& lhs, const DexFile& rhs) {
   FOR_EACH_CONCRETE_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
 #undef INSTRUCTION_TYPE_CHECK
 
-class SwitchTable : public ValueObject {
- public:
-  SwitchTable(const Instruction& instruction, uint32_t dex_pc, bool sparse)
-      : instruction_(instruction), dex_pc_(dex_pc), sparse_(sparse) {
-    int32_t table_offset = instruction.VRegB_31t();
-    const uint16_t* table = reinterpret_cast<const uint16_t*>(&instruction) + table_offset;
-    if (sparse) {
-      CHECK_EQ(table[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
-    } else {
-      CHECK_EQ(table[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
-    }
-    num_entries_ = table[1];
-    values_ = reinterpret_cast<const int32_t*>(&table[2]);
-  }
-
-  uint16_t GetNumEntries() const {
-    return num_entries_;
-  }
-
-  void CheckIndex(size_t index) const {
-    if (sparse_) {
-      // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
-      DCHECK_LT(index, 2 * static_cast<size_t>(num_entries_));
-    } else {
-      // In a packed table, we have the starting key and num_entries_ values.
-      DCHECK_LT(index, 1 + static_cast<size_t>(num_entries_));
-    }
-  }
-
-  int32_t GetEntryAt(size_t index) const {
-    CheckIndex(index);
-    return values_[index];
-  }
-
-  uint32_t GetDexPcForIndex(size_t index) const {
-    CheckIndex(index);
-    return dex_pc_ +
-        (reinterpret_cast<const int16_t*>(values_ + index) -
-         reinterpret_cast<const int16_t*>(&instruction_));
-  }
-
-  // Index of the first value in the table.
-  size_t GetFirstValueIndex() const {
-    if (sparse_) {
-      // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
-      return num_entries_;
-    } else {
-      // In a packed table, we have the starting key and num_entries_ values.
-      return 1;
-    }
-  }
-
- private:
-  const Instruction& instruction_;
-  const uint32_t dex_pc_;
-
-  // Whether this is a sparse-switch table (or a packed-switch one).
-  const bool sparse_;
-
-  // This can't be const as it needs to be computed off of the given instruction, and complicated
-  // expressions in the initializer list seemed very ugly.
-  uint16_t num_entries_;
-
-  const int32_t* values_;
-
-  DISALLOW_COPY_AND_ASSIGN(SwitchTable);
-};
-
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_NODES_H_
```
```diff
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 736ac32011..bdc664b3eb 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -300,7 +300,7 @@ class OptimizingCompiler FINAL : public Compiler {
     }
   }
 
-  bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr)
+  bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method)
       OVERRIDE
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -309,8 +309,7 @@ class OptimizingCompiler FINAL : public Compiler {
   CompiledMethod* Emit(ArenaAllocator* arena,
                        CodeVectorAllocator* code_allocator,
                        CodeGenerator* codegen,
-                       CompilerDriver* driver,
-                       const DexFile::CodeItem* item) const;
+                       CompilerDriver* driver) const;
 
   // Try compiling a method and return the code generator used for
   // compiling it.
@@ -328,8 +327,7 @@ class OptimizingCompiler FINAL : public Compiler {
                             uint32_t method_idx,
                             jobject class_loader,
                             const DexFile& dex_file,
-                            Handle<mirror::DexCache> dex_cache,
-                            bool osr) const;
+                            Handle<mirror::DexCache> dex_cache) const;
 
   std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
 
@@ -582,12 +580,11 @@ static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen)
 CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
                                          CodeVectorAllocator* code_allocator,
                                          CodeGenerator* codegen,
-                                         CompilerDriver* compiler_driver,
-                                         const DexFile::CodeItem* code_item) const {
+                                         CompilerDriver* compiler_driver) const {
   ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
   ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
   stack_map.resize(codegen->ComputeStackMapsSize());
-  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()), *code_item);
+  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()));
 
   CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
       compiler_driver,
@@ -618,8 +615,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
                                               uint32_t method_idx,
                                               jobject class_loader,
                                               const DexFile& dex_file,
-                                              Handle<mirror::DexCache> dex_cache,
-                                              bool osr) const {
+                                              Handle<mirror::DexCache> dex_cache) const {
   MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
   CompilerDriver* compiler_driver = GetCompilerDriver();
   InstructionSet instruction_set = compiler_driver->GetInstructionSet();
@@ -667,14 +663,8 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
       dex_compilation_unit.GetDexFile(),
       dex_compilation_unit.GetClassDefIndex());
   HGraph* graph = new (arena) HGraph(
-      arena,
-      dex_file,
-      method_idx,
-      requires_barrier,
-      compiler_driver->GetInstructionSet(),
-      kInvalidInvokeType,
-      compiler_driver->GetCompilerOptions().GetDebuggable(),
-      osr);
+      arena, dex_file, method_idx, requires_barrier, compiler_driver->GetInstructionSet(),
+      kInvalidInvokeType, compiler_driver->GetCompilerOptions().GetDebuggable());
 
   std::unique_ptr<CodeGenerator> codegen(
       CodeGenerator::Create(graph,
@@ -807,11 +797,10 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                         method_idx,
                         jclass_loader,
                         dex_file,
-                        dex_cache,
-                        /* osr */ false));
+                        dex_cache));
     if (codegen.get() != nullptr) {
       MaybeRecordStat(MethodCompilationStat::kCompiled);
-      method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);
+      method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver);
     }
   } else {
     if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
@@ -854,8 +843,7 @@ bool IsCompilingWithCoreImage() {
 
 bool OptimizingCompiler::JitCompile(Thread* self,
                                     jit::JitCodeCache* code_cache,
-                                    ArtMethod* method,
-                                    bool osr) {
+                                    ArtMethod* method) {
   StackHandleScope<2> hs(self);
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
       method->GetDeclaringClass()->GetClassLoader()));
@@ -885,8 +873,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
                     method_idx,
                     jclass_loader,
                     *dex_file,
-                    dex_cache,
-                    osr));
+                    dex_cache));
     if (codegen.get() == nullptr) {
       return false;
     }
@@ -898,7 +885,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
       return false;
     }
     MaybeRecordStat(MethodCompilationStat::kCompiled);
-    codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
+    codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size));
     const void* code = code_cache->CommitCode(
         self,
         method,
@@ -909,8 +896,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
         codegen->GetCoreSpillMask(),
         codegen->GetFpuSpillMask(),
         code_allocator.GetMemory().data(),
-        code_allocator.GetSize(),
-        osr);
+        code_allocator.GetSize());
     if (code == nullptr) {
       code_cache->ClearData(self, stack_map_data);
```
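As context for the `extern "C"` functions touched in `jit_compiler.cc`, the runtime resolves these hooks by name from the JIT compiler shared object rather than linking against `JitCompiler` directly. The sketch below shows only that resolve-and-call pattern; the library handle, the `jit_handle` argument (assumed to come from the library's load entry point), and the error handling are illustrative assumptions, while the `jit_compile_method` signature matches the post-revert form shown above.

```cpp
#include <dlfcn.h>
#include <cstdio>

namespace art {
class ArtMethod;  // Stand-ins for the real runtime types.
class Thread;
}  // namespace art

// Function-pointer types matching the post-revert extern "C" signatures shown
// in jit_compiler.cc above (no trailing `bool osr` parameter any more).
using JitCompileMethodFn = bool (*)(void* handle, art::ArtMethod* method, art::Thread* self);
using JitUnloadFn = void (*)(void* handle);  // jit_unload would be resolved the same way.

// Hypothetical helper: `compiler_lib` is a dlopen'd handle to the compiler
// shared object and `jit_handle` is assumed to come from its load entry point.
bool CompileOnce(void* compiler_lib, void* jit_handle,
                 art::ArtMethod* method, art::Thread* self) {
  auto compile = reinterpret_cast<JitCompileMethodFn>(
      dlsym(compiler_lib, "jit_compile_method"));
  if (compile == nullptr) {
    std::fprintf(stderr, "jit_compile_method not found: %s\n", dlerror());
    return false;
  }
  return compile(jit_handle, method, self);
}
```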