From 174b2e27ebf933b80f4e8b64b4b024ab4306aaac Mon Sep 17 00:00:00 2001
From: Vladimir Marko
Date: Thu, 12 Oct 2017 13:34:49 +0100
Subject: Use ScopedArenaAllocator for code generation.

Reuse the memory previously allocated on the ArenaStack by
optimization passes. This CL handles only the architecture-independent
codegen and slow paths; architecture-dependent codegen allocations
shall be moved to the ScopedArenaAllocator in a follow-up.

Memory needed to compile the two most expensive methods for
aosp_angler-userdebug boot image:
  BatteryStats.dumpCheckinLocked(): 19.6MiB -> 18.5MiB (-1189KiB)
  BatteryStats.dumpLocked(): 39.3MiB -> 37.0MiB (-2379KiB)

Also move definitions of functions that use bit_vector-inl.h
from bit_vector.h to bit_vector-inl.h.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 64312607
Change-Id: I84688c3a5a95bf90f56bd3a150bc31fedc95f29c
---
 compiler/optimizing/code_generator.cc              | 348 ++++++++++++++++-----
 compiler/optimizing/code_generator.h               |  89 ++----
 compiler/optimizing/code_generator_arm64.cc        |  54 ++--
 compiler/optimizing/code_generator_arm_vixl.cc     |  67 ++--
 compiler/optimizing/code_generator_mips.cc         |  72 ++---
 compiler/optimizing/code_generator_mips.h          |   4 +-
 compiler/optimizing/code_generator_mips64.cc       |  60 ++--
 compiler/optimizing/code_generator_x86.cc          |  68 ++--
 compiler/optimizing/code_generator_x86.h           |   4 +-
 compiler/optimizing/code_generator_x86_64.cc       |  68 ++--
 compiler/optimizing/code_generator_x86_64.h        |   4 +-
 compiler/optimizing/intrinsics_arm64.cc            |  28 +-
 compiler/optimizing/intrinsics_arm_vixl.cc         |  24 +-
 compiler/optimizing/intrinsics_mips.cc             |  27 +-
 compiler/optimizing/intrinsics_mips64.cc           |  22 +-
 compiler/optimizing/intrinsics_x86.cc              |  23 +-
 compiler/optimizing/intrinsics_x86_64.cc           |  23 +-
 compiler/optimizing/optimizing_cfi_test.cc         |   1 +
 compiler/optimizing/optimizing_compiler.cc         |  26 +-
 .../optimizing/register_allocation_resolver.cc     |   1 +
 compiler/optimizing/stack_map_stream.cc            |  34 +-
 compiler/optimizing/stack_map_stream.h             |  35 +--
 compiler/optimizing/stack_map_test.cc              |  30 +-
 23 files changed, 627 insertions(+), 485 deletions(-)

(limited to 'compiler/optimizing')

diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 84f01828b2..9d0b5c865d 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -42,6 +42,7 @@
 #include "base/bit_utils.h"
 #include "base/bit_utils_iterator.h"
+#include "base/casts.h"
 #include "bytecode_utils.h"
 #include "class_linker.h"
 #include "compiled_method.h"
@@ -59,6 +60,7 @@
 #include "parallel_move_resolver.h"
 #include "scoped_thread_state_change-inl.h"
 #include "ssa_liveness_analysis.h"
+#include "stack_map_stream.h"
 #include "thread-current-inl.h"
 #include "utils/assembler.h"
 
@@ -141,6 +143,158 @@ static bool CheckTypeConsistency(HInstruction* instruction) {
   return true;
 }
 
+class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
+ public:
+  static std::unique_ptr<CodeGenerationData> Create(ArenaStack* arena_stack,
+                                                    InstructionSet instruction_set) {
+    ScopedArenaAllocator allocator(arena_stack);
+    void* memory = allocator.Alloc<CodeGenerationData>(kArenaAllocCodeGenerator);
+    return std::unique_ptr<CodeGenerationData>(
+        ::new (memory) CodeGenerationData(std::move(allocator), instruction_set));
+  }
+
+  ScopedArenaAllocator* GetScopedAllocator() {
+    return &allocator_;
+  }
+
+  void AddSlowPath(SlowPathCode* slow_path) {
+    slow_paths_.emplace_back(std::unique_ptr<SlowPathCode>(slow_path));
+  }
+
+  ArrayRef<const std::unique_ptr<SlowPathCode>> GetSlowPaths() const {
+    return ArrayRef<const std::unique_ptr<SlowPathCode>>(slow_paths_);
+  }
+
+  StackMapStream* GetStackMapStream() { return &stack_map_stream_; }
+
+  void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string) {
+    jit_string_roots_.Overwrite(string_reference,
+                                reinterpret_cast64<uint64_t>(string.GetReference()));
+  }
+
+  uint64_t GetJitStringRootIndex(StringReference string_reference) const {
+    return jit_string_roots_.Get(string_reference);
+  }
+
+  size_t GetNumberOfJitStringRoots() const {
+    return jit_string_roots_.size();
+  }
+
+  void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
+    jit_class_roots_.Overwrite(type_reference, reinterpret_cast64<uint64_t>(klass.GetReference()));
+  }
+
+  uint64_t GetJitClassRootIndex(TypeReference type_reference) const {
+    return jit_class_roots_.Get(type_reference);
+  }
+
+  size_t GetNumberOfJitClassRoots() const {
+    return jit_class_roots_.size();
+  }
+
+  size_t GetNumberOfJitRoots() const {
+    return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
+  }
+
+  void EmitJitRoots(Handle<mirror::ObjectArray<mirror::Object>> roots)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set)
+      : allocator_(std::move(allocator)),
+        stack_map_stream_(&allocator_, instruction_set),
+        slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)),
+        jit_string_roots_(StringReferenceValueComparator(),
+                          allocator_.Adapter(kArenaAllocCodeGenerator)),
+        jit_class_roots_(TypeReferenceValueComparator(),
+                         allocator_.Adapter(kArenaAllocCodeGenerator)) {
+    slow_paths_.reserve(kDefaultSlowPathsCapacity);
+  }
+
+  static constexpr size_t kDefaultSlowPathsCapacity = 8;
+
+  ScopedArenaAllocator allocator_;
+  StackMapStream stack_map_stream_;
+  ScopedArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
+
+  // Maps a StringReference (dex_file, string_index) to the index in the literal table.
+  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
+  // will compute all the indices.
+  ScopedArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
+
+  // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
+  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
+  // will compute all the indices.
+  ScopedArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
+};
+
+void CodeGenerator::CodeGenerationData::EmitJitRoots(
+    Handle<mirror::ObjectArray<mirror::Object>> roots) {
+  DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  size_t index = 0;
+  for (auto& entry : jit_string_roots_) {
+    // Update the `roots` with the string, and replace the address temporarily
+    // stored to the index in the table.
+    uint64_t address = entry.second;
+    roots->Set(index, reinterpret_cast<StackReference<mirror::Object>*>(address)->AsMirrorPtr());
+    DCHECK(roots->Get(index) != nullptr);
+    entry.second = index;
+    // Ensure the string is strongly interned. This is a requirement on how the JIT
+    // handles strings. b/32995596
+    class_linker->GetInternTable()->InternStrong(
+        reinterpret_cast<mirror::String*>(roots->Get(index)));
+    ++index;
+  }
+  for (auto& entry : jit_class_roots_) {
+    // Update the `roots` with the class, and replace the address temporarily
+    // stored to the index in the table.
+    uint64_t address = entry.second;
+    roots->Set(index, reinterpret_cast<StackReference<mirror::Object>*>(address)->AsMirrorPtr());
+    DCHECK(roots->Get(index) != nullptr);
+    entry.second = index;
+    ++index;
+  }
+}
+
+ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
+  DCHECK(code_generation_data_ != nullptr);
+  return code_generation_data_->GetScopedAllocator();
+}
+
+StackMapStream* CodeGenerator::GetStackMapStream() {
+  DCHECK(code_generation_data_ != nullptr);
+  return code_generation_data_->GetStackMapStream();
+}
+
+void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
+                                         Handle<mirror::String> string) {
+  DCHECK(code_generation_data_ != nullptr);
+  code_generation_data_->ReserveJitStringRoot(string_reference, string);
+}
+
+uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
+  DCHECK(code_generation_data_ != nullptr);
+  return code_generation_data_->GetJitStringRootIndex(string_reference);
+}
+
+void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
+  DCHECK(code_generation_data_ != nullptr);
+  code_generation_data_->ReserveJitClassRoot(type_reference, klass);
+}
+
+uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
+  DCHECK(code_generation_data_ != nullptr);
+  return code_generation_data_->GetJitClassRootIndex(type_reference);
+}
+
+void CodeGenerator::EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
+                                       const uint8_t* roots_data ATTRIBUTE_UNUSED) {
+  DCHECK(code_generation_data_ != nullptr);
+  DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
+  DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
+}
+
 size_t CodeGenerator::GetCacheOffset(uint32_t index) {
   return sizeof(GcRoot<mirror::Object>) * index;
 }
@@ -210,9 +364,10 @@ class DisassemblyScope {
 
 void CodeGenerator::GenerateSlowPaths() {
+  DCHECK(code_generation_data_ != nullptr);
   size_t code_start = 0;
-  for (const std::unique_ptr<SlowPathCode>& slow_path_unique_ptr : slow_paths_) {
-    SlowPathCode* slow_path = slow_path_unique_ptr.get();
+  for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
+    SlowPathCode* slow_path = slow_path_ptr.get();
     current_slow_path_ = slow_path;
     if (disasm_info_ != nullptr) {
       code_start = GetAssembler()->CodeSize();
@@ -227,7 +382,14 @@ void CodeGenerator::GenerateSlowPaths() {
   current_slow_path_ = nullptr;
 }
 
+void CodeGenerator::InitializeCodeGenerationData() {
+  DCHECK(code_generation_data_ == nullptr);
+  code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
+}
+
 void CodeGenerator::Compile(CodeAllocator* allocator) {
+  InitializeCodeGenerationData();
+
   // The register allocator already called `InitializeCodeGeneration`,
   // where the frame size has been computed.
   DCHECK(block_order_ != nullptr);
@@ -667,12 +829,54 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
   }
 }
 
+CodeGenerator::CodeGenerator(HGraph* graph,
+                             size_t number_of_core_registers,
+                             size_t number_of_fpu_registers,
+                             size_t number_of_register_pairs,
+                             uint32_t core_callee_save_mask,
+                             uint32_t fpu_callee_save_mask,
+                             const CompilerOptions& compiler_options,
+                             OptimizingCompilerStats* stats)
+    : frame_size_(0),
+      core_spill_mask_(0),
+      fpu_spill_mask_(0),
+      first_register_slot_in_slow_path_(0),
+      allocated_registers_(RegisterSet::Empty()),
+      blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
+                                                                      kArenaAllocCodeGenerator)),
+      blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
+                                                                     kArenaAllocCodeGenerator)),
+      number_of_core_registers_(number_of_core_registers),
+      number_of_fpu_registers_(number_of_fpu_registers),
+      number_of_register_pairs_(number_of_register_pairs),
+      core_callee_save_mask_(core_callee_save_mask),
+      fpu_callee_save_mask_(fpu_callee_save_mask),
+      block_order_(nullptr),
+      disasm_info_(nullptr),
+      stats_(stats),
+      graph_(graph),
+      compiler_options_(compiler_options),
+      current_slow_path_(nullptr),
+      current_block_index_(0),
+      is_leaf_(true),
+      requires_current_method_(false),
+      code_generation_data_() {
+}
+
+CodeGenerator::~CodeGenerator() {}
+
 void CodeGenerator::ComputeStackMapAndMethodInfoSize(size_t* stack_map_size,
                                                      size_t* method_info_size) {
   DCHECK(stack_map_size != nullptr);
   DCHECK(method_info_size != nullptr);
-  *stack_map_size = stack_map_stream_.PrepareForFillIn();
-  *method_info_size = stack_map_stream_.ComputeMethodInfoSize();
+  StackMapStream* stack_map_stream = GetStackMapStream();
+  *stack_map_size = stack_map_stream->PrepareForFillIn();
+  *method_info_size = stack_map_stream->ComputeMethodInfoSize();
+}
+
+size_t CodeGenerator::GetNumberOfJitRoots() const {
+  DCHECK(code_generation_data_ != nullptr);
+  return code_generation_data_->GetNumberOfJitRoots();
 }
 
 static void CheckCovers(uint32_t dex_pc,
@@ -740,8 +944,9 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
 void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
                                    MemoryRegion method_info_region,
                                    const DexFile::CodeItem& code_item) {
-  stack_map_stream_.FillInCodeInfo(stack_map_region);
-  stack_map_stream_.FillInMethodInfo(method_info_region);
+  StackMapStream* stack_map_stream = GetStackMapStream();
+  stack_map_stream->FillInCodeInfo(stack_map_region);
+  stack_map_stream->FillInMethodInfo(method_info_region);
   if (kIsDebugBuild) {
     CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), code_item);
   }
@@ -791,11 +996,12 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
 
   // Collect PC infos for the mapping table.
   uint32_t native_pc = GetAssembler()->CodePosition();
+  StackMapStream* stack_map_stream = GetStackMapStream();
 
   if (instruction == nullptr) {
     // For stack overflow checks and native-debug-info entries without dex register
     // mapping (i.e. start of basic block or start of slow path).
-    stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
-    stack_map_stream_.EndStackMapEntry();
+    stack_map_stream->BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
+    stack_map_stream->EndStackMapEntry();
     return;
   }
   LocationSummary* locations = instruction->GetLocations();
@@ -814,7 +1020,7 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
 
     // The register mask must be a subset of callee-save registers.
DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask); } - stack_map_stream_.BeginStackMapEntry(outer_dex_pc, + stack_map_stream->BeginStackMapEntry(outer_dex_pc, native_pc, register_mask, locations->GetStackMask(), @@ -830,10 +1036,10 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction, instruction->IsInvoke() && instruction->IsInvokeStaticOrDirect()) { HInvoke* const invoke = instruction->AsInvoke(); - stack_map_stream_.AddInvoke(invoke->GetInvokeType(), invoke->GetDexMethodIndex()); + stack_map_stream->AddInvoke(invoke->GetInvokeType(), invoke->GetDexMethodIndex()); } } - stack_map_stream_.EndStackMapEntry(); + stack_map_stream->EndStackMapEntry(); HLoopInformation* info = instruction->GetBlock()->GetLoopInformation(); if (instruction->IsSuspendCheck() && @@ -844,10 +1050,10 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction, // We duplicate the stack map as a marker that this stack map can be an OSR entry. // Duplicating it avoids having the runtime recognize and skip an OSR stack map. DCHECK(info->IsIrreducible()); - stack_map_stream_.BeginStackMapEntry( + stack_map_stream->BeginStackMapEntry( dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0); EmitEnvironment(instruction->GetEnvironment(), slow_path); - stack_map_stream_.EndStackMapEntry(); + stack_map_stream->EndStackMapEntry(); if (kIsDebugBuild) { for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) { HInstruction* in_environment = environment->GetInstructionAt(i); @@ -867,21 +1073,22 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction, } else if (kIsDebugBuild) { // Ensure stack maps are unique, by checking that the native pc in the stack map // last emitted is different than the native pc of the stack map just emitted. 
- size_t number_of_stack_maps = stack_map_stream_.GetNumberOfStackMaps(); + size_t number_of_stack_maps = stack_map_stream->GetNumberOfStackMaps(); if (number_of_stack_maps > 1) { - DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_code_offset, - stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_code_offset); + DCHECK_NE(stack_map_stream->GetStackMap(number_of_stack_maps - 1).native_pc_code_offset, + stack_map_stream->GetStackMap(number_of_stack_maps - 2).native_pc_code_offset); } } } bool CodeGenerator::HasStackMapAtCurrentPc() { uint32_t pc = GetAssembler()->CodeSize(); - size_t count = stack_map_stream_.GetNumberOfStackMaps(); + StackMapStream* stack_map_stream = GetStackMapStream(); + size_t count = stack_map_stream->GetNumberOfStackMaps(); if (count == 0) { return false; } - CodeOffset native_pc_offset = stack_map_stream_.GetStackMap(count - 1).native_pc_code_offset; + CodeOffset native_pc_offset = stack_map_stream->GetStackMap(count - 1).native_pc_code_offset; return (native_pc_offset.Uint32Value(GetInstructionSet()) == pc); } @@ -899,6 +1106,7 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction, void CodeGenerator::RecordCatchBlockInfo() { ArenaAllocator* allocator = graph_->GetAllocator(); + StackMapStream* stack_map_stream = GetStackMapStream(); for (HBasicBlock* block : *block_order_) { if (!block->IsCatchBlock()) { @@ -915,7 +1123,7 @@ void CodeGenerator::RecordCatchBlockInfo() { ArenaBitVector* stack_mask = ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator); - stack_map_stream_.BeginStackMapEntry(dex_pc, + stack_map_stream->BeginStackMapEntry(dex_pc, native_pc, register_mask, stack_mask, @@ -933,19 +1141,19 @@ void CodeGenerator::RecordCatchBlockInfo() { } if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) { - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0); } else { Location location = current_phi->GetLocations()->Out(); switch (location.GetKind()) { case Location::kStackSlot: { - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kInStack, location.GetStackIndex()); break; } case Location::kDoubleStackSlot: { - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kInStack, location.GetStackIndex()); - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize)); ++vreg; DCHECK_LT(vreg, num_vregs); @@ -960,17 +1168,23 @@ void CodeGenerator::RecordCatchBlockInfo() { } } - stack_map_stream_.EndStackMapEntry(); + stack_map_stream->EndStackMapEntry(); } } +void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) { + DCHECK(code_generation_data_ != nullptr); + code_generation_data_->AddSlowPath(slow_path); +} + void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) { if (environment == nullptr) return; + StackMapStream* stack_map_stream = GetStackMapStream(); if (environment->GetParent() != nullptr) { // We emit the parent environment first. 
EmitEnvironment(environment->GetParent(), slow_path); - stack_map_stream_.BeginInlineInfoEntry(environment->GetMethod(), + stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(), environment->GetDexPc(), environment->Size(), &graph_->GetDexFile()); @@ -980,7 +1194,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) { HInstruction* current = environment->GetInstructionAt(i); if (current == nullptr) { - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0); continue; } @@ -990,43 +1204,43 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo DCHECK_EQ(current, location.GetConstant()); if (current->IsLongConstant()) { int64_t value = current->AsLongConstant()->GetValue(); - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kConstant, Low32Bits(value)); - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kConstant, High32Bits(value)); ++i; DCHECK_LT(i, environment_size); } else if (current->IsDoubleConstant()) { int64_t value = bit_cast(current->AsDoubleConstant()->GetValue()); - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kConstant, Low32Bits(value)); - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kConstant, High32Bits(value)); ++i; DCHECK_LT(i, environment_size); } else if (current->IsIntConstant()) { int32_t value = current->AsIntConstant()->GetValue(); - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value); } else if (current->IsNullConstant()) { - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0); } else { DCHECK(current->IsFloatConstant()) << current->DebugName(); int32_t value = bit_cast(current->AsFloatConstant()->GetValue()); - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value); } break; } case Location::kStackSlot: { - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kInStack, location.GetStackIndex()); break; } case Location::kDoubleStackSlot: { - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kInStack, location.GetStackIndex()); - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize)); ++i; DCHECK_LT(i, environment_size); @@ -1037,17 +1251,17 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo int id = location.reg(); if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) { uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id); - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); if (current->GetType() == DataType::Type::kInt64) { - stack_map_stream_.AddDexRegisterEntry( + 
stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kInStack, offset + kVRegSize); ++i; DCHECK_LT(i, environment_size); } } else { - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id); if (current->GetType() == DataType::Type::kInt64) { - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id); ++i; DCHECK_LT(i, environment_size); } @@ -1059,17 +1273,17 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo int id = location.reg(); if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) { uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id); - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); if (current->GetType() == DataType::Type::kFloat64) { - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kInStack, offset + kVRegSize); ++i; DCHECK_LT(i, environment_size); } } else { - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id); if (current->GetType() == DataType::Type::kFloat64) { - stack_map_stream_.AddDexRegisterEntry( + stack_map_stream->AddDexRegisterEntry( DexRegisterLocation::Kind::kInFpuRegisterHigh, id); ++i; DCHECK_LT(i, environment_size); @@ -1083,16 +1297,16 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo int high = location.high(); if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) { uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low); - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); } else { - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low); } if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) { uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high); - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); ++i; } else { - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high); ++i; } DCHECK_LT(i, environment_size); @@ -1104,15 +1318,15 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo int high = location.high(); if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) { uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low); - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset); } else { - stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low); + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low); } if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) { uint32_t offset = 
slow_path->GetStackOffsetOfCoreRegister(high);
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
         } else {
-          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
+          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
         }
         ++i;
         DCHECK_LT(i, environment_size);
@@ -1120,7 +1334,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
       }
 
       case Location::kInvalid: {
-        stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+        stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
         break;
       }
 
@@ -1130,7 +1344,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
   }
 
   if (environment->GetParent() != nullptr) {
-    stack_map_stream_.EndInlineInfoEntry();
+    stack_map_stream->EndInlineInfoEntry();
   }
 }
 
@@ -1408,31 +1622,7 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
 void CodeGenerator::EmitJitRoots(uint8_t* code,
                                  Handle<mirror::ObjectArray<mirror::Object>> roots,
                                  const uint8_t* roots_data) {
-  DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  size_t index = 0;
-  for (auto& entry : jit_string_roots_) {
-    // Update the `roots` with the string, and replace the address temporarily
-    // stored to the index in the table.
-    uint64_t address = entry.second;
-    roots->Set(index, reinterpret_cast<StackReference<mirror::Object>*>(address)->AsMirrorPtr());
-    DCHECK(roots->Get(index) != nullptr);
-    entry.second = index;
-    // Ensure the string is strongly interned. This is a requirement on how the JIT
-    // handles strings. b/32995596
-    class_linker->GetInternTable()->InternStrong(
-        reinterpret_cast<mirror::String*>(roots->Get(index)));
-    ++index;
-  }
-  for (auto& entry : jit_class_roots_) {
-    // Update the `roots` with the class, and replace the address temporarily
-    // stored to the index in the table.
-    uint64_t address = entry.second;
-    roots->Set(index, reinterpret_cast<StackReference<mirror::Object>*>(address)->AsMirrorPtr());
-    DCHECK(roots->Get(index) != nullptr);
-    entry.second = index;
-    ++index;
-  }
+  code_generation_data_->EmitJitRoots(roots);
   EmitJitRootPatches(code, roots_data);
 }
 
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2904b71991..64c88eb67c 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -32,7 +32,7 @@
 #include "optimizing_compiler_stats.h"
 #include "read_barrier_option.h"
 #include "stack.h"
-#include "stack_map_stream.h"
+#include "stack_map.h"
 #include "string_reference.h"
 #include "type_reference.h"
 #include "utils/label.h"
@@ -61,6 +61,7 @@
 class Assembler;
 class CodeGenerator;
 class CompilerDriver;
 class CompilerOptions;
+class StackMapStream;
 class ParallelMoveResolver;
 
 namespace linker {
@@ -190,7 +191,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
                                               const InstructionSetFeatures& isa_features,
                                               const CompilerOptions& compiler_options,
                                               OptimizingCompilerStats* stats = nullptr);
-  virtual ~CodeGenerator() {}
+  virtual ~CodeGenerator();
 
   // Get the graph. This is the outermost graph, never the graph of a method being inlined.
   HGraph* GetGraph() const { return graph_; }
@@ -338,18 +339,16 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   // TODO: Replace with a catch-entering instruction that records the environment.
   void RecordCatchBlockInfo();
 
-  // TODO: Avoid creating the `std::unique_ptr` here.
-  void AddSlowPath(SlowPathCode* slow_path) {
-    slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path));
-  }
+  // Get the ScopedArenaAllocator used for codegen memory allocation.
+  ScopedArenaAllocator* GetScopedAllocator();
+
+  void AddSlowPath(SlowPathCode* slow_path);
 
   void BuildStackMaps(MemoryRegion stack_map_region,
                       MemoryRegion method_info_region,
                       const DexFile::CodeItem& code_item);
   void ComputeStackMapAndMethodInfoSize(size_t* stack_map_size, size_t* method_info_size);
 
-  size_t GetNumberOfJitRoots() const {
-    return jit_string_roots_.size() + jit_class_roots_.size();
-  }
+  size_t GetNumberOfJitRoots() const;
 
   // Fills the `literals` array with literals collected during code generation.
   // Also emits literal patches.
@@ -600,38 +599,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
                 uint32_t core_callee_save_mask,
                 uint32_t fpu_callee_save_mask,
                 const CompilerOptions& compiler_options,
-                OptimizingCompilerStats* stats)
-      : frame_size_(0),
-        core_spill_mask_(0),
-        fpu_spill_mask_(0),
-        first_register_slot_in_slow_path_(0),
-        allocated_registers_(RegisterSet::Empty()),
-        blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
-                                                                        kArenaAllocCodeGenerator)),
-        blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
-                                                                       kArenaAllocCodeGenerator)),
-        number_of_core_registers_(number_of_core_registers),
-        number_of_fpu_registers_(number_of_fpu_registers),
-        number_of_register_pairs_(number_of_register_pairs),
-        core_callee_save_mask_(core_callee_save_mask),
-        fpu_callee_save_mask_(fpu_callee_save_mask),
-        stack_map_stream_(graph->GetAllocator(), graph->GetInstructionSet()),
-        block_order_(nullptr),
-        jit_string_roots_(StringReferenceValueComparator(),
-                          graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-        jit_class_roots_(TypeReferenceValueComparator(),
-                         graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-        disasm_info_(nullptr),
-        stats_(stats),
-        graph_(graph),
-        compiler_options_(compiler_options),
-        slow_paths_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-        current_slow_path_(nullptr),
-        current_block_index_(0),
-        is_leaf_(true),
-        requires_current_method_(false) {
-    slow_paths_.reserve(8);
-  }
+                OptimizingCompilerStats* stats);
 
   virtual HGraphVisitor* GetLocationBuilder() = 0;
   virtual HGraphVisitor* GetInstructionVisitor() = 0;
@@ -687,12 +655,15 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
     return current_slow_path_;
   }
 
+  StackMapStream* GetStackMapStream();
+
+  void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string);
+  uint64_t GetJitStringRootIndex(StringReference string_reference);
+  void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass);
+  uint64_t GetJitClassRootIndex(TypeReference type_reference);
+
   // Emit the patches associated with JIT roots. Only applies to JIT compiled code.
-  virtual void EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
-                                  const uint8_t* roots_data ATTRIBUTE_UNUSED) {
-    DCHECK_EQ(jit_string_roots_.size(), 0u);
-    DCHECK_EQ(jit_class_roots_.size(), 0u);
-  }
+  virtual void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data);
 
   // Frame size required for this method.
   uint32_t frame_size_;
@@ -714,24 +685,15 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   const uint32_t core_callee_save_mask_;
   const uint32_t fpu_callee_save_mask_;
 
-  StackMapStream stack_map_stream_;
-
   // The order to use for code generation.
   const ArenaVector<HBasicBlock*>* block_order_;
 
-  // Maps a StringReference (dex_file, string_index) to the index in the literal table.
-  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
-  // will compute all the indices.
-  ArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
-
-  // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
-  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
-  // will compute all the indices.
-  ArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
-
   DisassemblyInformation* disasm_info_;
 
  private:
+  class CodeGenerationData;
+
+  void InitializeCodeGenerationData();
   size_t GetStackOffsetOfSavedRegister(size_t index);
   void GenerateSlowPaths();
   void BlockIfInRegister(Location location, bool is_out = false) const;
@@ -742,8 +704,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   HGraph* const graph_;
   const CompilerOptions& compiler_options_;
 
-  ArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
-
   // The current slow-path that we're generating code for.
   SlowPathCode* current_slow_path_;
 
@@ -759,6 +719,12 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   // needs the environment including a valid stack frame.
   bool requires_current_method_;
 
+  // The CodeGenerationData contains a ScopedArenaAllocator intended for reusing the
+  // ArenaStack memory allocated in previous passes instead of adding to the memory
+  // held by the ArenaAllocator. This ScopedArenaAllocator is created in
+  // CodeGenerator::Compile() and remains alive until the CodeGenerator is destroyed.
+  std::unique_ptr<CodeGenerationData> code_generation_data_;
+
   friend class OptimizingCFITest;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
@@ -863,7 +829,8 @@ class SlowPathGenerator {
           {{}, {graph_->GetAllocator()->Adapter(kArenaAllocSlowPaths)}});
     }
     // Cannot share: create and add new slow-path for this particular dex-pc.
-    SlowPathCodeType* slow_path = new (graph_->GetAllocator()) SlowPathCodeType(instruction);
+    SlowPathCodeType* slow_path =
+        new (codegen_->GetScopedAllocator()) SlowPathCodeType(instruction);
     iter->second.emplace_back(std::make_pair(instruction, slow_path));
     codegen_->AddSlowPath(slow_path);
     return slow_path;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e6e69846e4..0b65b41015 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2204,7 +2204,8 @@ void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruct
   SuspendCheckSlowPathARM64* slow_path =
       down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
   if (slow_path == nullptr) {
-    slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARM64(instruction, successor);
+    slow_path =
+        new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathARM64(instruction, successor);
     instruction->SetSlowPath(slow_path);
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
@@ -3011,7 +3012,7 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
     uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
 
     if (may_need_runtime_call_for_type_check) {
-      slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARM64(instruction);
+      slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARM64(instruction);
       codegen_->AddSlowPath(slow_path);
       if (instruction->GetValueCanBeNull()) {
         vixl::aarch64::Label non_zero;
@@ -3126,7 +3127,7 @@ void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
 
 void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
   BoundsCheckSlowPathARM64* slow_path =
-      new (GetGraph()->GetAllocator())
BoundsCheckSlowPathARM64(instruction); + new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARM64(instruction); codegen_->AddSlowPath(slow_path); __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1)); __ B(slow_path->GetEntryLabel(), hs); @@ -3143,7 +3144,7 @@ void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) { void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) { // We assume the class is not null. - SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64( + SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64( check->GetLoadClass(), check, check->GetDexPc(), true); codegen_->AddSlowPath(slow_path); GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0)); @@ -3500,7 +3501,7 @@ void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) { void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) { SlowPathCodeARM64* slow_path = - new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARM64(instruction); + new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathARM64(instruction); codegen_->AddSlowPath(slow_path); Location value = instruction->GetLocations()->InAt(0); @@ -4055,8 +4056,8 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { kWithoutReadBarrier); __ Cmp(out, cls); DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction, - /* is_fatal */ false); + slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64( + instruction, /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ B(ne, slow_path->GetEntryLabel()); __ Mov(out, 1); @@ -4087,8 +4088,8 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { // call to the runtime not using a type checking slow path). // This should also be beneficial for the other cases above. 
DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction, - /* is_fatal */ false); + slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64( + instruction, /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); if (zero.IsLinked()) { @@ -4176,8 +4177,8 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { !instruction->CanThrowIntoCatchBlock(); } SlowPathCodeARM64* type_check_slow_path = - new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction, - is_type_check_slow_path_fatal); + new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64( + instruction, is_type_check_slow_path_fatal); codegen_->AddSlowPath(type_check_slow_path); vixl::aarch64::Label done; @@ -4685,8 +4686,7 @@ vixl::aarch64::Literal* CodeGeneratorARM64::DeduplicateBootImageAddres vixl::aarch64::Literal* CodeGeneratorARM64::DeduplicateJitStringLiteral( const DexFile& dex_file, dex::StringIndex string_index, Handle handle) { - jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), - reinterpret_cast64(handle.GetReference())); + ReserveJitStringRoot(StringReference(&dex_file, string_index), handle); return jit_string_patches_.GetOrCreate( StringReference(&dex_file, string_index), [this]() { return __ CreateLiteralDestroyedWithPool(/* placeholder */ 0u); }); @@ -4694,8 +4694,7 @@ vixl::aarch64::Literal* CodeGeneratorARM64::DeduplicateJitStringLitera vixl::aarch64::Literal* CodeGeneratorARM64::DeduplicateJitClassLiteral( const DexFile& dex_file, dex::TypeIndex type_index, Handle handle) { - jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), - reinterpret_cast64(handle.GetReference())); + ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle); return jit_class_patches_.GetOrCreate( TypeReference(&dex_file, type_index), [this]() { return __ CreateLiteralDestroyedWithPool(/* placeholder */ 0u); }); @@ -5010,7 +5009,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA bool do_clinit = cls->MustGenerateClinitCheck(); if (generate_null_check || do_clinit) { DCHECK(cls->CanCallRuntime()); - SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64( + SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64( cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label); codegen_->AddSlowPath(slow_path); if (generate_null_check) { @@ -5150,7 +5149,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD ldr_label, kCompilerReadBarrierOption); SlowPathCodeARM64* slow_path = - new (GetGraph()->GetAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label); + new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label); codegen_->AddSlowPath(slow_path); __ Cbz(out.X(), slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); @@ -5391,8 +5390,7 @@ void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) { } void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) { - SlowPathCodeARM64* slow_path = - new (GetGraph()->GetAllocator()) NullCheckSlowPathARM64(instruction); + SlowPathCodeARM64* slow_path = new (GetScopedAllocator()) NullCheckSlowPathARM64(instruction); AddSlowPath(slow_path); LocationSummary* locations = instruction->GetLocations(); @@ -6034,7 +6032,7 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad( // Slow path 
marking the GC root `root`. The entrypoint will // be loaded by the slow path code. SlowPathCodeARM64* slow_path = - new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root); + new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root); codegen_->AddSlowPath(slow_path); // /* GcRoot */ root = *(obj + offset) @@ -6293,7 +6291,7 @@ void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* // Slow path marking the object `ref` when the GC is marking. The // entrypoint will be loaded by the slow path code. SlowPathCodeARM64* slow_path = - new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64( + new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64( instruction, ref, obj, @@ -6351,7 +6349,7 @@ void CodeGeneratorARM64::UpdateReferenceFieldWithBakerReadBarrier(HInstruction* // Slow path updating the object reference at address `obj + field_offset` // when the GC is marking. The entrypoint will be loaded by the slow path code. SlowPathCodeARM64* slow_path = - new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64( + new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64( instruction, ref, obj, @@ -6478,7 +6476,7 @@ void CodeGeneratorARM64::GenerateReadBarrierSlow(HInstruction* instruction, // not used by the artReadBarrierSlow entry point. // // TODO: Unpoison `ref` when it is used by artReadBarrierSlow. - SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) + SlowPathCodeARM64* slow_path = new (GetScopedAllocator()) ReadBarrierForHeapReferenceSlowPathARM64(instruction, out, ref, obj, offset, index); AddSlowPath(slow_path); @@ -6514,7 +6512,7 @@ void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instructio // Note that GC roots are not affected by heap poisoning, so we do // not need to do anything special for this here. 
SlowPathCodeARM64* slow_path = - new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root); + new (GetScopedAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root); AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); @@ -6560,17 +6558,13 @@ void CodeGeneratorARM64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_ for (const auto& entry : jit_string_patches_) { const StringReference& string_reference = entry.first; vixl::aarch64::Literal* table_entry_literal = entry.second; - const auto it = jit_string_roots_.find(string_reference); - DCHECK(it != jit_string_roots_.end()); - uint64_t index_in_table = it->second; + uint64_t index_in_table = GetJitStringRootIndex(string_reference); PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table); } for (const auto& entry : jit_class_patches_) { const TypeReference& type_reference = entry.first; vixl::aarch64::Literal* table_entry_literal = entry.second; - const auto it = jit_class_roots_.find(type_reference); - DCHECK(it != jit_class_roots_.end()); - uint64_t index_in_table = it->second; + uint64_t index_in_table = GetJitClassRootIndex(type_reference); PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table); } } diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index 251f390ce3..32acd667ab 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -4733,7 +4733,7 @@ void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) { void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) { DivZeroCheckSlowPathARMVIXL* slow_path = - new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARMVIXL(instruction); + new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathARMVIXL(instruction); codegen_->AddSlowPath(slow_path); LocationSummary* locations = instruction->GetLocations(); @@ -5959,7 +5959,7 @@ void CodeGeneratorARMVIXL::GenerateImplicitNullCheck(HNullCheck* instruction) { void CodeGeneratorARMVIXL::GenerateExplicitNullCheck(HNullCheck* instruction) { NullCheckSlowPathARMVIXL* slow_path = - new (GetGraph()->GetAllocator()) NullCheckSlowPathARMVIXL(instruction); + new (GetScopedAllocator()) NullCheckSlowPathARMVIXL(instruction); AddSlowPath(slow_path); __ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel()); } @@ -6432,7 +6432,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { SlowPathCodeARMVIXL* slow_path = nullptr; if (may_need_runtime_call_for_type_check) { - slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARMVIXL(instruction); + slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARMVIXL(instruction); codegen_->AddSlowPath(slow_path); if (instruction->GetValueCanBeNull()) { vixl32::Label non_zero; @@ -6693,7 +6693,7 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction int32_t index = Int32ConstantFrom(index_loc); if (index < 0 || index >= length) { SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction); + new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction); codegen_->AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); } else { @@ -6704,13 +6704,13 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction } SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetAllocator()) 
BoundsCheckSlowPathARMVIXL(instruction); + new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction); __ Cmp(RegisterFrom(index_loc), length); codegen_->AddSlowPath(slow_path); __ B(hs, slow_path->GetEntryLabel()); } else { SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction); + new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction); __ Cmp(RegisterFrom(length_loc), InputOperandAt(instruction, 0)); codegen_->AddSlowPath(slow_path); __ B(ls, slow_path->GetEntryLabel()); @@ -6777,7 +6777,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instru down_cast(instruction->GetSlowPath()); if (slow_path == nullptr) { slow_path = - new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor); + new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor); instruction->SetSlowPath(slow_path); codegen_->AddSlowPath(slow_path); if (successor != nullptr) { @@ -7214,8 +7214,9 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ if (generate_null_check || cls->MustGenerateClinitCheck()) { DCHECK(cls->CanCallRuntime()); - LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL( - cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); + LoadClassSlowPathARMVIXL* slow_path = + new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL( + cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); codegen_->AddSlowPath(slow_path); if (generate_null_check) { __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel()); @@ -7241,10 +7242,10 @@ void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) { void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) { // We assume the class is not null. LoadClassSlowPathARMVIXL* slow_path = - new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(), - check, - check->GetDexPc(), - /* do_clinit */ true); + new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(), + check, + check->GetDexPc(), + /* do_clinit */ true); codegen_->AddSlowPath(slow_path); GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0)); } @@ -7354,7 +7355,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE codegen_->EmitMovwMovtPlaceholder(labels, temp); GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption); LoadStringSlowPathARMVIXL* slow_path = - new (GetGraph()->GetAllocator()) LoadStringSlowPathARMVIXL(load); + new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load); codegen_->AddSlowPath(slow_path); __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); @@ -7681,8 +7682,8 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) kWithoutReadBarrier); __ Cmp(out, cls); DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction, - /* is_fatal */ false); + slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL( + instruction, /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ B(ne, slow_path->GetEntryLabel()); __ Mov(out, 1); @@ -7710,8 +7711,8 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) // call to the runtime not using a type checking slow path). 
// This should also be beneficial for the other cases above. DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction, - /* is_fatal */ false); + slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL( + instruction, /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); break; @@ -7789,8 +7790,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) { !instruction->CanThrowIntoCatchBlock(); } SlowPathCodeARMVIXL* type_check_slow_path = - new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction, - is_type_check_slow_path_fatal); + new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL( + instruction, is_type_check_slow_path_fatal); codegen_->AddSlowPath(type_check_slow_path); vixl32::Label done; @@ -8451,7 +8452,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad( // Slow path marking the GC root `root`. The entrypoint will // be loaded by the slow path code. SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root); + new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root); codegen_->AddSlowPath(slow_path); // /* GcRoot */ root = *(obj + offset) @@ -8700,7 +8701,7 @@ void CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier(HInstructio // Slow path marking the object `ref` when the GC is marking. The // entrypoint will be loaded by the slow path code. SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL( + new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL( instruction, ref, obj, offset, index, scale_factor, needs_null_check, temp_reg); AddSlowPath(slow_path); @@ -8746,8 +8747,8 @@ void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction // Slow path updating the object reference at address `obj + field_offset` // when the GC is marking. The entrypoint will be loaded by the slow path code. - SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator()) - LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL( + SlowPathCodeARMVIXL* slow_path = + new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL( instruction, ref, obj, @@ -8858,7 +8859,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierSlow(HInstruction* instruction, // not used by the artReadBarrierSlow entry point. // // TODO: Unpoison `ref` when it is used by artReadBarrierSlow. - SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator()) + SlowPathCodeARMVIXL* slow_path = new (GetScopedAllocator()) ReadBarrierForHeapReferenceSlowPathARMVIXL(instruction, out, ref, obj, offset, index); AddSlowPath(slow_path); @@ -8894,7 +8895,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruct // Note that GC roots are not affected by heap poisoning, so we do // not need to do anything special for this here. 
SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root); + new (GetScopedAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root); AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); @@ -9108,8 +9109,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral( const DexFile& dex_file, dex::StringIndex string_index, Handle handle) { - jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), - reinterpret_cast64(handle.GetReference())); + ReserveJitStringRoot(StringReference(&dex_file, string_index), handle); return jit_string_patches_.GetOrCreate( StringReference(&dex_file, string_index), [this]() { @@ -9120,8 +9120,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral( VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFile& dex_file, dex::TypeIndex type_index, Handle handle) { - jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), - reinterpret_cast64(handle.GetReference())); + ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle); return jit_class_patches_.GetOrCreate( TypeReference(&dex_file, type_index), [this]() { @@ -9401,17 +9400,13 @@ void CodeGeneratorARMVIXL::EmitJitRootPatches(uint8_t* code, const uint8_t* root for (const auto& entry : jit_string_patches_) { const StringReference& string_reference = entry.first; VIXLUInt32Literal* table_entry_literal = entry.second; - const auto it = jit_string_roots_.find(string_reference); - DCHECK(it != jit_string_roots_.end()); - uint64_t index_in_table = it->second; + uint64_t index_in_table = GetJitStringRootIndex(string_reference); PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table); } for (const auto& entry : jit_class_patches_) { const TypeReference& type_reference = entry.first; VIXLUInt32Literal* table_entry_literal = entry.second; - const auto it = jit_class_roots_.find(type_reference); - DCHECK(it != jit_class_roots_.end()); - uint64_t index_in_table = it->second; + uint64_t index_in_table = GetJitClassRootIndex(type_reference); PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table); } } diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index e58f43e1bb..c6346eb3b1 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -33,6 +33,7 @@ #include "mirror/array-inl.h" #include "mirror/class-inl.h" #include "offsets.h" +#include "stack_map_stream.h" #include "thread.h" #include "utils/assembler.h" #include "utils/mips/assembler_mips.h" @@ -1128,12 +1129,13 @@ void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) { __ FinalizeCode(); // Adjust native pc offsets in stack maps. - for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) { + StackMapStream* stack_map_stream = GetStackMapStream(); + for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) { uint32_t old_position = - stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips); + stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips); uint32_t new_position = __ GetAdjustedPosition(old_position); DCHECK_GE(new_position, old_position); - stack_map_stream_.SetStackMapNativePcOffset(i, new_position); + stack_map_stream->SetStackMapNativePcOffset(i, new_position); } // Adjust pc offsets for the disassembly information. 
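The MIPS hunks that follow switch the JIT root bookkeeping to the shared ReserveJitStringRoot/ReserveJitClassRoot and GetJitStringRootIndex/GetJitClassRootIndex helpers introduced in code_generator.cc above. Those helpers depend on storing a GC-root pointer in a uint64_t map value that EmitJitRoots later overwrites with the literal-table index. A self-contained toy of that pointer-through-uint64_t round trip; the PointerToUint64/Uint64ToPointer names are hypothetical stand-ins for ART's reinterpret_cast64 in base/casts.h, not the real implementation:

#include <cassert>
#include <cstdint>
#include <map>

// Hypothetical stand-ins for art::reinterpret_cast64: widen a pointer to
// uint64_t so a single map value type works on 32- and 64-bit hosts.
template <typename T>
uint64_t PointerToUint64(T* ptr) {
  return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr));
}

template <typename T>
T* Uint64ToPointer(uint64_t value) {
  assert(value <= UINTPTR_MAX);  // the round trip must be lossless
  return reinterpret_cast<T*>(static_cast<uintptr_t>(value));
}

int main() {
  int root = 42;  // stands in for a handle to a JIT string/class root
  std::map<int, uint64_t> jit_string_roots;
  // Phase 1 (ReserveJitStringRoot): the map value holds a pointer.
  jit_string_roots[0] = PointerToUint64(&root);
  // Phase 2 (EmitJitRoots): read the pointer back, then overwrite the slot
  // with the literal-table index for EmitJitRootPatches to consume.
  assert(*Uint64ToPointer<int>(jit_string_roots[0]) == 42);
  jit_string_roots[0] = 0u;  // now an index, no longer a pointer
  return 0;
}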
@@ -1788,21 +1790,19 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch( const DexFile& dex_file, - dex::StringIndex dex_index, + dex::StringIndex string_index, Handle handle) { - jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), - reinterpret_cast64(handle.GetReference())); - jit_string_patches_.emplace_back(dex_file, dex_index.index_); + ReserveJitStringRoot(StringReference(&dex_file, string_index), handle); + jit_string_patches_.emplace_back(dex_file, string_index.index_); return &jit_string_patches_.back(); } CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch( const DexFile& dex_file, - dex::TypeIndex dex_index, + dex::TypeIndex type_index, Handle handle) { - jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), - reinterpret_cast64(handle.GetReference())); - jit_class_patches_.emplace_back(dex_file, dex_index.index_); + ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle); + jit_class_patches_.emplace_back(dex_file, type_index.index_); return &jit_class_patches_.back(); } @@ -1834,17 +1834,13 @@ void CodeGeneratorMIPS::PatchJitRootUse(uint8_t* code, void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) { for (const JitPatchInfo& info : jit_string_patches_) { - const auto it = jit_string_roots_.find(StringReference(&info.target_dex_file, - dex::StringIndex(info.index))); - DCHECK(it != jit_string_roots_.end()); - uint64_t index_in_table = it->second; + StringReference string_reference(&info.target_dex_file, dex::StringIndex(info.index)); + uint64_t index_in_table = GetJitStringRootIndex(string_reference); PatchJitRootUse(code, roots_data, info, index_in_table); } for (const JitPatchInfo& info : jit_class_patches_) { - const auto it = jit_class_roots_.find(TypeReference(&info.target_dex_file, - dex::TypeIndex(info.index))); - DCHECK(it != jit_class_roots_.end()); - uint64_t index_in_table = it->second; + TypeReference type_reference(&info.target_dex_file, dex::TypeIndex(info.index)); + uint64_t index_in_table = GetJitClassRootIndex(type_reference); PatchJitRootUse(code, roots_data, info, index_in_table); } } @@ -1998,7 +1994,7 @@ void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATT void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor) { SuspendCheckSlowPathMIPS* slow_path = - new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS(instruction, successor); + new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS(instruction, successor); codegen_->AddSlowPath(slow_path); __ LoadFromOffset(kLoadUnsignedHalfword, @@ -2986,7 +2982,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { SlowPathCodeMIPS* slow_path = nullptr; if (may_need_runtime_call_for_type_check) { - slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS(instruction); + slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathMIPS(instruction); codegen_->AddSlowPath(slow_path); if (instruction->GetValueCanBeNull()) { MipsLabel non_zero; @@ -3171,7 +3167,7 @@ void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) { void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) { LocationSummary* locations = instruction->GetLocations(); BoundsCheckSlowPathMIPS* slow_path = - new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS(instruction); + new 
   codegen_->AddSlowPath(slow_path);
 
   Register index = locations->InAt(0).AsRegister<Register>();
@@ -3263,8 +3259,8 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
         !instruction->CanThrowIntoCatchBlock();
   }
   SlowPathCodeMIPS* slow_path =
-      new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
-                                                             is_type_check_slow_path_fatal);
+      new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+          instruction, is_type_check_slow_path_fatal);
   codegen_->AddSlowPath(slow_path);
 
   // Avoid this check if we know `obj` is not null.
@@ -3427,7 +3423,7 @@ void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
 
 void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
-  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
+  SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
       check->GetLoadClass(),
       check,
       check->GetDexPc(),
@@ -3884,7 +3880,7 @@ void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
 
 void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   SlowPathCodeMIPS* slow_path =
-      new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS(instruction);
+      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathMIPS(instruction);
   codegen_->AddSlowPath(slow_path);
   Location value = instruction->GetLocations()->InAt(0);
   DataType::Type type = instruction->GetType();
@@ -6692,7 +6688,7 @@ void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruc
       // Slow path marking the GC root `root`.
       Location temp = Location::RegisterLocation(T9);
       SlowPathCodeMIPS* slow_path =
-          new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(
+          new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(
               instruction,
               root,
               /*entrypoint*/ temp);
@@ -7019,14 +7015,14 @@ void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
     // to be null in this code path.
     DCHECK_EQ(offset, 0u);
     DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
-    slow_path = new (GetGraph()->GetAllocator())
+    slow_path = new (GetScopedAllocator())
         ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
                                                   ref,
                                                   obj,
                                                   /* field_offset */ index,
                                                   temp_reg);
   } else {
-    slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
+    slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
   }
   AddSlowPath(slow_path);
@@ -7062,7 +7058,7 @@ void CodeGeneratorMIPS::GenerateReadBarrierSlow(HInstruction* instruction,
   // not used by the artReadBarrierSlow entry point.
   //
   // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
-  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator())
+  SlowPathCodeMIPS* slow_path = new (GetScopedAllocator())
       ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
   AddSlowPath(slow_path);
@@ -7098,7 +7094,7 @@ void CodeGeneratorMIPS::GenerateReadBarrierForRootSlow(HInstruction* instruction
   // Note that GC roots are not affected by heap poisoning, so we do
   // not need to do anything special for this here.
   SlowPathCodeMIPS* slow_path =
-      new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
+      new (GetScopedAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
   AddSlowPath(slow_path);
 
   __ B(slow_path->GetEntryLabel());
@@ -7268,8 +7264,8 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
                                         maybe_temp_loc,
                                         kWithoutReadBarrier);
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
-                                                                         /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bne(out, cls, slow_path->GetEntryLabel());
       __ LoadConst32(out, 1);
@@ -7297,8 +7293,8 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
       // call to the runtime not using a type checking slow path).
      // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
-                                                                         /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       break;
@@ -7841,7 +7837,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
 
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
+    SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
         cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
@@ -8006,7 +8002,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
                               kCompilerReadBarrierOption,
                               &info_low->label);
       SlowPathCodeMIPS* slow_path =
-          new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS(load, info_high);
+          new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load, info_high);
       codegen_->AddSlowPath(slow_path);
       __ Beqz(out, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -8333,7 +8329,7 @@ void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
 }
 
 void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
-  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS(instruction);
+  SlowPathCodeMIPS* slow_path = new (GetScopedAllocator()) NullCheckSlowPathMIPS(instruction);
   AddSlowPath(slow_path);
 
   Location obj = instruction->GetLocations()->InAt(0);
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 5f2f90004d..7845e312cb 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -662,10 +662,10 @@ class CodeGeneratorMIPS : public CodeGenerator {
                        const JitPatchInfo& info,
                        uint64_t index_in_table) const;
   JitPatchInfo* NewJitRootStringPatch(const DexFile& dex_file,
-                                      dex::StringIndex dex_index,
+                                      dex::StringIndex string_index,
                                       Handle<mirror::String> handle);
   JitPatchInfo* NewJitRootClassPatch(const DexFile& dex_file,
-                                     dex::TypeIndex dex_index,
+                                     dex::TypeIndex type_index,
                                      Handle<mirror::Class> handle);
 
  private:
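The JIT-root hunks above and below all follow the same two-phase bookkeeping: `ReserveJitStringRoot`/`ReserveJitClassRoot` record each root keyed by its (dex file, index) reference with the handle address as a placeholder value, the emit step rewrites every value to its final root-table index, and `GetJitStringRootIndex`/`GetJitClassRootIndex` read that index back while patching. A rough sketch of the scheme using a plain `std::map` follows; the real code uses an arena-backed map and GC handles, and all names below are invented.

#include <cstdint>
#include <iostream>
#include <map>
#include <utility>

// (dex file id, string/type index); the real code keys on DexFile* references.
using Reference = std::pair<uint32_t, uint32_t>;

class JitRootTable {
 public:
  // Phase 1 (during codegen): remember the root, keyed by its reference.
  // The value temporarily holds the handle address, as in ReserveJitStringRoot.
  void Reserve(Reference ref, uint64_t handle_address) {
    roots_[ref] = handle_address;
  }
  // Phase 2 (emitting the root table): the table rows would be written from
  // the stored handles here; each map value is then overwritten with its
  // row index.
  void EmitAndIndex() {
    uint64_t index = 0u;
    for (auto& entry : roots_) {
      entry.second = index++;
    }
  }
  // Phase 3 (EmitJitRootPatches): look the final index back up when patching.
  uint64_t GetIndex(Reference ref) const { return roots_.at(ref); }
 private:
  std::map<Reference, uint64_t> roots_;
};

int main() {
  JitRootTable roots;
  roots.Reserve({0u, 42u}, 0xdeadbeefu);  // cf. ReserveJitStringRoot
  roots.Reserve({0u, 7u}, 0xcafebabeu);
  roots.EmitAndIndex();
  std::cout << roots.GetIndex({0u, 42u}) << "\n";  // cf. GetJitStringRootIndex
  return 0;
}

Reusing one map for both phases is why the per-backend `find` + `DCHECK` sequences in the old code collapse to a single accessor call in the new one.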
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 11120cf07a..557a1ec67d 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -31,6 +31,7 @@
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
 #include "offsets.h"
+#include "stack_map_stream.h"
 #include "thread.h"
 #include "utils/assembler.h"
 #include "utils/mips64/assembler_mips64.h"
@@ -1072,12 +1073,13 @@ void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
   __ FinalizeCode();
 
   // Adjust native pc offsets in stack maps.
-  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+  StackMapStream* stack_map_stream = GetStackMapStream();
+  for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
     uint32_t old_position =
-        stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
+        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
     uint32_t new_position = __ GetAdjustedPosition(old_position);
     DCHECK_GE(new_position, old_position);
-    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+    stack_map_stream->SetStackMapNativePcOffset(i, new_position);
   }
 
   // Adjust pc offsets for the disassembly information.
@@ -1681,8 +1683,7 @@ void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchIn
 Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
                                                           dex::StringIndex string_index,
                                                           Handle<mirror::String> handle) {
-  jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
-                              reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -1691,8 +1692,7 @@ Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_fil
 Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                          dex::TypeIndex type_index,
                                                          Handle<mirror::Class> handle) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
-                             reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -1712,17 +1712,13 @@ void CodeGeneratorMIPS64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots
   for (const auto& entry : jit_string_patches_) {
     const StringReference& string_reference = entry.first;
     Literal* table_entry_literal = entry.second;
-    const auto it = jit_string_roots_.find(string_reference);
-    DCHECK(it != jit_string_roots_.end());
-    uint64_t index_in_table = it->second;
+    uint64_t index_in_table = GetJitStringRootIndex(string_reference);
     PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
   }
   for (const auto& entry : jit_class_patches_) {
     const TypeReference& type_reference = entry.first;
     Literal* table_entry_literal = entry.second;
-    const auto it = jit_class_roots_.find(type_reference);
-    DCHECK(it != jit_class_roots_.end());
-    uint64_t index_in_table = it->second;
+    uint64_t index_in_table = GetJitClassRootIndex(type_reference);
     PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
   }
 }
@@ -1835,7 +1831,7 @@ void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind A
 void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                           HBasicBlock* successor) {
   SuspendCheckSlowPathMIPS64* slow_path =
-      new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
+      new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
   codegen_->AddSlowPath(slow_path);
 
   __ LoadFromOffset(kLoadUnsignedHalfword,
@@ -2543,7 +2539,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
 
   SlowPathCodeMIPS64* slow_path = nullptr;
   if (may_need_runtime_call_for_type_check) {
-    slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS64(instruction);
+    slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathMIPS64(instruction);
     codegen_->AddSlowPath(slow_path);
     if (instruction->GetValueCanBeNull()) {
       Mips64Label non_zero;
@@ -2700,7 +2696,7 @@ void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
 void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   BoundsCheckSlowPathMIPS64* slow_path =
-      new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+      new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
@@ -2792,8 +2788,8 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
         !instruction->CanThrowIntoCatchBlock();
   }
   SlowPathCodeMIPS64* slow_path =
-      new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
-                                                               is_type_check_slow_path_fatal);
+      new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+          instruction, is_type_check_slow_path_fatal);
   codegen_->AddSlowPath(slow_path);
 
   // Avoid this check if we know `obj` is not null.
@@ -2956,7 +2952,7 @@ void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
 
 void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
   // We assume the class is not null.
-  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
+  SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
       check->GetLoadClass(),
       check,
       check->GetDexPc(),
@@ -3430,7 +3426,7 @@ void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
 
 void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   SlowPathCodeMIPS64* slow_path =
-      new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
+      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
   codegen_->AddSlowPath(slow_path);
   Location value = instruction->GetLocations()->InAt(0);
 
@@ -5050,7 +5046,7 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(HInstruction* instr
       // Slow path marking the GC root `root`.
       Location temp = Location::RegisterLocation(T9);
       SlowPathCodeMIPS64* slow_path =
-          new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(
+          new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(
              instruction,
              root,
              /*entrypoint*/ temp);
@@ -5335,14 +5331,14 @@ void CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
     // above are expected to be null in this code path.
     DCHECK_EQ(offset, 0u);
     DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
-    slow_path = new (GetGraph()->GetAllocator())
+    slow_path = new (GetScopedAllocator())
         ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
                                                     ref,
                                                     obj,
                                                     /* field_offset */ index,
                                                     temp_reg);
   } else {
-    slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
+    slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
   }
   AddSlowPath(slow_path);
@@ -5378,7 +5374,7 @@ void CodeGeneratorMIPS64::GenerateReadBarrierSlow(HInstruction* instruction,
   // not used by the artReadBarrierSlow entry point.
   //
   // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
-  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator())
+  SlowPathCodeMIPS64* slow_path = new (GetScopedAllocator())
       ReadBarrierForHeapReferenceSlowPathMIPS64(instruction, out, ref, obj, offset, index);
   AddSlowPath(slow_path);
@@ -5414,7 +5410,7 @@ void CodeGeneratorMIPS64::GenerateReadBarrierForRootSlow(HInstruction* instructi
   // Note that GC roots are not affected by heap poisoning, so we do
   // not need to do anything special for this here.
   SlowPathCodeMIPS64* slow_path =
-      new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
+      new (GetScopedAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
   AddSlowPath(slow_path);
 
   __ Bc(slow_path->GetEntryLabel());
@@ -5584,8 +5580,8 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
                                         maybe_temp_loc,
                                         kWithoutReadBarrier);
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
-                                                                           /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bnec(out, cls, slow_path->GetEntryLabel());
       __ LoadConst32(out, 1);
@@ -5613,8 +5609,8 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
       // call to the runtime not using a type checking slow path).
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
-                                                                           /* is_fatal */ false);
+      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+          instruction, /* is_fatal */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bc(slow_path->GetEntryLabel());
       break;
@@ -6082,7 +6078,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
 
   if (generate_null_check || cls->MustGenerateClinitCheck()) {
     DCHECK(cls->CanCallRuntime());
-    SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
+    SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
         cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
     codegen_->AddSlowPath(slow_path);
     if (generate_null_check) {
@@ -6200,7 +6196,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
                               kCompilerReadBarrierOption,
                               &info_low->label);
       SlowPathCodeMIPS64* slow_path =
-          new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS64(load, info_high);
+          new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load, info_high);
       codegen_->AddSlowPath(slow_path);
       __ Beqzc(out, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
@@ -6464,7 +6460,7 @@ void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
 
 void CodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
   SlowPathCodeMIPS64* slow_path =
-      new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS64(instruction);
+      new (GetScopedAllocator()) NullCheckSlowPathMIPS64(instruction);
   AddSlowPath(slow_path);
 
   Location obj = instruction->GetLocations()->InAt(0);
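One x86-specific slow path touched below deserves a note: `DivRemMinusOneSlowPathX86` exists because `idiv` raises a divide-error fault for `INT32_MIN / -1`, while Java defines that quotient as `INT32_MIN` (and the corresponding remainder as 0). A small sketch of the guarded semantics the slow path provides, written in plain C++ rather than the emitted machine code:

#include <cassert>
#include <cstdint>
#include <limits>

// Mirrors the role of the minus-one slow path: guard the single input pair
// that would trap in hardware and produce the Java-defined result instead.
int32_t JavaDiv(int32_t dividend, int32_t divisor) {
  if (dividend == std::numeric_limits<int32_t>::min() && divisor == -1) {
    return dividend;  // Java: MIN_VALUE / -1 == MIN_VALUE (negation overflows).
  }
  return dividend / divisor;
}

int32_t JavaRem(int32_t dividend, int32_t divisor) {
  if (dividend == std::numeric_limits<int32_t>::min() && divisor == -1) {
    return 0;  // Java: MIN_VALUE % -1 == 0.
  }
  return dividend % divisor;
}

int main() {
  assert(JavaDiv(std::numeric_limits<int32_t>::min(), -1) ==
         std::numeric_limits<int32_t>::min());
  assert(JavaRem(std::numeric_limits<int32_t>::min(), -1) == 0);
  assert(JavaDiv(7, -2) == -3);
  return 0;
}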
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 39a07b82d1..3bcd7b91d7 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3581,7 +3581,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
       GenerateDivRemWithAnyConstant(instruction);
     }
   } else {
-    SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86(
+    SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) DivRemMinusOneSlowPathX86(
         instruction, out.AsRegister<Register>(), is_div);
     codegen_->AddSlowPath(slow_path);
 
@@ -3817,7 +3817,8 @@ void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
 }
 
 void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
-  SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86(instruction);
+  SlowPathCode* slow_path =
+      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathX86(instruction);
   codegen_->AddSlowPath(slow_path);
 
   LocationSummary* locations = instruction->GetLocations();
@@ -5149,7 +5150,7 @@ void CodeGeneratorX86::GenerateImplicitNullCheck(HNullCheck* instruction) {
 }
 
 void CodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruction) {
-  SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86(instruction);
+  SlowPathCode* slow_path = new (GetScopedAllocator()) NullCheckSlowPathX86(instruction);
   AddSlowPath(slow_path);
 
   LocationSummary* locations = instruction->GetLocations();
@@ -5427,7 +5428,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
     Location temp_loc = locations->GetTemp(0);
     Register temp = temp_loc.AsRegister<Register>();
     if (may_need_runtime_call_for_type_check) {
-      slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86(instruction);
+      slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathX86(instruction);
       codegen_->AddSlowPath(slow_path);
       if (instruction->GetValueCanBeNull()) {
         __ testl(register_value, register_value);
@@ -5618,7 +5619,7 @@ void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
   Location index_loc = locations->InAt(0);
   Location length_loc = locations->InAt(1);
   SlowPathCode* slow_path =
-      new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86(instruction);
+      new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathX86(instruction);
 
   if (length_loc.IsConstant()) {
     int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5719,7 +5720,8 @@ void InstructionCodeGeneratorX86::GenerateSuspendCheck(HSuspendCheck* instructio
   SuspendCheckSlowPathX86* slow_path =
       down_cast<SuspendCheckSlowPathX86*>(instruction->GetSlowPath());
   if (slow_path == nullptr) {
-    slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86(instruction, successor);
+    slow_path =
+        new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathX86(instruction, successor);
     instruction->SetSlowPath(slow_path);
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
@@ -6076,12 +6078,11 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
 }
 
 Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
-                                              dex::TypeIndex dex_index,
+                                              dex::TypeIndex type_index,
                                               Handle<mirror::Class> handle) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
-                             reinterpret_cast64<uint64_t>(handle.GetReference()));
+  ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   // Add a patch entry and return the label.
-  jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+  jit_class_patches_.emplace_back(dex_file, type_index.index_);
   PatchInfo