author      2018-12-28 09:39:56 -0800
committer   2019-01-02 10:32:25 -0800
commit      3f1dcd39e134d994ac88dcc4f30ec8cabcd8decf
tree        365d20ad6b68ff1dbd4903764b63880324136e4d  /compiler/optimizing
parent      0f0a4e40667c87fbd4ae5480eddbfd701bfabfa2
ART: Move dex structs into own header
Separating out the structs from DexFile allows them to be forward-
declared, which reduces the need to include the dex_file header.
Bug: 119869270
Test: m
Change-Id: I32dde5a632884bca7435cd584b4a81883de2e7b4
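
Illustrative sketch (not part of this commit): with CodeItem, TryItem, and the
other structs moved out of DexFile and into the dex:: namespace, a header that
only passes them around by pointer or reference can forward-declare them
instead of including the full dex_file header. The StackMapBuilder class below
is a hypothetical, simplified consumer used only to show the pattern.

    namespace art {
    namespace dex {
    // Forward declarations are enough for pointer/reference uses; the full
    // definitions are only needed by .cc files that dereference the structs.
    struct CodeItem;
    struct TryItem;
    }  // namespace dex

    // Hypothetical consumer header: it stores only a pointer to dex::CodeItem,
    // so it no longer has to include dex_file.h.
    class StackMapBuilder {
     public:
      void SetCodeItem(const dex::CodeItem* code_item) { code_item_ = code_item; }

     private:
      const dex::CodeItem* code_item_ = nullptr;
    };
    }  // namespace art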
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--   compiler/optimizing/block_builder.cc        | 16
-rw-r--r--   compiler/optimizing/code_generator.cc       |  4
-rw-r--r--   compiler/optimizing/code_generator.h        |  2
-rw-r--r--   compiler/optimizing/graph_visualizer.cc     |  2
-rw-r--r--   compiler/optimizing/inliner.cc              |  6
-rw-r--r--   compiler/optimizing/inliner.h               |  2
-rw-r--r--   compiler/optimizing/instruction_builder.cc  | 10
-rw-r--r--   compiler/optimizing/nodes.cc                |  2
-rw-r--r--   compiler/optimizing/optimizing_compiler.cc  | 12
-rw-r--r--   compiler/optimizing/optimizing_unit_test.h  |  2
10 files changed, 29 insertions, 29 deletions
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index d1ccbee5f5..3672cce4c5 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -68,7 +68,7 @@ bool HBasicBlockBuilder::CreateBranchTargets() {
   // places where the program might fall through into/out of the a block and
   // where TryBoundary instructions will be inserted later. Other edges which
   // enter/exit the try blocks are a result of branches/switches.
-  for (const DexFile::TryItem& try_item : code_item_accessor_.TryItems()) {
+  for (const dex::TryItem& try_item : code_item_accessor_.TryItems()) {
     uint32_t dex_pc_start = try_item.start_addr_;
     uint32_t dex_pc_end = dex_pc_start + try_item.insn_count_;
     MaybeCreateBlockAt(dex_pc_start);
@@ -222,9 +222,9 @@ void HBasicBlockBuilder::ConnectBasicBlocks() {
 }

 // Returns the TryItem stored for `block` or nullptr if there is no info for it.
-static const DexFile::TryItem* GetTryItem(
+static const dex::TryItem* GetTryItem(
     HBasicBlock* block,
-    const ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
+    const ScopedArenaSafeMap<uint32_t, const dex::TryItem*>& try_block_info) {
   auto iterator = try_block_info.find(block->GetBlockId());
   return (iterator == try_block_info.end()) ? nullptr : iterator->second;
 }
@@ -235,7 +235,7 @@ static const DexFile::TryItem* GetTryItem(
 // for a handler.
 static void LinkToCatchBlocks(HTryBoundary* try_boundary,
                               const CodeItemDataAccessor& accessor,
-                              const DexFile::TryItem* try_item,
+                              const dex::TryItem* try_item,
                               const ScopedArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
   for (CatchHandlerIterator it(accessor.GetCatchHandlerData(try_item->handler_off_));
       it.HasNext();
@@ -279,7 +279,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {

   // Keep a map of all try blocks and their respective TryItems. We do not use
   // the block's pointer but rather its id to ensure deterministic iteration.
-  ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
+  ScopedArenaSafeMap<uint32_t, const dex::TryItem*> try_block_info(
       std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));

   // Obtain TryItem information for blocks with throwing instructions, and split
@@ -295,7 +295,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
     // loop for synchronized blocks.
     if (ContainsElement(throwing_blocks_, block)) {
       // Try to find a TryItem covering the block.
-      const DexFile::TryItem* try_item = code_item_accessor_.FindTryItem(block->GetDexPc());
+      const dex::TryItem* try_item = code_item_accessor_.FindTryItem(block->GetDexPc());
       if (try_item != nullptr) {
         // Block throwing and in a TryItem. Store the try block information.
         try_block_info.Put(block->GetBlockId(), try_item);
@@ -348,7 +348,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
   // that all predecessors are relinked to. This preserves loop headers (b/23895756).
   for (const auto& entry : try_block_info) {
     uint32_t block_id = entry.first;
-    const DexFile::TryItem* try_item = entry.second;
+    const dex::TryItem* try_item = entry.second;
     HBasicBlock* try_block = graph_->GetBlocks()[block_id];
     for (HBasicBlock* predecessor : try_block->GetPredecessors()) {
       if (GetTryItem(predecessor, try_block_info) != try_item) {
@@ -367,7 +367,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
   // the successor is not in the same TryItem.
   for (const auto& entry : try_block_info) {
     uint32_t block_id = entry.first;
-    const DexFile::TryItem* try_item = entry.second;
+    const dex::TryItem* try_item = entry.second;
     HBasicBlock* try_block = graph_->GetBlocks()[block_id];
     // NOTE: Do not use iterators because SplitEdge would invalidate them.
     for (size_t i = 0, e = try_block->GetSuccessors().size(); i < e; ++i) {
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 04e0cc4bdd..9e2f5cd508 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -987,7 +987,7 @@ static void CheckCovers(uint32_t dex_pc,
 // dex branch instructions.
 static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
                                             const CodeInfo& code_info,
-                                            const DexFile::CodeItem& code_item) {
+                                            const dex::CodeItem& code_item) {
   if (graph.HasTryCatch()) {
     // One can write loops through try/catch, which we do not support for OSR anyway.
     return;
@@ -1029,7 +1029,7 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
   }
 }

-ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const DexFile::CodeItem* code_item) {
+ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
   ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
   if (kIsDebugBuild && code_item != nullptr) {
     CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 39966ff8ea..f70ecb612d 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -349,7 +349,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {

   void AddSlowPath(SlowPathCode* slow_path);

-  ScopedArenaVector<uint8_t> BuildStackMaps(const DexFile::CodeItem* code_item_for_osr_check);
+  ScopedArenaVector<uint8_t> BuildStackMaps(const dex::CodeItem* code_item_for_osr_check);
   size_t GetNumberOfJitRoots() const;

   // Fills the `literals` array with literals collected during code generation.
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 07966207f7..2a7bbcb72f 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -393,7 +393,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
   void VisitLoadMethodType(HLoadMethodType* load_method_type) override {
     StartAttributeStream("load_kind") << "RuntimeCall";
     const DexFile& dex_file = load_method_type->GetDexFile();
-    const DexFile::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
+    const dex::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
     StartAttributeStream("method_type") << dex_file.GetProtoSignature(proto_id);
   }

diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 854228beb9..8440e9aa4c 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1756,7 +1756,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
                                        HInstruction** return_replacement) {
   DCHECK(!(resolved_method->IsStatic() && receiver_type.IsValid()));
   ScopedObjectAccess soa(Thread::Current());
-  const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
+  const dex::CodeItem* code_item = resolved_method->GetCodeItem();
   const DexFile& callee_dex_file = *resolved_method->GetDexFile();
   uint32_t method_index = resolved_method->GetDexMethodIndex();
   CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
@@ -2027,7 +2027,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
 }

 void HInliner::RunOptimizations(HGraph* callee_graph,
-                                const DexFile::CodeItem* code_item,
+                                const dex::CodeItem* code_item,
                                 const DexCompilationUnit& dex_compilation_unit) {
   // Note: if the outermost_graph_ is being compiled OSR, we should not run any
   // optimization that could lead to a HDeoptimize. The following optimizations do not.
@@ -2112,7 +2112,7 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod*
   // Iterate over the list of parameter types and test whether any of the
   // actual inputs has a more specific reference type than the type declared in
   // the signature.
-  const DexFile::TypeList* param_list = resolved_method->GetParameterTypeList();
+  const dex::TypeList* param_list = resolved_method->GetParameterTypeList();
   for (size_t param_idx = 0,
               input_idx = resolved_method->IsStatic() ? 0 : 1,
               e = (param_list == nullptr ? 0 : param_list->Size());
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 8ac2163a94..efd4c74079 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -99,7 +99,7 @@ class HInliner : public HOptimization {

   // Run simple optimizations on `callee_graph`.
   void RunOptimizations(HGraph* callee_graph,
-                        const DexFile::CodeItem* code_item,
+                        const dex::CodeItem* code_item,
                         const DexCompilationUnit& dex_compilation_unit)
     REQUIRES_SHARED(Locks::mutator_lock_);

diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index b6ef2b614f..5e7b57523f 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -559,7 +559,7 @@ void HInstructionBuilder::InitializeParameters() {
   uint16_t locals_index = graph_->GetNumberOfLocalVRegs();
   uint16_t parameter_index = 0;

-  const DexFile::MethodId& referrer_method_id =
+  const dex::MethodId& referrer_method_id =
       dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
   if (!dex_compilation_unit_->IsStatic()) {
     // Add the implicit 'this' argument, not expressed in the signature.
@@ -576,8 +576,8 @@ void HInstructionBuilder::InitializeParameters() {
     DCHECK(current_this_parameter_ == nullptr);
   }

-  const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
-  const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
+  const dex::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
+  const dex::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
   for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) {
     HParameterValue* parameter = new (allocator_) HParameterValue(
         *dex_file_,
@@ -1515,7 +1515,7 @@ bool HInstructionBuilder::HandleStringInit(HInvoke* invoke,
 }

 static DataType::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) {
-  const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+  const dex::FieldId& field_id = dex_file.GetFieldId(field_index);
   const char* type = dex_file.GetFieldTypeDescriptor(field_id);
   return DataType::FromShorty(type[0]);
 }
@@ -3143,7 +3143,7 @@ ObjPtr<mirror::Class> HInstructionBuilder::LookupResolvedType(

 ObjPtr<mirror::Class> HInstructionBuilder::LookupReferrerClass() const {
   // TODO: Cache the result in a Handle<mirror::Class>.
-  const DexFile::MethodId& method_id =
+  const dex::MethodId& method_id =
       dex_compilation_unit_->GetDexFile()->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
   return LookupResolvedType(method_id.class_idx_, *dex_compilation_unit_);
 }
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index e9a2f96798..f7c16d1d02 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -689,7 +689,7 @@ HCurrentMethod* HGraph::GetCurrentMethod() {
 }

 const char* HGraph::GetMethodName() const {
-  const DexFile::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
+  const dex::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
   return dex_file_.GetMethodName(method_id);
 }

diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a8fa370993..3b34e8d0f6 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -272,7 +272,7 @@ class OptimizingCompiler final : public Compiler {

   bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;

-  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
+  CompiledMethod* Compile(const dex::CodeItem* code_item,
                           uint32_t access_flags,
                           InvokeType invoke_type,
                           uint16_t class_def_idx,
@@ -370,7 +370,7 @@ class OptimizingCompiler final : public Compiler {
   CompiledMethod* Emit(ArenaAllocator* allocator,
                        CodeVectorAllocator* code_allocator,
                        CodeGenerator* codegen,
-                       const DexFile::CodeItem* item) const;
+                       const dex::CodeItem* item) const;

   // Try compiling a method and return the code generator used for
   // compiling it.
@@ -760,7 +760,7 @@ static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator*
 CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
                                          CodeVectorAllocator* code_allocator,
                                          CodeGenerator* codegen,
-                                         const DexFile::CodeItem* code_item_for_osr_check) const {
+                                         const dex::CodeItem* code_item_for_osr_check) const {
   ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);

   ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);
@@ -799,7 +799,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
   InstructionSet instruction_set = compiler_options.GetInstructionSet();
   const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
   uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
-  const DexFile::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
+  const dex::CodeItem* code_item = dex_compilation_unit.GetCodeItem();

   // Always use the Thumb-2 assembler: some runtime functionality
   // (like implicit stack overflow checks) assume Thumb-2.
@@ -1033,7 +1033,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
   return codegen.release();
 }

-CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
+CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
                                             uint32_t access_flags,
                                             InvokeType invoke_type,
                                             uint16_t class_def_idx,
@@ -1254,7 +1254,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,

   const DexFile* dex_file = method->GetDexFile();
   const uint16_t class_def_idx = method->GetClassDefIndex();
-  const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+  const dex::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
   const uint32_t method_idx = method->GetDexMethodIndex();
   const uint32_t access_flags = method->GetAccessFlags();

diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 4e376b1c57..e5f694109a 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -155,7 +155,7 @@ class OptimizingUnitTestHelper {
     void* aligned_data = GetAllocator()->Alloc(code_item_size);
     memcpy(aligned_data, &data[0], code_item_size);
     CHECK_ALIGNED(aligned_data, StandardDexFile::CodeItem::kAlignment);
-    const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(aligned_data);
+    const dex::CodeItem* code_item = reinterpret_cast<const dex::CodeItem*>(aligned_data);

     {
       ScopedObjectAccess soa(Thread::Current());