Diffstat (limited to 'compiler')
-rw-r--r--   compiler/dex/dex_to_dex_compiler.cc          |   4
-rw-r--r--   compiler/dex/dex_to_dex_decompiler_test.cc   |   7
-rw-r--r--   compiler/driver/compiler_driver.cc           |  20
-rw-r--r--   compiler/exception_test.cc                   |  37
-rw-r--r--   compiler/optimizing/intrinsics_arm_vixl.cc   |   2
-rw-r--r--   compiler/optimizing/stack_map_stream.cc      | 101
-rw-r--r--   compiler/optimizing/stack_map_stream.h       |  12
-rw-r--r--   compiler/optimizing/stack_map_test.cc        |   4
-rw-r--r--   compiler/verifier_deps_test.cc               |   5
9 files changed, 61 insertions, 131 deletions
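
Most of the churn below comes from switching ClassAccessor callers from the VisitMethods() callback to range-based iteration over GetMethods(), which lets loop bodies use plain continue/break and drops the REQUIRES_SHARED lambda annotations. As a rough illustration of the two shapes (a standalone sketch with simplified stand-ins, not ART's real ClassAccessor/Method types), the pattern change looks like this:

#include <cstdint>
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

// Stand-in for ClassAccessor::Method; the real type lives in libdexfile and
// carries far more state (code item, access flags, hiddenapi data, ...).
struct Method {
  uint32_t index;
};

// Simplified accessor exposing both styles side by side.
class Accessor {
 public:
  explicit Accessor(std::vector<Method> methods) : methods_(std::move(methods)) {}

  // Old style: caller passes a callback that is invoked once per method.
  void VisitMethods(const std::function<void(const Method&)>& visitor) const {
    for (const Method& m : methods_) {
      visitor(m);
    }
  }

  // New style: expose an iterable range so callers can use range-based for.
  const std::vector<Method>& GetMethods() const { return methods_; }

 private:
  std::vector<Method> methods_;
};

int main() {
  Accessor accessor({{1}, {1}, {2}});

  // Old pattern: skipping an element means returning from the lambda.
  accessor.VisitMethods([&](const Method& m) { std::cout << "visit " << m.index << "\n"; });

  // New pattern, as in the diff: plain loop control works naturally.
  int64_t previous = -1;
  for (const Method& m : accessor.GetMethods()) {
    if (m.index == previous) {
      continue;  // duplicate method_idx, e.g. in smali-generated dex files
    }
    previous = m.index;
    std::cout << "compile " << m.index << "\n";
  }
  return 0;
}

This is why CompileDexFile in the diff can replace "return;" with "continue;" when it sees a repeated method_idx.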
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 68155d844a..fb6a72b1c5 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -635,13 +635,13 @@ void DexToDexCompiler::SetDexFiles(const std::vector<const DexFile*>& dex_files)
   std::unordered_set<const DexFile::CodeItem*> seen_code_items;
   for (const DexFile* dex_file : dex_files) {
     for (ClassAccessor accessor : dex_file->GetClasses()) {
-      accessor.VisitMethods([&](const ClassAccessor::Method& method) {
+      for (const ClassAccessor::Method& method : accessor.GetMethods()) {
         const DexFile::CodeItem* code_item = method.GetCodeItem();
         // Detect the shared code items.
         if (!seen_code_items.insert(code_item).second) {
           shared_code_items_.insert(code_item);
         }
-      });
+      }
     }
   }
   VLOG(compiler) << "Shared code items " << shared_code_items_.size();
diff --git a/compiler/dex/dex_to_dex_decompiler_test.cc b/compiler/dex/dex_to_dex_decompiler_test.cc
index 082e6091d2..75de238211 100644
--- a/compiler/dex/dex_to_dex_decompiler_test.cc
+++ b/compiler/dex/dex_to_dex_decompiler_test.cc
@@ -85,10 +85,9 @@ class DexToDexDecompilerTest : public CommonCompilerTest {
     for (uint32_t i = 0; i < updated_dex_file->NumClassDefs(); ++i) {
       // Unquicken each method.
       ClassAccessor accessor(*updated_dex_file, updated_dex_file->GetClassDef(i));
-      accessor.VisitMethods([&](const ClassAccessor::Method& method) {
+      for (const ClassAccessor::Method& method : accessor.GetMethods()) {
         CompiledMethod* compiled_method = compiler_driver_->GetCompiledMethod(
-            MethodReference(updated_dex_file,
-                            method.GetIndex()));
+            method.GetReference());
         ArrayRef<const uint8_t> table;
         if (compiled_method != nullptr) {
           table = compiled_method->GetVmapTable();
@@ -97,7 +96,7 @@ class DexToDexDecompilerTest : public CommonCompilerTest {
                             *accessor.GetCodeItem(method),
                             table,
                             /* decompile_return_instruction */ true);
-      });
+      }
     }

     // Make sure after unquickening we go back to the same contents as the original dex file.
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index decb330f3b..16f2d0f2cc 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -790,8 +790,7 @@ static void ResolveConstStrings(CompilerDriver* driver,
       // FIXME: Make sure that inlining honors this. b/26687569
      continue;
     }
-    accessor.VisitMethods([&](const ClassAccessor::Method& method)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
+    for (const ClassAccessor::Method& method : accessor.GetMethods()) {
       // Resolve const-strings in the code. Done to have deterministic allocation behavior. Right
       // now this is single-threaded for simplicity.
       // TODO: Collect the relevant string indices in parallel, then allocate them sequentially
@@ -812,7 +811,7 @@ static void ResolveConstStrings(CompilerDriver* driver,
           break;
         }
       }
-    });
+    }
   }
  }
 }
@@ -880,10 +879,9 @@ static void InitializeTypeCheckBitstrings(CompilerDriver* driver,
     }

     // Direct and virtual methods.
-    accessor.VisitMethods([&](const ClassAccessor::Method& method)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
+    for (const ClassAccessor::Method& method : accessor.GetMethods()) {
       InitializeTypeCheckBitstrings(driver, class_linker, dex_cache, *dex_file, method);
-    });
+    }
   }
  }
 }
@@ -1949,9 +1947,9 @@ bool CompilerDriver::FastVerify(jobject jclass_loader,
         // - We're only going to compile methods that did verify.
         // - Quickening will not do checkcast ellision.
         // TODO(ngeoffray): Reconsider this once we refactor compiler filters.
-        accessor.VisitMethods([&](const ClassAccessor::Method& method) {
+        for (const ClassAccessor::Method& method : accessor.GetMethods()) {
           verification_results_->CreateVerifiedMethodFor(method.GetReference());
-        });
+        }
       }
     } else if (!compiler_only_verifies) {
       // Make sure later compilation stages know they should not try to verify
@@ -2747,12 +2745,12 @@ static void CompileDexFile(CompilerDriver* driver,

     // Compile direct and virtual methods.
     int64_t previous_method_idx = -1;
-    accessor.VisitMethods([&](const ClassAccessor::Method& method) {
+    for (const ClassAccessor::Method& method : accessor.GetMethods()) {
       const uint32_t method_idx = method.GetIndex();
       if (method_idx == previous_method_idx) {
         // smali can create dex files with two encoded_methods sharing the same method_idx
         // http://code.google.com/p/smali/issues/detail?id=119
-        return;
+        continue;
       }
       previous_method_idx = method_idx;
       compile_fn(soa.Self(),
@@ -2767,7 +2765,7 @@ static void CompileDexFile(CompilerDriver* driver,
                  dex_to_dex_compilation_level,
                  compilation_enabled,
                  dex_cache);
-    });
+    }
   };
   context.ForAllLambda(0, dex_file.NumClassDefs(), compile, thread_count);
 }
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index c139fcf1d8..da1db4593b 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -68,47 +68,38 @@ class ExceptionTest : public CommonRuntimeTest {
       fake_code_.push_back(0x70 | i);
     }

+    const uint32_t native_pc_offset = 4u;
+    CHECK_ALIGNED_PARAM(native_pc_offset, GetInstructionSetInstructionAlignment(kRuntimeISA));
+
     MallocArenaPool pool;
     ArenaStack arena_stack(&pool);
     ScopedArenaAllocator allocator(&arena_stack);
     StackMapStream stack_maps(&allocator, kRuntimeISA);
     stack_maps.BeginStackMapEntry(kDexPc,
-                                  /* native_pc_offset */ 3u,
+                                  native_pc_offset,
                                   /* register_mask */ 0u,
                                   /* sp_mask */ nullptr,
                                   /* num_dex_registers */ 0u,
                                   /* inlining_depth */ 0u);
     stack_maps.EndStackMapEntry();
-    size_t stack_maps_size = stack_maps.PrepareForFillIn();
-    size_t stack_maps_offset = stack_maps_size + sizeof(OatQuickMethodHeader);
+    const size_t stack_maps_size = stack_maps.PrepareForFillIn();
+    const size_t header_size = sizeof(OatQuickMethodHeader);
+    const size_t code_alignment = GetInstructionSetAlignment(kRuntimeISA);
+    const size_t code_offset = RoundUp(stack_maps_size + header_size, code_alignment);

-    fake_header_code_and_maps_.resize(stack_maps_offset + fake_code_.size());
+    fake_header_code_and_maps_.resize(code_offset + fake_code_.size());
     MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size);
     stack_maps.FillInCodeInfo(stack_maps_region);
-    OatQuickMethodHeader method_header(stack_maps_offset, 0u, 4 * sizeof(void*), 0u, 0u, code_size);
-    memcpy(&fake_header_code_and_maps_[stack_maps_size], &method_header, sizeof(method_header));
+    OatQuickMethodHeader method_header(code_offset, 0u, 4 * sizeof(void*), 0u, 0u, code_size);
+    memcpy(&fake_header_code_and_maps_[code_offset - header_size], &method_header, header_size);
     std::copy(fake_code_.begin(),
               fake_code_.end(),
-              fake_header_code_and_maps_.begin() + stack_maps_offset);
-
-    // Align the code.
-    const size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
-    fake_header_code_and_maps_.reserve(fake_header_code_and_maps_.size() + alignment);
-    const void* unaligned_code_ptr =
-        fake_header_code_and_maps_.data() + (fake_header_code_and_maps_.size() - code_size);
-    size_t offset = dchecked_integral_cast<size_t>(reinterpret_cast<uintptr_t>(unaligned_code_ptr));
-    size_t padding = RoundUp(offset, alignment) - offset;
-    // Make sure no resizing takes place.
-    CHECK_GE(fake_header_code_and_maps_.capacity(), fake_header_code_and_maps_.size() + padding);
-    fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(), padding, 0);
-    const void* code_ptr = reinterpret_cast<const uint8_t*>(unaligned_code_ptr) + padding;
-    CHECK_EQ(code_ptr,
-             static_cast<const void*>(fake_header_code_and_maps_.data() +
-                 (fake_header_code_and_maps_.size() - code_size)));
+              fake_header_code_and_maps_.begin() + code_offset);
+    const void* code_ptr = fake_header_code_and_maps_.data() + code_offset;

     if (kRuntimeISA == InstructionSet::kArm) {
       // Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer().
-      CHECK_ALIGNED(stack_maps_offset, 2);
+      CHECK_ALIGNED(code_ptr, 2);
     }

     method_f_ = my_klass_->FindClassMethod("f", "()I", kRuntimePointerSize);
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 5287b4b2fa..fecf1ccbfa 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -25,7 +25,7 @@
 #include "mirror/array-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/reference.h"
-#include "mirror/string.h"
+#include "mirror/string-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index c6e375a1b2..b40ea3768a 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -48,10 +48,6 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
         ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
     current_entry_.dex_register_entry.live_dex_registers_mask->ClearAllBits();
   }
-  if (sp_mask != nullptr) {
-    stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
-  }
-
   current_dex_register_ = 0;
 }
@@ -217,11 +213,32 @@ size_t StackMapStream::PrepareForFillIn() {
   PrepareMethodIndices();

   // Dedup stack masks. Needs to be done first as it modifies the stack map entry.
-  size_t stack_mask_bits = stack_mask_max_ + 1;  // Need room for max element too.
-  size_t num_stack_masks = PrepareStackMasks(stack_mask_bits);
+  BitmapTableBuilder stack_mask_builder(allocator_);
+  for (StackMapEntry& stack_map : stack_maps_) {
+    BitVector* mask = stack_map.sp_mask;
+    size_t num_bits = (mask != nullptr) ? mask->GetNumberOfBits() : 0;
+    if (num_bits != 0) {
+      stack_map.stack_mask_index = stack_mask_builder.Dedup(mask->GetRawStorage(), num_bits);
+    } else {
+      stack_map.stack_mask_index = StackMap::kNoValue;
+    }
+  }

   // Dedup register masks. Needs to be done first as it modifies the stack map entry.
-  size_t num_register_masks = PrepareRegisterMasks();
+  BitTableBuilder<std::array<uint32_t, RegisterMask::kCount>> register_mask_builder(allocator_);
+  for (StackMapEntry& stack_map : stack_maps_) {
+    uint32_t register_mask = stack_map.register_mask;
+    if (register_mask != 0) {
+      uint32_t shift = LeastSignificantBit(register_mask);
+      std::array<uint32_t, RegisterMask::kCount> entry = {
+        register_mask >> shift,
+        shift,
+      };
+      stack_map.register_mask_index = register_mask_builder.Dedup(&entry);
+    } else {
+      stack_map.register_mask_index = StackMap::kNoValue;
+    }
+  }

   // Write dex register maps.
   MemoryRegion dex_register_map_region =
@@ -301,31 +318,8 @@ size_t StackMapStream::PrepareForFillIn() {
   stack_map_builder.Encode(&out_, &bit_offset);
   invoke_info_builder.Encode(&out_, &bit_offset);
   inline_info_builder.Encode(&out_, &bit_offset);
-
-  // Write register masks table.
-  BitTableBuilder<uint32_t> register_mask_builder(allocator_);
-  for (size_t i = 0; i < num_register_masks; ++i) {
-    register_mask_builder.Add(register_masks_[i]);
-  }
   register_mask_builder.Encode(&out_, &bit_offset);
-
-  // Write stack masks table.
-  EncodeVarintBits(&out_, &bit_offset, stack_mask_bits);
-  out_.resize(BitsToBytesRoundUp(bit_offset + stack_mask_bits * num_stack_masks));
-  BitMemoryRegion stack_mask_region(MemoryRegion(out_.data(), out_.size()),
-                                    bit_offset,
-                                    stack_mask_bits * num_stack_masks);
-  if (stack_mask_bits > 0) {
-    for (size_t i = 0; i < num_stack_masks; ++i) {
-      size_t stack_mask_bytes = BitsToBytesRoundUp(stack_mask_bits);
-      BitMemoryRegion src(MemoryRegion(&stack_masks_[i * stack_mask_bytes], stack_mask_bytes));
-      BitMemoryRegion dst = stack_mask_region.Subregion(i * stack_mask_bits, stack_mask_bits);
-      for (size_t bit_index = 0; bit_index < stack_mask_bits; bit_index += BitSizeOf<uint32_t>()) {
-        size_t num_bits = std::min<size_t>(stack_mask_bits - bit_index, BitSizeOf<uint32_t>());
-        dst.StoreBits(bit_index, src.LoadBits(bit_index, num_bits), num_bits);
-      }
-    }
-  }
+  stack_mask_builder.Encode(&out_, &bit_offset);

   return UnsignedLeb128Size(out_.size()) + out_.size();
 }
@@ -448,17 +442,6 @@ void StackMapStream::CheckDexRegisterMap(const CodeInfo& code_info,
   }
 }

-size_t StackMapStream::PrepareRegisterMasks() {
-  register_masks_.resize(stack_maps_.size(), 0u);
-  ScopedArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
-  for (StackMapEntry& stack_map : stack_maps_) {
-    const size_t index = dedupe.size();
-    stack_map.register_mask_index = dedupe.emplace(stack_map.register_mask, index).first->second;
-    register_masks_[index] = stack_map.register_mask;
-  }
-  return dedupe.size();
-}
-
 void StackMapStream::PrepareMethodIndices() {
   CHECK(method_indices_.empty());
   method_indices_.resize(stack_maps_.size() + inline_infos_.size());
@@ -481,35 +464,10 @@ void StackMapStream::PrepareMethodIndices() {
   method_indices_.resize(dedupe.size());
 }

-
-size_t StackMapStream::PrepareStackMasks(size_t entry_size_in_bits) {
-  // Preallocate memory since we do not want it to move (the dedup map will point into it).
-  const size_t byte_entry_size = RoundUp(entry_size_in_bits, kBitsPerByte) / kBitsPerByte;
-  stack_masks_.resize(byte_entry_size * stack_maps_.size(), 0u);
-  // For deduplicating we store the stack masks as byte packed for simplicity. We can bit pack later
-  // when copying out from stack_masks_.
-  ScopedArenaUnorderedMap<MemoryRegion,
-                          size_t,
-                          FNVHash<MemoryRegion>,
-                          MemoryRegion::ContentEquals> dedup(
-      stack_maps_.size(), allocator_->Adapter(kArenaAllocStackMapStream));
-  for (StackMapEntry& stack_map : stack_maps_) {
-    size_t index = dedup.size();
-    MemoryRegion stack_mask(stack_masks_.data() + index * byte_entry_size, byte_entry_size);
-    BitMemoryRegion stack_mask_bits(stack_mask);
-    for (size_t i = 0; i < entry_size_in_bits; i++) {
-      stack_mask_bits.StoreBit(i, stack_map.sp_mask != nullptr && stack_map.sp_mask->IsBitSet(i));
-    }
-    stack_map.stack_mask_index = dedup.emplace(stack_mask, index).first->second;
-  }
-  return dedup.size();
-}
-
 // Check that all StackMapStream inputs are correctly encoded by trying to read them back.
 void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
   CodeInfo code_info(region);
   DCHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size());
-  DCHECK_EQ(code_info.GetNumberOfStackMaskBits(), static_cast<uint32_t>(stack_mask_max_ + 1));
   DCHECK_EQ(code_info.GetNumberOfLocationCatalogEntries(), location_catalog_entries_.size());
   size_t invoke_info_index = 0;
   for (size_t s = 0; s < stack_maps_.size(); ++s) {
@@ -522,18 +480,15 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
     DCHECK_EQ(stack_map.GetDexPc(), entry.dex_pc);
     DCHECK_EQ(stack_map.GetRegisterMaskIndex(), entry.register_mask_index);
     DCHECK_EQ(code_info.GetRegisterMaskOf(stack_map), entry.register_mask);
-    const size_t num_stack_mask_bits = code_info.GetNumberOfStackMaskBits();
     DCHECK_EQ(stack_map.GetStackMaskIndex(), entry.stack_mask_index);
     BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
     if (entry.sp_mask != nullptr) {
       DCHECK_GE(stack_mask.size_in_bits(), entry.sp_mask->GetNumberOfBits());
-      for (size_t b = 0; b < num_stack_mask_bits; b++) {
-        DCHECK_EQ(stack_mask.LoadBit(b), entry.sp_mask->IsBitSet(b));
+      for (size_t b = 0; b < stack_mask.size_in_bits(); b++) {
+        DCHECK_EQ(stack_mask.LoadBit(b), entry.sp_mask->IsBitSet(b)) << b;
       }
     } else {
-      for (size_t b = 0; b < num_stack_mask_bits; b++) {
-        DCHECK_EQ(stack_mask.LoadBit(b), 0u);
-      }
+      DCHECK_EQ(stack_mask.size_in_bits(), 0u);
     }
     if (entry.dex_method_index != dex::kDexNoIndex) {
       InvokeInfo invoke_info = code_info.GetInvokeInfo(invoke_info_index);
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index ea97cf6530..19863d882a 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -68,11 +68,8 @@ class StackMapStream : public ValueObject {
         location_catalog_entries_indices_(allocator->Adapter(kArenaAllocStackMapStream)),
         dex_register_locations_(allocator->Adapter(kArenaAllocStackMapStream)),
         inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
-        stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
-        register_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
         method_indices_(allocator->Adapter(kArenaAllocStackMapStream)),
         dex_register_entries_(allocator->Adapter(kArenaAllocStackMapStream)),
-        stack_mask_max_(-1),
         out_(allocator->Adapter(kArenaAllocStackMapStream)),
         dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(),
                                            allocator->Adapter(kArenaAllocStackMapStream)),
@@ -171,12 +168,6 @@ class StackMapStream : public ValueObject {
  private:
   size_t ComputeDexRegisterLocationCatalogSize() const;

-  // Returns the number of unique stack masks.
-  size_t PrepareStackMasks(size_t entry_size_in_bits);
-
-  // Returns the number of unique register masks.
-  size_t PrepareRegisterMasks();
-
   // Prepare and deduplicate method indices.
   void PrepareMethodIndices();
@@ -217,11 +208,8 @@ class StackMapStream : public ValueObject {
   // A set of concatenated maps of Dex register locations indices to `location_catalog_entries_`.
   ScopedArenaVector<size_t> dex_register_locations_;
   ScopedArenaVector<InlineInfoEntry> inline_infos_;
-  ScopedArenaVector<uint8_t> stack_masks_;
-  ScopedArenaVector<uint32_t> register_masks_;
   ScopedArenaVector<uint32_t> method_indices_;
   ScopedArenaVector<DexRegisterMapEntry> dex_register_entries_;
-  int stack_mask_max_;

   ScopedArenaVector<uint8_t> out_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 9db7588b3a..c372bb9b22 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -32,10 +32,10 @@ static bool CheckStackMask(
     const StackMap& stack_map,
     const BitVector& bit_vector) {
   BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
-  if (bit_vector.GetNumberOfBits() > code_info.GetNumberOfStackMaskBits()) {
+  if (bit_vector.GetNumberOfBits() > stack_mask.size_in_bits()) {
     return false;
   }
-  for (size_t i = 0; i < code_info.GetNumberOfStackMaskBits(); ++i) {
+  for (size_t i = 0; i < stack_mask.size_in_bits(); ++i) {
     if (stack_mask.LoadBit(i) != bit_vector.IsBitSet(i)) {
       return false;
     }
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 103862beff..c0892ff466 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -155,8 +155,7 @@ class VerifierDepsTest : public CommonCompilerTest {
     bool has_failures = true;
     bool found_method = false;
-    accessor.VisitMethods([&](const ClassAccessor::Method& method)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
+    for (const ClassAccessor::Method& method : accessor.GetMethods()) {
       ArtMethod* resolved_method =
           class_linker_->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
               method.GetIndex(),
@@ -186,7 +185,7 @@ class VerifierDepsTest : public CommonCompilerTest {
         has_failures = verifier.HasFailures();
         found_method = true;
       }
-    });
+    }
     CHECK(found_method) << "Expected to find method " << method_name;
     return !has_failures;
   }
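
On the stack map side, the removed PrepareStackMasks()/PrepareRegisterMasks() helpers and their hand-rolled ScopedArenaUnorderedMap lookups are folded into BitmapTableBuilder/BitTableBuilder::Dedup(), which hand back a row index for each (possibly repeated) entry. A minimal sketch of that index-returning dedup idea, using a plain std::map instead of ART's arena-backed, bit-packing builders (names and behaviour here are simplified stand-ins, not the real BitTableBuilder API):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

// Simplified stand-in for the Dedup() behaviour: identical entries collapse to
// one table row, and callers keep only the row index.
template <typename Entry>
class DedupTableBuilder {
 public:
  // Returns the index of an existing identical entry, or appends a new row.
  uint32_t Dedup(const Entry& entry) {
    auto it = index_of_.find(entry);
    if (it != index_of_.end()) {
      return it->second;
    }
    uint32_t index = static_cast<uint32_t>(rows_.size());
    rows_.push_back(entry);
    index_of_.emplace(entry, index);
    return index;
  }

  size_t size() const { return rows_.size(); }

 private:
  std::vector<Entry> rows_;
  std::map<Entry, uint32_t> index_of_;
};

int main() {
  // Register masks of three hypothetical stack maps; the first and last match.
  std::vector<uint32_t> register_masks = {0x00f0, 0x0001, 0x00f0};

  DedupTableBuilder<uint32_t> builder;
  std::vector<uint32_t> register_mask_index(register_masks.size());
  for (size_t i = 0; i < register_masks.size(); ++i) {
    register_mask_index[i] = builder.Dedup(register_masks[i]);
  }

  std::cout << "unique rows: " << builder.size() << "\n";              // 2
  std::cout << "map 2 reuses row " << register_mask_index[2] << "\n";  // 0
  return 0;
}

In the diff itself the register mask row is additionally split into a (register_mask >> shift, shift) pair before deduplication, and the builders bit-pack the resulting tables when Encode() is called, which is why the separate stack-mask copy loop and stack_mask_max_ bookkeeping can be deleted.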
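
The exception_test.cc change replaces the old post-hoc front-padding of the buffer with an aligned code offset computed up front: the CodeInfo sits at the start, the OatQuickMethodHeader is copied to code_offset - header_size, and the code begins at RoundUp(stack_maps_size + header_size, code_alignment). A small self-contained sketch of that layout arithmetic; the sizes below are made up for illustration, the real values come from StackMapStream, sizeof(OatQuickMethodHeader), and GetInstructionSetAlignment(kRuntimeISA):

#include <cstddef>
#include <iostream>

// Same rounding idea as ART's RoundUp (libartbase/base/bit_utils.h);
// simplified here and assumes alignment is a power of two.
constexpr size_t RoundUp(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t stack_maps_size = 21;  // bytes of encoded CodeInfo (illustrative)
  const size_t header_size = 32;      // stand-in for sizeof(OatQuickMethodHeader)
  const size_t code_alignment = 16;   // stand-in for the ISA code alignment

  // Layout: [stack maps][padding][method header][code], code start aligned.
  const size_t code_offset = RoundUp(stack_maps_size + header_size, code_alignment);

  std::cout << "header at offset " << (code_offset - header_size) << "\n";  // 32
  std::cout << "code at offset   " << code_offset << "\n";                  // 64
  return 0;
}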