author     2016-03-31 12:02:28 +0100
committer  2016-04-04 17:50:20 +0100
commit     9d07e3d128ccfa0ef7670feadd424a825e447d1d (patch)
tree       dfb677fd75f0f297fef9bc49311cf1d22c770f56
parent     eb98c0ded592cfca8187c744393c82efd1020b2a (diff)
Clean up OatQuickMethodHeader after Quick removal.
This reduces the size of the pre-header by 8 bytes, reducing
oat file size and mmapped .text section size. The memory
dex2oat needs to store a CompiledMethod is also reduced: by 8B
for 32-bit dex2oat and by 16B for 64-bit dex2oat. The
aosp_flounder-userdebug 32-bit and 64-bit boot.oat are each
about 1.1MiB smaller.
Disable the broken StubTest.IMT, b/27991555.
Change-Id: I05fe45c28c8ffb7a0fa8b1117b969786748b1039
38 files changed, 212 insertions, 1409 deletions
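
Before the diff itself, a minimal sketch of the trimmed pre-header this change leaves behind, reconstructed from the new five-argument OatQuickMethodHeader constructor used throughout the patch and the updated 20-byte size check in oat_test.cc. The struct name and member layout below are illustrative assumptions, not the verbatim ART declaration (the real header groups the frame data differently); the arithmetic is the point: dropping the mapping-table and GC-map offsets removes two uint32_t fields, i.e. the 8 bytes mentioned above.

#include <cstdint>

// Illustrative stand-in for the post-change OatQuickMethodHeader; member names are
// assumptions derived from the constructor arguments, not copied declarations.
struct OatQuickMethodHeaderSketch {
  uint32_t vmap_table_offset_;    // single remaining map offset (CodeInfo / vmap table), 0 if none
  uint32_t frame_size_in_bytes_;  // quick frame size
  uint32_t core_spill_mask_;      // callee-save core registers
  uint32_t fp_spill_mask_;        // callee-save FP registers
  uint32_t code_size_;            // size of the compiled code that follows the header
};

static_assert(sizeof(OatQuickMethodHeaderSketch) == 20u,
              "matches EXPECT_EQ(20U, sizeof(OatQuickMethodHeader)) in oat_test.cc");
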
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index f67da3ff4c..8309bd854f 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -195,7 +195,6 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \ runtime/entrypoints/math_entrypoints_test.cc \ runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc \ runtime/entrypoints_order_test.cc \ - runtime/exception_test.cc \ runtime/gc/accounting/card_table_test.cc \ runtime/gc/accounting/mod_union_table_test.cc \ runtime/gc/accounting/space_bitmap_test.cc \ @@ -251,6 +250,7 @@ COMPILER_GTEST_COMMON_SRC_FILES := \ compiler/driver/compiled_method_storage_test.cc \ compiler/driver/compiler_driver_test.cc \ compiler/elf_writer_test.cc \ + compiler/exception_test.cc \ compiler/image_test.cc \ compiler/jni/jni_compiler_test.cc \ compiler/linker/multi_oat_relative_patcher_test.cc \ diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc index 6483ef63b1..0001b672be 100644 --- a/compiler/common_compiler_test.cc +++ b/compiler/common_compiler_test.cc @@ -59,36 +59,20 @@ void CommonCompilerTest::MakeExecutable(ArtMethod* method) { ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable(); uint32_t vmap_table_offset = vmap_table.empty() ? 0u : sizeof(OatQuickMethodHeader) + vmap_table.size(); - ArrayRef<const uint8_t> mapping_table = compiled_method->GetMappingTable(); - bool mapping_table_used = !mapping_table.empty(); - size_t mapping_table_size = mapping_table.size(); - uint32_t mapping_table_offset = !mapping_table_used ? 0u - : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size; - ArrayRef<const uint8_t> gc_map = compiled_method->GetGcMap(); - bool gc_map_used = !gc_map.empty(); - size_t gc_map_size = gc_map.size(); - uint32_t gc_map_offset = !gc_map_used ? 
0u - : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size + gc_map_size; - OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset, gc_map_offset, + OatQuickMethodHeader method_header(vmap_table_offset, compiled_method->GetFrameSizeInBytes(), compiled_method->GetCoreSpillMask(), - compiled_method->GetFpSpillMask(), code_size); + compiled_method->GetFpSpillMask(), + code_size); header_code_and_maps_chunks_.push_back(std::vector<uint8_t>()); std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back(); const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet()); - const size_t size = - gc_map_size + mapping_table_size + vmap_table.size() + sizeof(method_header) + code_size; + const size_t size = vmap_table.size() + sizeof(method_header) + code_size; chunk->reserve(size + max_padding); chunk->resize(sizeof(method_header)); memcpy(&(*chunk)[0], &method_header, sizeof(method_header)); chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end()); - if (mapping_table_used) { - chunk->insert(chunk->begin(), mapping_table.begin(), mapping_table.end()); - } - if (gc_map_used) { - chunk->insert(chunk->begin(), gc_map.begin(), gc_map.end()); - } chunk->insert(chunk->end(), code.begin(), code.end()); CHECK_EQ(chunk->size(), size); const void* unaligned_code_ptr = chunk->data() + (size - code_size); diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc index 9551d2298b..f06d90c81c 100644 --- a/compiler/compiled_method.cc +++ b/compiler/compiled_method.cc @@ -106,9 +106,7 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver, const uint32_t core_spill_mask, const uint32_t fp_spill_mask, const ArrayRef<const SrcMapElem>& src_mapping_table, - const ArrayRef<const uint8_t>& mapping_table, const ArrayRef<const uint8_t>& vmap_table, - const ArrayRef<const uint8_t>& native_gc_map, const ArrayRef<const uint8_t>& cfi_info, const ArrayRef<const LinkerPatch>& patches) : CompiledCode(driver, instruction_set, quick_code), @@ -116,9 +114,7 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver, fp_spill_mask_(fp_spill_mask), src_mapping_table_( driver->GetCompiledMethodStorage()->DeduplicateSrcMappingTable(src_mapping_table)), - mapping_table_(driver->GetCompiledMethodStorage()->DeduplicateMappingTable(mapping_table)), vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)), - gc_map_(driver->GetCompiledMethodStorage()->DeduplicateGCMap(native_gc_map)), cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)), patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) { } @@ -131,15 +127,20 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethod( const uint32_t core_spill_mask, const uint32_t fp_spill_mask, const ArrayRef<const SrcMapElem>& src_mapping_table, - const ArrayRef<const uint8_t>& mapping_table, const ArrayRef<const uint8_t>& vmap_table, - const ArrayRef<const uint8_t>& native_gc_map, const ArrayRef<const uint8_t>& cfi_info, const ArrayRef<const LinkerPatch>& patches) { SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator()); CompiledMethod* ret = alloc.allocate(1); - alloc.construct(ret, driver, instruction_set, quick_code, frame_size_in_bytes, core_spill_mask, - fp_spill_mask, src_mapping_table, mapping_table, vmap_table, native_gc_map, + alloc.construct(ret, + driver, + instruction_set, + quick_code, + frame_size_in_bytes, + core_spill_mask, + fp_spill_mask, + src_mapping_table, + vmap_table, 
cfi_info, patches); return ret; } @@ -154,9 +155,7 @@ CompiledMethod::~CompiledMethod() { CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage(); storage->ReleaseLinkerPatches(patches_); storage->ReleaseCFIInfo(cfi_info_); - storage->ReleaseGCMap(gc_map_); storage->ReleaseVMapTable(vmap_table_); - storage->ReleaseMappingTable(mapping_table_); storage->ReleaseSrcMappingTable(src_mapping_table_); } diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h index 70161eb221..9479ff38be 100644 --- a/compiler/compiled_method.h +++ b/compiler/compiled_method.h @@ -377,9 +377,7 @@ class CompiledMethod FINAL : public CompiledCode { const uint32_t core_spill_mask, const uint32_t fp_spill_mask, const ArrayRef<const SrcMapElem>& src_mapping_table, - const ArrayRef<const uint8_t>& mapping_table, const ArrayRef<const uint8_t>& vmap_table, - const ArrayRef<const uint8_t>& native_gc_map, const ArrayRef<const uint8_t>& cfi_info, const ArrayRef<const LinkerPatch>& patches); @@ -393,9 +391,7 @@ class CompiledMethod FINAL : public CompiledCode { const uint32_t core_spill_mask, const uint32_t fp_spill_mask, const ArrayRef<const SrcMapElem>& src_mapping_table, - const ArrayRef<const uint8_t>& mapping_table, const ArrayRef<const uint8_t>& vmap_table, - const ArrayRef<const uint8_t>& native_gc_map, const ArrayRef<const uint8_t>& cfi_info, const ArrayRef<const LinkerPatch>& patches); @@ -417,18 +413,10 @@ class CompiledMethod FINAL : public CompiledCode { return GetArray(src_mapping_table_); } - ArrayRef<const uint8_t> GetMappingTable() const { - return GetArray(mapping_table_); - } - ArrayRef<const uint8_t> GetVmapTable() const { return GetArray(vmap_table_); } - ArrayRef<const uint8_t> GetGcMap() const { - return GetArray(gc_map_); - } - ArrayRef<const uint8_t> GetCFIInfo() const { return GetArray(cfi_info_); } @@ -446,14 +434,8 @@ class CompiledMethod FINAL : public CompiledCode { const uint32_t fp_spill_mask_; // For quick code, a set of pairs (PC, DEX) mapping from native PC offset to DEX offset. const LengthPrefixedArray<SrcMapElem>* const src_mapping_table_; - // For quick code, a uleb128 encoded map from native PC offset to dex PC aswell as dex PC to - // native PC offset. Size prefixed. - const LengthPrefixedArray<uint8_t>* const mapping_table_; // For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed. const LengthPrefixedArray<uint8_t>* const vmap_table_; - // For quick code, a map keyed by native PC indices to bitmaps describing what dalvik registers - // are live. - const LengthPrefixedArray<uint8_t>* const gc_map_; // For quick code, a FDE entry for the debug_frame section. const LengthPrefixedArray<uint8_t>* const cfi_info_; // For quick code, linker patches needed by the method. 
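
The compiler-side call sites further down (dex_to_dex_compiler.cc, jni_compiler.cc, relative_patcher_test.h, optimizing_compiler.cc) all shrink in the same way. As a hedged sketch of the new SwapAllocCompiledMethod signature shown above, with placeholder local variable names, a backend now hands over its method data roughly like this:

// Sketch only: locals such as quick_code, vmap_table, cfi_data and patches are
// placeholders for whatever the backend produced. The mapping_table and
// native_gc_map arguments are gone; all per-method metadata travels in the single
// vmap_table blob (a CodeInfo for Optimizing, quickening info for dex2dex).
CompiledMethod* compiled = CompiledMethod::SwapAllocCompiledMethod(
    driver,
    instruction_set,
    ArrayRef<const uint8_t>(quick_code),
    frame_size_in_bytes,
    core_spill_mask,
    fp_spill_mask,
    ArrayRef<const SrcMapElem>(),         // src_mapping_table
    ArrayRef<const uint8_t>(vmap_table),  // vmap_table
    ArrayRef<const uint8_t>(cfi_data),    // cfi_info
    ArrayRef<const LinkerPatch>(patches));
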
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index efddeba6a9..3ce786e008 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -363,9 +363,7 @@ CompiledMethod* ArtCompileDEX( 0, 0, ArrayRef<const SrcMapElem>(), // src_mapping_table - ArrayRef<const uint8_t>(), // mapping_table ArrayRef<const uint8_t>(builder.GetData()), // vmap_table - ArrayRef<const uint8_t>(), // gc_map ArrayRef<const uint8_t>(), // cfi data ArrayRef<const LinkerPatch>()); } diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc index 510613ecf4..a0a8f81c1f 100644 --- a/compiler/driver/compiled_method_storage.cc +++ b/compiler/driver/compiled_method_storage.cc @@ -174,11 +174,8 @@ CompiledMethodStorage::CompiledMethodStorage(int swap_fd) dedupe_code_("dedupe code", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())), dedupe_src_mapping_table_("dedupe source mapping table", LengthPrefixedArrayAlloc<SrcMapElem>(swap_space_.get())), - dedupe_mapping_table_("dedupe mapping table", - LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())), dedupe_vmap_table_("dedupe vmap table", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())), - dedupe_gc_map_("dedupe gc map", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())), dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())), dedupe_linker_patches_("dedupe cfi info", LengthPrefixedArrayAlloc<LinkerPatch>(swap_space_.get())) { @@ -196,9 +193,7 @@ void CompiledMethodStorage::DumpMemoryUsage(std::ostream& os, bool extended) con if (extended) { Thread* self = Thread::Current(); os << "\nCode dedupe: " << dedupe_code_.DumpStats(self); - os << "\nMapping table dedupe: " << dedupe_mapping_table_.DumpStats(self); os << "\nVmap table dedupe: " << dedupe_vmap_table_.DumpStats(self); - os << "\nGC map dedupe: " << dedupe_gc_map_.DumpStats(self); os << "\nCFI info dedupe: " << dedupe_cfi_info_.DumpStats(self); } } @@ -221,15 +216,6 @@ void CompiledMethodStorage::ReleaseSrcMappingTable(const LengthPrefixedArray<Src ReleaseArrayIfNotDeduplicated(src_map); } -const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateMappingTable( - const ArrayRef<const uint8_t>& table) { - return AllocateOrDeduplicateArray(table, &dedupe_mapping_table_); -} - -void CompiledMethodStorage::ReleaseMappingTable(const LengthPrefixedArray<uint8_t>* table) { - ReleaseArrayIfNotDeduplicated(table); -} - const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateVMapTable( const ArrayRef<const uint8_t>& table) { return AllocateOrDeduplicateArray(table, &dedupe_vmap_table_); @@ -239,15 +225,6 @@ void CompiledMethodStorage::ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* ReleaseArrayIfNotDeduplicated(table); } -const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateGCMap( - const ArrayRef<const uint8_t>& gc_map) { - return AllocateOrDeduplicateArray(gc_map, &dedupe_gc_map_); -} - -void CompiledMethodStorage::ReleaseGCMap(const LengthPrefixedArray<uint8_t>* gc_map) { - ReleaseArrayIfNotDeduplicated(gc_map); -} - const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateCFIInfo( const ArrayRef<const uint8_t>& cfi_info) { return AllocateOrDeduplicateArray(cfi_info, &dedupe_cfi_info_); diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h index d6961a0876..8674abf815 100644 --- a/compiler/driver/compiled_method_storage.h +++ 
b/compiler/driver/compiled_method_storage.h @@ -56,15 +56,9 @@ class CompiledMethodStorage { const ArrayRef<const SrcMapElem>& src_map); void ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map); - const LengthPrefixedArray<uint8_t>* DeduplicateMappingTable(const ArrayRef<const uint8_t>& table); - void ReleaseMappingTable(const LengthPrefixedArray<uint8_t>* table); - const LengthPrefixedArray<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& table); void ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table); - const LengthPrefixedArray<uint8_t>* DeduplicateGCMap(const ArrayRef<const uint8_t>& gc_map); - void ReleaseGCMap(const LengthPrefixedArray<uint8_t>* gc_map); - const LengthPrefixedArray<uint8_t>* DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info); void ReleaseCFIInfo(const LengthPrefixedArray<uint8_t>* cfi_info); @@ -103,9 +97,7 @@ class CompiledMethodStorage { ArrayDedupeSet<uint8_t> dedupe_code_; ArrayDedupeSet<SrcMapElem> dedupe_src_mapping_table_; - ArrayDedupeSet<uint8_t> dedupe_mapping_table_; ArrayDedupeSet<uint8_t> dedupe_vmap_table_; - ArrayDedupeSet<uint8_t> dedupe_gc_map_; ArrayDedupeSet<uint8_t> dedupe_cfi_info_; ArrayDedupeSet<LinkerPatch> dedupe_linker_patches_; diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc index 0695cb56b3..9e0c22c68c 100644 --- a/compiler/driver/compiled_method_storage_test.cc +++ b/compiler/driver/compiled_method_storage_test.cc @@ -61,24 +61,12 @@ TEST(CompiledMethodStorage, Deduplicate) { ArrayRef<const SrcMapElem>(raw_src_map1), ArrayRef<const SrcMapElem>(raw_src_map2), }; - const uint8_t raw_mapping_table1[] = { 5, 6, 7 }; - const uint8_t raw_mapping_table2[] = { 7, 6, 5, 4 }; - ArrayRef<const uint8_t> mapping_table[] = { - ArrayRef<const uint8_t>(raw_mapping_table1), - ArrayRef<const uint8_t>(raw_mapping_table2), - }; const uint8_t raw_vmap_table1[] = { 2, 4, 6 }; const uint8_t raw_vmap_table2[] = { 7, 5, 3, 1 }; ArrayRef<const uint8_t> vmap_table[] = { ArrayRef<const uint8_t>(raw_vmap_table1), ArrayRef<const uint8_t>(raw_vmap_table2), }; - const uint8_t raw_gc_map1[] = { 9, 8, 7 }; - const uint8_t raw_gc_map2[] = { 6, 7, 8, 9 }; - ArrayRef<const uint8_t> gc_map[] = { - ArrayRef<const uint8_t>(raw_gc_map1), - ArrayRef<const uint8_t>(raw_gc_map2), - }; const uint8_t raw_cfi_info1[] = { 1, 3, 5 }; const uint8_t raw_cfi_info2[] = { 8, 6, 4, 2 }; ArrayRef<const uint8_t> cfi_info[] = { @@ -102,49 +90,37 @@ TEST(CompiledMethodStorage, Deduplicate) { compiled_methods.reserve(1u << 7); for (auto&& c : code) { for (auto&& s : src_map) { - for (auto&& m : mapping_table) { - for (auto&& v : vmap_table) { - for (auto&& g : gc_map) { - for (auto&& f : cfi_info) { - for (auto&& p : patches) { - compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod( - &driver, kNone, c, 0u, 0u, 0u, s, m, v, g, f, p)); - } - } + for (auto&& v : vmap_table) { + for (auto&& f : cfi_info) { + for (auto&& p : patches) { + compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod( + &driver, kNone, c, 0u, 0u, 0u, s, v, f, p)); } } } } } - constexpr size_t code_bit = 1u << 6; - constexpr size_t src_map_bit = 1u << 5; - constexpr size_t mapping_table_bit = 1u << 4; - constexpr size_t vmap_table_bit = 1u << 3; - constexpr size_t gc_map_bit = 1u << 2; + constexpr size_t code_bit = 1u << 4; + constexpr size_t src_map_bit = 1u << 3; + constexpr size_t vmap_table_bit = 1u << 2; constexpr size_t cfi_info_bit = 1u << 1; constexpr size_t patches_bit = 1u << 0; - 
CHECK_EQ(compiled_methods.size(), 1u << 7); + CHECK_EQ(compiled_methods.size(), 1u << 5); for (size_t i = 0; i != compiled_methods.size(); ++i) { for (size_t j = 0; j != compiled_methods.size(); ++j) { CompiledMethod* lhs = compiled_methods[i]; CompiledMethod* rhs = compiled_methods[j]; bool same_code = ((i ^ j) & code_bit) == 0u; bool same_src_map = ((i ^ j) & src_map_bit) == 0u; - bool same_mapping_table = ((i ^ j) & mapping_table_bit) == 0u; bool same_vmap_table = ((i ^ j) & vmap_table_bit) == 0u; - bool same_gc_map = ((i ^ j) & gc_map_bit) == 0u; bool same_cfi_info = ((i ^ j) & cfi_info_bit) == 0u; bool same_patches = ((i ^ j) & patches_bit) == 0u; ASSERT_EQ(same_code, lhs->GetQuickCode().data() == rhs->GetQuickCode().data()) << i << " " << j; ASSERT_EQ(same_src_map, lhs->GetSrcMappingTable().data() == rhs->GetSrcMappingTable().data()) << i << " " << j; - ASSERT_EQ(same_mapping_table, lhs->GetMappingTable().data() == rhs->GetMappingTable().data()) - << i << " " << j; ASSERT_EQ(same_vmap_table, lhs->GetVmapTable().data() == rhs->GetVmapTable().data()) << i << " " << j; - ASSERT_EQ(same_gc_map, lhs->GetGcMap().data() == rhs->GetGcMap().data()) - << i << " " << j; ASSERT_EQ(same_cfi_info, lhs->GetCFIInfo().data() == rhs->GetCFIInfo().data()) << i << " " << j; ASSERT_EQ(same_patches, lhs->GetPatches().data() == rhs->GetPatches().data()) diff --git a/runtime/exception_test.cc b/compiler/exception_test.cc index 18ccd082ec..38ac052830 100644 --- a/runtime/exception_test.cc +++ b/compiler/exception_test.cc @@ -16,6 +16,7 @@ #include <memory> +#include "base/arena_allocator.h" #include "class_linker.h" #include "common_runtime_test.h" #include "dex_file.h" @@ -27,11 +28,11 @@ #include "mirror/object-inl.h" #include "mirror/stack_trace_element.h" #include "oat_quick_method_header.h" +#include "optimizing/stack_map_stream.h" #include "runtime.h" #include "scoped_thread_state_change.h" #include "handle_scope-inl.h" #include "thread.h" -#include "vmap_table.h" namespace art { @@ -57,40 +58,27 @@ class ExceptionTest : public CommonRuntimeTest { fake_code_.push_back(0x70 | i); } - fake_mapping_data_.PushBackUnsigned(4); // first element is count - fake_mapping_data_.PushBackUnsigned(4); // total (non-length) elements - fake_mapping_data_.PushBackUnsigned(2); // count of pc to dex elements - // --- pc to dex table - fake_mapping_data_.PushBackUnsigned(3 - 0); // offset 3 - fake_mapping_data_.PushBackSigned(3 - 0); // maps to dex offset 3 - // --- dex to pc table - fake_mapping_data_.PushBackUnsigned(3 - 0); // offset 3 - fake_mapping_data_.PushBackSigned(3 - 0); // maps to dex offset 3 - - fake_vmap_table_data_.PushBackUnsigned(0 + VmapTable::kEntryAdjustment); - - fake_gc_map_.push_back(0); // 0 bytes to encode references and native pc offsets. - fake_gc_map_.push_back(0); - fake_gc_map_.push_back(0); // 0 entries. 
- fake_gc_map_.push_back(0); - - const std::vector<uint8_t>& fake_vmap_table_data = fake_vmap_table_data_.GetData(); - const std::vector<uint8_t>& fake_mapping_data = fake_mapping_data_.GetData(); - uint32_t vmap_table_offset = sizeof(OatQuickMethodHeader) + fake_vmap_table_data.size(); - uint32_t mapping_table_offset = vmap_table_offset + fake_mapping_data.size(); - uint32_t gc_map_offset = mapping_table_offset + fake_gc_map_.size(); - OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset, gc_map_offset, - 4 * sizeof(void*), 0u, 0u, code_size); - fake_header_code_and_maps_.resize(sizeof(method_header)); - memcpy(&fake_header_code_and_maps_[0], &method_header, sizeof(method_header)); - fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(), - fake_vmap_table_data.begin(), fake_vmap_table_data.end()); - fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(), - fake_mapping_data.begin(), fake_mapping_data.end()); - fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(), - fake_gc_map_.begin(), fake_gc_map_.end()); - fake_header_code_and_maps_.insert(fake_header_code_and_maps_.end(), - fake_code_.begin(), fake_code_.end()); + ArenaPool pool; + ArenaAllocator allocator(&pool); + StackMapStream stack_maps(&allocator); + stack_maps.BeginStackMapEntry(/* dex_pc */ 3u, + /* native_pc_offset */ 3u, + /* register_mask */ 0u, + /* sp_mask */ nullptr, + /* num_dex_registers */ 0u, + /* inlining_depth */ 0u); + stack_maps.EndStackMapEntry(); + size_t stack_maps_size = stack_maps.PrepareForFillIn(); + size_t stack_maps_offset = stack_maps_size + sizeof(OatQuickMethodHeader); + + fake_header_code_and_maps_.resize(stack_maps_offset + fake_code_.size()); + MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size); + stack_maps.FillIn(stack_maps_region); + OatQuickMethodHeader method_header(stack_maps_offset, 4 * sizeof(void*), 0u, 0u, code_size); + memcpy(&fake_header_code_and_maps_[stack_maps_size], &method_header, sizeof(method_header)); + std::copy(fake_code_.begin(), + fake_code_.end(), + fake_header_code_and_maps_.begin() + stack_maps_offset); // Align the code. const size_t alignment = GetInstructionSetAlignment(kRuntimeISA); @@ -109,7 +97,7 @@ class ExceptionTest : public CommonRuntimeTest { if (kRuntimeISA == kArm) { // Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer(). - CHECK_ALIGNED(mapping_table_offset, 2); + CHECK_ALIGNED(stack_maps_offset, 2); } method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*)); @@ -124,9 +112,6 @@ class ExceptionTest : public CommonRuntimeTest { const DexFile* dex_; std::vector<uint8_t> fake_code_; - Leb128EncodingVector<> fake_mapping_data_; - Leb128EncodingVector<> fake_vmap_table_data_; - std::vector<uint8_t> fake_gc_map_; std::vector<uint8_t> fake_header_code_and_maps_; ArtMethod* method_f_; diff --git a/compiler/gc_map_builder.h b/compiler/gc_map_builder.h deleted file mode 100644 index 2ef7f1a659..0000000000 --- a/compiler/gc_map_builder.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (C) 2014 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_COMPILER_GC_MAP_BUILDER_H_ -#define ART_COMPILER_GC_MAP_BUILDER_H_ - -#include <vector> - -#include "base/bit_utils.h" -#include "gc_map.h" - -namespace art { - -class GcMapBuilder { - public: - template <typename Vector> - GcMapBuilder(Vector* table, size_t entries, uint32_t max_native_offset, - size_t references_width) - : entries_(entries), references_width_(entries != 0u ? references_width : 0u), - native_offset_width_(entries != 0 && max_native_offset != 0 - ? sizeof(max_native_offset) - CLZ(max_native_offset) / 8u - : 0u), - in_use_(entries) { - static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type"); - - // Resize table and set up header. - table->resize((EntryWidth() * entries) + sizeof(uint32_t)); - table_ = table->data(); - CHECK_LT(native_offset_width_, 1U << 3); - (*table)[0] = native_offset_width_ & 7; - CHECK_LT(references_width_, 1U << 13); - (*table)[0] |= (references_width_ << 3) & 0xFF; - (*table)[1] = (references_width_ >> 5) & 0xFF; - CHECK_LT(entries, 1U << 16); - (*table)[2] = entries & 0xFF; - (*table)[3] = (entries >> 8) & 0xFF; - } - - void AddEntry(uint32_t native_offset, const uint8_t* references) { - size_t table_index = TableIndex(native_offset); - while (in_use_[table_index]) { - table_index = (table_index + 1) % entries_; - } - in_use_[table_index] = true; - SetCodeOffset(table_index, native_offset); - DCHECK_EQ(native_offset, GetCodeOffset(table_index)); - SetReferences(table_index, references); - } - - private: - size_t TableIndex(uint32_t native_offset) { - return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_; - } - - uint32_t GetCodeOffset(size_t table_index) { - uint32_t native_offset = 0; - size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t); - for (size_t i = 0; i < native_offset_width_; i++) { - native_offset |= table_[table_offset + i] << (i * 8); - } - return native_offset; - } - - void SetCodeOffset(size_t table_index, uint32_t native_offset) { - size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t); - for (size_t i = 0; i < native_offset_width_; i++) { - table_[table_offset + i] = (native_offset >> (i * 8)) & 0xFF; - } - } - - void SetReferences(size_t table_index, const uint8_t* references) { - size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t); - memcpy(&table_[table_offset + native_offset_width_], references, references_width_); - } - - size_t EntryWidth() const { - return native_offset_width_ + references_width_; - } - - // Number of entries in the table. - const size_t entries_; - // Number of bytes used to encode the reference bitmap. - const size_t references_width_; - // Number of bytes used to encode a native offset. - const size_t native_offset_width_; - // Entries that are in use. - std::vector<bool> in_use_; - // The table we're building. 
- uint8_t* table_; -}; - -} // namespace art - -#endif // ART_COMPILER_GC_MAP_BUILDER_H_ diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index e92046057c..b8cda24c78 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -488,9 +488,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, main_jni_conv->CoreSpillMask(), main_jni_conv->FpSpillMask(), ArrayRef<const SrcMapElem>(), - ArrayRef<const uint8_t>(), // mapping_table. ArrayRef<const uint8_t>(), // vmap_table. - ArrayRef<const uint8_t>(), // native_gc_map. ArrayRef<const uint8_t>(*jni_asm->cfi().data()), ArrayRef<const LinkerPatch>()); } diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h index bf61ea0570..c07de79984 100644 --- a/compiler/linker/relative_patcher_test.h +++ b/compiler/linker/relative_patcher_test.h @@ -85,9 +85,15 @@ class RelativePatcherTest : public testing::Test { const ArrayRef<const LinkerPatch>& patches) { compiled_method_refs_.push_back(method_ref); compiled_methods_.emplace_back(new CompiledMethod( - &driver_, instruction_set_, code, - 0u, 0u, 0u, ArrayRef<const SrcMapElem>(), ArrayRef<const uint8_t>(), - ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(), + &driver_, + instruction_set_, + code, + /* frame_size_in_bytes */ 0u, + /* core_spill_mask */ 0u, + /* fp_spill_mask */ 0u, + /* src_mapping_table */ ArrayRef<const SrcMapElem>(), + /* vmap_table */ ArrayRef<const uint8_t>(), + /* cfi_info */ ArrayRef<const uint8_t>(), patches)); } diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index eaf0e179a5..73b16d5b46 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -442,7 +442,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) { // it is time to update OatHeader::kOatVersion EXPECT_EQ(72U, sizeof(OatHeader)); EXPECT_EQ(4U, sizeof(OatMethodOffsets)); - EXPECT_EQ(28U, sizeof(OatQuickMethodHeader)); + EXPECT_EQ(20U, sizeof(OatQuickMethodHeader)); EXPECT_EQ(132 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints)); } diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 3a67b1ec2a..cf1c1146e9 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -275,9 +275,7 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings) size_code_alignment_(0), size_relative_call_thunks_(0), size_misc_thunks_(0), - size_mapping_table_(0), size_vmap_table_(0), - size_gc_map_(0), size_oat_dex_file_location_size_(0), size_oat_dex_file_location_data_(0), size_oat_dex_file_location_checksum_(0), @@ -498,72 +496,6 @@ void OatWriter::PrepareLayout(const CompilerDriver* compiler, OatWriter::~OatWriter() { } -struct OatWriter::GcMapDataAccess { - static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE { - return compiled_method->GetGcMap(); - } - - static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE { - uint32_t offset = oat_class->method_headers_[method_offsets_index].gc_map_offset_; - return offset == 0u ? 
0u : - (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset; - } - - static void SetOffset(OatClass* oat_class, size_t method_offsets_index, uint32_t offset) - ALWAYS_INLINE { - oat_class->method_headers_[method_offsets_index].gc_map_offset_ = - (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset; - } - - static const char* Name() { - return "GC map"; - } -}; - -struct OatWriter::MappingTableDataAccess { - static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE { - return compiled_method->GetMappingTable(); - } - - static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE { - uint32_t offset = oat_class->method_headers_[method_offsets_index].mapping_table_offset_; - return offset == 0u ? 0u : - (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset; - } - - static void SetOffset(OatClass* oat_class, size_t method_offsets_index, uint32_t offset) - ALWAYS_INLINE { - oat_class->method_headers_[method_offsets_index].mapping_table_offset_ = - (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset; - } - - static const char* Name() { - return "mapping table"; - } -}; - -struct OatWriter::VmapTableDataAccess { - static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE { - return compiled_method->GetVmapTable(); - } - - static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE { - uint32_t offset = oat_class->method_headers_[method_offsets_index].vmap_table_offset_; - return offset == 0u ? 0u : - (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset; - } - - static void SetOffset(OatClass* oat_class, size_t method_offsets_index, uint32_t offset) - ALWAYS_INLINE { - oat_class->method_headers_[method_offsets_index].vmap_table_offset_ = - (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset; - } - - static const char* Name() { - return "vmap table"; - } -}; - class OatWriter::DexMethodVisitor { public: DexMethodVisitor(OatWriter* writer, size_t offset) @@ -726,26 +658,24 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { uint32_t thumb_offset = compiled_method->CodeDelta(); // Deduplicate code arrays if we are not producing debuggable code. - bool deduped = false; + bool deduped = true; MethodReference method_ref(dex_file_, it.GetMemberIndex()); if (debuggable_) { quick_code_offset = writer_->relative_patcher_->GetOffset(method_ref); if (quick_code_offset != 0u) { // Duplicate methods, we want the same code for both of them so that the oat writer puts // the same code in both ArtMethods so that we do not get different oat code at runtime. 
- deduped = true; } else { quick_code_offset = NewQuickCodeOffset(compiled_method, it, thumb_offset); + deduped = false; } } else { - auto lb = dedupe_map_.lower_bound(compiled_method); - if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(compiled_method, lb->first)) { - quick_code_offset = lb->second; - deduped = true; - } else { - quick_code_offset = NewQuickCodeOffset(compiled_method, it, thumb_offset); - dedupe_map_.PutBefore(lb, compiled_method, quick_code_offset); - } + quick_code_offset = dedupe_map_.GetOrCreate( + compiled_method, + [this, &deduped, compiled_method, &it, thumb_offset]() { + deduped = false; + return NewQuickCodeOffset(compiled_method, it, thumb_offset); + }); } if (code_size != 0) { @@ -763,33 +693,25 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { // Update quick method header. DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size()); OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_]; - uint32_t mapping_table_offset = method_header->mapping_table_offset_; uint32_t vmap_table_offset = method_header->vmap_table_offset_; // If we don't have quick code, then we must have a vmap, as that is how the dex2dex // compiler records its transformations. DCHECK(!quick_code.empty() || vmap_table_offset != 0); - uint32_t gc_map_offset = method_header->gc_map_offset_; // The code offset was 0 when the mapping/vmap table offset was set, so it's set // to 0-offset and we need to adjust it by code_offset. uint32_t code_offset = quick_code_offset - thumb_offset; - if (mapping_table_offset != 0u && code_offset != 0u) { - mapping_table_offset += code_offset; - DCHECK_LT(mapping_table_offset, code_offset) << "Overflow in oat offsets"; - } if (vmap_table_offset != 0u && code_offset != 0u) { vmap_table_offset += code_offset; DCHECK_LT(vmap_table_offset, code_offset) << "Overflow in oat offsets"; } - if (gc_map_offset != 0u && code_offset != 0u) { - gc_map_offset += code_offset; - DCHECK_LT(gc_map_offset, code_offset) << "Overflow in oat offsets"; - } uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes(); uint32_t core_spill_mask = compiled_method->GetCoreSpillMask(); uint32_t fp_spill_mask = compiled_method->GetFpSpillMask(); - *method_header = OatQuickMethodHeader(mapping_table_offset, vmap_table_offset, - gc_map_offset, frame_size_in_bytes, core_spill_mask, - fp_spill_mask, code_size); + *method_header = OatQuickMethodHeader(vmap_table_offset, + frame_size_in_bytes, + core_spill_mask, + fp_spill_mask, + code_size); if (!deduped) { // Update offsets. (Checksum is updated when writing.) @@ -844,15 +766,6 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { } else { status = mirror::Class::kStatusNotReady; } - ArrayRef<const uint8_t> gc_map = compiled_method->GetGcMap(); - if (!gc_map.empty()) { - size_t gc_map_size = gc_map.size() * sizeof(gc_map[0]); - bool is_native = it.MemberIsNative(); - CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified) - << gc_map_size << " " << (is_native ? "true" : "false") << " " - << (status < mirror::Class::kStatusVerified) << " " << status << " " - << PrettyMethod(it.GetMemberIndex(), *dex_file_); - } } DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size()); @@ -872,15 +785,9 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { return lhs->GetQuickCode().data() < rhs->GetQuickCode().data(); } // If the code is the same, all other fields are likely to be the same as well. 
- if (UNLIKELY(lhs->GetMappingTable().data() != rhs->GetMappingTable().data())) { - return lhs->GetMappingTable().data() < rhs->GetMappingTable().data(); - } if (UNLIKELY(lhs->GetVmapTable().data() != rhs->GetVmapTable().data())) { return lhs->GetVmapTable().data() < rhs->GetVmapTable().data(); } - if (UNLIKELY(lhs->GetGcMap().data() != rhs->GetGcMap().data())) { - return lhs->GetGcMap().data() < rhs->GetGcMap().data(); - } if (UNLIKELY(lhs->GetPatches().data() != rhs->GetPatches().data())) { return lhs->GetPatches().data() < rhs->GetPatches().data(); } @@ -907,7 +814,6 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { const bool debuggable_; }; -template <typename DataAccess> class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor { public: InitMapMethodVisitor(OatWriter* writer, size_t offset) @@ -921,19 +827,21 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor { if (compiled_method != nullptr) { DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size()); - DCHECK_EQ(DataAccess::GetOffset(oat_class, method_offsets_index_), 0u); + DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].vmap_table_offset_, 0u); - ArrayRef<const uint8_t> map = DataAccess::GetData(compiled_method); + ArrayRef<const uint8_t> map = compiled_method->GetVmapTable(); uint32_t map_size = map.size() * sizeof(map[0]); if (map_size != 0u) { - auto lb = dedupe_map_.lower_bound(map.data()); - if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(map.data(), lb->first)) { - DataAccess::SetOffset(oat_class, method_offsets_index_, lb->second); - } else { - DataAccess::SetOffset(oat_class, method_offsets_index_, offset_); - dedupe_map_.PutBefore(lb, map.data(), offset_); - offset_ += map_size; - } + size_t offset = dedupe_map_.GetOrCreate( + map.data(), + [this, map_size]() { + uint32_t new_offset = offset_; + offset_ += map_size; + return new_offset; + }); + // Code offset is not initialized yet, so set the map offset to 0u-offset. + DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u); + oat_class->method_headers_[method_offsets_index_].vmap_table_offset_ = 0u - offset; } ++method_offsets_index_; } @@ -1342,10 +1250,11 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { } }; -template <typename DataAccess> class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor { public: - WriteMapMethodVisitor(OatWriter* writer, OutputStream* out, const size_t file_offset, + WriteMapMethodVisitor(OatWriter* writer, + OutputStream* out, + const size_t file_offset, size_t relative_offset) : OatDexMethodVisitor(writer, relative_offset), out_(out), @@ -1360,22 +1269,31 @@ class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor { size_t file_offset = file_offset_; OutputStream* out = out_; - uint32_t map_offset = DataAccess::GetOffset(oat_class, method_offsets_index_); + uint32_t map_offset = oat_class->method_headers_[method_offsets_index_].vmap_table_offset_; + uint32_t code_offset = oat_class->method_offsets_[method_offsets_index_].code_offset_; ++method_offsets_index_; - // Write deduplicated map. 
- ArrayRef<const uint8_t> map = DataAccess::GetData(compiled_method); - size_t map_size = map.size() * sizeof(map[0]); - DCHECK((map_size == 0u && map_offset == 0u) || - (map_size != 0u && map_offset != 0u && map_offset <= offset_)) - << map_size << " " << map_offset << " " << offset_ << " " - << PrettyMethod(it.GetMemberIndex(), *dex_file_) << " for " << DataAccess::Name(); - if (map_size != 0u && map_offset == offset_) { - if (UNLIKELY(!writer_->WriteData(out, map.data(), map_size))) { - ReportWriteFailure(it); - return false; + DCHECK((compiled_method->GetVmapTable().size() == 0u && map_offset == 0u) || + (compiled_method->GetVmapTable().size() != 0u && map_offset != 0u)) + << compiled_method->GetVmapTable().size() << " " << map_offset << " " + << PrettyMethod(it.GetMemberIndex(), *dex_file_); + + if (map_offset != 0u) { + // Transform map_offset to actual oat data offset. + map_offset = (code_offset - compiled_method->CodeDelta()) - map_offset; + DCHECK_NE(map_offset, 0u); + DCHECK_LE(map_offset, offset_) << PrettyMethod(it.GetMemberIndex(), *dex_file_); + + ArrayRef<const uint8_t> map = compiled_method->GetVmapTable(); + size_t map_size = map.size() * sizeof(map[0]); + if (map_offset == offset_) { + // Write deduplicated map (code info for Optimizing or transformation info for dex2dex). + if (UNLIKELY(!writer_->WriteData(out, map.data(), map_size))) { + ReportWriteFailure(it); + return false; + } + offset_ += map_size; } - offset_ += map_size; } DCHECK_OFFSET_(); } @@ -1388,7 +1306,7 @@ class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor { size_t const file_offset_; void ReportWriteFailure(const ClassDataItemIterator& it) { - PLOG(ERROR) << "Failed to write " << DataAccess::Name() << " for " + PLOG(ERROR) << "Failed to write map for " << PrettyMethod(it.GetMemberIndex(), *dex_file_) << " to " << out_->GetLocation(); } }; @@ -1481,19 +1399,10 @@ size_t OatWriter::InitOatClasses(size_t offset) { } size_t OatWriter::InitOatMaps(size_t offset) { - #define VISIT(VisitorType) \ - do { \ - VisitorType visitor(this, offset); \ - bool success = VisitDexMethods(&visitor); \ - DCHECK(success); \ - offset = visitor.GetOffset(); \ - } while (false) - - VISIT(InitMapMethodVisitor<GcMapDataAccess>); - VISIT(InitMapMethodVisitor<MappingTableDataAccess>); - VISIT(InitMapMethodVisitor<VmapTableDataAccess>); - - #undef VISIT + InitMapMethodVisitor visitor(this, offset); + bool success = VisitDexMethods(&visitor); + DCHECK(success); + offset = visitor.GetOffset(); return offset; } @@ -1647,9 +1556,7 @@ bool OatWriter::WriteCode(OutputStream* out) { DO_STAT(size_code_alignment_); DO_STAT(size_relative_call_thunks_); DO_STAT(size_misc_thunks_); - DO_STAT(size_mapping_table_); DO_STAT(size_vmap_table_); - DO_STAT(size_gc_map_); DO_STAT(size_oat_dex_file_location_size_); DO_STAT(size_oat_dex_file_location_data_); DO_STAT(size_oat_dex_file_location_checksum_); @@ -1764,29 +1671,14 @@ bool OatWriter::WriteClasses(OutputStream* out) { } size_t OatWriter::WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset) { - #define VISIT(VisitorType) \ - do { \ - VisitorType visitor(this, out, file_offset, relative_offset); \ - if (UNLIKELY(!VisitDexMethods(&visitor))) { \ - return 0; \ - } \ - relative_offset = visitor.GetOffset(); \ - } while (false) - - size_t gc_maps_offset = relative_offset; - VISIT(WriteMapMethodVisitor<GcMapDataAccess>); - size_gc_map_ = relative_offset - gc_maps_offset; - - size_t mapping_tables_offset = relative_offset; - 
VISIT(WriteMapMethodVisitor<MappingTableDataAccess>); - size_mapping_table_ = relative_offset - mapping_tables_offset; - size_t vmap_tables_offset = relative_offset; - VISIT(WriteMapMethodVisitor<VmapTableDataAccess>); + WriteMapMethodVisitor visitor(this, out, file_offset, relative_offset); + if (UNLIKELY(!VisitDexMethods(&visitor))) { + return 0; + } + relative_offset = visitor.GetOffset(); size_vmap_table_ = relative_offset - vmap_tables_offset; - #undef VISIT - return relative_offset; } diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h index 5e7a4a37d1..3862798329 100644 --- a/compiler/oat_writer.h +++ b/compiler/oat_writer.h @@ -219,13 +219,6 @@ class OatWriter { class OatClass; class OatDexFile; - // The DataAccess classes are helper classes that provide access to members related to - // a given map, i.e. GC map, mapping table or vmap table. By abstracting these away - // we can share a lot of code for processing the maps with template classes below. - struct GcMapDataAccess; - struct MappingTableDataAccess; - struct VmapTableDataAccess; - // The function VisitDexMethods() below iterates through all the methods in all // the compiled dex files in order of their definitions. The method visitor // classes provide individual bits of processing for each of the passes we need to @@ -235,11 +228,9 @@ class OatWriter { class OatDexMethodVisitor; class InitOatClassesMethodVisitor; class InitCodeMethodVisitor; - template <typename DataAccess> class InitMapMethodVisitor; class InitImageMethodVisitor; class WriteCodeMethodVisitor; - template <typename DataAccess> class WriteMapMethodVisitor; // Visit all the methods in all the compiled dex files in their definition order @@ -354,9 +345,7 @@ class OatWriter { uint32_t size_code_alignment_; uint32_t size_relative_call_thunks_; uint32_t size_misc_thunks_; - uint32_t size_mapping_table_; uint32_t size_vmap_table_; - uint32_t size_gc_map_; uint32_t size_oat_dex_file_location_size_; uint32_t size_oat_dex_file_location_data_; uint32_t size_oat_dex_file_location_checksum_; diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 65e5c3ad48..953c0ae418 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -44,18 +44,15 @@ #include "compiled_method.h" #include "dex/verified_method.h" #include "driver/compiler_driver.h" -#include "gc_map_builder.h" #include "graph_visualizer.h" #include "intrinsics.h" #include "leb128.h" -#include "mapping_table.h" #include "mirror/array-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object_reference.h" #include "parallel_move_resolver.h" #include "ssa_liveness_analysis.h" #include "utils/assembler.h" -#include "vmap_table.h" namespace art { diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 3d6bf62d0b..cad94c7ad7 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -597,9 +597,7 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena, codegen->GetCoreSpillMask(), codegen->GetFpuSpillMask(), ArrayRef<const SrcMapElem>(), - ArrayRef<const uint8_t>(), // mapping_table. ArrayRef<const uint8_t>(stack_map), - ArrayRef<const uint8_t>(), // native_gc_map. 
ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()), ArrayRef<const LinkerPatch>(linker_patches)); @@ -916,9 +914,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, const void* code = code_cache->CommitCode( self, method, - nullptr, stack_map_data, - nullptr, codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(), codegen->GetCoreSpillMask(), codegen->GetFpuSpillMask(), diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc index 5c0eb3f116..811d9fd025 100644 --- a/imgdiag/imgdiag.cc +++ b/imgdiag/imgdiag.cc @@ -35,7 +35,6 @@ #include "image.h" #include "scoped_thread_state_change.h" #include "os.h" -#include "gc_map.h" #include "cmdline.h" #include "backtrace/BacktraceMap.h" diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index 9a3bb02906..b673eff9ad 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -38,7 +38,6 @@ #include "dex_instruction.h" #include "disassembler.h" #include "elf_builder.h" -#include "gc_map.h" #include "gc/space/image_space.h" #include "gc/space/large_object_space.h" #include "gc/space/space-inl.h" @@ -46,7 +45,6 @@ #include "indenter.h" #include "linker/buffered_output_stream.h" #include "linker/file_output_stream.h" -#include "mapping_table.h" #include "mirror/array-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" @@ -62,7 +60,6 @@ #include "ScopedLocalRef.h" #include "thread_list.h" #include "verifier/method_verifier.h" -#include "vmap_table.h" #include "well_known_classes.h" #include <sys/stat.h> @@ -282,9 +279,7 @@ class OatSymbolizer FINAL { class OatDumperOptions { public: - OatDumperOptions(bool dump_raw_mapping_table, - bool dump_raw_gc_map, - bool dump_vmap, + OatDumperOptions(bool dump_vmap, bool dump_code_info_stack_maps, bool disassemble_code, bool absolute_addresses, @@ -297,9 +292,7 @@ class OatDumperOptions { const char* app_image, const char* app_oat, uint32_t addr2instr) - : dump_raw_mapping_table_(dump_raw_mapping_table), - dump_raw_gc_map_(dump_raw_gc_map), - dump_vmap_(dump_vmap), + : dump_vmap_(dump_vmap), dump_code_info_stack_maps_(dump_code_info_stack_maps), disassemble_code_(disassemble_code), absolute_addresses_(absolute_addresses), @@ -314,8 +307,6 @@ class OatDumperOptions { addr2instr_(addr2instr), class_loader_(nullptr) {} - const bool dump_raw_mapping_table_; - const bool dump_raw_gc_map_; const bool dump_vmap_; const bool dump_code_info_stack_maps_; const bool disassemble_code_; @@ -572,9 +563,7 @@ class OatDumper { code_offset &= ~0x1; } offsets_.insert(code_offset); - offsets_.insert(oat_method.GetMappingTableOffset()); offsets_.insert(oat_method.GetVmapTableOffset()); - offsets_.insert(oat_method.GetGcMapOffset()); } bool DumpOatDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) { @@ -843,22 +832,6 @@ class OatDumper { success = false; } vios->Stream() << "\n"; - - vios->Stream() << "gc_map: "; - if (options_.absolute_addresses_) { - vios->Stream() << StringPrintf("%p ", oat_method.GetGcMap()); - } - uint32_t gc_map_offset = oat_method.GetGcMapOffset(); - vios->Stream() << StringPrintf("(offset=0x%08x)\n", gc_map_offset); - if (gc_map_offset > oat_file_.Size()) { - vios->Stream() << StringPrintf("WARNING: " - "gc map table offset 0x%08x is past end of file 0x%08zx.\n", - gc_map_offset, oat_file_.Size()); - success = false; - } else if (options_.dump_raw_gc_map_) { - ScopedIndentation indent3(vios); - DumpGcMap(vios->Stream(), oat_method, code_item); - } } { vios->Stream() << "OatQuickMethodHeader "; @@ -879,24 +852,6 @@ class OatDumper { } ScopedIndentation 
indent2(vios); - vios->Stream() << "mapping_table: "; - if (options_.absolute_addresses_) { - vios->Stream() << StringPrintf("%p ", oat_method.GetMappingTable()); - } - uint32_t mapping_table_offset = oat_method.GetMappingTableOffset(); - vios->Stream() << StringPrintf("(offset=0x%08x)\n", oat_method.GetMappingTableOffset()); - if (mapping_table_offset > oat_file_.Size()) { - vios->Stream() << StringPrintf("WARNING: " - "mapping table offset 0x%08x is past end of file 0x%08zx. " - "mapping table offset was loaded from offset 0x%08x.\n", - mapping_table_offset, oat_file_.Size(), - oat_method.GetMappingTableOffsetOffset()); - success = false; - } else if (options_.dump_raw_mapping_table_) { - ScopedIndentation indent3(vios); - DumpMappingTable(vios, oat_method); - } - vios->Stream() << "vmap_table: "; if (options_.absolute_addresses_) { vios->Stream() << StringPrintf("%p ", oat_method.GetVmapTable()); @@ -973,7 +928,7 @@ class OatDumper { success = false; if (options_.disassemble_code_) { if (code_size_offset + kPrologueBytes <= oat_file_.Size()) { - DumpCode(vios, verifier.get(), oat_method, code_item, true, kPrologueBytes); + DumpCode(vios, oat_method, code_item, true, kPrologueBytes); } } } else if (code_size > kMaxCodeSize) { @@ -986,11 +941,11 @@ class OatDumper { success = false; if (options_.disassemble_code_) { if (code_size_offset + kPrologueBytes <= oat_file_.Size()) { - DumpCode(vios, verifier.get(), oat_method, code_item, true, kPrologueBytes); + DumpCode(vios, oat_method, code_item, true, kPrologueBytes); } } } else if (options_.disassemble_code_) { - DumpCode(vios, verifier.get(), oat_method, code_item, !success, 0); + DumpCode(vios, oat_method, code_item, !success, 0); } } } @@ -1040,12 +995,7 @@ class OatDumper { ScopedIndentation indent(vios); vios->Stream() << "quickened data\n"; } else { - // Otherwise, display the vmap table. - const uint8_t* raw_table = oat_method.GetVmapTable(); - if (raw_table != nullptr) { - VmapTable vmap_table(raw_table); - DumpVmapTable(vios->Stream(), oat_method, vmap_table); - } + // Otherwise, there is nothing to display. } } @@ -1060,32 +1010,6 @@ class OatDumper { options_.dump_code_info_stack_maps_); } - // Display a vmap table. - void DumpVmapTable(std::ostream& os, - const OatFile::OatMethod& oat_method, - const VmapTable& vmap_table) { - bool first = true; - bool processing_fp = false; - uint32_t spill_mask = oat_method.GetCoreSpillMask(); - for (size_t i = 0; i < vmap_table.Size(); i++) { - uint16_t dex_reg = vmap_table[i]; - uint32_t cpu_reg = vmap_table.ComputeRegister(spill_mask, i, - processing_fp ? kFloatVReg : kIntVReg); - os << (first ? 
"v" : ", v") << dex_reg; - if (!processing_fp) { - os << "/r" << cpu_reg; - } else { - os << "/fr" << cpu_reg; - } - first = false; - if (!processing_fp && dex_reg == 0xFFFF) { - processing_fp = true; - spill_mask = oat_method.GetFpSpillMask(); - } - } - os << "\n"; - } - void DumpVregLocations(std::ostream& os, const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item) { if (code_item != nullptr) { @@ -1128,203 +1052,18 @@ class OatDumper { } } - void DescribeVReg(std::ostream& os, const OatFile::OatMethod& oat_method, - const DexFile::CodeItem* code_item, size_t reg, VRegKind kind) { - const uint8_t* raw_table = oat_method.GetVmapTable(); - if (raw_table != nullptr) { - const VmapTable vmap_table(raw_table); - uint32_t vmap_offset; - if (vmap_table.IsInContext(reg, kind, &vmap_offset)) { - bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg); - uint32_t spill_mask = is_float ? oat_method.GetFpSpillMask() - : oat_method.GetCoreSpillMask(); - os << (is_float ? "fr" : "r") << vmap_table.ComputeRegister(spill_mask, vmap_offset, kind); - } else { - uint32_t offset = StackVisitor::GetVRegOffsetFromQuickCode( - code_item, - oat_method.GetCoreSpillMask(), - oat_method.GetFpSpillMask(), - oat_method.GetFrameSizeInBytes(), - reg, - GetInstructionSet()); - os << "[sp + #" << offset << "]"; - } - } - } - - void DumpGcMapRegisters(std::ostream& os, const OatFile::OatMethod& oat_method, - const DexFile::CodeItem* code_item, - size_t num_regs, const uint8_t* reg_bitmap) { - bool first = true; - for (size_t reg = 0; reg < num_regs; reg++) { - if (((reg_bitmap[reg / 8] >> (reg % 8)) & 0x01) != 0) { - if (first) { - os << " v" << reg << " ("; - DescribeVReg(os, oat_method, code_item, reg, kReferenceVReg); - os << ")"; - first = false; - } else { - os << ", v" << reg << " ("; - DescribeVReg(os, oat_method, code_item, reg, kReferenceVReg); - os << ")"; - } - } - } - if (first) { - os << "No registers in GC map\n"; - } else { - os << "\n"; - } - } - void DumpGcMap(std::ostream& os, const OatFile::OatMethod& oat_method, - const DexFile::CodeItem* code_item) { - const uint8_t* gc_map_raw = oat_method.GetGcMap(); - if (gc_map_raw == nullptr) { - return; // No GC map. - } - const void* quick_code = oat_method.GetQuickCode(); - NativePcOffsetToReferenceMap map(gc_map_raw); - for (size_t entry = 0; entry < map.NumEntries(); entry++) { - const uint8_t* native_pc = reinterpret_cast<const uint8_t*>(quick_code) + - map.GetNativePcOffset(entry); - os << StringPrintf("%p", native_pc); - DumpGcMapRegisters(os, oat_method, code_item, map.RegWidth() * 8, map.GetBitMap(entry)); - } - } - - void DumpMappingTable(VariableIndentationOutputStream* vios, - const OatFile::OatMethod& oat_method) { - const void* quick_code = oat_method.GetQuickCode(); - if (quick_code == nullptr) { + void DumpInformationAtOffset(VariableIndentationOutputStream* vios, + const OatFile::OatMethod& oat_method, + const DexFile::CodeItem* code_item, + size_t offset, + bool suspend_point_mapping) { + if (!IsMethodGeneratedByOptimizingCompiler(oat_method, code_item)) { + // Native method. 
return; } - MappingTable table(oat_method.GetMappingTable()); - if (table.TotalSize() != 0) { - if (table.PcToDexSize() != 0) { - typedef MappingTable::PcToDexIterator It; - vios->Stream() << "suspend point mappings {\n"; - for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) { - ScopedIndentation indent1(vios); - vios->Stream() << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc()); - } - vios->Stream() << "}\n"; - } - if (table.DexToPcSize() != 0) { - typedef MappingTable::DexToPcIterator It; - vios->Stream() << "catch entry mappings {\n"; - for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) { - ScopedIndentation indent1(vios); - vios->Stream() << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc()); - } - vios->Stream() << "}\n"; - } - } - } - - uint32_t DumpInformationAtOffset(VariableIndentationOutputStream* vios, - const OatFile::OatMethod& oat_method, - const DexFile::CodeItem* code_item, - size_t offset, - bool suspend_point_mapping) { - if (IsMethodGeneratedByOptimizingCompiler(oat_method, code_item)) { - if (suspend_point_mapping) { - ScopedIndentation indent1(vios); - DumpDexRegisterMapAtOffset(vios, oat_method, code_item, offset); - } - // The return value is not used in the case of a method compiled - // with the optimizing compiler. - return DexFile::kDexNoIndex; - } else { - return DumpMappingAtOffset(vios->Stream(), oat_method, offset, suspend_point_mapping); - } - } - - uint32_t DumpMappingAtOffset(std::ostream& os, const OatFile::OatMethod& oat_method, - size_t offset, bool suspend_point_mapping) { - MappingTable table(oat_method.GetMappingTable()); - if (suspend_point_mapping && table.PcToDexSize() > 0) { - typedef MappingTable::PcToDexIterator It; - for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) { - if (offset == cur.NativePcOffset()) { - os << StringPrintf("suspend point dex PC: 0x%04x\n", cur.DexPc()); - return cur.DexPc(); - } - } - } else if (!suspend_point_mapping && table.DexToPcSize() > 0) { - typedef MappingTable::DexToPcIterator It; - for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) { - if (offset == cur.NativePcOffset()) { - os << StringPrintf("catch entry dex PC: 0x%04x\n", cur.DexPc()); - return cur.DexPc(); - } - } - } - return DexFile::kDexNoIndex; - } - - void DumpGcMapAtNativePcOffset(std::ostream& os, const OatFile::OatMethod& oat_method, - const DexFile::CodeItem* code_item, size_t native_pc_offset) { - const uint8_t* gc_map_raw = oat_method.GetGcMap(); - if (gc_map_raw != nullptr) { - NativePcOffsetToReferenceMap map(gc_map_raw); - if (map.HasEntry(native_pc_offset)) { - size_t num_regs = map.RegWidth() * 8; - const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset); - bool first = true; - for (size_t reg = 0; reg < num_regs; reg++) { - if (((reg_bitmap[reg / 8] >> (reg % 8)) & 0x01) != 0) { - if (first) { - os << "GC map objects: v" << reg << " ("; - DescribeVReg(os, oat_method, code_item, reg, kReferenceVReg); - os << ")"; - first = false; - } else { - os << ", v" << reg << " ("; - DescribeVReg(os, oat_method, code_item, reg, kReferenceVReg); - os << ")"; - } - } - } - if (!first) { - os << "\n"; - } - } - } - } - - void DumpVRegsAtDexPc(std::ostream& os, verifier::MethodVerifier* verifier, - const OatFile::OatMethod& oat_method, - const DexFile::CodeItem* code_item, uint32_t dex_pc) { - DCHECK(verifier != nullptr); - std::vector<int32_t> kinds = verifier->DescribeVRegs(dex_pc); - bool 
first = true; - for (size_t reg = 0; reg < code_item->registers_size_; reg++) { - VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2)); - if (kind != kUndefined) { - if (first) { - os << "VRegs: v"; - first = false; - } else { - os << ", v"; - } - os << reg << " ("; - switch (kind) { - case kImpreciseConstant: - os << "Imprecise Constant: " << kinds.at((reg * 2) + 1) << ", "; - DescribeVReg(os, oat_method, code_item, reg, kind); - break; - case kConstant: - os << "Constant: " << kinds.at((reg * 2) + 1); - break; - default: - DescribeVReg(os, oat_method, code_item, reg, kind); - break; - } - os << ")"; - } - } - if (!first) { - os << "\n"; + if (suspend_point_mapping) { + ScopedIndentation indent1(vios); + DumpDexRegisterMapAtOffset(vios, oat_method, code_item, offset); } } @@ -1349,7 +1088,7 @@ class OatDumper { // null, then this method has been compiled with the optimizing // compiler. return oat_method.GetQuickCode() != nullptr && - oat_method.GetGcMap() == nullptr && + oat_method.GetVmapTable() != nullptr && code_item != nullptr; } @@ -1409,7 +1148,6 @@ class OatDumper { } void DumpCode(VariableIndentationOutputStream* vios, - verifier::MethodVerifier* verifier, const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item, bool bad_input, size_t code_size) { const void* quick_code = oat_method.GetQuickCode(); @@ -1429,14 +1167,7 @@ class OatDumper { } offset += disassembler_->Dump(vios->Stream(), quick_native_pc + offset); if (!bad_input) { - uint32_t dex_pc = - DumpInformationAtOffset(vios, oat_method, code_item, offset, true); - if (dex_pc != DexFile::kDexNoIndex) { - DumpGcMapAtNativePcOffset(vios->Stream(), oat_method, code_item, offset); - if (verifier != nullptr) { - DumpVRegsAtDexPc(vios->Stream(), verifier, oat_method, code_item, dex_pc); - } - } + DumpInformationAtOffset(vios, oat_method, code_item, offset, true); } } } @@ -1986,10 +1717,6 @@ class ImageDumper { OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>( reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader)); if (method->IsNative()) { - if (!Runtime::Current()->GetClassLinker()->IsQuickGenericJniStub(quick_oat_code_begin)) { - DCHECK(method_header->GetNativeGcMap() == nullptr) << PrettyMethod(method); - DCHECK(method_header->GetMappingTable() == nullptr) << PrettyMethod(method); - } bool first_occurrence; uint32_t quick_oat_code_size = GetQuickOatCodeSize(method); ComputeOatSize(quick_oat_code_begin, &first_occurrence); @@ -2013,17 +1740,6 @@ class ImageDumper { stats_.dex_instruction_bytes += dex_instruction_bytes; bool first_occurrence; - size_t gc_map_bytes = ComputeOatSize(method_header->GetNativeGcMap(), &first_occurrence); - if (first_occurrence) { - stats_.gc_map_bytes += gc_map_bytes; - } - - size_t pc_mapping_table_bytes = ComputeOatSize( - method_header->GetMappingTable(), &first_occurrence); - if (first_occurrence) { - stats_.pc_mapping_table_bytes += pc_mapping_table_bytes; - } - size_t vmap_table_bytes = 0u; if (!method_header->IsOptimized()) { // Method compiled with the optimizing compiler have no vmap table. 
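
With the GC map slot gone, a method is recognized as optimizing-compiled by whether it has a vmap table, since that slot now holds the CodeInfo with its stack maps; the same idea reappears later as the IsOptimized() check in oat_quick_method_header.h. A minimal standalone model of that predicate (field values are made up; the real class packs these fields into the pre-header together with the frame info):

#include <cstdint>
#include <iostream>

// Illustrative mirror of the two pre-header fields consulted by IsOptimized()
// after this change; QuickMethodFrameInfo and the code itself are omitted.
struct MethodHeaderModel {
  uint32_t vmap_table_offset;  // 0 means "no vmap table / CodeInfo".
  uint32_t code_size;

  bool IsOptimized() const {
    // Optimized (stack-map based) code has both a non-empty code blob and a
    // vmap table, which is where the CodeInfo now lives.
    return code_size != 0 && vmap_table_offset != 0;
  }
};

int main() {
  MethodHeaderModel optimized{/*vmap_table_offset=*/64, /*code_size=*/128};
  MethodHeaderModel stub{/*vmap_table_offset=*/0, /*code_size=*/128};
  std::cout << optimized.IsOptimized() << " " << stub.IsOptimized() << "\n";  // 1 0
}
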
@@ -2052,11 +1768,12 @@ class ImageDumper { uint32_t method_access_flags = method->GetAccessFlags(); indent_os << StringPrintf("OAT CODE: %p-%p\n", quick_oat_code_begin, quick_oat_code_end); - indent_os << StringPrintf("SIZE: Dex Instructions=%zd GC=%zd Mapping=%zd AccessFlags=0x%x\n", - dex_instruction_bytes, gc_map_bytes, pc_mapping_table_bytes, + indent_os << StringPrintf("SIZE: Dex Instructions=%zd StackMaps=%zd AccessFlags=0x%x\n", + dex_instruction_bytes, + vmap_table_bytes, method_access_flags); - size_t total_size = dex_instruction_bytes + gc_map_bytes + pc_mapping_table_bytes + + size_t total_size = dex_instruction_bytes + vmap_table_bytes + quick_oat_code_size + ArtMethod::Size(image_header_.GetPointerSize()); double expansion = @@ -2101,8 +1818,6 @@ class ImageDumper { size_t large_initializer_code_bytes; size_t large_method_code_bytes; - size_t gc_map_bytes; - size_t pc_mapping_table_bytes; size_t vmap_table_bytes; size_t dex_instruction_bytes; @@ -2131,8 +1846,6 @@ class ImageDumper { class_initializer_code_bytes(0), large_initializer_code_bytes(0), large_method_code_bytes(0), - gc_map_bytes(0), - pc_mapping_table_bytes(0), vmap_table_bytes(0), dex_instruction_bytes(0) {} @@ -2351,11 +2064,7 @@ class ImageDumper { PercentOfOatBytes(oat_dex_file_size.second)); } - os << "\n" << StringPrintf("gc_map_bytes = %7zd (%2.0f%% of oat file bytes)\n" - "pc_mapping_table_bytes = %7zd (%2.0f%% of oat file bytes)\n" - "vmap_table_bytes = %7zd (%2.0f%% of oat file bytes)\n\n", - gc_map_bytes, PercentOfOatBytes(gc_map_bytes), - pc_mapping_table_bytes, PercentOfOatBytes(pc_mapping_table_bytes), + os << "\n" << StringPrintf("vmap_table_bytes = %7zd (%2.0f%% of oat file bytes)\n\n", vmap_table_bytes, PercentOfOatBytes(vmap_table_bytes)) << std::flush; @@ -2590,10 +2299,6 @@ struct OatdumpArgs : public CmdlineArgs { oat_filename_ = option.substr(strlen("--oat-file=")).data(); } else if (option.starts_with("--image=")) { image_location_ = option.substr(strlen("--image=")).data(); - } else if (option =="--dump:raw_mapping_table") { - dump_raw_mapping_table_ = true; - } else if (option == "--dump:raw_gc_map") { - dump_raw_gc_map_ = true; } else if (option == "--no-dump:vmap") { dump_vmap_ = false; } else if (option =="--dump:code_info_stack_maps") { @@ -2683,12 +2388,6 @@ struct OatdumpArgs : public CmdlineArgs { usage += Base::GetUsage(); usage += // Optional. 
- " --dump:raw_mapping_table enables dumping of the mapping table.\n" - " Example: --dump:raw_mapping_table\n" - "\n" - " --dump:raw_gc_map enables dumping of the GC map.\n" - " Example: --dump:raw_gc_map\n" - "\n" " --no-dump:vmap may be used to disable vmap dumping.\n" " Example: --no-dump:vmap\n" "\n" @@ -2739,8 +2438,6 @@ struct OatdumpArgs : public CmdlineArgs { const char* method_filter_ = ""; const char* image_location_ = nullptr; std::string elf_filename_prefix_; - bool dump_raw_mapping_table_ = false; - bool dump_raw_gc_map_ = false; bool dump_vmap_ = true; bool dump_code_info_stack_maps_ = false; bool disassemble_code_ = true; @@ -2763,8 +2460,6 @@ struct OatdumpMain : public CmdlineMain<OatdumpArgs> { bool absolute_addresses = (args_->oat_filename_ == nullptr); oat_dumper_options_.reset(new OatDumperOptions( - args_->dump_raw_mapping_table_, - args_->dump_raw_gc_map_, args_->dump_vmap_, args_->dump_code_info_stack_maps_, args_->disassemble_code_, diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc index bf062ed292..c7ced8adf2 100644 --- a/oatdump/oatdump_test.cc +++ b/oatdump/oatdump_test.cc @@ -97,16 +97,6 @@ TEST_F(OatDumpTest, TestOatImage) { ASSERT_TRUE(Exec(kModeOat, {}, &error_msg)) << error_msg; } -TEST_F(OatDumpTest, TestDumpRawMappingTable) { - std::string error_msg; - ASSERT_TRUE(Exec(kModeArt, {"--dump:raw_mapping_table"}, &error_msg)) << error_msg; -} - -TEST_F(OatDumpTest, TestDumpRawGcMap) { - std::string error_msg; - ASSERT_TRUE(Exec(kModeArt, {"--dump:raw_gc_map"}, &error_msg)) << error_msg; -} - TEST_F(OatDumpTest, TestNoDumpVmap) { std::string error_msg; ASSERT_TRUE(Exec(kModeArt, {"--no-dump:vmap"}, &error_msg)) << error_msg; diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index 969a038523..75d9073cfb 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -1934,7 +1934,12 @@ TEST_F(StubTest, Fields64) { TestFields(self, this, Primitive::Type::kPrimLong); } -TEST_F(StubTest, IMT) { +// Disabled, b/27991555 . +// FIXME: Hacking the entry point to point to art_quick_to_interpreter_bridge is broken. +// The bridge calls through to GetCalleeSaveMethodCaller() which looks up the pre-header +// and gets a bogus OatQuickMethodHeader* pointing into our assembly code just before +// the bridge and uses that to check for inlined frames, crashing in the process. 
+TEST_F(StubTest, DISABLED_IMT) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) Thread* self = Thread::Current(); diff --git a/runtime/art_method.cc b/runtime/art_method.cc index f97ad51568..34d19d151b 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -31,7 +31,6 @@ #include "jit/jit_code_cache.h" #include "jit/profiling_info.h" #include "jni_internal.h" -#include "mapping_table.h" #include "mirror/abstract_method.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h index 7595d14101..0e2f9f2030 100644 --- a/runtime/check_reference_map_visitor.h +++ b/runtime/check_reference_map_visitor.h @@ -18,7 +18,6 @@ #define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_ #include "art_method-inl.h" -#include "gc_map.h" #include "oat_quick_method_header.h" #include "scoped_thread_state_change.h" #include "stack_map.h" @@ -54,11 +53,8 @@ class CheckReferenceMapVisitor : public StackVisitor { void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset) SHARED_REQUIRES(Locks::mutator_lock_) { - if (GetCurrentOatQuickMethodHeader()->IsOptimized()) { - CheckOptimizedMethod(registers, number_of_references, native_pc_offset); - } else { - CheckQuickMethod(registers, number_of_references, native_pc_offset); - } + CHECK(GetCurrentOatQuickMethodHeader()->IsOptimized()); + CheckOptimizedMethod(registers, number_of_references, native_pc_offset); } private: @@ -104,20 +100,6 @@ class CheckReferenceMapVisitor : public StackVisitor { } } } - - void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset) - SHARED_REQUIRES(Locks::mutator_lock_) { - ArtMethod* m = GetMethod(); - NativePcOffsetToReferenceMap map(GetCurrentOatQuickMethodHeader()->GetNativeGcMap()); - const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset); - CHECK(ref_bitmap); - for (int i = 0; i < number_of_references; ++i) { - int reg = registers[i]; - CHECK(reg < m->GetCodeItem()->registers_size_); - CHECK((*((ref_bitmap) + reg / 8) >> (reg % 8) ) & 0x01) - << "Error: Reg @" << i << " is not in GC map"; - } - } }; } // namespace art diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index e46576e884..197caa1878 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -272,19 +272,19 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp, if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) { if (outer_method != nullptr) { const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc); - if (current_code->IsOptimized()) { - uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc); - CodeInfo code_info = current_code->GetOptimizedCodeInfo(); - CodeInfoEncoding encoding = code_info.ExtractEncoding(); - StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding); - DCHECK(stack_map.IsValid()); - if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) { - InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding); - caller = GetResolvedMethod(outer_method, - inline_info, - encoding.inline_info_encoding, - inline_info.GetDepth(encoding.inline_info_encoding) - 1); - } + DCHECK(current_code != nullptr); + DCHECK(current_code->IsOptimized()); + uintptr_t native_pc_offset = 
current_code->NativeQuickPcOffset(caller_pc); + CodeInfo code_info = current_code->GetOptimizedCodeInfo(); + CodeInfoEncoding encoding = code_info.ExtractEncoding(); + StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding); + DCHECK(stack_map.IsValid()); + if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) { + InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding); + caller = GetResolvedMethod(outer_method, + inline_info, + encoding.inline_info_encoding, + inline_info.GetDepth(encoding.inline_info_encoding) - 1); } } if (kIsDebugBuild && do_caller_check) { diff --git a/runtime/gc_map.h b/runtime/gc_map.h deleted file mode 100644 index b4ccdd6d54..0000000000 --- a/runtime/gc_map.h +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_GC_MAP_H_ -#define ART_RUNTIME_GC_MAP_H_ - -#include <stdint.h> - -#include "base/logging.h" -#include "base/macros.h" - -namespace art { - -// Lightweight wrapper for native PC offset to reference bit maps. -class NativePcOffsetToReferenceMap { - public: - explicit NativePcOffsetToReferenceMap(const uint8_t* data) : data_(data) { - CHECK(data_ != nullptr); - } - - // The number of entries in the table. - size_t NumEntries() const { - return data_[2] | (data_[3] << 8); - } - - // Return address of bitmap encoding what are live references. - const uint8_t* GetBitMap(size_t index) const { - size_t entry_offset = index * EntryWidth(); - return &Table()[entry_offset + NativeOffsetWidth()]; - } - - // Get the native PC encoded in the table at the given index. - uintptr_t GetNativePcOffset(size_t index) const { - size_t entry_offset = index * EntryWidth(); - uintptr_t result = 0; - for (size_t i = 0; i < NativeOffsetWidth(); ++i) { - result |= Table()[entry_offset + i] << (i * 8); - } - return result; - } - - // Does the given offset have an entry? - bool HasEntry(uintptr_t native_pc_offset) { - for (size_t i = 0; i < NumEntries(); ++i) { - if (GetNativePcOffset(i) == native_pc_offset) { - return true; - } - } - return false; - } - - // Finds the bitmap associated with the native pc offset. - const uint8_t* FindBitMap(uintptr_t native_pc_offset) { - size_t num_entries = NumEntries(); - size_t index = Hash(native_pc_offset) % num_entries; - size_t misses = 0; - while (GetNativePcOffset(index) != native_pc_offset) { - index = (index + 1) % num_entries; - misses++; - DCHECK_LT(misses, num_entries) << "Failed to find offset: " << native_pc_offset; - } - return GetBitMap(index); - } - - static uint32_t Hash(uint32_t native_offset) { - uint32_t hash = native_offset; - hash ^= (hash >> 20) ^ (hash >> 12); - hash ^= (hash >> 7) ^ (hash >> 4); - return hash; - } - - // The number of bytes used to encode registers. - size_t RegWidth() const { - return (static_cast<size_t>(data_[0]) | (static_cast<size_t>(data_[1]) << 8)) >> 3; - } - - private: - // Skip the size information at the beginning of data. 
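
For reference while reading this deletion: the table packed a 4-byte header, where the low 3 bits of the first 16-bit word gave the width in bytes of each native-pc-offset key, the remaining 13 bits gave the width of each register bitmap, and the second 16-bit word gave the number of hash-table slots; entries of [offset key | register bitmap] followed, placed by the Hash()/linear-probe scheme shown above. A small standalone decoder for just that header, run on a made-up byte sequence:

#include <cstddef>
#include <cstdint>
#include <iostream>

// Header decoding of the (now deleted) NativePcOffsetToReferenceMap format.
struct GcMapHeader {
  size_t native_offset_width;  // bytes per native-pc-offset key
  size_t reg_width;            // bytes per register bitmap
  size_t num_entries;          // number of hash-table slots
};

GcMapHeader DecodeHeader(const uint8_t* data) {
  GcMapHeader h;
  h.native_offset_width = data[0] & 7;                       // low 3 bits
  h.reg_width = (static_cast<size_t>(data[0]) |
                 (static_cast<size_t>(data[1]) << 8)) >> 3;  // remaining 13 bits
  h.num_entries = data[2] | (data[3] << 8);
  return h;
}

int main() {
  // Hypothetical header: 2-byte offset keys, 3-byte bitmaps, 5 entries.
  const uint8_t header[4] = { static_cast<uint8_t>((3u << 3) | 2u), 0u, 5u, 0u };
  GcMapHeader h = DecodeHeader(header);
  std::cout << h.native_offset_width << " " << h.reg_width << " "
            << h.num_entries << "\n";  // 2 3 5
}
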
- const uint8_t* Table() const { - return data_ + 4; - } - - // Number of bytes used to encode a native offset. - size_t NativeOffsetWidth() const { - return data_[0] & 7; - } - - // The width of an entry in the table. - size_t EntryWidth() const { - return NativeOffsetWidth() + RegWidth(); - } - - const uint8_t* const data_; // The header and table data -}; - -} // namespace art - -#endif // ART_RUNTIME_GC_MAP_H_ diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index 53d645ce29..37ff6a5dd6 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -195,9 +195,7 @@ class ScopedCodeCacheWrite : ScopedTrace { uint8_t* JitCodeCache::CommitCode(Thread* self, ArtMethod* method, - const uint8_t* mapping_table, const uint8_t* vmap_table, - const uint8_t* gc_map, size_t frame_size_in_bytes, size_t core_spill_mask, size_t fp_spill_mask, @@ -206,9 +204,7 @@ uint8_t* JitCodeCache::CommitCode(Thread* self, bool osr) { uint8_t* result = CommitCodeInternal(self, method, - mapping_table, vmap_table, - gc_map, frame_size_in_bytes, core_spill_mask, fp_spill_mask, @@ -220,9 +216,7 @@ uint8_t* JitCodeCache::CommitCode(Thread* self, GarbageCollectCache(self); result = CommitCodeInternal(self, method, - mapping_table, vmap_table, - gc_map, frame_size_in_bytes, core_spill_mask, fp_spill_mask, @@ -254,8 +248,6 @@ void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UN // It does nothing if we are not using native debugger. DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr)); - FreeData(const_cast<uint8_t*>(method_header->GetNativeGcMap())); - FreeData(const_cast<uint8_t*>(method_header->GetMappingTable())); // Use the offset directly to prevent sanity check that the method is // compiled with optimizing. // TODO(ngeoffray): Clean up. @@ -314,9 +306,7 @@ void JitCodeCache::ClearGcRootsInInlineCaches(Thread* self) { uint8_t* JitCodeCache::CommitCodeInternal(Thread* self, ArtMethod* method, - const uint8_t* mapping_table, const uint8_t* vmap_table, - const uint8_t* gc_map, size_t frame_size_in_bytes, size_t core_spill_mask, size_t fp_spill_mask, @@ -346,9 +336,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self, std::copy(code, code + code_size, code_ptr); method_header = OatQuickMethodHeader::FromCodePointer(code_ptr); new (method_header) OatQuickMethodHeader( - (mapping_table == nullptr) ? 0 : code_ptr - mapping_table, (vmap_table == nullptr) ? 0 : code_ptr - vmap_table, - (gc_map == nullptr) ? 0 : code_ptr - gc_map, frame_size_in_bytes, core_spill_mask, fp_spill_mask, diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h index a54f04faa4..6faa8f15b6 100644 --- a/runtime/jit/jit_code_cache.h +++ b/runtime/jit/jit_code_cache.h @@ -91,9 +91,7 @@ class JitCodeCache { // Allocate and write code and its metadata to the code cache. uint8_t* CommitCode(Thread* self, ArtMethod* method, - const uint8_t* mapping_table, const uint8_t* vmap_table, - const uint8_t* gc_map, size_t frame_size_in_bytes, size_t core_spill_mask, size_t fp_spill_mask, @@ -201,9 +199,7 @@ class JitCodeCache { // allocation fails. Return null if the allocation fails. 
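
The jit_code_cache.cc hunk above is now the whole of the per-method metadata bookkeeping: the pre-header placed in front of JIT-compiled code records a single backward offset from the code start to its vmap table (the CodeInfo), or 0 when there is none. A standalone sketch of how that backward offset is produced and later resolved; the addresses and sizes are invented, and the two JIT regions are simplified to one buffer here:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical layout: [.. vmap table .. pre-header .. code ..].
  std::vector<uint8_t> memory(256, 0);
  uint8_t* vmap_table = memory.data() + 16;   // where the CodeInfo blob landed
  uint8_t* code_ptr   = memory.data() + 128;  // where the code was copied

  // What the constructor call in the hunk stores: 0 means "no table",
  // otherwise the distance from the code start back to the table.
  uint32_t vmap_table_offset =
      (vmap_table == nullptr) ? 0u : static_cast<uint32_t>(code_ptr - vmap_table);

  // What a GetVmapTable()-style reader does with it later: walk back from code.
  const uint8_t* recovered =
      (vmap_table_offset == 0u) ? nullptr : code_ptr - vmap_table_offset;

  std::cout << vmap_table_offset << " " << (recovered == vmap_table) << "\n";  // 112 1
}
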
uint8_t* CommitCodeInternal(Thread* self, ArtMethod* method, - const uint8_t* mapping_table, const uint8_t* vmap_table, - const uint8_t* gc_map, size_t frame_size_in_bytes, size_t core_spill_mask, size_t fp_spill_mask, diff --git a/runtime/oat.h b/runtime/oat.h index 469a65f2de..543d99f2ad 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -32,7 +32,7 @@ class InstructionSetFeatures; class PACKED(4) OatHeader { public: static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' }; - static constexpr uint8_t kOatVersion[] = { '0', '7', '8', '\0' }; + static constexpr uint8_t kOatVersion[] = { '0', '7', '9', '\0' }; static constexpr const char* kImageLocationKey = "image-location"; static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline"; diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h index 7b92120fde..d7d0c4f733 100644 --- a/runtime/oat_file-inl.h +++ b/runtime/oat_file-inl.h @@ -71,44 +71,6 @@ inline uint32_t OatFile::OatMethod::GetFpSpillMask() const { return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.FpSpillMask(); } -inline const uint8_t* OatFile::OatMethod::GetGcMap() const { - const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_)); - if (code == nullptr) { - return nullptr; - } - uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].gc_map_offset_; - if (UNLIKELY(offset == 0u)) { - return nullptr; - } - return reinterpret_cast<const uint8_t*>(code) - offset; -} - -inline uint32_t OatFile::OatMethod::GetGcMapOffset() const { - const uint8_t* gc_map = GetGcMap(); - return static_cast<uint32_t>(gc_map != nullptr ? gc_map - begin_ : 0u); -} - -inline uint32_t OatFile::OatMethod::GetGcMapOffsetOffset() const { - const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader(); - if (method_header == nullptr) { - return 0u; - } - return reinterpret_cast<const uint8_t*>(&method_header->gc_map_offset_) - begin_; -} - -inline uint32_t OatFile::OatMethod::GetMappingTableOffset() const { - const uint8_t* mapping_table = GetMappingTable(); - return static_cast<uint32_t>(mapping_table != nullptr ? mapping_table - begin_ : 0u); -} - -inline uint32_t OatFile::OatMethod::GetMappingTableOffsetOffset() const { - const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader(); - if (method_header == nullptr) { - return 0u; - } - return reinterpret_cast<const uint8_t*>(&method_header->mapping_table_offset_) - begin_; -} - inline uint32_t OatFile::OatMethod::GetVmapTableOffset() const { const uint8_t* vmap_table = GetVmapTable(); return static_cast<uint32_t>(vmap_table != nullptr ? 
vmap_table - begin_ : 0u); @@ -122,18 +84,6 @@ inline uint32_t OatFile::OatMethod::GetVmapTableOffsetOffset() const { return reinterpret_cast<const uint8_t*>(&method_header->vmap_table_offset_) - begin_; } -inline const uint8_t* OatFile::OatMethod::GetMappingTable() const { - const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_)); - if (code == nullptr) { - return nullptr; - } - uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].mapping_table_offset_; - if (UNLIKELY(offset == 0u)) { - return nullptr; - } - return reinterpret_cast<const uint8_t*>(code) - offset; -} - inline const uint8_t* OatFile::OatMethod::GetVmapTable() const { const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_)); if (code == nullptr) { diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 7c83715ff5..46fc3a3cdc 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -50,7 +50,6 @@ #include "type_lookup_table.h" #include "utils.h" #include "utils/dex_cache_arrays_layout-inl.h" -#include "vmap_table.h" namespace art { diff --git a/runtime/oat_file.h b/runtime/oat_file.h index 705ba0d976..11a9d76dad 100644 --- a/runtime/oat_file.h +++ b/runtime/oat_file.h @@ -123,18 +123,10 @@ class OatFile { uint32_t GetCoreSpillMask() const; uint32_t GetFpSpillMask() const; - const uint8_t* GetMappingTable() const; - uint32_t GetMappingTableOffset() const; - uint32_t GetMappingTableOffsetOffset() const; - const uint8_t* GetVmapTable() const; uint32_t GetVmapTableOffset() const; uint32_t GetVmapTableOffsetOffset() const; - const uint8_t* GetGcMap() const; - uint32_t GetGcMapOffset() const; - uint32_t GetGcMapOffsetOffset() const; - // Create an OatMethod with offsets relative to the given base address OatMethod(const uint8_t* base, const uint32_t code_offset) : begin_(base), code_offset_(code_offset) { diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc index 07a112fb7f..0ab2bfe80e 100644 --- a/runtime/oat_quick_method_header.cc +++ b/runtime/oat_quick_method_header.cc @@ -17,23 +17,18 @@ #include "oat_quick_method_header.h" #include "art_method.h" -#include "mapping_table.h" #include "scoped_thread_state_change.h" #include "thread.h" namespace art { OatQuickMethodHeader::OatQuickMethodHeader( - uint32_t mapping_table_offset, uint32_t vmap_table_offset, - uint32_t gc_map_offset, uint32_t frame_size_in_bytes, uint32_t core_spill_mask, uint32_t fp_spill_mask, uint32_t code_size) - : mapping_table_offset_(mapping_table_offset), - vmap_table_offset_(vmap_table_offset), - gc_map_offset_(gc_map_offset), + : vmap_table_offset_(vmap_table_offset), frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask), code_size_(code_size) {} @@ -52,28 +47,8 @@ uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method, return stack_map.GetDexPc(encoding.stack_map_encoding); } } else { - MappingTable table(GetMappingTable()); - // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping - // but they have no suspend checks and, consequently, we never call ToDexPc() for them. - if (table.TotalSize() == 0) { - DCHECK(method->IsNative()); - return DexFile::kDexNoIndex; - } - - // Assume the caller wants a pc-to-dex mapping so check here first. - typedef MappingTable::PcToDexIterator It; - for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) { - if (cur.NativePcOffset() == sought_offset) { - return cur.DexPc(); - } - } - // Now check dex-to-pc mappings. 
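
Both directions served by the deleted MappingTable walk are now answered from the same stack-map records: ToDexPc() looks up the stack map at a native pc offset, and ToNativeQuickPc() in the next hunk looks up the stack map registered for a dex pc. A toy stand-in with the stack maps reduced to plain (native offset, dex pc) pairs, just to show the two lookups sharing one table:

#include <cstdint>
#include <iostream>
#include <vector>

// Toy stand-in for the per-method stack maps: one record per mapped point.
struct ToyStackMap {
  uint32_t native_pc_offset;
  uint32_t dex_pc;
};

constexpr uint32_t kNoIndex = 0xFFFFFFFFu;  // stands in for DexFile::kDexNoIndex

uint32_t ToDexPc(const std::vector<ToyStackMap>& maps, uint32_t native_pc_offset) {
  for (const ToyStackMap& m : maps) {
    if (m.native_pc_offset == native_pc_offset) return m.dex_pc;
  }
  return kNoIndex;
}

uint32_t ToNativePcOffset(const std::vector<ToyStackMap>& maps, uint32_t dex_pc) {
  for (const ToyStackMap& m : maps) {
    if (m.dex_pc == dex_pc) return m.native_pc_offset;
  }
  return kNoIndex;
}

int main() {
  std::vector<ToyStackMap> maps = {{0x10, 0x3}, {0x24, 0x8}, {0x40, 0xc}};
  std::cout << std::hex << ToDexPc(maps, 0x24) << " "   // 8
            << ToNativePcOffset(maps, 0xc) << "\n";     // 40
}
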
- typedef MappingTable::DexToPcIterator It2; - for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) { - if (cur.NativePcOffset() == sought_offset) { - return cur.DexPc(); - } - } + DCHECK(method->IsNative()); + return DexFile::kDexNoIndex; } if (abort_on_failure) { ScopedObjectAccess soa(Thread::Current()); @@ -91,44 +66,22 @@ uintptr_t OatQuickMethodHeader::ToNativeQuickPc(ArtMethod* method, bool is_for_catch_handler, bool abort_on_failure) const { const void* entry_point = GetEntryPoint(); - if (IsOptimized()) { - // Optimized code does not have a mapping table. Search for the dex-to-pc - // mapping in stack maps. - CodeInfo code_info = GetOptimizedCodeInfo(); - CodeInfoEncoding encoding = code_info.ExtractEncoding(); + DCHECK(!method->IsNative()); + DCHECK(IsOptimized()); + // Search for the dex-to-pc mapping in stack maps. + CodeInfo code_info = GetOptimizedCodeInfo(); + CodeInfoEncoding encoding = code_info.ExtractEncoding(); - // All stack maps are stored in the same CodeItem section, safepoint stack - // maps first, then catch stack maps. We use `is_for_catch_handler` to select - // the order of iteration. - StackMap stack_map = - LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding) - : code_info.GetStackMapForDexPc(dex_pc, encoding); - if (stack_map.IsValid()) { - return reinterpret_cast<uintptr_t>(entry_point) + - stack_map.GetNativePcOffset(encoding.stack_map_encoding); - } - } else { - MappingTable table(GetMappingTable()); - if (table.TotalSize() == 0) { - DCHECK_EQ(dex_pc, 0U); - return 0; // Special no mapping/pc == 0 case - } - // Assume the caller wants a dex-to-pc mapping so check here first. - typedef MappingTable::DexToPcIterator It; - for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) { - if (cur.DexPc() == dex_pc) { - return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset(); - } - } - // Now check pc-to-dex mappings. - typedef MappingTable::PcToDexIterator It2; - for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) { - if (cur.DexPc() == dex_pc) { - return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset(); - } - } + // All stack maps are stored in the same CodeItem section, safepoint stack + // maps first, then catch stack maps. We use `is_for_catch_handler` to select + // the order of iteration. + StackMap stack_map = + LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding) + : code_info.GetStackMapForDexPc(dex_pc, encoding); + if (stack_map.IsValid()) { + return reinterpret_cast<uintptr_t>(entry_point) + + stack_map.GetNativePcOffset(encoding.stack_map_encoding); } - if (abort_on_failure) { ScopedObjectAccess soa(Thread::Current()); LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h index daabc6ee09..abddc6d7a0 100644 --- a/runtime/oat_quick_method_header.h +++ b/runtime/oat_quick_method_header.h @@ -30,9 +30,7 @@ class ArtMethod; // OatQuickMethodHeader precedes the raw code chunk generated by the compiler. 
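
Before the class diff below, it may help to see the surviving layout next to the old one: dropping mapping_table_offset_ and gc_map_offset_ removes two 32-bit words from every pre-header. The structs here are plain illustrative mirrors only; the real class is PACKED(4), stores a QuickMethodFrameInfo, and is followed directly by the code it describes:

#include <cstdint>
#include <iostream>

struct FrameInfoModel {          // stands in for QuickMethodFrameInfo
  uint32_t frame_size_in_bytes;
  uint32_t core_spill_mask;
  uint32_t fp_spill_mask;
};

struct OldHeaderModel {          // before this change
  uint32_t mapping_table_offset;
  uint32_t vmap_table_offset;
  uint32_t gc_map_offset;
  FrameInfoModel frame_info;
  uint32_t code_size;
};

struct NewHeaderModel {          // after this change
  uint32_t vmap_table_offset;
  FrameInfoModel frame_info;
  uint32_t code_size;
};

int main() {
  // 28 -> 20 bytes: two uint32_t offsets fewer in front of every compiled method.
  std::cout << sizeof(OldHeaderModel) << " -> " << sizeof(NewHeaderModel) << "\n";
}
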
class PACKED(4) OatQuickMethodHeader { public: - OatQuickMethodHeader(uint32_t mapping_table_offset = 0U, - uint32_t vmap_table_offset = 0U, - uint32_t gc_map_offset = 0U, + OatQuickMethodHeader(uint32_t vmap_table_offset = 0U, uint32_t frame_size_in_bytes = 0U, uint32_t core_spill_mask = 0U, uint32_t fp_spill_mask = 0U, @@ -60,7 +58,7 @@ class PACKED(4) OatQuickMethodHeader { } bool IsOptimized() const { - return gc_map_offset_ == 0 && vmap_table_offset_ != 0; + return code_size_ != 0 && vmap_table_offset_ != 0; } const void* GetOptimizedCodeInfoPtr() const { @@ -81,14 +79,6 @@ class PACKED(4) OatQuickMethodHeader { return code_size_; } - const uint8_t* GetNativeGcMap() const { - return (gc_map_offset_ == 0) ? nullptr : code_ - gc_map_offset_; - } - - const uint8_t* GetMappingTable() const { - return (mapping_table_offset_ == 0) ? nullptr : code_ - mapping_table_offset_; - } - const uint8_t* GetVmapTable() const { CHECK(!IsOptimized()) << "Unimplemented vmap table for optimizing compiler"; return (vmap_table_offset_ == 0) ? nullptr : code_ - vmap_table_offset_; @@ -135,12 +125,8 @@ class PACKED(4) OatQuickMethodHeader { uint32_t ToDexPc(ArtMethod* method, const uintptr_t pc, bool abort_on_failure = true) const; - // The offset in bytes from the start of the mapping table to the end of the header. - uint32_t mapping_table_offset_; // The offset in bytes from the start of the vmap table to the end of the header. uint32_t vmap_table_offset_; - // The offset in bytes from the start of the gc map to the end of the header. - uint32_t gc_map_offset_; // The stack frame information. QuickMethodFrameInfo frame_info_; // The code size in bytes. diff --git a/runtime/stack.cc b/runtime/stack.cc index c22eb92f54..56ef5aaa90 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -21,7 +21,6 @@ #include "base/hex_dump.h" #include "entrypoints/entrypoint_utils-inl.h" #include "entrypoints/runtime_asm_entrypoints.h" -#include "gc_map.h" #include "gc/space/image_space.h" #include "gc/space/space-inl.h" #include "jit/jit.h" @@ -36,7 +35,6 @@ #include "thread.h" #include "thread_list.h" #include "verify_object-inl.h" -#include "vmap_table.h" namespace art { @@ -215,33 +213,6 @@ size_t StackVisitor::GetNativePcOffset() const { return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_); } -bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) { - DCHECK_EQ(m, GetMethod()); - // Process register map (which native and runtime methods don't have) - if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) { - return false; - } - const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); - if (method_header->IsOptimized()) { - return true; // TODO: Implement. - } - const uint8_t* native_gc_map = method_header->GetNativeGcMap(); - CHECK(native_gc_map != nullptr) << PrettyMethod(m); - const DexFile::CodeItem* code_item = m->GetCodeItem(); - // Can't be null or how would we compile its instructions? - DCHECK(code_item != nullptr) << PrettyMethod(m); - NativePcOffsetToReferenceMap map(native_gc_map); - size_t num_regs = std::min(map.RegWidth() * 8, static_cast<size_t>(code_item->registers_size_)); - const uint8_t* reg_bitmap = nullptr; - if (num_regs > 0) { - uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc()); - reg_bitmap = map.FindBitMap(native_pc_offset); - DCHECK(reg_bitmap != nullptr); - } - // Does this register hold a reference? 
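
The predicate being deleted here ends with the same byte-array bit test that every quick GC-map consumer in this patch used, reg_bitmap[reg / 8] >> (reg % 8) & 1. A minimal standalone version of that test on an invented bitmap, for readers tracing the removed logic:

#include <cstddef>
#include <cstdint>
#include <iostream>

// The byte-array bitmap test used by the removed quick GC-map consumers.
bool TestBitmap(size_t reg, const uint8_t* reg_bitmap) {
  return ((reg_bitmap[reg / 8] >> (reg % 8)) & 0x01) != 0;
}

int main() {
  // Hypothetical bitmap: bits 1, 3 and 8 set, i.e. v1, v3 and v8 hold references.
  const uint8_t bitmap[2] = { 0x0A, 0x01 };
  for (size_t reg = 0; reg < 16; ++reg) {
    if (TestBitmap(reg, bitmap)) {
      std::cout << "v" << reg << " holds a reference\n";
    }
  }
}
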
- return vreg < num_regs && TestBitmap(vreg, reg_bitmap); -} - bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const { @@ -273,11 +244,8 @@ bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) { return true; } - if (cur_oat_quick_method_header_->IsOptimized()) { - return GetVRegFromOptimizedCode(m, vreg, kind, val); - } else { - return GetVRegFromQuickCode(m, vreg, kind, val); - } + DCHECK(cur_oat_quick_method_header_->IsOptimized()); + return GetVRegFromOptimizedCode(m, vreg, kind, val); } else { DCHECK(cur_shadow_frame_ != nullptr); if (kind == kReferenceVReg) { @@ -290,29 +258,6 @@ bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* } } -bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind, - uint32_t* val) const { - DCHECK_EQ(m, GetMethod()); - const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); - QuickMethodFrameInfo frame_info = method_header->GetFrameInfo(); - const VmapTable vmap_table(method_header->GetVmapTable()); - uint32_t vmap_offset; - // TODO: IsInContext stops before spotting floating point registers. - if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) { - bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg); - uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask(); - uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind); - return GetRegisterIfAccessible(reg, kind, val); - } else { - const DexFile::CodeItem* code_item = m->GetCodeItem(); - DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile - // its instructions? - *val = *GetVRegAddrFromQuickCode(cur_quick_frame_, code_item, frame_info.CoreSpillMask(), - frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg); - return true; - } -} - bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const { DCHECK_EQ(m, GetMethod()); @@ -432,11 +377,8 @@ bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, if (cur_quick_frame_ != nullptr) { DCHECK(context_ != nullptr); // You can't reliably read registers without a context. DCHECK(m == GetMethod()); - if (cur_oat_quick_method_header_->IsOptimized()) { - return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val); - } else { - return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val); - } + DCHECK(cur_oat_quick_method_header_->IsOptimized()); + return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val); } else { DCHECK(cur_shadow_frame_ != nullptr); *val = cur_shadow_frame_->GetVRegLong(vreg); @@ -444,33 +386,6 @@ bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, } } -bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, - VRegKind kind_hi, uint64_t* val) const { - DCHECK_EQ(m, GetMethod()); - const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); - QuickMethodFrameInfo frame_info = method_header->GetFrameInfo(); - const VmapTable vmap_table(method_header->GetVmapTable()); - uint32_t vmap_offset_lo, vmap_offset_hi; - // TODO: IsInContext stops before spotting floating point registers. 
- if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) && - vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) { - bool is_float = (kind_lo == kDoubleLoVReg); - uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask(); - uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo); - uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi); - return GetRegisterPairIfAccessible(reg_lo, reg_hi, kind_lo, val); - } else { - const DexFile::CodeItem* code_item = m->GetCodeItem(); - DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile - // its instructions? - uint32_t* addr = GetVRegAddrFromQuickCode( - cur_quick_frame_, code_item, frame_info.CoreSpillMask(), - frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg); - *val = *reinterpret_cast<uint64_t*>(addr); - return true; - } -} - bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const { diff --git a/runtime/stack.h b/runtime/stack.h index 3659560555..51f7d6368b 100644 --- a/runtime/stack.h +++ b/runtime/stack.h @@ -634,9 +634,6 @@ class StackVisitor { bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) SHARED_REQUIRES(Locks::mutator_lock_); - bool IsReferenceVReg(ArtMethod* m, uint16_t vreg) - SHARED_REQUIRES(Locks::mutator_lock_); - bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const SHARED_REQUIRES(Locks::mutator_lock_); @@ -798,9 +795,6 @@ class StackVisitor { bool GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const SHARED_REQUIRES(Locks::mutator_lock_); - bool GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind, - uint32_t* val) const - SHARED_REQUIRES(Locks::mutator_lock_); bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const SHARED_REQUIRES(Locks::mutator_lock_); @@ -808,9 +802,6 @@ class StackVisitor { bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const SHARED_REQUIRES(Locks::mutator_lock_); - bool GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, - VRegKind kind_hi, uint64_t* val) const - SHARED_REQUIRES(Locks::mutator_lock_); bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const diff --git a/runtime/thread.cc b/runtime/thread.cc index 3ecb04185f..57ccabc09c 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -42,7 +42,6 @@ #include "dex_file-inl.h" #include "entrypoints/entrypoint_utils.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" -#include "gc_map.h" #include "gc/accounting/card_table-inl.h" #include "gc/accounting/heap_bitmap-inl.h" #include "gc/allocator/rosalloc.h" @@ -72,7 +71,6 @@ #include "utils.h" #include "verifier/method_verifier.h" #include "verify_object-inl.h" -#include "vmap_table.h" #include "well_known_classes.h" #include "interpreter/interpreter.h" @@ -2765,83 +2763,36 @@ class ReferenceMapVisitor : public StackVisitor { // Process register map (which native and runtime methods don't have) if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) { const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); - if (method_header->IsOptimized()) { - auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>( - reinterpret_cast<uintptr_t>(cur_quick_frame)); - uintptr_t 
native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc()); - CodeInfo code_info = method_header->GetOptimizedCodeInfo(); - CodeInfoEncoding encoding = code_info.ExtractEncoding(); - StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding); - DCHECK(map.IsValid()); - // Visit stack entries that hold pointers. - size_t number_of_bits = map.GetNumberOfStackMaskBits(encoding.stack_map_encoding); - for (size_t i = 0; i < number_of_bits; ++i) { - if (map.GetStackMaskBit(encoding.stack_map_encoding, i)) { - auto* ref_addr = vreg_base + i; - mirror::Object* ref = ref_addr->AsMirrorPtr(); - if (ref != nullptr) { - mirror::Object* new_ref = ref; - visitor_(&new_ref, -1, this); - if (ref != new_ref) { - ref_addr->Assign(new_ref); - } + DCHECK(method_header->IsOptimized()); + auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>( + reinterpret_cast<uintptr_t>(cur_quick_frame)); + uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc()); + CodeInfo code_info = method_header->GetOptimizedCodeInfo(); + CodeInfoEncoding encoding = code_info.ExtractEncoding(); + StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding); + DCHECK(map.IsValid()); + // Visit stack entries that hold pointers. + size_t number_of_bits = map.GetNumberOfStackMaskBits(encoding.stack_map_encoding); + for (size_t i = 0; i < number_of_bits; ++i) { + if (map.GetStackMaskBit(encoding.stack_map_encoding, i)) { + auto* ref_addr = vreg_base + i; + mirror::Object* ref = ref_addr->AsMirrorPtr(); + if (ref != nullptr) { + mirror::Object* new_ref = ref; + visitor_(&new_ref, -1, this); + if (ref != new_ref) { + ref_addr->Assign(new_ref); } } } - // Visit callee-save registers that hold pointers. - uint32_t register_mask = map.GetRegisterMask(encoding.stack_map_encoding); - for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) { - if (register_mask & (1 << i)) { - mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i)); - if (*ref_addr != nullptr) { - visitor_(ref_addr, -1, this); - } - } - } - } else { - const uint8_t* native_gc_map = method_header->GetNativeGcMap(); - CHECK(native_gc_map != nullptr) << PrettyMethod(m); - const DexFile::CodeItem* code_item = m->GetCodeItem(); - // Can't be null or how would we compile its instructions? - DCHECK(code_item != nullptr) << PrettyMethod(m); - NativePcOffsetToReferenceMap map(native_gc_map); - size_t num_regs = map.RegWidth() * 8; - if (num_regs > 0) { - uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc()); - const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset); - DCHECK(reg_bitmap != nullptr); - const VmapTable vmap_table(method_header->GetVmapTable()); - QuickMethodFrameInfo frame_info = method_header->GetFrameInfo(); - // For all dex registers in the bitmap - DCHECK(cur_quick_frame != nullptr); - for (size_t reg = 0; reg < num_regs; ++reg) { - // Does this register hold a reference? - if (TestBitmap(reg, reg_bitmap)) { - uint32_t vmap_offset; - if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) { - int vmap_reg = vmap_table.ComputeRegister(frame_info.CoreSpillMask(), vmap_offset, - kReferenceVReg); - // This is sound as spilled GPRs will be word sized (ie 32 or 64bit). 
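
The retained branch above, now the only one, finds references in two places per safepoint: stack-mask bits, which index StackReference slots counted up from the quick frame pointer, and the register mask, whose set bits name callee-save GPRs that may hold a reference. A standalone model of those two loops, with the masks and frame contents invented:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical safepoint record: 8 stack slots, slots 2 and 5 hold refs,
  // and callee-save registers 5 and 6 hold refs.
  const size_t number_of_bits = 8;
  const std::vector<bool> stack_mask = {false, false, true, false,
                                        false, true, false, false};
  const uint32_t register_mask = (1u << 5) | (1u << 6);

  // Stack slots: the real code walks StackReference<mirror::Object> slots
  // starting at the frame pointer; here they are just tagged integers.
  std::vector<uintptr_t> frame_slots(number_of_bits, 0);
  frame_slots[2] = 0x1000;  // pretend reference
  frame_slots[5] = 0x2000;  // pretend reference

  for (size_t i = 0; i < number_of_bits; ++i) {
    if (stack_mask[i] && frame_slots[i] != 0) {
      std::cout << "visit stack slot " << i << "\n";
    }
  }
  for (size_t i = 0; i < 32; ++i) {
    if ((register_mask & (1u << i)) != 0) {
      std::cout << "visit callee-save register " << i << "\n";
    }
  }
}
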
- mirror::Object** ref_addr = - reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg)); - if (*ref_addr != nullptr) { - visitor_(ref_addr, reg, this); - } - } else { - StackReference<mirror::Object>* ref_addr = - reinterpret_cast<StackReference<mirror::Object>*>(GetVRegAddrFromQuickCode( - cur_quick_frame, code_item, frame_info.CoreSpillMask(), - frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg)); - mirror::Object* ref = ref_addr->AsMirrorPtr(); - if (ref != nullptr) { - mirror::Object* new_ref = ref; - visitor_(&new_ref, reg, this); - if (ref != new_ref) { - ref_addr->Assign(new_ref); - } - } - } - } + } + // Visit callee-save registers that hold pointers. + uint32_t register_mask = map.GetRegisterMask(encoding.stack_map_encoding); + for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) { + if (register_mask & (1 << i)) { + mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i)); + if (*ref_addr != nullptr) { + visitor_(ref_addr, -1, this); } } } diff --git a/runtime/vmap_table.h b/runtime/vmap_table.h deleted file mode 100644 index db9e1ea5cb..0000000000 --- a/runtime/vmap_table.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_VMAP_TABLE_H_ -#define ART_RUNTIME_VMAP_TABLE_H_ - -#include "base/logging.h" -#include "leb128.h" -#include "stack.h" - -namespace art { - -class VmapTable { - public: - // For efficient encoding of special values, entries are adjusted by 2. - static constexpr uint16_t kEntryAdjustment = 2u; - static constexpr uint16_t kAdjustedFpMarker = static_cast<uint16_t>(0xffffu + kEntryAdjustment); - - explicit VmapTable(const uint8_t* table) : table_(table) { - } - - // Look up nth entry, not called from performance critical code. - uint16_t operator[](size_t n) const { - const uint8_t* table = table_; - size_t size = DecodeUnsignedLeb128(&table); - CHECK_LT(n, size); - uint16_t adjusted_entry = DecodeUnsignedLeb128(&table); - for (size_t i = 0; i < n; ++i) { - adjusted_entry = DecodeUnsignedLeb128(&table); - } - return adjusted_entry - kEntryAdjustment; - } - - size_t Size() const { - const uint8_t* table = table_; - return DecodeUnsignedLeb128(&table); - } - - // Is the dex register 'vreg' in the context or on the stack? Should not be called when the - // 'kind' is unknown or constant. - bool IsInContext(size_t vreg, VRegKind kind, uint32_t* vmap_offset) const { - DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg || - kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg || - kind == kDoubleHiVReg || kind == kImpreciseConstant); - *vmap_offset = 0xEBAD0FF5; - // TODO: take advantage of the registers being ordered - // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values - // are never promoted to floating point registers. 
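
For readers of the vmap_table.h deletion in progress here: the table was a ULEB128 stream, a count followed by one entry per promoted dex register, each stored with a +2 adjustment for cheap encoding of special values, and with an adjusted 0xffff marker separating core-register entries from floating-point ones. A standalone decode of a tiny hand-built table, core entries only and small values so every entry fits in a single LEB byte:

#include <cstdint>
#include <iostream>

// Standard unsigned LEB128 decode, as the deleted VmapTable used.
uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*data)++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  return result;
}

int main() {
  // Hand-built table: 3 entries; vregs 0, 4 and 7 promoted, stored with the
  // +2 adjustment the deleted class calls kEntryAdjustment.
  const uint8_t table[] = {3, 0 + 2, 4 + 2, 7 + 2};
  const uint8_t* p = table;
  uint32_t size = DecodeUnsignedLeb128(&p);
  for (uint32_t i = 0; i < size; ++i) {
    uint32_t vreg = DecodeUnsignedLeb128(&p) - 2;  // undo the adjustment
    std::cout << "vmap slot " << i << " -> v" << vreg << "\n";
  }
}
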
- bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg); - bool in_floats = false; - const uint8_t* table = table_; - uint16_t adjusted_vreg = vreg + kEntryAdjustment; - size_t end = DecodeUnsignedLeb128(&table); - bool high_reg = (kind == kLongHiVReg) || (kind == kDoubleHiVReg); - bool target64 = (kRuntimeISA == kArm64) || (kRuntimeISA == kX86_64) || (kRuntimeISA == kMips64); - if (target64 && high_reg) { - // Wide promoted registers are associated with the sreg of the low portion. - adjusted_vreg--; - } - for (size_t i = 0; i < end; ++i) { - // Stop if we find what we are are looking for. - uint16_t adjusted_entry = DecodeUnsignedLeb128(&table); - if ((adjusted_entry == adjusted_vreg) && (in_floats == is_float)) { - *vmap_offset = i; - return true; - } - // 0xffff is the marker for LR (return PC on x86), following it are spilled float registers. - if (adjusted_entry == kAdjustedFpMarker) { - in_floats = true; - } - } - return false; - } - - // Compute the register number that corresponds to the entry in the vmap (vmap_offset, computed - // by IsInContext above). If the kind is floating point then the result will be a floating point - // register number, otherwise it will be an integer register number. - uint32_t ComputeRegister(uint32_t spill_mask, uint32_t vmap_offset, VRegKind kind) const { - // Compute the register we need to load from the context. - DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg || - kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg || - kind == kDoubleHiVReg || kind == kImpreciseConstant); - // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values - // are never promoted to floating point registers. - bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg); - uint32_t matches = 0; - if (UNLIKELY(is_float)) { - const uint8_t* table = table_; - DecodeUnsignedLeb128(&table); // Skip size. - while (DecodeUnsignedLeb128(&table) != kAdjustedFpMarker) { - matches++; - } - matches++; - } - CHECK_LT(vmap_offset - matches, static_cast<uint32_t>(POPCOUNT(spill_mask))); - uint32_t spill_shifts = 0; - while (matches != (vmap_offset + 1)) { - DCHECK_NE(spill_mask, 0u); - matches += spill_mask & 1; // Add 1 if the low bit is set - spill_mask >>= 1; - spill_shifts++; - } - spill_shifts--; // wind back one as we want the last match - return spill_shifts; - } - - private: - const uint8_t* const table_; -}; - -} // namespace art - -#endif // ART_RUNTIME_VMAP_TABLE_H_ diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc index 2d26fa1ac9..284e5544fb 100644 --- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc @@ -49,13 +49,7 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor { if (m_name.compare("f") == 0) { CHECK_REGS_CONTAIN_REFS(0x03U, true, 8); // v8: this CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1); // v8: this, v1: x - if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) { - CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x - } CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1); // v8: this, v3: y, v1: x - if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) { - CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x - } CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1); // v8: this, v3: y, v1: x // v2 is added because of the instruction at DexPC 0024. Object merges with 0 is Object. 
See: // 0024: move-object v3, v2 @@ -68,15 +62,6 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor { CHECK_REGS_CONTAIN_REFS(0x14U, false, 2); // v2: y // Note that v0: ex can be eliminated because it's a dead merge of two different exceptions. CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex) - if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) { - // v8: this, v4: x[1], v2: y, v1: x (dead v0: ex) - CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 4, 2, 1); - // v8: this, v4: x[1], v2: y, v1: x (dead v0: ex) - CHECK_REGS_CONTAIN_REFS(0x1eU, true, 8, 4, 2, 1); - // v4 is removed from the root set because there is a "merge" operation. - // See 0016: if-nez v2, 0020. - CHECK_REGS_CONTAIN_REFS(0x20U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex) - } CHECK_REGS_CONTAIN_REFS(0x22U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex) if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) { |