Diffstat (limited to 'compiler')
-rw-r--r--  compiler/common_compiler_test.cc                  13
-rw-r--r--  compiler/compiled_method.cc                       14
-rw-r--r--  compiler/compiled_method.h                        14
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc                2
-rw-r--r--  compiler/driver/compiled_method_storage.cc        14
-rw-r--r--  compiler/driver/compiled_method_storage.h          8
-rw-r--r--  compiler/driver/compiled_method_storage_test.cc   14
-rw-r--r--  compiler/exception_test.cc                         4
-rw-r--r--  compiler/jni/quick/jni_compiler.cc                 4
-rw-r--r--  compiler/linker/relative_patcher_test.h            2
-rw-r--r--  compiler/oat_test.cc                               2
-rw-r--r--  compiler/oat_writer.cc                           153
-rw-r--r--  compiler/oat_writer.h                              3
-rw-r--r--  compiler/optimizing/code_generator.cc             17
-rw-r--r--  compiler/optimizing/code_generator.h               6
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc        34
-rw-r--r--  compiler/optimizing/stack_map_stream.cc           72
-rw-r--r--  compiler/optimizing/stack_map_stream.h            13
-rw-r--r--  compiler/optimizing/stack_map_test.cc             37
19 files changed, 329 insertions, 97 deletions
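At a glance, this change retires the per-method SrcMapElem source-mapping table in favor of a byte-encoded MethodInfo table holding deduplicated dex method indices, and adds a method_info_offset field to OatQuickMethodHeader (which grows from 20 to 24 bytes, as the updated oat_test.cc expectation below reflects). The following is only an illustrative sketch of the resulting header shape: the field names mirror the constructor arguments visible in this diff, and the real class may pack the frame fields differently.

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the post-change OatQuickMethodHeader; not the real
// class definition, just the constructor arguments seen in this diff.
struct IllustrativeQuickMethodHeader {
  uint32_t vmap_table_offset;   // distance from the code start back to the CodeInfo (stack maps)
  uint32_t method_info_offset;  // new: distance from the code start back to the MethodInfo table
  uint32_t frame_size_in_bytes;
  uint32_t core_spill_mask;
  uint32_t fp_spill_mask;
  uint32_t code_size;
};

static_assert(sizeof(IllustrativeQuickMethodHeader) == 24u,
              "matches the updated size checked in oat_test.cc");

int main() {
  std::printf("header size: %zu bytes\n", sizeof(IllustrativeQuickMethodHeader));
  return 0;
}
```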
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 9a45379a05..8b3029261f 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -55,11 +55,17 @@ void CommonCompilerTest::MakeExecutable(ArtMethod* method) {
// If the code size is 0 it means the method was skipped due to profile guided compilation.
if (compiled_method != nullptr && compiled_method->GetQuickCode().size() != 0u) {
ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
- uint32_t code_size = code.size();
+ const uint32_t code_size = code.size();
ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable();
- uint32_t vmap_table_offset = vmap_table.empty() ? 0u
+ const uint32_t vmap_table_offset = vmap_table.empty() ? 0u
: sizeof(OatQuickMethodHeader) + vmap_table.size();
+ // The method info is directly before the vmap table.
+ ArrayRef<const uint8_t> method_info = compiled_method->GetMethodInfo();
+ const uint32_t method_info_offset = method_info.empty() ? 0u
+ : vmap_table_offset + method_info.size();
+
OatQuickMethodHeader method_header(vmap_table_offset,
+ method_info_offset,
compiled_method->GetFrameSizeInBytes(),
compiled_method->GetCoreSpillMask(),
compiled_method->GetFpSpillMask(),
@@ -68,11 +74,12 @@ void CommonCompilerTest::MakeExecutable(ArtMethod* method) {
header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet());
- const size_t size = vmap_table.size() + sizeof(method_header) + code_size;
+ const size_t size = method_info.size() + vmap_table.size() + sizeof(method_header) + code_size;
chunk->reserve(size + max_padding);
chunk->resize(sizeof(method_header));
memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
+ chunk->insert(chunk->begin(), method_info.begin(), method_info.end());
chunk->insert(chunk->end(), code.begin(), code.end());
CHECK_EQ(chunk->size(), size);
const void* unaligned_code_ptr = chunk->data() + (size - code_size);
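For orientation, here is a small standalone sketch (not ART code) of the region the updated MakeExecutable builds: method info, then vmap table, then the method header, then the code, with both header offsets measured backwards from the start of the code exactly as above. The sizes are hypothetical and the 24-byte header is only a placeholder.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Sketch of the chunk layout assembled above: [method info][vmap table][header][code].
int main() {
  const std::vector<uint8_t> method_info(8, 0xAA);  // hypothetical sizes
  const std::vector<uint8_t> vmap_table(16, 0xBB);
  const std::vector<uint8_t> code(32, 0xCC);
  const size_t header_size = 24;  // sizeof(OatQuickMethodHeader) after this change

  const uint32_t vmap_table_offset =
      vmap_table.empty() ? 0u : static_cast<uint32_t>(header_size + vmap_table.size());
  // The method info sits directly before the vmap table, so its offset reaches
  // past the vmap table as well.
  const uint32_t method_info_offset =
      method_info.empty() ? 0u : static_cast<uint32_t>(vmap_table_offset + method_info.size());

  std::vector<uint8_t> chunk;
  chunk.insert(chunk.end(), method_info.begin(), method_info.end());
  chunk.insert(chunk.end(), vmap_table.begin(), vmap_table.end());
  chunk.insert(chunk.end(), header_size, static_cast<uint8_t>(0));  // placeholder header bytes
  chunk.insert(chunk.end(), code.begin(), code.end());

  const size_t code_start = chunk.size() - code.size();  // code begins right after the header
  std::printf("vmap table starts at %zu, method info starts at %zu\n",
              code_start - vmap_table_offset, code_start - method_info_offset);
  return 0;
}
```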
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index f06d90c81c..0d9021fcfb 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -105,15 +105,15 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver,
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const ArrayRef<const SrcMapElem>& src_mapping_table,
+ const ArrayRef<const uint8_t>& method_info,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const LinkerPatch>& patches)
: CompiledCode(driver, instruction_set, quick_code),
- frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask),
+ frame_size_in_bytes_(frame_size_in_bytes),
+ core_spill_mask_(core_spill_mask),
fp_spill_mask_(fp_spill_mask),
- src_mapping_table_(
- driver->GetCompiledMethodStorage()->DeduplicateSrcMappingTable(src_mapping_table)),
+ method_info_(driver->GetCompiledMethodStorage()->DeduplicateMethodInfo(method_info)),
vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)),
cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)),
patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) {
@@ -126,7 +126,7 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const ArrayRef<const SrcMapElem>& src_mapping_table,
+ const ArrayRef<const uint8_t>& method_info,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const LinkerPatch>& patches) {
@@ -139,7 +139,7 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
frame_size_in_bytes,
core_spill_mask,
fp_spill_mask,
- src_mapping_table,
+ method_info,
vmap_table,
cfi_info, patches);
return ret;
@@ -156,7 +156,7 @@ CompiledMethod::~CompiledMethod() {
storage->ReleaseLinkerPatches(patches_);
storage->ReleaseCFIInfo(cfi_info_);
storage->ReleaseVMapTable(vmap_table_);
- storage->ReleaseSrcMappingTable(src_mapping_table_);
+ storage->ReleaseMethodInfo(method_info_);
}
} // namespace art
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 00e2d62bff..23422570f0 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -420,7 +420,7 @@ class CompiledMethod FINAL : public CompiledCode {
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const ArrayRef<const SrcMapElem>& src_mapping_table,
+ const ArrayRef<const uint8_t>& method_info,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const LinkerPatch>& patches);
@@ -434,7 +434,7 @@ class CompiledMethod FINAL : public CompiledCode {
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const ArrayRef<const SrcMapElem>& src_mapping_table,
+ const ArrayRef<const uint8_t>& method_info,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const LinkerPatch>& patches);
@@ -453,8 +453,8 @@ class CompiledMethod FINAL : public CompiledCode {
return fp_spill_mask_;
}
- ArrayRef<const SrcMapElem> GetSrcMappingTable() const {
- return GetArray(src_mapping_table_);
+ ArrayRef<const uint8_t> GetMethodInfo() const {
+ return GetArray(method_info_);
}
ArrayRef<const uint8_t> GetVmapTable() const {
@@ -476,9 +476,9 @@ class CompiledMethod FINAL : public CompiledCode {
const uint32_t core_spill_mask_;
// For quick code, a bit mask describing spilled FPR callee-save registers.
const uint32_t fp_spill_mask_;
- // For quick code, a set of pairs (PC, DEX) mapping from native PC offset to DEX offset.
- const LengthPrefixedArray<SrcMapElem>* const src_mapping_table_;
- // For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed.
+ // For quick code, method-specific information that is not very dedupe-friendly (method indices).
+ const LengthPrefixedArray<uint8_t>* const method_info_;
+ // For quick code, holds code infos which contain stack maps, inline information, etc.
const LengthPrefixedArray<uint8_t>* const vmap_table_;
// For quick code, a FDE entry for the debug_frame section.
const LengthPrefixedArray<uint8_t>* const cfi_info_;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 76aeaa55d7..808e28c9ea 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -370,7 +370,7 @@ CompiledMethod* ArtCompileDEX(
0,
0,
0,
- ArrayRef<const SrcMapElem>(), // src_mapping_table
+ ArrayRef<const uint8_t>(), // method_info
ArrayRef<const uint8_t>(builder.GetData()), // vmap_table
ArrayRef<const uint8_t>(), // cfi data
ArrayRef<const LinkerPatch>());
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index a0a8f81c1f..e6a47ba60f 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -172,8 +172,8 @@ CompiledMethodStorage::CompiledMethodStorage(int swap_fd)
: swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)),
dedupe_enabled_(true),
dedupe_code_("dedupe code", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
- dedupe_src_mapping_table_("dedupe source mapping table",
- LengthPrefixedArrayAlloc<SrcMapElem>(swap_space_.get())),
+ dedupe_method_info_("dedupe method info",
+ LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
dedupe_vmap_table_("dedupe vmap table",
LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
@@ -207,13 +207,13 @@ void CompiledMethodStorage::ReleaseCode(const LengthPrefixedArray<uint8_t>* code
ReleaseArrayIfNotDeduplicated(code);
}
-const LengthPrefixedArray<SrcMapElem>* CompiledMethodStorage::DeduplicateSrcMappingTable(
- const ArrayRef<const SrcMapElem>& src_map) {
- return AllocateOrDeduplicateArray(src_map, &dedupe_src_mapping_table_);
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateMethodInfo(
+ const ArrayRef<const uint8_t>& src_map) {
+ return AllocateOrDeduplicateArray(src_map, &dedupe_method_info_);
}
-void CompiledMethodStorage::ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map) {
- ReleaseArrayIfNotDeduplicated(src_map);
+void CompiledMethodStorage::ReleaseMethodInfo(const LengthPrefixedArray<uint8_t>* method_info) {
+ ReleaseArrayIfNotDeduplicated(method_info);
}
const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateVMapTable(
diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h
index 124b5a6e25..b833702352 100644
--- a/compiler/driver/compiled_method_storage.h
+++ b/compiler/driver/compiled_method_storage.h
@@ -52,9 +52,9 @@ class CompiledMethodStorage {
const LengthPrefixedArray<uint8_t>* DeduplicateCode(const ArrayRef<const uint8_t>& code);
void ReleaseCode(const LengthPrefixedArray<uint8_t>* code);
- const LengthPrefixedArray<SrcMapElem>* DeduplicateSrcMappingTable(
- const ArrayRef<const SrcMapElem>& src_map);
- void ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map);
+ const LengthPrefixedArray<uint8_t>* DeduplicateMethodInfo(
+ const ArrayRef<const uint8_t>& method_info);
+ void ReleaseMethodInfo(const LengthPrefixedArray<uint8_t>* method_info);
const LengthPrefixedArray<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& table);
void ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table);
@@ -96,7 +96,7 @@ class CompiledMethodStorage {
bool dedupe_enabled_;
ArrayDedupeSet<uint8_t> dedupe_code_;
- ArrayDedupeSet<SrcMapElem> dedupe_src_mapping_table_;
+ ArrayDedupeSet<uint8_t> dedupe_method_info_;
ArrayDedupeSet<uint8_t> dedupe_vmap_table_;
ArrayDedupeSet<uint8_t> dedupe_cfi_info_;
ArrayDedupeSet<LinkerPatch> dedupe_linker_patches_;
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index b72d0acb8e..6572d170e6 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -51,11 +51,11 @@ TEST(CompiledMethodStorage, Deduplicate) {
ArrayRef<const uint8_t>(raw_code1),
ArrayRef<const uint8_t>(raw_code2),
};
- const SrcMapElem raw_src_map1[] = { { 1u, 2u }, { 3u, 4u }, { 5u, 6u } };
- const SrcMapElem raw_src_map2[] = { { 8u, 7u }, { 6u, 5u }, { 4u, 3u }, { 2u, 1u } };
- ArrayRef<const SrcMapElem> src_map[] = {
- ArrayRef<const SrcMapElem>(raw_src_map1),
- ArrayRef<const SrcMapElem>(raw_src_map2),
+ const uint8_t raw_method_info_map1[] = { 1u, 2u, 3u, 4u, 5u, 6u };
+ const uint8_t raw_method_info_map2[] = { 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u };
+ ArrayRef<const uint8_t> method_info[] = {
+ ArrayRef<const uint8_t>(raw_method_info_map1),
+ ArrayRef<const uint8_t>(raw_method_info_map2),
};
const uint8_t raw_vmap_table1[] = { 2, 4, 6 };
const uint8_t raw_vmap_table2[] = { 7, 5, 3, 1 };
@@ -85,7 +85,7 @@ TEST(CompiledMethodStorage, Deduplicate) {
std::vector<CompiledMethod*> compiled_methods;
compiled_methods.reserve(1u << 7);
for (auto&& c : code) {
- for (auto&& s : src_map) {
+ for (auto&& s : method_info) {
for (auto&& v : vmap_table) {
for (auto&& f : cfi_info) {
for (auto&& p : patches) {
@@ -113,7 +113,7 @@ TEST(CompiledMethodStorage, Deduplicate) {
bool same_patches = ((i ^ j) & patches_bit) == 0u;
ASSERT_EQ(same_code, lhs->GetQuickCode().data() == rhs->GetQuickCode().data())
<< i << " " << j;
- ASSERT_EQ(same_src_map, lhs->GetSrcMappingTable().data() == rhs->GetSrcMappingTable().data())
+ ASSERT_EQ(same_src_map, lhs->GetMethodInfo().data() == rhs->GetMethodInfo().data())
<< i << " " << j;
ASSERT_EQ(same_vmap_table, lhs->GetVmapTable().data() == rhs->GetVmapTable().data())
<< i << " " << j;
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index eac46e5909..c975944a04 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -74,8 +74,8 @@ class ExceptionTest : public CommonRuntimeTest {
fake_header_code_and_maps_.resize(stack_maps_offset + fake_code_.size());
MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size);
- stack_maps.FillIn(stack_maps_region);
- OatQuickMethodHeader method_header(stack_maps_offset, 4 * sizeof(void*), 0u, 0u, code_size);
+ stack_maps.FillInCodeInfo(stack_maps_region);
+ OatQuickMethodHeader method_header(stack_maps_offset, 0u, 4 * sizeof(void*), 0u, 0u, code_size);
memcpy(&fake_header_code_and_maps_[stack_maps_size], &method_header, sizeof(method_header));
std::copy(fake_code_.begin(),
fake_code_.end(),
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 3bd290da17..68ec7bd860 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -660,8 +660,8 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
frame_size,
main_jni_conv->CoreSpillMask(),
main_jni_conv->FpSpillMask(),
- ArrayRef<const SrcMapElem>(),
- ArrayRef<const uint8_t>(), // vmap_table.
+ /* method_info */ ArrayRef<const uint8_t>(),
+ /* vmap_table */ ArrayRef<const uint8_t>(),
ArrayRef<const uint8_t>(*jni_asm->cfi().data()),
ArrayRef<const LinkerPatch>());
}
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 233daf4a39..908cb412bf 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -87,7 +87,7 @@ class RelativePatcherTest : public testing::Test {
/* frame_size_in_bytes */ 0u,
/* core_spill_mask */ 0u,
/* fp_spill_mask */ 0u,
- /* src_mapping_table */ ArrayRef<const SrcMapElem>(),
+ /* method_info */ ArrayRef<const uint8_t>(),
/* vmap_table */ ArrayRef<const uint8_t>(),
/* cfi_info */ ArrayRef<const uint8_t>(),
patches));
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 97b13746fc..ead41240c2 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -485,7 +485,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
// it is time to update OatHeader::kOatVersion
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
- EXPECT_EQ(20U, sizeof(OatQuickMethodHeader));
+ EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
EXPECT_EQ(161 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
sizeof(QuickEntryPoints));
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index afcdf5ea17..5406ae72d1 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -326,6 +326,7 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCo
size_relative_call_thunks_(0),
size_misc_thunks_(0),
size_vmap_table_(0),
+ size_method_info_(0),
size_oat_dex_file_location_size_(0),
size_oat_dex_file_location_data_(0),
size_oat_dex_file_location_checksum_(0),
@@ -809,6 +810,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
uint32_t vmap_table_offset = method_header->GetVmapTableOffset();
+ uint32_t method_info_offset = method_header->GetMethodInfoOffset();
// The code offset was 0 when the mapping/vmap table offset was set, so it's set
// to 0-offset and we need to adjust it by code_offset.
uint32_t code_offset = quick_code_offset - thumb_offset;
@@ -819,13 +821,18 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
vmap_table_offset += code_offset;
DCHECK_LT(vmap_table_offset, code_offset);
}
+ if (method_info_offset != 0u) {
+ method_info_offset += code_offset;
+ DCHECK_LT(method_info_offset, code_offset);
+ }
} else {
+ CHECK(compiled_method->GetMethodInfo().empty());
if (kIsVdexEnabled) {
// We write the offset in the .vdex file.
DCHECK_EQ(vmap_table_offset, 0u);
vmap_table_offset = current_quickening_info_offset_;
- ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
- current_quickening_info_offset_ += map.size() * sizeof(map.front());
+ ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable();
+ current_quickening_info_offset_ += vmap_table.size() * sizeof(vmap_table.front());
} else {
// We write the offset of the quickening info relative to the code.
vmap_table_offset += code_offset;
@@ -836,6 +843,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
uint32_t fp_spill_mask = compiled_method->GetFpSpillMask();
*method_header = OatQuickMethodHeader(vmap_table_offset,
+ method_info_offset,
frame_size_in_bytes,
core_spill_mask,
fp_spill_mask,
@@ -909,6 +917,9 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
if (UNLIKELY(lhs->GetVmapTable().data() != rhs->GetVmapTable().data())) {
return lhs->GetVmapTable().data() < rhs->GetVmapTable().data();
}
+ if (UNLIKELY(lhs->GetMethodInfo().data() != rhs->GetMethodInfo().data())) {
+ return lhs->GetMethodInfo().data() < rhs->GetMethodInfo().data();
+ }
if (UNLIKELY(lhs->GetPatches().data() != rhs->GetPatches().data())) {
return lhs->GetPatches().data() < rhs->GetPatches().data();
}
@@ -983,6 +994,44 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
SafeMap<const uint8_t*, uint32_t> dedupe_map_;
};
+class OatWriter::InitMethodInfoVisitor : public OatDexMethodVisitor {
+ public:
+ InitMethodInfoVisitor(OatWriter* writer, size_t offset) : OatDexMethodVisitor(writer, offset) {}
+
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
+ CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+
+ if (compiled_method != nullptr) {
+ DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
+ DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].GetMethodInfoOffset(), 0u);
+ ArrayRef<const uint8_t> map = compiled_method->GetMethodInfo();
+ const uint32_t map_size = map.size() * sizeof(map[0]);
+ if (map_size != 0u) {
+ size_t offset = dedupe_map_.GetOrCreate(
+ map.data(),
+ [this, map_size]() {
+ uint32_t new_offset = offset_;
+ offset_ += map_size;
+ return new_offset;
+ });
+ // Code offset is not initialized yet, so set the map offset to 0u-offset.
+ DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
+ oat_class->method_headers_[method_offsets_index_].SetMethodInfoOffset(0u - offset);
+ }
+ ++method_offsets_index_;
+ }
+
+ return true;
+ }
+
+ private:
+ // Deduplication is already done on a pointer basis by the compiler driver,
+ // so we can simply compare the pointers to find out if things are duplicated.
+ SafeMap<const uint8_t*, uint32_t> dedupe_map_;
+};
+
class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
public:
InitImageMethodVisitor(OatWriter* writer, size_t offset)
@@ -1434,7 +1483,7 @@ class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- if (compiled_method != nullptr) { // ie. not an abstract method
+ if (compiled_method != nullptr) { // i.e. not an abstract method
size_t file_offset = file_offset_;
OutputStream* out = out_;
@@ -1483,6 +1532,63 @@ class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor {
}
};
+class OatWriter::WriteMethodInfoVisitor : public OatDexMethodVisitor {
+ public:
+ WriteMethodInfoVisitor(OatWriter* writer,
+ OutputStream* out,
+ const size_t file_offset,
+ size_t relative_offset)
+ : OatDexMethodVisitor(writer, relative_offset),
+ out_(out),
+ file_offset_(file_offset) {}
+
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
+ OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
+ const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+
+ if (compiled_method != nullptr) { // i.e. not an abstract method
+ size_t file_offset = file_offset_;
+ OutputStream* out = out_;
+ uint32_t map_offset = oat_class->method_headers_[method_offsets_index_].GetMethodInfoOffset();
+ uint32_t code_offset = oat_class->method_offsets_[method_offsets_index_].code_offset_;
+ ++method_offsets_index_;
+ DCHECK((compiled_method->GetMethodInfo().size() == 0u && map_offset == 0u) ||
+ (compiled_method->GetMethodInfo().size() != 0u && map_offset != 0u))
+ << compiled_method->GetMethodInfo().size() << " " << map_offset << " "
+ << dex_file_->PrettyMethod(it.GetMemberIndex());
+ if (map_offset != 0u) {
+ // Transform map_offset to actual oat data offset.
+ map_offset = (code_offset - compiled_method->CodeDelta()) - map_offset;
+ DCHECK_NE(map_offset, 0u);
+ DCHECK_LE(map_offset, offset_) << dex_file_->PrettyMethod(it.GetMemberIndex());
+
+ ArrayRef<const uint8_t> map = compiled_method->GetMethodInfo();
+ size_t map_size = map.size() * sizeof(map[0]);
+ if (map_offset == offset_) {
+ // Write the deduplicated method info table.
+ if (UNLIKELY(!out->WriteFully(map.data(), map_size))) {
+ ReportWriteFailure(it);
+ return false;
+ }
+ offset_ += map_size;
+ }
+ }
+ DCHECK_OFFSET_();
+ }
+
+ return true;
+ }
+
+ private:
+ OutputStream* const out_;
+ size_t const file_offset_;
+
+ void ReportWriteFailure(const ClassDataItemIterator& it) {
+ PLOG(ERROR) << "Failed to write map for "
+ << dex_file_->PrettyMethod(it.GetMemberIndex()) << " to " << out_->GetLocation();
+ }
+};
+
// Visit all methods from all classes in all dex files with the specified visitor.
bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
for (const DexFile* dex_file : *dex_files_) {
@@ -1576,11 +1682,18 @@ size_t OatWriter::InitOatMaps(size_t offset) {
if (!compiler_driver_->GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
return offset;
}
- InitMapMethodVisitor visitor(this, offset);
- bool success = VisitDexMethods(&visitor);
- DCHECK(success);
- offset = visitor.GetOffset();
-
+ {
+ InitMapMethodVisitor visitor(this, offset);
+ bool success = VisitDexMethods(&visitor);
+ DCHECK(success);
+ offset = visitor.GetOffset();
+ }
+ {
+ InitMethodInfoVisitor visitor(this, offset);
+ bool success = VisitDexMethods(&visitor);
+ DCHECK(success);
+ offset = visitor.GetOffset();
+ }
return offset;
}
@@ -1920,6 +2033,7 @@ bool OatWriter::WriteCode(OutputStream* out) {
DO_STAT(size_relative_call_thunks_);
DO_STAT(size_misc_thunks_);
DO_STAT(size_vmap_table_);
+ DO_STAT(size_method_info_);
DO_STAT(size_oat_dex_file_location_size_);
DO_STAT(size_oat_dex_file_location_data_);
DO_STAT(size_oat_dex_file_location_checksum_);
@@ -2035,13 +2149,24 @@ bool OatWriter::WriteClasses(OutputStream* out) {
}
size_t OatWriter::WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset) {
- size_t vmap_tables_offset = relative_offset;
- WriteMapMethodVisitor visitor(this, out, file_offset, relative_offset);
- if (UNLIKELY(!VisitDexMethods(&visitor))) {
- return 0;
+ {
+ size_t vmap_tables_offset = relative_offset;
+ WriteMapMethodVisitor visitor(this, out, file_offset, relative_offset);
+ if (UNLIKELY(!VisitDexMethods(&visitor))) {
+ return 0;
+ }
+ relative_offset = visitor.GetOffset();
+ size_vmap_table_ = relative_offset - vmap_tables_offset;
+ }
+ {
+ size_t method_infos_offset = relative_offset;
+ WriteMethodInfoVisitor visitor(this, out, file_offset, relative_offset);
+ if (UNLIKELY(!VisitDexMethods(&visitor))) {
+ return 0;
+ }
+ relative_offset = visitor.GetOffset();
+ size_method_info_ = relative_offset - method_infos_offset;
}
- relative_offset = visitor.GetOffset();
- size_vmap_table_ = relative_offset - vmap_tables_offset;
return relative_offset;
}
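A standalone illustration (not ART code) of the offset bookkeeping these visitors share may help: while a method's code offset is still unknown, InitMethodInfoVisitor stores 0u - table_offset in the header; InitCodeMethodVisitor later adds the code offset, leaving code_offset - table_offset, i.e. the distance from the code start back to the table; WriteMethodInfoVisitor then inverts that to recover where the table lives. The concrete offsets below are hypothetical, and the real code additionally compensates for the Thumb code delta.

```cpp
#include <cstdint>
#include <cstdio>

// Standalone illustration of the negate-then-relocate offset trick used by the
// oat writer's Init*/Write* map visitors.
int main() {
  const uint32_t table_offset = 0x120u;   // hypothetical offset of this method's info table
  const uint32_t code_offset  = 0x4000u;  // hypothetical offset of the compiled code

  // Init pass: the code offset is not known yet, so store the negated table offset.
  uint32_t header_value = 0u - table_offset;

  // Later init pass: relocate by the now-known code offset.
  header_value += code_offset;            // == code_offset - table_offset

  // Write pass: recover where the table lives relative to the oat data.
  const uint32_t recovered = code_offset - header_value;

  std::printf("header stores %#x, recovered table offset %#x\n", header_value, recovered);
  return recovered == table_offset ? 0 : 1;
}
```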
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 511371480a..e778f75551 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -254,9 +254,11 @@ class OatWriter {
class InitOatClassesMethodVisitor;
class InitCodeMethodVisitor;
class InitMapMethodVisitor;
+ class InitMethodInfoVisitor;
class InitImageMethodVisitor;
class WriteCodeMethodVisitor;
class WriteMapMethodVisitor;
+ class WriteMethodInfoVisitor;
class WriteQuickeningInfoMethodVisitor;
// Visit all the methods in all the compiled dex files in their definition order
@@ -425,6 +427,7 @@ class OatWriter {
uint32_t size_relative_call_thunks_;
uint32_t size_misc_thunks_;
uint32_t size_vmap_table_;
+ uint32_t size_method_info_;
uint32_t size_oat_dex_file_location_size_;
uint32_t size_oat_dex_file_location_data_;
uint32_t size_oat_dex_file_location_checksum_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 424b8507fb..b7c80756b0 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -654,8 +654,12 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
}
}
-size_t CodeGenerator::ComputeStackMapsSize() {
- return stack_map_stream_.PrepareForFillIn();
+void CodeGenerator::ComputeStackMapAndMethodInfoSize(size_t* stack_map_size,
+ size_t* method_info_size) {
+ DCHECK(stack_map_size != nullptr);
+ DCHECK(method_info_size != nullptr);
+ *stack_map_size = stack_map_stream_.PrepareForFillIn();
+ *method_info_size = stack_map_stream_.ComputeMethodInfoSize();
}
static void CheckCovers(uint32_t dex_pc,
@@ -723,10 +727,13 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
}
}
-void CodeGenerator::BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item) {
- stack_map_stream_.FillIn(region);
+void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
+ MemoryRegion method_info_region,
+ const DexFile::CodeItem& code_item) {
+ stack_map_stream_.FillInCodeInfo(stack_map_region);
+ stack_map_stream_.FillInMethodInfo(method_info_region);
if (kIsDebugBuild) {
- CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(region), code_item);
+ CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), code_item);
}
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b912672792..ea463eeb62 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -341,8 +341,10 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path));
}
- void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
- size_t ComputeStackMapsSize();
+ void BuildStackMaps(MemoryRegion stack_map_region,
+ MemoryRegion method_info_region,
+ const DexFile::CodeItem& code_item);
+ void ComputeStackMapAndMethodInfoSize(size_t* stack_map_size, size_t* method_info_size);
size_t GetNumberOfJitRoots() const {
return jit_string_roots_.size() + jit_class_roots_.size();
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d6153b091c..8b1b04b462 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1,3 +1,4 @@
+
/*
* Copyright (C) 2014 The Android Open Source Project
*
@@ -856,8 +857,15 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
const DexFile::CodeItem* code_item) const {
ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
- stack_map.resize(codegen->ComputeStackMapsSize());
- codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()), *code_item);
+ ArenaVector<uint8_t> method_info(arena->Adapter(kArenaAllocStackMaps));
+ size_t stack_map_size = 0;
+ size_t method_info_size = 0;
+ codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size);
+ stack_map.resize(stack_map_size);
+ method_info.resize(method_info_size);
+ codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()),
+ MemoryRegion(method_info.data(), method_info.size()),
+ *code_item);
CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
compiler_driver,
@@ -869,7 +877,7 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
codegen->GetCoreSpillMask(),
codegen->GetFpuSpillMask(),
- ArrayRef<const SrcMapElem>(),
+ ArrayRef<const uint8_t>(method_info),
ArrayRef<const uint8_t>(stack_map),
ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
ArrayRef<const LinkerPatch>(linker_patches));
@@ -1200,7 +1208,9 @@ bool OptimizingCompiler::JitCompile(Thread* self,
}
}
- size_t stack_map_size = codegen->ComputeStackMapsSize();
+ size_t stack_map_size = 0;
+ size_t method_info_size = 0;
+ codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size);
size_t number_of_roots = codegen->GetNumberOfJitRoots();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
@@ -1216,20 +1226,30 @@ bool OptimizingCompiler::JitCompile(Thread* self,
return false;
}
uint8_t* stack_map_data = nullptr;
+ uint8_t* method_info_data = nullptr;
uint8_t* roots_data = nullptr;
- uint32_t data_size = code_cache->ReserveData(
- self, stack_map_size, number_of_roots, method, &stack_map_data, &roots_data);
+ uint32_t data_size = code_cache->ReserveData(self,
+ stack_map_size,
+ method_info_size,
+ number_of_roots,
+ method,
+ &stack_map_data,
+ &method_info_data,
+ &roots_data);
if (stack_map_data == nullptr || roots_data == nullptr) {
return false;
}
MaybeRecordStat(MethodCompilationStat::kCompiled);
- codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
+ codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size),
+ MemoryRegion(method_info_data, method_info_size),
+ *code_item);
codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
const void* code = code_cache->CommitCode(
self,
method,
stack_map_data,
+ method_info_data,
roots_data,
codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
codegen->GetCoreSpillMask(),
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 4d12ad6eb6..b7840d73db 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -152,6 +152,9 @@ size_t StackMapStream::PrepareForFillIn() {
encoding.location_catalog.num_entries = location_catalog_entries_.size();
encoding.location_catalog.num_bytes = ComputeDexRegisterLocationCatalogSize();
encoding.inline_info.num_entries = inline_infos_.size();
+ // Must be done before calling ComputeInlineInfoEncoding since ComputeInlineInfoEncoding requires
+ // dex_method_index_idx to be filled in.
+ PrepareMethodIndices();
ComputeInlineInfoEncoding(&encoding.inline_info.encoding,
encoding.dex_register_map.num_bytes);
CodeOffset max_native_pc_offset = ComputeMaxNativePcCodeOffset();
@@ -245,7 +248,7 @@ void StackMapStream::ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
for (size_t j = 0; j < entry.inlining_depth; ++j) {
InlineInfoEntry inline_entry = inline_infos_[inline_info_index++];
if (inline_entry.method == nullptr) {
- method_index_max = std::max(method_index_max, inline_entry.method_index);
+ method_index_max = std::max(method_index_max, inline_entry.dex_method_index_idx);
extra_data_max = std::max(extra_data_max, 1u);
} else {
method_index_max = std::max(
@@ -288,7 +291,25 @@ size_t StackMapStream::MaybeCopyDexRegisterMap(DexRegisterMapEntry& entry,
return entry.offset;
}
-void StackMapStream::FillIn(MemoryRegion region) {
+void StackMapStream::FillInMethodInfo(MemoryRegion region) {
+ {
+ MethodInfo info(region.begin(), method_indices_.size());
+ for (size_t i = 0; i < method_indices_.size(); ++i) {
+ info.SetMethodIndex(i, method_indices_[i]);
+ }
+ }
+ if (kIsDebugBuild) {
+ // Check the data matches.
+ MethodInfo info(region.begin());
+ const size_t count = info.NumMethodIndices();
+ DCHECK_EQ(count, method_indices_.size());
+ for (size_t i = 0; i < count; ++i) {
+ DCHECK_EQ(info.GetMethodIndex(i), method_indices_[i]);
+ }
+ }
+}
+
+void StackMapStream::FillInCodeInfo(MemoryRegion region) {
DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before FillIn";
@@ -345,7 +366,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
InvokeInfo invoke_info(code_info.GetInvokeInfo(encoding, invoke_info_idx));
invoke_info.SetNativePcCodeOffset(encoding.invoke_info.encoding, entry.native_pc_code_offset);
invoke_info.SetInvokeType(encoding.invoke_info.encoding, entry.invoke_type);
- invoke_info.SetMethodIndex(encoding.invoke_info.encoding, entry.dex_method_index);
+ invoke_info.SetMethodIndexIdx(encoding.invoke_info.encoding, entry.dex_method_index_idx);
++invoke_info_idx;
}
@@ -364,7 +385,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
if (inline_entry.method != nullptr) {
- inline_info.SetMethodIndexAtDepth(
+ inline_info.SetMethodIndexIdxAtDepth(
encoding.inline_info.encoding,
depth,
High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
@@ -373,9 +394,9 @@ void StackMapStream::FillIn(MemoryRegion region) {
depth,
Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
} else {
- inline_info.SetMethodIndexAtDepth(encoding.inline_info.encoding,
- depth,
- inline_entry.method_index);
+ inline_info.SetMethodIndexIdxAtDepth(encoding.inline_info.encoding,
+ depth,
+ inline_entry.dex_method_index_idx);
inline_info.SetExtraDataAtDepth(encoding.inline_info.encoding, depth, 1);
}
inline_info.SetDexPcAtDepth(encoding.inline_info.encoding, depth, inline_entry.dex_pc);
@@ -533,6 +554,29 @@ size_t StackMapStream::PrepareRegisterMasks() {
return dedupe.size();
}
+void StackMapStream::PrepareMethodIndices() {
+ CHECK(method_indices_.empty());
+ method_indices_.resize(stack_maps_.size() + inline_infos_.size());
+ ArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
+ for (StackMapEntry& stack_map : stack_maps_) {
+ const size_t index = dedupe.size();
+ const uint32_t method_index = stack_map.dex_method_index;
+ if (method_index != DexFile::kDexNoIndex) {
+ stack_map.dex_method_index_idx = dedupe.emplace(method_index, index).first->second;
+ method_indices_[index] = method_index;
+ }
+ }
+ for (InlineInfoEntry& inline_info : inline_infos_) {
+ const size_t index = dedupe.size();
+ const uint32_t method_index = inline_info.method_index;
+ CHECK_NE(method_index, DexFile::kDexNoIndex);
+ inline_info.dex_method_index_idx = dedupe.emplace(method_index, index).first->second;
+ method_indices_[index] = method_index;
+ }
+ method_indices_.resize(dedupe.size());
+}
+
+
size_t StackMapStream::PrepareStackMasks(size_t entry_size_in_bits) {
// Preallocate memory since we do not want it to move (the dedup map will point into it).
const size_t byte_entry_size = RoundUp(entry_size_in_bits, kBitsPerByte) / kBitsPerByte;
@@ -590,7 +634,8 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
DCHECK_EQ(invoke_info.GetNativePcOffset(encoding.invoke_info.encoding, instruction_set_),
entry.native_pc_code_offset.Uint32Value(instruction_set_));
DCHECK_EQ(invoke_info.GetInvokeType(encoding.invoke_info.encoding), entry.invoke_type);
- DCHECK_EQ(invoke_info.GetMethodIndex(encoding.invoke_info.encoding), entry.dex_method_index);
+ DCHECK_EQ(invoke_info.GetMethodIndexIdx(encoding.invoke_info.encoding),
+ entry.dex_method_index_idx);
invoke_info_index++;
}
CheckDexRegisterMap(code_info,
@@ -615,8 +660,10 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info.encoding, d),
inline_entry.method);
} else {
- DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info.encoding, d),
- inline_entry.method_index);
+ const size_t method_index_idx =
+ inline_info.GetMethodIndexIdxAtDepth(encoding.inline_info.encoding, d);
+ DCHECK_EQ(method_index_idx, inline_entry.dex_method_index_idx);
+ DCHECK_EQ(method_indices_[method_index_idx], inline_entry.method_index);
}
CheckDexRegisterMap(code_info,
@@ -633,4 +680,9 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
}
}
+size_t StackMapStream::ComputeMethodInfoSize() const {
+ DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before " << __FUNCTION__;
+ return MethodInfo::ComputeSize(method_indices_.size());
+}
+
} // namespace art
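The deduplication in PrepareMethodIndices leans on a common unordered-map idiom: emplace() inserts only when the key is new and always returns an iterator to the surviving entry, so the mapped value can serve as a stable slot index into a side table. A self-contained illustration of that idiom with toy data (not ART code):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

// Toy illustration of the emplace-based deduplication in PrepareMethodIndices:
// every distinct method index gets one slot in the side table, and each entry
// records which slot it refers to.
int main() {
  const std::vector<uint32_t> entries = {7u, 3u, 7u, 11u, 3u};  // hypothetical method indices

  std::unordered_map<uint32_t, std::size_t> dedupe;
  std::vector<uint32_t> table(entries.size());
  std::vector<std::size_t> slot_of_entry;

  for (uint32_t method_index : entries) {
    const std::size_t candidate = dedupe.size();
    // emplace() leaves an existing key untouched; .first->second is the slot
    // assigned the first time this method index was seen.
    const std::size_t slot = dedupe.emplace(method_index, candidate).first->second;
    table[candidate] = method_index;  // harmless overwrite when the index was a duplicate
    slot_of_entry.push_back(slot);
  }
  table.resize(dedupe.size());  // trim the unused tail, as PrepareMethodIndices does

  for (std::size_t i = 0; i < slot_of_entry.size(); ++i) {
    std::printf("entry %zu -> slot %zu (method index %u)\n",
                i, slot_of_entry[i], table[slot_of_entry[i]]);
  }
  return 0;
}
```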
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 4225a875b9..e6471e1bc5 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -22,6 +22,7 @@
#include "base/hash_map.h"
#include "base/value_object.h"
#include "memory_region.h"
+#include "method_info.h"
#include "nodes.h"
#include "stack_map.h"
@@ -70,6 +71,7 @@ class StackMapStream : public ValueObject {
inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
register_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
+ method_indices_(allocator->Adapter(kArenaAllocStackMapStream)),
dex_register_entries_(allocator->Adapter(kArenaAllocStackMapStream)),
stack_mask_max_(-1),
dex_pc_max_(0),
@@ -120,6 +122,7 @@ class StackMapStream : public ValueObject {
size_t dex_register_map_index;
InvokeType invoke_type;
uint32_t dex_method_index;
+ uint32_t dex_method_index_idx; // Index into dex method index table.
};
struct InlineInfoEntry {
@@ -128,6 +131,7 @@ class StackMapStream : public ValueObject {
uint32_t method_index;
DexRegisterMapEntry dex_register_entry;
size_t dex_register_map_index;
+ uint32_t dex_method_index_idx; // Index into the dex method index table.
};
void BeginStackMapEntry(uint32_t dex_pc,
@@ -164,7 +168,10 @@ class StackMapStream : public ValueObject {
// Prepares the stream to fill in a memory region. Must be called before FillIn.
// Returns the size (in bytes) needed to store this stream.
size_t PrepareForFillIn();
- void FillIn(MemoryRegion region);
+ void FillInCodeInfo(MemoryRegion region);
+ void FillInMethodInfo(MemoryRegion region);
+
+ size_t ComputeMethodInfoSize() const;
private:
size_t ComputeDexRegisterLocationCatalogSize() const;
@@ -180,6 +187,9 @@ class StackMapStream : public ValueObject {
// Returns the number of unique register masks.
size_t PrepareRegisterMasks();
+ // Prepare and deduplicate method indices.
+ void PrepareMethodIndices();
+
// Deduplicate entry if possible and return the corresponding index into dex_register_entries_
// array. If entry is not a duplicate, a new entry is added to dex_register_entries_.
size_t AddDexRegisterMapEntry(const DexRegisterMapEntry& entry);
@@ -232,6 +242,7 @@ class StackMapStream : public ValueObject {
ArenaVector<InlineInfoEntry> inline_infos_;
ArenaVector<uint8_t> stack_masks_;
ArenaVector<uint32_t> register_masks_;
+ ArenaVector<uint32_t> method_indices_;
ArenaVector<DexRegisterMapEntry> dex_register_entries_;
int stack_mask_max_;
uint32_t dex_pc_max_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 330f7f28b6..a842c6e452 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -60,7 +60,7 @@ TEST(StackMapTest, Test1) {
size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
- stream.FillIn(region);
+ stream.FillInCodeInfo(region);
CodeInfo code_info(region);
CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -173,7 +173,7 @@ TEST(StackMapTest, Test2) {
size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
- stream.FillIn(region);
+ stream.FillInCodeInfo(region);
CodeInfo code_info(region);
CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -433,7 +433,7 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
- stream.FillIn(region);
+ stream.FillInCodeInfo(region);
CodeInfo code_info(region);
CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -519,7 +519,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
- stream.FillIn(region);
+ stream.FillInCodeInfo(region);
CodeInfo code_info(region);
CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -611,7 +611,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
- stream.FillIn(region);
+ stream.FillInCodeInfo(region);
CodeInfo code_info(region);
CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -672,7 +672,7 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
- stream.FillIn(region);
+ stream.FillInCodeInfo(region);
CodeInfo ci(region);
CodeInfoEncoding encoding = ci.ExtractEncoding();
@@ -721,7 +721,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
- stream.FillIn(region);
+ stream.FillInCodeInfo(region);
CodeInfo code_info(region);
CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -823,7 +823,7 @@ TEST(StackMapTest, InlineTest) {
size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
- stream.FillIn(region);
+ stream.FillInCodeInfo(region);
CodeInfo ci(region);
CodeInfoEncoding encoding = ci.ExtractEncoding();
@@ -950,7 +950,7 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
- stream.FillIn(region);
+ stream.FillInCodeInfo(region);
CodeInfo code_info(region);
CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -979,11 +979,16 @@ TEST(StackMapTest, TestInvokeInfo) {
stream.AddInvoke(kDirect, 65535);
stream.EndStackMapEntry();
- const size_t size = stream.PrepareForFillIn();
- MemoryRegion region(arena.Alloc(size, kArenaAllocMisc), size);
- stream.FillIn(region);
+ const size_t code_info_size = stream.PrepareForFillIn();
+ MemoryRegion code_info_region(arena.Alloc(code_info_size, kArenaAllocMisc), code_info_size);
+ stream.FillInCodeInfo(code_info_region);
- CodeInfo code_info(region);
+ const size_t method_info_size = stream.ComputeMethodInfoSize();
+ MemoryRegion method_info_region(arena.Alloc(method_info_size, kArenaAllocMisc), method_info_size);
+ stream.FillInMethodInfo(method_info_region);
+
+ CodeInfo code_info(code_info_region);
+ MethodInfo method_info(method_info_region.begin());
CodeInfoEncoding encoding = code_info.ExtractEncoding();
ASSERT_EQ(3u, code_info.GetNumberOfStackMaps(encoding));
@@ -996,13 +1001,13 @@ TEST(StackMapTest, TestInvokeInfo) {
EXPECT_TRUE(invoke2.IsValid());
EXPECT_TRUE(invoke3.IsValid());
EXPECT_EQ(invoke1.GetInvokeType(encoding.invoke_info.encoding), kSuper);
- EXPECT_EQ(invoke1.GetMethodIndex(encoding.invoke_info.encoding), 1u);
+ EXPECT_EQ(invoke1.GetMethodIndex(encoding.invoke_info.encoding, method_info), 1u);
EXPECT_EQ(invoke1.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 4u);
EXPECT_EQ(invoke2.GetInvokeType(encoding.invoke_info.encoding), kStatic);
- EXPECT_EQ(invoke2.GetMethodIndex(encoding.invoke_info.encoding), 3u);
+ EXPECT_EQ(invoke2.GetMethodIndex(encoding.invoke_info.encoding, method_info), 3u);
EXPECT_EQ(invoke2.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 8u);
EXPECT_EQ(invoke3.GetInvokeType(encoding.invoke_info.encoding), kDirect);
- EXPECT_EQ(invoke3.GetMethodIndex(encoding.invoke_info.encoding), 65535u);
+ EXPECT_EQ(invoke3.GetMethodIndex(encoding.invoke_info.encoding, method_info), 65535u);
EXPECT_EQ(invoke3.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 16u);
}
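Finally, a toy mirror (not ART code) of the two-region emission protocol the updated test exercises: the stream is sized and written once for the CodeInfo (PrepareForFillIn / FillInCodeInfo) and once for the method-index table (ComputeMethodInfoSize / FillInMethodInfo), and readers resolve method indices through the second region. The ToyStream class and its byte layout are purely illustrative.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Toy stand-in for the StackMapStream protocol above; the real stream writes
// into MemoryRegions and encodes far more than raw method indices.
class ToyStream {
 public:
  void AddMethodIndex(uint32_t method_index) { method_indices_.push_back(method_index); }

  std::size_t PrepareForFillIn() const { return 16u; }  // pretend the CodeInfo needs 16 bytes
  void FillInCodeInfo(std::vector<uint8_t>* region) const {
    region->assign(PrepareForFillIn(), static_cast<uint8_t>(0));
  }

  std::size_t ComputeMethodInfoSize() const { return method_indices_.size() * sizeof(uint32_t); }
  void FillInMethodInfo(std::vector<uint8_t>* region) const {
    region->resize(ComputeMethodInfoSize());
    std::memcpy(region->data(), method_indices_.data(), region->size());
  }

 private:
  std::vector<uint32_t> method_indices_;
};

int main() {
  ToyStream stream;
  stream.AddMethodIndex(1u);
  stream.AddMethodIndex(3u);
  stream.AddMethodIndex(65535u);

  std::vector<uint8_t> code_info_region;
  std::vector<uint8_t> method_info_region;
  stream.FillInCodeInfo(&code_info_region);      // analogous to FillInCodeInfo(code_info_region)
  stream.FillInMethodInfo(&method_info_region);  // analogous to FillInMethodInfo(method_info_region)

  std::printf("code info: %zu bytes, method info: %zu bytes\n",
              code_info_region.size(), method_info_region.size());
  return 0;
}
```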