Revert "Revert "Make dex2dex return a CompiledMethod after quickening.""
This reverts commit 327c5ed30a1f016ef3e1bb26ea7b4abd34eb63b9.
Change-Id: I0dc5d92e5d1ef98830fbd3c40ec59a93f9e0422d
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index d1acada..f33db09 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -27,8 +27,7 @@
}
void CompiledCode::SetCode(const ArrayRef<const uint8_t>* quick_code) {
- if (quick_code != nullptr) {
- CHECK(!quick_code->empty());
+ if (quick_code != nullptr && !quick_code->empty()) {
if (owns_code_array_) {
// If we are supposed to own the code, don't deduplicate it.
CHECK(quick_code_ == nullptr);
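
Aside (not part of the patch): a minimal, self-contained sketch of the relaxed guard above, using stand-in names (CodeHolderSketch is hypothetical, not ART's CompiledCode). The point of the change is that dex2dex-produced CompiledMethods carry no machine code, so SetCode must now accept a null or empty code reference instead of CHECK-failing on emptiness.

// sketch_set_code.cc - standalone illustration, assumed names only.
#include <cstdint>
#include <vector>

class CodeHolderSketch {
 public:
  // New behavior: null or empty input is silently ignored; only real code
  // is stored (the real method then deduplicates or takes ownership).
  void SetCode(const std::vector<uint8_t>* quick_code) {
    if (quick_code != nullptr && !quick_code->empty()) {
      code_ = *quick_code;
    }
  }
  bool HasCode() const { return !code_.empty(); }

 private:
  std::vector<uint8_t> code_;
};

int main() {
  CodeHolderSketch holder;
  std::vector<uint8_t> empty;
  holder.SetCode(nullptr);   // Already accepted before the change.
  holder.SetCode(&empty);    // Previously a CHECK failure; now a no-op.
  return holder.HasCode() ? 1 : 0;
}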
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index bd59046..4b56b69 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -18,6 +18,7 @@
#include "art_method-inl.h"
#include "base/logging.h"
#include "base/mutex.h"
+#include "compiled_method.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "driver/compiler_driver.h"
@@ -34,6 +35,13 @@
// Control check-cast elision.
const bool kEnableCheckCastEllision = true;
+struct QuickenedInfo {
+ QuickenedInfo(uint32_t pc, uint16_t index) : dex_pc(pc), dex_member_index(index) {}
+
+ uint32_t dex_pc;
+ uint16_t dex_member_index;
+};
+
class DexCompiler {
public:
DexCompiler(art::CompilerDriver& compiler,
@@ -47,6 +55,10 @@
void Compile();
+ const std::vector<QuickenedInfo>& GetQuickenedInfo() const {
+ return quickened_info_;
+ }
+
private:
const DexFile& GetDexFile() const {
return *unit_.GetDexFile();
@@ -87,6 +99,11 @@
const DexCompilationUnit& unit_;
const DexToDexCompilationLevel dex_to_dex_compilation_level_;
+ // Filled by the compiler when quickening, in order to encode that information
+ // in the .oat file. The runtime will use that information to get to the original
+ // opcodes.
+ std::vector<QuickenedInfo> quickened_info_;
+
DISALLOW_COPY_AND_ASSIGN(DexCompiler);
};
@@ -248,6 +265,7 @@
inst->SetOpcode(new_opcode);
// Replace field index by field offset.
inst->SetVRegC_22c(static_cast<uint16_t>(field_offset.Int32Value()));
+ quickened_info_.push_back(QuickenedInfo(dex_pc, field_idx));
}
}
@@ -287,24 +305,60 @@
} else {
inst->SetVRegB_35c(static_cast<uint16_t>(vtable_idx));
}
+ quickened_info_.push_back(QuickenedInfo(dex_pc, method_idx));
}
}
}
-} // namespace optimizer
-} // namespace art
-
-extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::CodeItem* code_item,
- uint32_t access_flags, art::InvokeType invoke_type,
- uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
- const art::DexFile& dex_file,
- art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
- UNUSED(invoke_type);
+extern "C" CompiledMethod* ArtCompileDEX(
+ art::CompilerDriver& driver,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type ATTRIBUTE_UNUSED,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file,
+ art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
art::DexCompilationUnit unit(nullptr, class_loader, art::Runtime::Current()->GetClassLinker(),
dex_file, code_item, class_def_idx, method_idx, access_flags,
driver.GetVerifiedMethod(&dex_file, method_idx));
art::optimizer::DexCompiler dex_compiler(driver, unit, dex_to_dex_compilation_level);
dex_compiler.Compile();
+ if (dex_compiler.GetQuickenedInfo().empty()) {
+ // No need to create a CompiledMethod if there are no quickened opcodes.
+ return nullptr;
+ }
+
+ // Create a `CompiledMethod`, with the quickened information in the vmap table.
+ Leb128EncodingVector builder;
+ for (QuickenedInfo info : dex_compiler.GetQuickenedInfo()) {
+ builder.PushBackUnsigned(info.dex_pc);
+ builder.PushBackUnsigned(info.dex_member_index);
+ }
+ InstructionSet instruction_set = driver.GetInstructionSet();
+ if (instruction_set == kThumb2) {
+ // Don't use the thumb2 instruction set to avoid its code delta of one (the Thumb bit).
+ instruction_set = kArm;
+ }
+ return CompiledMethod::SwapAllocCompiledMethod(
+ &driver,
+ instruction_set,
+ ArrayRef<const uint8_t>(), // no code
+ 0,
+ 0,
+ 0,
+ nullptr, // src_mapping_table
+ ArrayRef<const uint8_t>(), // mapping_table
+ ArrayRef<const uint8_t>(builder.GetData()), // vmap_table
+ ArrayRef<const uint8_t>(), // gc_map
+ ArrayRef<const uint8_t>(), // cfi data
+ ArrayRef<const LinkerPatch>());
}
+ return nullptr;
}
+
+} // namespace optimizer
+
+} // namespace art
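
For context (a sketch, not ART code): the quickened info pushed into the Leb128EncodingVector above is a flat stream of unsigned LEB128 values, alternating dex_pc and dex_member_index. The hypothetical decoder below (DecodeUleb128 is a generic hand-rolled reader, not ART's leb128 API) shows how such a vmap table could be walked back to the original pairs.

// sketch_quickened_decode.cc - standalone illustration, assumed names only.
#include <cstdint>
#include <cstdio>
#include <vector>

// Generic unsigned LEB128 decoder; advances the cursor past the value read.
static uint32_t DecodeUleb128(const uint8_t** data) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*data)++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  return result;
}

int main() {
  // dex_pc = 1000 (0xe8 0x07) followed by dex_member_index = 42 (0x2a).
  std::vector<uint8_t> vmap = {0xe8, 0x07, 0x2a};
  const uint8_t* p = vmap.data();
  const uint8_t* end = p + vmap.size();
  while (p < end) {
    uint32_t dex_pc = DecodeUleb128(&p);
    uint32_t dex_member_index = DecodeUleb128(&p);
    std::printf("dex_pc=%u index=%u\n", dex_pc, dex_member_index);
  }
  return 0;
}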
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7890108..a52bfae 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2291,10 +2291,16 @@
// TODO: add a command-line option to disable DEX-to-DEX compilation ?
// Do not optimize if a VerifiedMethod is missing. SafeCast elision, for example, relies on
// it.
- (*dex_to_dex_compiler_)(*this, code_item, access_flags,
- invoke_type, class_def_idx,
- method_idx, class_loader, dex_file,
- has_verified_method ? dex_to_dex_compilation_level : kRequired);
+ compiled_method = (*dex_to_dex_compiler_)(
+ *this,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ has_verified_method ? dex_to_dex_compilation_level : kRequired);
}
}
if (kTimeCompileMethod) {
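
A small, self-contained sketch (assumed names such as CompiledMethodSketch and CompileSketch, not ART code) of the calling convention this hunk adopts: the dex-to-dex entry point is still reached through a function pointer, but it now hands back a pointer that is null when nothing was quickened, and only non-null results carry a vmap table worth recording.

// sketch_dex2dex_result.cc - standalone illustration with stand-in types.
#include <cstdint>
#include <cstdio>
#include <memory>
#include <vector>

struct CompiledMethodSketch {
  std::vector<uint8_t> vmap_table;  // Quickening pairs live here; no code.
};

// Stand-in for the DexToDexCompilerFn typedef: same shape, sketch types.
using DexToDexCompilerFnSketch =
    std::unique_ptr<CompiledMethodSketch> (*)(bool any_quickening);

static std::unique_ptr<CompiledMethodSketch> CompileSketch(bool any_quickening) {
  if (!any_quickening) {
    return nullptr;  // Mirrors "return nullptr" when quickened_info_ is empty.
  }
  return std::unique_ptr<CompiledMethodSketch>(
      new CompiledMethodSketch{{0xe8, 0x07, 0x2a}});
}

int main() {
  DexToDexCompilerFnSketch dex_to_dex_compiler = &CompileSketch;
  std::unique_ptr<CompiledMethodSketch> compiled_method = dex_to_dex_compiler(true);
  if (compiled_method != nullptr) {
    // Only methods that were actually quickened produce something to store.
    std::printf("vmap bytes: %zu\n", compiled_method->vmap_table.size());
  }
  return 0;
}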
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 2d7ceae..5cf4044 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -675,12 +675,13 @@
typedef void (*CompilerCallbackFn)(CompilerDriver& driver);
typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver);
- typedef void (*DexToDexCompilerFn)(CompilerDriver& driver,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags, InvokeType invoke_type,
- uint32_t class_dex_idx, uint32_t method_idx,
- jobject class_loader, const DexFile& dex_file,
- DexToDexCompilationLevel dex_to_dex_compilation_level);
+ typedef CompiledMethod* (*DexToDexCompilerFn)(
+ CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags, InvokeType invoke_type,
+ uint32_t class_dex_idx, uint32_t method_idx,
+ jobject class_loader, const DexFile& dex_file,
+ DexToDexCompilationLevel dex_to_dex_compilation_level);
DexToDexCompilerFn dex_to_dex_compiler_;
void* compiler_context_;
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index c68bbc0..c10ffeb 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -249,16 +249,16 @@
// Find all addresses (low_pc) which contain deduped methods.
// The first instance of method is not marked deduped_, but the rest is.
std::unordered_set<uint32_t> deduped_addresses;
- for (auto it = method_infos.begin(); it != method_infos.end(); ++it) {
- if (it->deduped_) {
- deduped_addresses.insert(it->low_pc_);
+ for (const OatWriter::DebugInfo& mi : method_infos) {
+ if (mi.deduped_) {
+ deduped_addresses.insert(mi.low_pc_);
}
}
// Group the methods into compilation units based on source file.
std::vector<std::vector<const OatWriter::DebugInfo*>> compilation_units;
const char* last_source_file = nullptr;
- for (const auto& mi : method_infos) {
+ for (const OatWriter::DebugInfo& mi : method_infos) {
// Attribute given instruction range only to single method.
// Otherwise the debugger might get really confused.
if (!mi.deduped_) {
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 9d45ce2..62d8a69 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -199,7 +199,7 @@
const uint8_t* GetOatAddress(uint32_t offset) const {
// With Quick, code is within the OatFile, as it is all in one
// .o ELF object.
- DCHECK_LT(offset, oat_file_->Size());
+ DCHECK_LE(offset, oat_file_->Size());
DCHECK(oat_data_begin_ != nullptr);
return offset == 0u ? nullptr : oat_data_begin_ + offset;
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index a98a304..a2551e5 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -374,23 +374,23 @@
uint32_t quick_code_offset = 0;
const SwapVector<uint8_t>* quick_code = compiled_method->GetQuickCode();
- CHECK(quick_code != nullptr);
- uint32_t code_size = quick_code->size() * sizeof(uint8_t);
- CHECK_NE(code_size, 0U);
+ uint32_t code_size = (quick_code == nullptr) ? 0 : quick_code->size() * sizeof(uint8_t);
uint32_t thumb_offset = compiled_method->CodeDelta();
// Deduplicate code arrays if we are not producing debuggable code.
bool deduped = false;
- if (debuggable_) {
- quick_code_offset = NewQuickCodeOffset(compiled_method, it, thumb_offset);
- } else {
- auto lb = dedupe_map_.lower_bound(compiled_method);
- if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(compiled_method, lb->first)) {
- quick_code_offset = lb->second;
- deduped = true;
- } else {
+ if (code_size != 0) {
+ if (debuggable_) {
quick_code_offset = NewQuickCodeOffset(compiled_method, it, thumb_offset);
- dedupe_map_.PutBefore(lb, compiled_method, quick_code_offset);
+ } else {
+ auto lb = dedupe_map_.lower_bound(compiled_method);
+ if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(compiled_method, lb->first)) {
+ quick_code_offset = lb->second;
+ deduped = true;
+ } else {
+ quick_code_offset = NewQuickCodeOffset(compiled_method, it, thumb_offset);
+ dedupe_map_.PutBefore(lb, compiled_method, quick_code_offset);
+ }
}
}
@@ -411,21 +411,24 @@
OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
uint32_t mapping_table_offset = method_header->mapping_table_offset_;
uint32_t vmap_table_offset = method_header->vmap_table_offset_;
+ // If we don't have quick code, then we must have a vmap, as that is how the dex2dex
+ // compiler records its transformations.
+ DCHECK(quick_code != nullptr || vmap_table_offset != 0);
uint32_t gc_map_offset = method_header->gc_map_offset_;
// The code offset was 0 when the mapping/vmap table offset was set, so it's set
// to 0-offset and we need to adjust it by code_offset.
uint32_t code_offset = quick_code_offset - thumb_offset;
- if (mapping_table_offset != 0u) {
+ if (mapping_table_offset != 0u && code_offset != 0u) {
mapping_table_offset += code_offset;
- DCHECK_LT(mapping_table_offset, code_offset);
+ DCHECK_LT(mapping_table_offset, code_offset) << "Overflow in oat offsets";
}
- if (vmap_table_offset != 0u) {
+ if (vmap_table_offset != 0u && code_offset != 0u) {
vmap_table_offset += code_offset;
- DCHECK_LT(vmap_table_offset, code_offset);
+ DCHECK_LT(vmap_table_offset, code_offset) << "Overflow in oat offsets";
}
- if (gc_map_offset != 0u) {
+ if (gc_map_offset != 0u && code_offset != 0u) {
gc_map_offset += code_offset;
- DCHECK_LT(gc_map_offset, code_offset);
+ DCHECK_LT(gc_map_offset, code_offset) << "Overflow in oat offsets";
}
uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
@@ -434,7 +437,7 @@
gc_map_offset, frame_size_in_bytes, core_spill_mask,
fp_spill_mask, code_size);
- if (!deduped) {
+ if (!deduped && (code_size != 0)) {
// Update offsets. (Checksum is updated when writing.)
offset_ += sizeof(*method_header); // Method header is prepended before code.
offset_ += code_size;
@@ -689,85 +692,86 @@
OutputStream* out = out_;
const SwapVector<uint8_t>* quick_code = compiled_method->GetQuickCode();
+ // Need a wrapper if we create a copy for patching.
+ ArrayRef<const uint8_t> wrapped;
+ uint32_t code_size = 0;
if (quick_code != nullptr) {
- // Need a wrapper if we create a copy for patching.
- ArrayRef<const uint8_t> wrapped(*quick_code);
- uint32_t code_size = quick_code->size() * sizeof(uint8_t);
- CHECK_NE(code_size, 0U);
-
- // Deduplicate code arrays.
- const OatMethodOffsets& method_offsets = oat_class->method_offsets_[method_offsets_index_];
- if (method_offsets.code_offset_ >= offset_) {
- offset_ = writer_->relative_patcher_->WriteThunks(out, offset_);
- if (offset_ == 0u) {
- ReportWriteFailure("relative call thunk", it);
- return false;
- }
- uint32_t aligned_offset = compiled_method->AlignCode(offset_);
- uint32_t aligned_code_delta = aligned_offset - offset_;
- if (aligned_code_delta != 0) {
- if (!writer_->WriteCodeAlignment(out, aligned_code_delta)) {
- ReportWriteFailure("code alignment padding", it);
- return false;
- }
- offset_ += aligned_code_delta;
- DCHECK_OFFSET_();
- }
- DCHECK_ALIGNED_PARAM(offset_,
- GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
- DCHECK_EQ(method_offsets.code_offset_,
- offset_ + sizeof(OatQuickMethodHeader) + compiled_method->CodeDelta())
- << PrettyMethod(it.GetMemberIndex(), *dex_file_);
- const OatQuickMethodHeader& method_header =
- oat_class->method_headers_[method_offsets_index_];
- writer_->oat_header_->UpdateChecksum(&method_header, sizeof(method_header));
- if (!out->WriteFully(&method_header, sizeof(method_header))) {
- ReportWriteFailure("method header", it);
- return false;
- }
- writer_->size_method_header_ += sizeof(method_header);
- offset_ += sizeof(method_header);
- DCHECK_OFFSET_();
-
- if (!compiled_method->GetPatches().empty()) {
- patched_code_.assign(quick_code->begin(), quick_code->end());
- wrapped = ArrayRef<const uint8_t>(patched_code_);
- for (const LinkerPatch& patch : compiled_method->GetPatches()) {
- if (patch.Type() == kLinkerPatchCallRelative) {
- // NOTE: Relative calls across oat files are not supported.
- uint32_t target_offset = GetTargetOffset(patch);
- uint32_t literal_offset = patch.LiteralOffset();
- writer_->relative_patcher_->PatchCall(&patched_code_, literal_offset,
- offset_ + literal_offset, target_offset);
- } else if (patch.Type() == kLinkerPatchDexCacheArray) {
- uint32_t target_offset = GetDexCacheOffset(patch);
- uint32_t literal_offset = patch.LiteralOffset();
- writer_->relative_patcher_->PatchDexCacheReference(&patched_code_, patch,
- offset_ + literal_offset,
- target_offset);
- } else if (patch.Type() == kLinkerPatchCall) {
- uint32_t target_offset = GetTargetOffset(patch);
- PatchCodeAddress(&patched_code_, patch.LiteralOffset(), target_offset);
- } else if (patch.Type() == kLinkerPatchMethod) {
- ArtMethod* method = GetTargetMethod(patch);
- PatchMethodAddress(&patched_code_, patch.LiteralOffset(), method);
- } else if (patch.Type() == kLinkerPatchType) {
- mirror::Class* type = GetTargetType(patch);
- PatchObjectAddress(&patched_code_, patch.LiteralOffset(), type);
- }
- }
- }
-
- writer_->oat_header_->UpdateChecksum(wrapped.data(), code_size);
- if (!out->WriteFully(wrapped.data(), code_size)) {
- ReportWriteFailure("method code", it);
- return false;
- }
- writer_->size_code_ += code_size;
- offset_ += code_size;
- }
- DCHECK_OFFSET_();
+ wrapped = (*quick_code);
+ code_size = quick_code->size() * sizeof(uint8_t);
}
+
+ // Deduplicate code arrays.
+ const OatMethodOffsets& method_offsets = oat_class->method_offsets_[method_offsets_index_];
+ if (method_offsets.code_offset_ > offset_) {
+ offset_ = writer_->relative_patcher_->WriteThunks(out, offset_);
+ if (offset_ == 0u) {
+ ReportWriteFailure("relative call thunk", it);
+ return false;
+ }
+ uint32_t aligned_offset = compiled_method->AlignCode(offset_);
+ uint32_t aligned_code_delta = aligned_offset - offset_;
+ if (aligned_code_delta != 0) {
+ if (!writer_->WriteCodeAlignment(out, aligned_code_delta)) {
+ ReportWriteFailure("code alignment padding", it);
+ return false;
+ }
+ offset_ += aligned_code_delta;
+ DCHECK_OFFSET_();
+ }
+ DCHECK_ALIGNED_PARAM(offset_,
+ GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
+ DCHECK_EQ(method_offsets.code_offset_,
+ offset_ + sizeof(OatQuickMethodHeader) + compiled_method->CodeDelta())
+ << PrettyMethod(it.GetMemberIndex(), *dex_file_);
+ const OatQuickMethodHeader& method_header =
+ oat_class->method_headers_[method_offsets_index_];
+ writer_->oat_header_->UpdateChecksum(&method_header, sizeof(method_header));
+ if (!out->WriteFully(&method_header, sizeof(method_header))) {
+ ReportWriteFailure("method header", it);
+ return false;
+ }
+ writer_->size_method_header_ += sizeof(method_header);
+ offset_ += sizeof(method_header);
+ DCHECK_OFFSET_();
+
+ if (!compiled_method->GetPatches().empty()) {
+ patched_code_.assign(quick_code->begin(), quick_code->end());
+ wrapped = ArrayRef<const uint8_t>(patched_code_);
+ for (const LinkerPatch& patch : compiled_method->GetPatches()) {
+ if (patch.Type() == kLinkerPatchCallRelative) {
+ // NOTE: Relative calls across oat files are not supported.
+ uint32_t target_offset = GetTargetOffset(patch);
+ uint32_t literal_offset = patch.LiteralOffset();
+ writer_->relative_patcher_->PatchCall(&patched_code_, literal_offset,
+ offset_ + literal_offset, target_offset);
+ } else if (patch.Type() == kLinkerPatchDexCacheArray) {
+ uint32_t target_offset = GetDexCacheOffset(patch);
+ uint32_t literal_offset = patch.LiteralOffset();
+ writer_->relative_patcher_->PatchDexCacheReference(&patched_code_, patch,
+ offset_ + literal_offset,
+ target_offset);
+ } else if (patch.Type() == kLinkerPatchCall) {
+ uint32_t target_offset = GetTargetOffset(patch);
+ PatchCodeAddress(&patched_code_, patch.LiteralOffset(), target_offset);
+ } else if (patch.Type() == kLinkerPatchMethod) {
+ ArtMethod* method = GetTargetMethod(patch);
+ PatchMethodAddress(&patched_code_, patch.LiteralOffset(), method);
+ } else if (patch.Type() == kLinkerPatchType) {
+ mirror::Class* type = GetTargetType(patch);
+ PatchObjectAddress(&patched_code_, patch.LiteralOffset(), type);
+ }
+ }
+ }
+
+ writer_->oat_header_->UpdateChecksum(wrapped.data(), code_size);
+ if (!out->WriteFully(wrapped.data(), code_size)) {
+ ReportWriteFailure("method code", it);
+ return false;
+ }
+ writer_->size_code_ += code_size;
+ offset_ += code_size;
+ }
+ DCHECK_OFFSET_();
++method_offsets_index_;
}
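
One detail worth spelling out (a sketch with an assumed helper name, RebaseTableOffset, not code from the patch): the mapping/vmap/gc-map offset fixups earlier in this file now run only when there is code to rebase against, since a dex2dex-only method has code_offset == 0 and its vmap table offset must be left untouched.

// sketch_rebase_offset.cc - standalone illustration of the guarded rebase.
#include <cassert>
#include <cstdint>

// Hypothetical helper mirroring the pattern applied to mapping_table_offset,
// vmap_table_offset and gc_map_offset: rebase only when both the table
// offset and the code offset are non-zero.
static uint32_t RebaseTableOffset(uint32_t table_offset, uint32_t code_offset) {
  if (table_offset != 0u && code_offset != 0u) {
    table_offset += code_offset;
  }
  return table_offset;
}

int main() {
  // A dex2dex-only method (code_offset == 0) keeps its vmap offset as-is,
  // instead of having a bogus code offset folded in.
  assert(RebaseTableOffset(0x80u, 0u) == 0x80u);
  // An absent table (offset 0) also stays at 0 regardless of code offset.
  assert(RebaseTableOffset(0u, 0x200u) == 0u);
  return 0;
}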