Merge changes Iae3a933e,I08ff5d6e
* changes:
ART: Make IndirectReferenceTable resizable
ART: Change IndirectReferenceTable
diff --git a/Android.mk b/Android.mk
index 2647268..b2716cd 100644
--- a/Android.mk
+++ b/Android.mk
@@ -568,3 +568,11 @@
# m art-boot-image ART_BOOT_IMAGE_EXTRA_ARGS=--dump-init-failures=fails.txt
.PHONY: art-boot-image
art-boot-image: $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME)
+
+.PHONY: art-job-images
+art-job-images: \
+ $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) \
+ $(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) \
+ $(HOST_OUT_EXECUTABLES)/dex2oats \
+ $(HOST_OUT_EXECUTABLES)/dex2oatds \
+ $(HOST_OUT_EXECUTABLES)/profman
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 9e94b9d..8fdf6fc 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -260,7 +260,8 @@
OatWriter* const oat_writer = oat_writers[i].get();
ElfWriter* const elf_writer = elf_writers[i].get();
std::vector<const DexFile*> cur_dex_files(1u, class_path[i]);
- oat_writer->PrepareLayout(driver, writer.get(), cur_dex_files, &patcher);
+ oat_writer->Initialize(driver, writer.get(), cur_dex_files);
+ oat_writer->PrepareLayout(&patcher);
size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer->GetOatSize() - rodata_size;
elf_writer->PrepareDynamicSection(rodata_size,
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index ffeff76..fd1b135 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -203,7 +203,8 @@
}
linker::MultiOatRelativePatcher patcher(compiler_driver_->GetInstructionSet(),
instruction_set_features_.get());
- oat_writer.PrepareLayout(compiler_driver_.get(), nullptr, dex_files, &patcher);
+ oat_writer.Initialize(compiler_driver_.get(), nullptr, dex_files);
+ oat_writer.PrepareLayout(&patcher);
size_t rodata_size = oat_writer.GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer.GetOatSize() - rodata_size;
elf_writer->PrepareDynamicSection(rodata_size,
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 52134e8..6cbca7a 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -299,6 +299,7 @@
vdex_size_(0u),
vdex_dex_files_offset_(0u),
vdex_verifier_deps_offset_(0u),
+ vdex_quickening_info_offset_(0u),
oat_size_(0u),
bss_start_(0u),
bss_size_(0u),
@@ -314,6 +315,8 @@
size_dex_file_(0),
size_verifier_deps_(0),
size_verifier_deps_alignment_(0),
+ size_quickening_info_(0),
+ size_quickening_info_alignment_(0),
size_interpreter_to_interpreter_bridge_(0),
size_interpreter_to_compiled_code_bridge_(0),
size_jni_dlsym_lookup_(0),
@@ -519,15 +522,9 @@
return true;
}
-void OatWriter::PrepareLayout(const CompilerDriver* compiler,
- ImageWriter* image_writer,
- const std::vector<const DexFile*>& dex_files,
- linker::MultiOatRelativePatcher* relative_patcher) {
+void OatWriter::PrepareLayout(linker::MultiOatRelativePatcher* relative_patcher) {
CHECK(write_state_ == WriteState::kPrepareLayout);
- compiler_driver_ = compiler;
- image_writer_ = image_writer;
- dex_files_ = &dex_files;
relative_patcher_ = relative_patcher;
SetMultiOatRelativePatcherAdjustment();
@@ -706,9 +703,10 @@
class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
public:
- InitCodeMethodVisitor(OatWriter* writer, size_t offset)
+ InitCodeMethodVisitor(OatWriter* writer, size_t offset, size_t quickening_info_offset)
: OatDexMethodVisitor(writer, offset),
- debuggable_(writer->GetCompilerDriver()->GetCompilerOptions().GetDebuggable()) {
+ debuggable_(writer->GetCompilerDriver()->GetCompilerOptions().GetDebuggable()),
+ current_quickening_info_offset_(quickening_info_offset) {
writer_->absolute_patch_locations_.reserve(
writer_->compiler_driver_->GetNonRelativeLinkerPatchCount());
}
@@ -726,6 +724,9 @@
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+ if (it.GetMethodCodeItem() != nullptr) {
+ current_quickening_info_offset_ += sizeof(uint32_t);
+ }
if (compiled_method != nullptr) {
// Derived from CompiledMethod.
uint32_t quick_code_offset = 0;
@@ -771,15 +772,28 @@
DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
uint32_t vmap_table_offset = method_header->vmap_table_offset_;
- // If we don't have quick code, then we must have a vmap, as that is how the dex2dex
- // compiler records its transformations.
- DCHECK(!quick_code.empty() || vmap_table_offset != 0);
// The code offset was 0 when the mapping/vmap table offset was set, so it's set
// to 0-offset and we need to adjust it by code_offset.
uint32_t code_offset = quick_code_offset - thumb_offset;
- if (vmap_table_offset != 0u && code_offset != 0u) {
- vmap_table_offset += code_offset;
- DCHECK_LT(vmap_table_offset, code_offset) << "Overflow in oat offsets";
+ if (!compiled_method->GetQuickCode().empty()) {
+ // If the code is compiled, we write the offset of the stack map relative
+ // to the code.
+ if (vmap_table_offset != 0u) {
+ vmap_table_offset += code_offset;
+ DCHECK_LT(vmap_table_offset, code_offset);
+ }
+ } else {
+ if (kIsVdexEnabled) {
+ // We write the offset in the .vdex file.
+ DCHECK_EQ(vmap_table_offset, 0u);
+ vmap_table_offset = current_quickening_info_offset_;
+ ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
+ current_quickening_info_offset_ += map.size() * sizeof(map.front());
+ } else {
+ // We write the offset of the quickening info relative to the code.
+ vmap_table_offset += code_offset;
+ DCHECK_LT(vmap_table_offset, code_offset);
+ }
}
uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
@@ -878,6 +892,9 @@
// Cache of compiler's --debuggable option.
const bool debuggable_;
+
+ // Offset in the vdex file for the quickening info.
+ uint32_t current_quickening_info_offset_;
};
class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
@@ -893,21 +910,25 @@
if (compiled_method != nullptr) {
DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
- DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].vmap_table_offset_, 0u);
+ // If vdex is enabled, we only emit the stack map of compiled code. The quickening info will
+ // be in the vdex file.
+ if (!compiled_method->GetQuickCode().empty() || !kIsVdexEnabled) {
+ DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].vmap_table_offset_, 0u);
- ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
- uint32_t map_size = map.size() * sizeof(map[0]);
- if (map_size != 0u) {
- size_t offset = dedupe_map_.GetOrCreate(
- map.data(),
- [this, map_size]() {
- uint32_t new_offset = offset_;
- offset_ += map_size;
- return new_offset;
- });
- // Code offset is not initialized yet, so set the map offset to 0u-offset.
- DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
- oat_class->method_headers_[method_offsets_index_].vmap_table_offset_ = 0u - offset;
+ ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
+ uint32_t map_size = map.size() * sizeof(map[0]);
+ if (map_size != 0u) {
+ size_t offset = dedupe_map_.GetOrCreate(
+ map.data(),
+ [this, map_size]() {
+ uint32_t new_offset = offset_;
+ offset_ += map_size;
+ return new_offset;
+ });
+ // Code offset is not initialized yet, so set the map offset to 0u-offset.
+ DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
+ oat_class->method_headers_[method_offsets_index_].vmap_table_offset_ = 0u - offset;
+ }
}
++method_offsets_index_;
}
@@ -1372,7 +1393,10 @@
<< compiled_method->GetVmapTable().size() << " " << map_offset << " "
<< dex_file_->PrettyMethod(it.GetMemberIndex());
- if (map_offset != 0u) {
+ // If vdex is enabled, only emit the map for compiled code. The quickening info
+ // is emitted in the vdex already.
+ if (map_offset != 0u &&
+ !(kIsVdexEnabled && compiled_method->GetQuickCode().empty())) {
// Transform map_offset to actual oat data offset.
map_offset = (code_offset - compiled_method->CodeDelta()) - map_offset;
DCHECK_NE(map_offset, 0u);
@@ -1539,21 +1563,18 @@
}
size_t OatWriter::InitOatCodeDexFiles(size_t offset) {
- #define VISIT(VisitorType) \
- do { \
- VisitorType visitor(this, offset); \
- bool success = VisitDexMethods(&visitor); \
- DCHECK(success); \
- offset = visitor.GetOffset(); \
- } while (false)
+ InitCodeMethodVisitor code_visitor(this, offset, vdex_quickening_info_offset_);
+ bool success = VisitDexMethods(&code_visitor);
+ DCHECK(success);
+ offset = code_visitor.GetOffset();
- VISIT(InitCodeMethodVisitor);
if (HasImage()) {
- VISIT(InitImageMethodVisitor);
+ InitImageMethodVisitor image_visitor(this, offset);
+ success = VisitDexMethods(&image_visitor);
+ DCHECK(success);
+ offset = image_visitor.GetOffset();
}
- #undef VISIT
-
return offset;
}
@@ -1626,6 +1647,90 @@
return true;
}
+class OatWriter::WriteQuickeningInfoMethodVisitor : public DexMethodVisitor {
+ public:
+ WriteQuickeningInfoMethodVisitor(OatWriter* writer, OutputStream* out, uint32_t offset)
+ : DexMethodVisitor(writer, offset),
+ out_(out),
+ written_bytes_(0u) {}
+
+ bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
+ const ClassDataItemIterator& it) {
+ if (it.GetMethodCodeItem() == nullptr) {
+ // No CodeItem. Native or abstract method.
+ return true;
+ }
+
+ uint32_t method_idx = it.GetMemberIndex();
+ CompiledMethod* compiled_method =
+ writer_->compiler_driver_->GetCompiledMethod(MethodReference(dex_file_, method_idx));
+
+ uint32_t length = 0;
+ const uint8_t* data = nullptr;
+ // VMap only contains quickening info if this method is not compiled.
+ if (compiled_method != nullptr && compiled_method->GetQuickCode().empty()) {
+ ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
+ data = map.data();
+ length = map.size() * sizeof(map.front());
+ }
+
+ if (!out_->WriteFully(&length, sizeof(length)) ||
+ !out_->WriteFully(data, length)) {
+ PLOG(ERROR) << "Failed to write quickening info for "
+ << dex_file_->PrettyMethod(it.GetMemberIndex()) << " to " << out_->GetLocation();
+ return false;
+ }
+ offset_ += sizeof(length) + length;
+ written_bytes_ += sizeof(length) + length;
+ return true;
+ }
+
+ size_t GetNumberOfWrittenBytes() const {
+ return written_bytes_;
+ }
+
+ private:
+ OutputStream* const out_;
+ size_t written_bytes_;
+};
+
+bool OatWriter::WriteQuickeningInfo(OutputStream* vdex_out) {
+ if (!kIsVdexEnabled) {
+ return true;
+ }
+
+ size_t initial_offset = vdex_size_;
+ size_t start_offset = RoundUp(initial_offset, 4u);
+
+ vdex_size_ = start_offset;
+ vdex_quickening_info_offset_ = vdex_size_;
+ size_quickening_info_alignment_ = start_offset - initial_offset;
+
+ off_t actual_offset = vdex_out->Seek(start_offset, kSeekSet);
+ if (actual_offset != static_cast<off_t>(start_offset)) {
+ PLOG(ERROR) << "Failed to seek to quickening info section. Actual: " << actual_offset
+ << " Expected: " << start_offset
+ << " Output: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ WriteQuickeningInfoMethodVisitor visitor(this, vdex_out, start_offset);
+ if (!VisitDexMethods(&visitor)) {
+ PLOG(ERROR) << "Failed to write the vdex quickening info. File: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ if (!vdex_out->Flush()) {
+ PLOG(ERROR) << "Failed to flush stream after writing quickening info."
+ << " File: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ size_quickening_info_ = visitor.GetNumberOfWrittenBytes();
+ vdex_size_ += size_quickening_info_;
+ return true;
+}
+
bool OatWriter::WriteVerifierDeps(OutputStream* vdex_out, verifier::VerifierDeps* verifier_deps) {
if (!kIsVdexEnabled) {
return true;
@@ -1717,6 +1822,8 @@
DO_STAT(size_dex_file_);
DO_STAT(size_verifier_deps_);
DO_STAT(size_verifier_deps_alignment_);
+ DO_STAT(size_quickening_info_);
+ DO_STAT(size_quickening_info_alignment_);
DO_STAT(size_interpreter_to_interpreter_bridge_);
DO_STAT(size_interpreter_to_compiled_code_bridge_);
DO_STAT(size_jni_dlsym_lookup_);
@@ -2434,9 +2541,11 @@
DCHECK_NE(vdex_verifier_deps_offset_, 0u);
size_t dex_section_size = vdex_verifier_deps_offset_ - vdex_dex_files_offset_;
- size_t verifier_deps_section_size = vdex_size_ - vdex_verifier_deps_offset_;
+ size_t verifier_deps_section_size = vdex_quickening_info_offset_ - vdex_verifier_deps_offset_;
+ size_t quickening_info_section_size = vdex_size_ - vdex_quickening_info_offset_;
- VdexFile::Header vdex_header(dex_section_size, verifier_deps_section_size);
+ VdexFile::Header vdex_header(
+ dex_section_size, verifier_deps_section_size, quickening_info_section_size);
if (!vdex_out->WriteFully(&vdex_header, sizeof(VdexFile::Header))) {
PLOG(ERROR) << "Failed to write vdex header. File: " << vdex_out->GetLocation();
return false;
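
For readers of this change: the quickening info section written above starts at a 4-byte-aligned offset and, per WriteQuickeningInfoMethodVisitor, holds one entry per method that has a code item, each a uint32_t length followed by that many bytes of quickening data (length 0 for methods with compiled code). A minimal stand-alone sketch of a reader for that layout follows; the struct and function names are illustrative, not ART APIs.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Illustrative only: one entry per method with a code item, in visitation order.
    struct QuickeningEntry {
      uint32_t length;      // 0 when the method was compiled and has no quickening data.
      const uint8_t* data;  // Points into the mapped vdex; null when length == 0.
    };

    std::vector<QuickeningEntry> WalkQuickeningInfo(const uint8_t* section_begin,
                                                    size_t section_size) {
      std::vector<QuickeningEntry> entries;
      const uint8_t* ptr = section_begin;
      const uint8_t* const end = section_begin + section_size;
      while (ptr + sizeof(uint32_t) <= end) {
        uint32_t length;
        std::memcpy(&length, ptr, sizeof(length));  // memcpy avoids unaligned reads.
        ptr += sizeof(length);
        if (ptr + length > end) {
          break;  // Truncated entry; a real reader would report an error here.
        }
        entries.push_back(QuickeningEntry{length, length != 0u ? ptr : nullptr});
        ptr += length;
      }
      return entries;
    }
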
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 1cc193b..3d08ad3 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -118,6 +118,10 @@
// - AddRawDexFileSource().
// Then the user must call in order
// - WriteAndOpenDexFiles()
+ // - Initialize()
+ // - WriteVerifierDeps()
+ // - WriteQuickeningInfo()
+ // - WriteVdexHeader()
// - PrepareLayout(),
// - WriteRodata(),
// - WriteCode(),
@@ -154,14 +158,20 @@
bool verify,
/*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
+ bool WriteQuickeningInfo(OutputStream* vdex_out);
bool WriteVerifierDeps(OutputStream* vdex_out, verifier::VerifierDeps* verifier_deps);
bool WriteVdexHeader(OutputStream* vdex_out);
+ // Initialize the writer with the given parameters.
+ void Initialize(const CompilerDriver* compiler,
+ ImageWriter* image_writer,
+ const std::vector<const DexFile*>& dex_files) {
+ compiler_driver_ = compiler;
+ image_writer_ = image_writer;
+ dex_files_ = &dex_files;
+ }
// Prepare layout of remaining data.
- void PrepareLayout(const CompilerDriver* compiler,
- ImageWriter* image_writer,
- const std::vector<const DexFile*>& dex_files,
- linker::MultiOatRelativePatcher* relative_patcher);
+ void PrepareLayout(linker::MultiOatRelativePatcher* relative_patcher);
// Write the rest of .rodata section (ClassOffsets[], OatClass[], maps).
bool WriteRodata(OutputStream* out);
// Write the code to the .text section.
@@ -239,6 +249,7 @@
class InitImageMethodVisitor;
class WriteCodeMethodVisitor;
class WriteMapMethodVisitor;
+ class WriteQuickeningInfoMethodVisitor;
// Visit all the methods in all the compiled dex files in their definition order
// with a given DexMethodVisitor.
@@ -325,6 +336,9 @@
// Offset of section holding VerifierDeps inside Vdex.
size_t vdex_verifier_deps_offset_;
+ // Offset of section holding quickening info inside Vdex.
+ size_t vdex_quickening_info_offset_;
+
// Size required for Oat data structures.
size_t oat_size_;
@@ -368,6 +382,8 @@
uint32_t size_dex_file_;
uint32_t size_verifier_deps_;
uint32_t size_verifier_deps_alignment_;
+ uint32_t size_quickening_info_;
+ uint32_t size_quickening_info_alignment_;
uint32_t size_interpreter_to_interpreter_bridge_;
uint32_t size_interpreter_to_compiled_code_bridge_;
uint32_t size_jni_dlsym_lookup_;
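
To make the reordered interface above easier to follow, here is a hedged sketch of a caller using the sequence documented in the header comment; the helper function is hypothetical, includes and namespaces are elided, and the remaining WriteRodata()/WriteCode() steps are left out.

    // Hypothetical caller (inside namespace art): Initialize() now supplies the
    // compiler driver, image writer and dex files that PrepareLayout() used to
    // take, and the quickening info is written to the vdex before its header.
    bool WriteVdexAndPrepareOatLayout(OatWriter* oat_writer,
                                      OutputStream* vdex_out,
                                      const CompilerDriver* driver,
                                      ImageWriter* image_writer,
                                      const std::vector<const DexFile*>& dex_files,
                                      verifier::VerifierDeps* deps,
                                      linker::MultiOatRelativePatcher* patcher) {
      // AddRawDexFileSource() and WriteAndOpenDexFiles() are assumed to have run.
      oat_writer->Initialize(driver, image_writer, dex_files);
      if (!oat_writer->WriteVerifierDeps(vdex_out, deps)) return false;
      if (!oat_writer->WriteQuickeningInfo(vdex_out)) return false;  // New step in this change.
      if (!oat_writer->WriteVdexHeader(vdex_out)) return false;      // Vdex is finalized here.
      oat_writer->PrepareLayout(patcher);  // Now takes only the relative patcher.
      return true;
    }
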
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index a4df9e5..7b66ef3 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -129,6 +129,7 @@
} else if (can_merge_with_load_class && !load_class->NeedsAccessCheck()) {
// Pass the initialization duty to the `HLoadClass` instruction,
// and remove the instruction from the graph.
+ DCHECK(load_class->HasEnvironment());
load_class->SetMustGenerateClinitCheck(true);
check->GetBlock()->RemoveInstruction(check);
}
@@ -136,7 +137,7 @@
void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) {
HLoadClass* load_class = instruction->InputAt(0)->AsLoadClass();
- bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse();
+ const bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse();
// Change the entrypoint to kQuickAllocObject if either:
// - the class is finalizable (only kQuickAllocObject handles finalizable classes),
// - the class needs access checks (we do not know if it's finalizable),
@@ -144,19 +145,25 @@
if (instruction->IsFinalizable() || has_only_one_use || load_class->NeedsAccessCheck()) {
instruction->SetEntrypoint(kQuickAllocObject);
instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex()), 0);
- // The allocation entry point that deals with access checks does not work with inlined
- // methods, so we need to check whether this allocation comes from an inlined method.
- // We also need to make the same check as for moving clinit check, whether the HLoadClass
- // has the clinit check responsibility or not (HLoadClass can throw anyway).
- if (has_only_one_use &&
- !instruction->GetEnvironment()->IsFromInlinedInvoke() &&
- CanMoveClinitCheck(load_class, instruction)) {
- // We can remove the load class from the graph. If it needed access checks, we delegate
- // the access check to the allocation.
- if (load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck);
+ if (has_only_one_use) {
+ // We've just removed the only use of the HLoadClass. Since we don't run DCE after this pass,
+ // do it manually if possible.
+ if (!load_class->CanThrow()) {
+ // If the load class can not throw, it has no side effects and can be removed if there is
+ // only one use.
+ load_class->GetBlock()->RemoveInstruction(load_class);
+ } else if (!instruction->GetEnvironment()->IsFromInlinedInvoke() &&
+ CanMoveClinitCheck(load_class, instruction)) {
+ // The allocation entry point that deals with access checks does not work with inlined
+ // methods, so we need to check whether this allocation comes from an inlined method.
+ // We also need to make the same check as for moving clinit check, whether the HLoadClass
+ // has the clinit check responsibility or not (HLoadClass can throw anyway).
+ // If it needed access checks, we delegate the access check to the allocation.
+ if (load_class->NeedsAccessCheck()) {
+ instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck);
+ }
+ load_class->GetBlock()->RemoveInstruction(load_class);
}
- load_class->GetBlock()->RemoveInstruction(load_class);
}
}
}
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index df1b351..fd1db59 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -162,7 +162,6 @@
? compilation_unit_.GetDexCache()
: hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
mirror::Class* klass = dex_cache->GetResolvedType(type_index);
-
if (codegen_->GetCompilerOptions().IsBootImage()) {
// Compiling boot image. Check if the class is a boot image class.
DCHECK(!runtime->UseJitCompilation());
@@ -312,8 +311,7 @@
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
address = reinterpret_cast64<uint64_t>(string);
} else {
- // FIXME: Disabled because of BSS root visiting issues. Bug: 32124939
- // desired_load_kind = HLoadString::LoadKind::kBssEntry;
+ desired_load_kind = HLoadString::LoadKind::kBssEntry;
}
}
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0ce1362..8bbe685 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1777,6 +1777,14 @@
}
}
+ // Initialize the writers with the compiler driver, image writer, and their
+ // dex files. The writers were created without those being there yet.
+ for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
+ std::unique_ptr<OatWriter>& oat_writer = oat_writers_[i];
+ std::vector<const DexFile*>& dex_files = dex_files_per_oat_file_[i];
+ oat_writer->Initialize(driver_.get(), image_writer_.get(), dex_files);
+ }
+
{
TimingLogger::ScopedTiming t2("dex2oat Write VDEX", timings_);
DCHECK(IsBootImage() || oat_files_.size() == 1u);
@@ -1791,6 +1799,11 @@
return false;
}
+ if (!oat_writers_[i]->WriteQuickeningInfo(vdex_out.get())) {
+ LOG(ERROR) << "Failed to write quickening info into VDEX " << vdex_file->GetPath();
+ return false;
+ }
+
// VDEX finalized, seek back to the beginning and write the header.
if (!oat_writers_[i]->WriteVdexHeader(vdex_out.get())) {
LOG(ERROR) << "Failed to write vdex header into VDEX " << vdex_file->GetPath();
@@ -1799,15 +1812,14 @@
}
}
- linker::MultiOatRelativePatcher patcher(instruction_set_, instruction_set_features_.get());
{
TimingLogger::ScopedTiming t2("dex2oat Write ELF", timings_);
+ linker::MultiOatRelativePatcher patcher(instruction_set_, instruction_set_features_.get());
for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
std::unique_ptr<ElfWriter>& elf_writer = elf_writers_[i];
std::unique_ptr<OatWriter>& oat_writer = oat_writers_[i];
- std::vector<const DexFile*>& dex_files = dex_files_per_oat_file_[i];
- oat_writer->PrepareLayout(driver_.get(), image_writer_.get(), dex_files, &patcher);
+ oat_writer->PrepareLayout(&patcher);
size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer->GetOatSize() - rodata_size;
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index 3e589f7..60ce363 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -18,6 +18,7 @@
name: "dexdump2",
host_supported: true,
srcs: [
+ "dexdump_cfg.cc",
"dexdump_main.cc",
"dexdump.cc",
],
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index b241c8b..15b6e17 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -43,9 +43,9 @@
#include <vector>
#include "base/stringprintf.h"
+#include "dexdump_cfg.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
-#include "utils.h"
namespace art {
@@ -1358,7 +1358,7 @@
if (code_item != nullptr) {
std::ostringstream oss;
DumpMethodCFG(dex_file, dex_method_idx, oss);
- fprintf(gOutFile, "%s", oss.str().c_str());
+ fputs(oss.str().c_str(), gOutFile);
}
}
diff --git a/dexdump/dexdump_cfg.cc b/dexdump/dexdump_cfg.cc
new file mode 100644
index 0000000..9e58128
--- /dev/null
+++ b/dexdump/dexdump_cfg.cc
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Implementation file for control flow graph dumping for the dexdump utility.
+ */
+
+#include "dexdump_cfg.h"
+
+#include <inttypes.h>
+#include <ostream>
+#include <map>
+#include <set>
+
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+
+namespace art {
+
+static void dumpMethodCFGImpl(const DexFile* dex_file,
+ uint32_t dex_method_idx,
+ const DexFile::CodeItem* code_item,
+ std::ostream& os) {
+ os << "digraph {\n";
+ os << " # /* " << dex_file->PrettyMethod(dex_method_idx, true) << " */\n";
+
+ std::set<uint32_t> dex_pc_is_branch_target;
+ {
+ // Go and populate.
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ if (inst->IsBranch()) {
+ dex_pc_is_branch_target.insert(dex_pc + inst->GetTargetOffset());
+ } else if (inst->IsSwitch()) {
+ const uint16_t* insns = code_item->insns_ + dex_pc;
+ int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ const uint16_t* switch_insns = insns + switch_offset;
+ uint32_t switch_count = switch_insns[1];
+ int32_t targets_offset;
+ if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
+ /* 0=sig, 1=count, 2/3=firstKey */
+ targets_offset = 4;
+ } else {
+ /* 0=sig, 1=count, 2..count*2 = keys */
+ targets_offset = 2 + 2 * switch_count;
+ }
+ for (uint32_t targ = 0; targ < switch_count; targ++) {
+ int32_t offset =
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
+ dex_pc_is_branch_target.insert(dex_pc + offset);
+ }
+ }
+ }
+ }
+
+ // Create nodes for "basic blocks."
+ std::map<uint32_t, uint32_t> dex_pc_to_node_id; // This only has entries for block starts.
+ std::map<uint32_t, uint32_t> dex_pc_to_incl_id; // This has entries for all dex pcs.
+
+ {
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ bool first_in_block = true;
+ bool force_new_block = false;
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ if (dex_pc == 0 ||
+ (dex_pc_is_branch_target.find(dex_pc) != dex_pc_is_branch_target.end()) ||
+ force_new_block) {
+ uint32_t id = dex_pc_to_node_id.size();
+ if (id > 0) {
+ // End last node.
+ os << "}\"];\n";
+ }
+ // Start next node.
+ os << " node" << id << " [shape=record,label=\"{";
+ dex_pc_to_node_id.insert(std::make_pair(dex_pc, id));
+ first_in_block = true;
+ force_new_block = false;
+ }
+
+ // Register instruction.
+ dex_pc_to_incl_id.insert(std::make_pair(dex_pc, dex_pc_to_node_id.size() - 1));
+
+ // Print instruction.
+ if (!first_in_block) {
+ os << " | ";
+ } else {
+ first_in_block = false;
+ }
+
+ // Dump the instruction. Need to escape '"', '<', '>', '{' and '}'.
+ os << "<" << "p" << dex_pc << ">";
+ os << " 0x" << std::hex << dex_pc << std::dec << ": ";
+ std::string inst_str = inst->DumpString(dex_file);
+ size_t cur_start = 0; // It's OK to start at zero, instruction dumps don't start with chars
+ // we need to escape.
+ while (cur_start != std::string::npos) {
+ size_t next_escape = inst_str.find_first_of("\"{}<>", cur_start + 1);
+ if (next_escape == std::string::npos) {
+ os << inst_str.substr(cur_start, inst_str.size() - cur_start);
+ break;
+ } else {
+ os << inst_str.substr(cur_start, next_escape - cur_start);
+ // Escape all necessary characters.
+ while (next_escape < inst_str.size()) {
+ char c = inst_str.at(next_escape);
+ if (c == '"' || c == '{' || c == '}' || c == '<' || c == '>') {
+ os << '\\' << c;
+ } else {
+ break;
+ }
+ next_escape++;
+ }
+ if (next_escape >= inst_str.size()) {
+ next_escape = std::string::npos;
+ }
+ cur_start = next_escape;
+ }
+ }
+
+ // Force a new block for some fall-throughs and some instructions that terminate the "local"
+ // control flow.
+ force_new_block = inst->IsSwitch() || inst->IsBasicBlockEnd();
+ }
+ // Close last node.
+ if (dex_pc_to_node_id.size() > 0) {
+ os << "}\"];\n";
+ }
+ }
+
+ // Create edges between them.
+ {
+ std::ostringstream regular_edges;
+ std::ostringstream taken_edges;
+ std::ostringstream exception_edges;
+
+ // Common set of exception edges.
+ std::set<uint32_t> exception_targets;
+
+ // These blocks (given by the first dex pc) need exception per dex-pc handling in a second
+ // pass. In the first pass we try and see whether we can use a common set of edges.
+ std::set<uint32_t> blocks_with_detailed_exceptions;
+
+ {
+ uint32_t last_node_id = std::numeric_limits<uint32_t>::max();
+ uint32_t old_dex_pc = 0;
+ uint32_t block_start_dex_pc = std::numeric_limits<uint32_t>::max();
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ old_dex_pc = dex_pc, dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ {
+ auto it = dex_pc_to_node_id.find(dex_pc);
+ if (it != dex_pc_to_node_id.end()) {
+ if (!exception_targets.empty()) {
+ // It seems the last block had common exception handlers. Add the exception edges now.
+ uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
+ for (uint32_t handler_pc : exception_targets) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << node_id
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+ }
+ exception_targets.clear();
+ }
+
+ block_start_dex_pc = dex_pc;
+
+ // Seems to be a fall-through, connect to last_node_id. May be spurious edges for things
+ // like switch data.
+ uint32_t old_last = last_node_id;
+ last_node_id = it->second;
+ if (old_last != std::numeric_limits<uint32_t>::max()) {
+ regular_edges << " node" << old_last << ":p" << old_dex_pc
+ << " -> node" << last_node_id << ":p" << dex_pc
+ << ";\n";
+ }
+ }
+
+ // Look at the exceptions of the first entry.
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ exception_targets.insert(catch_it.GetHandlerAddress());
+ }
+ }
+
+ // Handle instruction.
+
+ // Branch: something with at most two targets.
+ if (inst->IsBranch()) {
+ const int32_t offset = inst->GetTargetOffset();
+ const bool conditional = !inst->IsUnconditional();
+
+ auto target_it = dex_pc_to_node_id.find(dex_pc + offset);
+ if (target_it != dex_pc_to_node_id.end()) {
+ taken_edges << " node" << last_node_id << ":p" << dex_pc
+ << " -> node" << target_it->second << ":p" << (dex_pc + offset)
+ << ";\n";
+ }
+ if (!conditional) {
+ // No fall-through.
+ last_node_id = std::numeric_limits<uint32_t>::max();
+ }
+ } else if (inst->IsSwitch()) {
+ // TODO: Iterate through all switch targets.
+ const uint16_t* insns = code_item->insns_ + dex_pc;
+ /* make sure the start of the switch is in range */
+ int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ /* offset to switch table is a relative branch-style offset */
+ const uint16_t* switch_insns = insns + switch_offset;
+ uint32_t switch_count = switch_insns[1];
+ int32_t targets_offset;
+ if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
+ /* 0=sig, 1=count, 2/3=firstKey */
+ targets_offset = 4;
+ } else {
+ /* 0=sig, 1=count, 2..count*2 = keys */
+ targets_offset = 2 + 2 * switch_count;
+ }
+ /* make sure the end of the switch is in range */
+ /* verify each switch target */
+ for (uint32_t targ = 0; targ < switch_count; targ++) {
+ int32_t offset =
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
+ int32_t abs_offset = dex_pc + offset;
+ auto target_it = dex_pc_to_node_id.find(abs_offset);
+ if (target_it != dex_pc_to_node_id.end()) {
+ // TODO: value label.
+ taken_edges << " node" << last_node_id << ":p" << dex_pc
+ << " -> node" << target_it->second << ":p" << (abs_offset)
+ << ";\n";
+ }
+ }
+ }
+
+ // Exception edges. If this is not the first instruction in the block, check whether its handlers still match the block's common set.
+ if (block_start_dex_pc != dex_pc) {
+ std::set<uint32_t> current_handler_pcs;
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ current_handler_pcs.insert(catch_it.GetHandlerAddress());
+ }
+ if (current_handler_pcs != exception_targets) {
+ exception_targets.clear(); // Clear so we don't do something at the end.
+ blocks_with_detailed_exceptions.insert(block_start_dex_pc);
+ }
+ }
+
+ if (inst->IsReturn() ||
+ (inst->Opcode() == Instruction::THROW) ||
+ (inst->IsBranch() && inst->IsUnconditional())) {
+ // No fall-through.
+ last_node_id = std::numeric_limits<uint32_t>::max();
+ }
+ }
+ // Finish up the last block, if it had common exceptions.
+ if (!exception_targets.empty()) {
+ // It seems the last block had common exception handlers. Add the exception edges now.
+ uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
+ for (uint32_t handler_pc : exception_targets) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << node_id
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+ }
+ exception_targets.clear();
+ }
+ }
+
+ // Second pass for detailed exception blocks.
+ // TODO
+ // For each block collected above, walk its instructions and add an edge to every handler.
+ for (uint32_t dex_pc : blocks_with_detailed_exceptions) {
+ const Instruction* inst = Instruction::At(&code_item->insns_[dex_pc]);
+ uint32_t this_node_id = dex_pc_to_incl_id.find(dex_pc)->second;
+ while (true) {
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ if (catch_it.HasNext()) {
+ std::set<uint32_t> handled_targets;
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ uint32_t handler_pc = catch_it.GetHandlerAddress();
+ auto it = handled_targets.find(handler_pc);
+ if (it == handled_targets.end()) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << this_node_id << ":p" << dex_pc
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+
+ // Mark as done.
+ handled_targets.insert(handler_pc);
+ }
+ }
+ }
+ if (inst->IsBasicBlockEnd()) {
+ break;
+ }
+
+ // Loop update. Have a break-out if the next instruction is a branch target and thus in
+ // another block.
+ dex_pc += inst->SizeInCodeUnits();
+ if (dex_pc >= code_item->insns_size_in_code_units_) {
+ break;
+ }
+ if (dex_pc_to_node_id.find(dex_pc) != dex_pc_to_node_id.end()) {
+ break;
+ }
+ inst = inst->Next();
+ }
+ }
+
+ // Write out the sub-graphs to make edges styled.
+ os << "\n";
+ os << " subgraph regular_edges {\n";
+ os << " edge [color=\"#000000\",weight=.3,len=3];\n\n";
+ os << " " << regular_edges.str() << "\n";
+ os << " }\n\n";
+
+ os << " subgraph taken_edges {\n";
+ os << " edge [color=\"#00FF00\",weight=.3,len=3];\n\n";
+ os << " " << taken_edges.str() << "\n";
+ os << " }\n\n";
+
+ os << " subgraph exception_edges {\n";
+ os << " edge [color=\"#FF0000\",weight=.3,len=3];\n\n";
+ os << " " << exception_edges.str() << "\n";
+ os << " }\n\n";
+ }
+
+ os << "}\n";
+}
+
+void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os) {
+ // This is painful, we need to find the code item. That means finding the class, and then
+ // iterating the table.
+ if (dex_method_idx >= dex_file->NumMethodIds()) {
+ os << "Could not find method-idx.";
+ return;
+ }
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx);
+
+ const DexFile::ClassDef* class_def = dex_file->FindClassDef(method_id.class_idx_);
+ if (class_def == nullptr) {
+ os << "Could not find class-def.";
+ return;
+ }
+
+ const uint8_t* class_data = dex_file->GetClassData(*class_def);
+ if (class_data == nullptr) {
+ os << "No class data.";
+ return;
+ }
+
+ ClassDataItemIterator it(*dex_file, class_data);
+ // Skip fields
+ while (it.HasNextStaticField() || it.HasNextInstanceField()) {
+ it.Next();
+ }
+
+ // Find method, and dump it.
+ while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ if (method_idx == dex_method_idx) {
+ dumpMethodCFGImpl(dex_file, dex_method_idx, it.GetMethodCodeItem(), os);
+ return;
+ }
+ it.Next();
+ }
+
+ // Otherwise complain.
+ os << "Something went wrong, didn't find the method in the class data.";
+}
+
+} // namespace art
diff --git a/dexdump/dexdump_cfg.h b/dexdump/dexdump_cfg.h
new file mode 100644
index 0000000..64e5f9a
--- /dev/null
+++ b/dexdump/dexdump_cfg.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_DEXDUMP_DEXDUMP_CFG_H_
+#define ART_DEXDUMP_DEXDUMP_CFG_H_
+
+#include <inttypes.h>
+#include <ostream>
+
+namespace art {
+
+class DexFile;
+
+void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
+
+} // namespace art
+
+#endif // ART_DEXDUMP_DEXDUMP_CFG_H_
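
The header above exposes a single entry point; a minimal usage sketch, mirroring how dexdump.cc calls it in this change, follows. The wrapper function is hypothetical, and obtaining the DexFile is left out.

    #include <cstdint>
    #include <iostream>
    #include <sstream>

    #include "dexdump_cfg.h"

    // Hypothetical wrapper: writes one method's control flow graph to stdout as
    // a GraphViz "digraph", which can then be rendered with an external dot tool.
    void PrintMethodCfg(const art::DexFile* dex_file, uint32_t dex_method_idx) {
      std::ostringstream oss;
      art::DumpMethodCFG(dex_file, dex_method_idx, oss);
      std::cout << oss.str();
    }
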
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 1071880..2b30a1b 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -1286,49 +1286,6 @@
}
/*
- * Dumping a CFG. Note that this will do duplicate work. utils.h doesn't expose the code-item
- * version, so the DumpMethodCFG code will have to iterate again to find it. But dexdump is a
- * tool, so this is not performance-critical.
- */
-
-static void DumpCFG(const DexFile* dex_file,
- uint32_t dex_method_idx,
- const DexFile::CodeItem* code) {
- if (code != nullptr) {
- std::ostringstream oss;
- DumpMethodCFG(dex_file, dex_method_idx, oss);
- fprintf(out_file_, "%s", oss.str().c_str());
- }
-}
-
-static void DumpCFG(const DexFile* dex_file, int idx) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(idx);
- const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data == nullptr) { // empty class such as a marker interface?
- return;
- }
- ClassDataItemIterator it(*dex_file, class_data);
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
- while (it.HasNextDirectMethod()) {
- DumpCFG(dex_file,
- it.GetMemberIndex(),
- it.GetMethodCodeItem());
- it.Next();
- }
- while (it.HasNextVirtualMethod()) {
- DumpCFG(dex_file,
- it.GetMemberIndex(),
- it.GetMethodCodeItem());
- it.Next();
- }
-}
-
-/*
* Dumps the class.
*
* Note "idx" is a DexClassDef index, not a DexTypeId index.
@@ -1336,10 +1293,7 @@
* If "*last_package" is nullptr or does not match the current class' package,
* the value will be replaced with a newly-allocated string.
*/
-static void DumpClass(const DexFile* dex_file,
- dex_ir::Header* header,
- int idx,
- char** last_package) {
+static void DumpClass(dex_ir::Header* header, int idx, char** last_package) {
dex_ir::ClassDef* class_def = header->GetCollections().GetClassDef(idx);
// Omitting non-public class.
if (options_.exports_only_ && (class_def->GetAccessFlags() & kAccPublic) == 0) {
@@ -1354,11 +1308,6 @@
DumpClassAnnotations(header, idx);
}
- if (options_.show_cfg_) {
- DumpCFG(dex_file, idx);
- return;
- }
-
// For the XML output, show the package name. Ideally we'd gather
// up the classes, sort them, and dump them alphabetically so the
// package name wouldn't jump around, but that's not a great plan
@@ -1561,7 +1510,7 @@
char* package = nullptr;
const uint32_t class_defs_size = header->GetCollections().ClassDefsSize();
for (uint32_t i = 0; i < class_defs_size; i++) {
- DumpClass(dex_file, header.get(), i, &package);
+ DumpClass(header.get(), i, &package);
} // for
// Free the last package allocated.
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index c01eb79..a5bd992 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -44,7 +44,6 @@
bool exports_only_;
bool ignore_bad_checksum_;
bool show_annotations_;
- bool show_cfg_;
bool show_file_headers_;
bool show_section_headers_;
bool verbose_;
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 2203fba..825dd50 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -51,7 +51,6 @@
fprintf(stderr, " -d : disassemble code sections\n");
fprintf(stderr, " -e : display exported items only\n");
fprintf(stderr, " -f : display summary information from file header\n");
- fprintf(stderr, " -g : display CFG for dex\n");
fprintf(stderr, " -h : display file header details\n");
fprintf(stderr, " -i : ignore checksum failures\n");
fprintf(stderr, " -l : output layout, either 'plain' or 'xml'\n");
@@ -99,9 +98,6 @@
case 'f': // display outer file header
options_.show_file_headers_ = true;
break;
- case 'g': // display cfg
- options_.show_cfg_ = true;
- break;
case 'h': // display section headers, i.e. all meta-data
options_.show_section_headers_ = true;
break;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index c7bf231..4e81d50 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -64,6 +64,7 @@
#include "string_reference.h"
#include "thread_list.h"
#include "type_lookup_table.h"
+#include "vdex_file.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"
@@ -1029,13 +1030,19 @@
if (options_.absolute_addresses_) {
vios->Stream() << StringPrintf("%p ", oat_method.GetVmapTable());
}
- uint32_t vmap_table_offset = oat_method.GetVmapTableOffset();
+ uint32_t vmap_table_offset = method_header == nullptr ? 0 : method_header->vmap_table_offset_;
vios->Stream() << StringPrintf("(offset=0x%08x)\n", vmap_table_offset);
- if (vmap_table_offset > oat_file_.Size()) {
+
+ size_t vmap_table_offset_limit =
+ (kIsVdexEnabled && IsMethodGeneratedByDexToDexCompiler(oat_method, code_item))
+ ? oat_file_.GetVdexFile()->Size()
+ : method_header->GetCode() - oat_file_.Begin();
+ if (vmap_table_offset >= vmap_table_offset_limit) {
vios->Stream() << StringPrintf("WARNING: "
"vmap table offset 0x%08x is past end of file 0x%08zx. "
"vmap table offset was loaded from offset 0x%08x.\n",
- vmap_table_offset, oat_file_.Size(),
+ vmap_table_offset,
+ vmap_table_offset_limit,
oat_method.GetVmapTableOffsetOffset());
success = false;
} else if (options_.dump_vmap_) {
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 3065f68..c550a1b 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -468,7 +468,18 @@
if (!found || (oat_method.GetQuickCode() != nullptr)) {
return nullptr;
}
- return oat_method.GetVmapTable();
+ if (kIsVdexEnabled) {
+ const OatQuickMethodHeader* header = oat_method.GetOatQuickMethodHeader();
+ // OatMethod without a header: no quickening table.
+ if (header == nullptr) {
+ return nullptr;
+ }
+ // The table is in the .vdex file.
+ const OatFile::OatDexFile* oat_dex_file = GetDexCache()->GetDexFile()->GetOatDexFile();
+ return oat_dex_file->GetOatFile()->DexBegin() + header->vmap_table_offset_;
+ } else {
+ return oat_method.GetVmapTable();
+ }
}
const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index cea8377..439849b 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1180,8 +1180,7 @@
<< resolved_types << " is not in image starting at "
<< reinterpret_cast<void*>(header_.GetImageBegin());
if (!is_copied || in_image_space) {
- // Go through the array so that we don't need to do a slow map lookup.
- method->SetDexCacheResolvedTypes(*reinterpret_cast<GcRoot<mirror::Class>**>(resolved_types),
+ method->SetDexCacheResolvedTypes(method->GetDexCache()->GetResolvedTypes(),
kRuntimePointerSize);
}
}
@@ -1197,8 +1196,7 @@
<< resolved_methods << " is not in image starting at "
<< reinterpret_cast<void*>(header_.GetImageBegin());
if (!is_copied || in_image_space) {
- // Go through the array so that we don't need to do a slow map lookup.
- method->SetDexCacheResolvedMethods(*reinterpret_cast<ArtMethod***>(resolved_methods),
+ method->SetDexCacheResolvedMethods(method->GetDexCache()->GetResolvedMethods(),
kRuntimePointerSize);
}
}
@@ -1283,7 +1281,7 @@
}
// Only add the classes to the class loader after the points where we can return false.
for (size_t i = 0; i < num_dex_caches; i++) {
- ObjPtr<mirror::DexCache> const dex_cache = dex_caches->Get(i);
+ ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
const DexFile* const dex_file = dex_cache->GetDexFile();
const OatFile::OatDexFile* oat_dex_file = dex_file->GetOatDexFile();
if (oat_dex_file != nullptr && oat_dex_file->GetDexCacheArrays() != nullptr) {
@@ -1333,10 +1331,6 @@
DCHECK(types[j].IsNull());
}
std::copy_n(image_resolved_types, num_types, types);
- // Store a pointer to the new location for fast ArtMethod patching without requiring map.
- // This leaves random garbage at the start of the dex cache array, but nobody should ever
- // read from it again.
- *reinterpret_cast<GcRoot<mirror::Class>**>(image_resolved_types) = types;
dex_cache->SetResolvedTypes(types);
}
if (num_methods != 0u) {
@@ -1347,8 +1341,6 @@
DCHECK(methods[j] == nullptr);
}
std::copy_n(image_resolved_methods, num_methods, methods);
- // Store a pointer to the new location for fast ArtMethod patching without requiring map.
- *reinterpret_cast<ArtMethod***>(image_resolved_methods) = methods;
dex_cache->SetResolvedMethods(methods);
}
if (num_fields != 0u) {
@@ -1391,7 +1383,11 @@
/*allow_failure*/true);
CHECK(existing_dex_cache == nullptr);
StackHandleScope<1> hs3(self);
- RegisterDexFileLocked(*dex_file, hs3.NewHandle(dex_cache));
+ Handle<mirror::DexCache> h_dex_cache = hs3.NewHandle(dex_cache);
+ RegisterDexFileLocked(*dex_file, h_dex_cache);
+ if (kIsDebugBuild) {
+ dex_cache.Assign(h_dex_cache.Get()); // Update dex_cache, used below in debug build.
+ }
}
if (kIsDebugBuild) {
CHECK(new_class_set != nullptr);
@@ -1781,6 +1777,12 @@
<< reinterpret_cast<const void*>(section_end);
}
}
+ if (!oat_file->GetBssGcRoots().empty()) {
+ // Insert oat file to class table for visiting .bss GC roots.
+ class_table->InsertOatFile(oat_file);
+ }
+ } else {
+ DCHECK(oat_file->GetBssGcRoots().empty());
}
if (added_class_table) {
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -3242,6 +3244,10 @@
WriterMutexLock mu(self, dex_lock_);
ObjPtr<mirror::DexCache> dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
+ // Another thread managed to initialize the dex cache faster, so use that DexCache.
+ // If this thread encountered OOME, ignore it.
+ DCHECK_EQ(h_dex_cache.Get() == nullptr, self->IsExceptionPending());
+ self->ClearException();
return dex_cache.Ptr();
}
if (h_dex_cache.Get() == nullptr) {
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index 97c0abd..b44104e 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -170,14 +170,27 @@
const DexFile* dex_file = ObjPtr<mirror::DexCache>::DownCast(obj)->GetDexFile();
if (dex_file != nullptr && dex_file->GetOatDexFile() != nullptr) {
const OatFile* oat_file = dex_file->GetOatDexFile()->GetOatFile();
- if (!oat_file->GetBssGcRoots().empty() && !ContainsElement(oat_files_, oat_file)) {
- oat_files_.push_back(oat_file);
+ if (!oat_file->GetBssGcRoots().empty()) {
+ InsertOatFileLocked(oat_file); // Ignore return value.
}
}
}
return true;
}
+bool ClassTable::InsertOatFile(const OatFile* oat_file) {
+ WriterMutexLock mu(Thread::Current(), lock_);
+ return InsertOatFileLocked(oat_file);
+}
+
+bool ClassTable::InsertOatFileLocked(const OatFile* oat_file) {
+ if (ContainsElement(oat_files_, oat_file)) {
+ return false;
+ }
+ oat_files_.push_back(oat_file);
+ return true;
+}
+
size_t ClassTable::WriteToMemory(uint8_t* ptr) const {
ReaderMutexLock mu(Thread::Current(), lock_);
ClassSet combined;
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 1344990..bc9eaf4 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -141,6 +141,11 @@
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Return true if we inserted the oat file, false if it already exists.
+ bool InsertOatFile(const OatFile* oat_file)
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Combines all of the tables into one class set.
size_t WriteToMemory(uint8_t* ptr) const
REQUIRES(!lock_)
@@ -168,6 +173,11 @@
private:
void InsertWithoutLocks(ObjPtr<mirror::Class> klass) NO_THREAD_SAFETY_ANALYSIS;
+ // Return true if we inserted the oat file, false if it already exists.
+ bool InsertOatFileLocked(const OatFile* oat_file)
+ REQUIRES(lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Lock to guard inserting and removing.
mutable ReaderWriterMutex lock_;
// We have a vector to help prevent dirty pages after the zygote forks by calling FreezeSnapshot.
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 4d47b83..d438418 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -75,6 +75,10 @@
!dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
mirror::ClassLoader* class_loader = caller->GetDeclaringClass()->GetClassLoader();
DCHECK(class_loader != nullptr); // We do not use .bss GC roots for boot image.
+ DCHECK(
+ !class_loader->GetClassTable()->InsertOatFile(dex_file->GetOatDexFile()->GetOatFile()))
+ << "Oat file with .bss GC roots was not registered in class table: "
+ << dex_file->GetOatDexFile()->GetOatFile()->GetLocation();
// Note that we emit the barrier before the compiled code stores the string as GC root.
// This is OK as there is no suspend point in between.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 18c4adf..ed16854 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -25,6 +25,8 @@
#include "base/systrace.h"
#include "base/time_utils.h"
#include "gc/accounting/heap_bitmap.h"
+#include "gc/gc_pause_listener.h"
+#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "thread-inl.h"
@@ -156,12 +158,22 @@
GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector)
: start_time_(NanoTime()), collector_(collector) {
- Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
+ Runtime* runtime = Runtime::Current();
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
+ GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->StartPause();
+ }
}
GarbageCollector::ScopedPause::~ScopedPause() {
collector_->RegisterPause(NanoTime() - start_time_);
- Runtime::Current()->GetThreadList()->ResumeAll();
+ Runtime* runtime = Runtime::Current();
+ GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->EndPause();
+ }
+ runtime->GetThreadList()->ResumeAll();
}
// Returns the current GC iteration and assocated info.
diff --git a/runtime/gc/gc_pause_listener.h b/runtime/gc/gc_pause_listener.h
new file mode 100644
index 0000000..da35d2a
--- /dev/null
+++ b/runtime/gc/gc_pause_listener.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_GC_PAUSE_LISTENER_H_
+#define ART_RUNTIME_GC_GC_PAUSE_LISTENER_H_
+
+namespace art {
+namespace gc {
+
+class GcPauseListener {
+ public:
+ virtual ~GcPauseListener() {}
+
+ virtual void StartPause() = 0;
+ virtual void EndPause() = 0;
+};
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_GC_PAUSE_LISTENER_H_
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 918b8db..3b9abd2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -58,6 +58,7 @@
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "gc_pause_listener.h"
#include "heap-inl.h"
#include "image.h"
#include "intern_table.h"
@@ -4216,6 +4217,13 @@
}
}
+void Heap::SetGcPauseListener(GcPauseListener* l) {
+ gc_pause_listener_.StoreRelaxed(l);
+}
+
+void Heap::RemoveGcPauseListener() {
+ gc_pause_listener_.StoreRelaxed(nullptr);
+}
} // namespace gc
} // namespace art
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 6d37140..e8eb69e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -61,6 +61,7 @@
class AllocationListener;
class AllocRecordObjectMap;
+class GcPauseListener;
class ReferenceProcessor;
class TaskProcessor;
@@ -811,6 +812,16 @@
// reasons, we assume it stays valid when we read it (so that we don't require a lock).
void RemoveAllocationListener();
+ // Install a gc pause listener.
+ void SetGcPauseListener(GcPauseListener* l);
+ // Get the currently installed gc pause listener, or null.
+ GcPauseListener* GetGcPauseListener() {
+ return gc_pause_listener_.LoadAcquire();
+ }
+ // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
+ // reasons, we assume it stays valid when we read it (so that we don't require a lock).
+ void RemoveGcPauseListener();
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -1377,6 +1388,8 @@
// An installed allocation listener.
Atomic<AllocationListener*> alloc_listener_;
+ // An installed GC Pause listener.
+ Atomic<GcPauseListener*> gc_pause_listener_;
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
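
Since GcPauseListener is a pure interface, a short hedged sketch of a client may help: the callbacks run from GarbageCollector::ScopedPause while all threads are suspended, so they should presumably do as little work as possible, and the listener must stay alive while registered because Heap keeps only a raw pointer. The class below is illustrative, not part of this change.

    #include <cstdint>

    #include "base/time_utils.h"
    #include "gc/gc_pause_listener.h"

    // Illustrative listener that records the duration of the most recent GC pause.
    class PauseTimer : public art::gc::GcPauseListener {
     public:
      void StartPause() override { start_ns_ = art::NanoTime(); }
      void EndPause() override { last_pause_ns_ = art::NanoTime() - start_ns_; }
      uint64_t GetLastPauseNs() const { return last_pause_ns_; }

     private:
      uint64_t start_ns_ = 0u;
      uint64_t last_pause_ns_ = 0u;
    };

    // Registration sketch (heap obtained elsewhere, e.g. Runtime::Current()->GetHeap()):
    //   PauseTimer timer;
    //   heap->SetGcPauseListener(&timer);
    //   ... workload ...
    //   heap->RemoveGcPauseListener();  // The timer must outlive this call.
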
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index b71236b..72722dd 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -795,7 +795,7 @@
self, new_shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
if (!PerformArgumentConversions<is_range>(self, callsite_type, target_type,
shadow_frame, vregC, first_dest_reg,
- arg, new_shadow_frame, result)) {
+ arg, new_shadow_frame)) {
DCHECK(self->IsExceptionPending());
result->SetL(0);
return false;
diff --git a/runtime/method_handles-inl.h b/runtime/method_handles-inl.h
index 5f9824c..7a77bda 100644
--- a/runtime/method_handles-inl.h
+++ b/runtime/method_handles-inl.h
@@ -97,6 +97,70 @@
size_t arg_index_;
};
+REQUIRES_SHARED(Locks::mutator_lock_)
+bool ConvertJValue(Handle<mirror::Class> from,
+ Handle<mirror::Class> to,
+ const JValue& from_value,
+ JValue* to_value) {
+ const Primitive::Type from_type = from->GetPrimitiveType();
+ const Primitive::Type to_type = to->GetPrimitiveType();
+
+ // This method must be called only when the types don't match.
+ DCHECK(from.Get() != to.Get());
+
+ if ((from_type != Primitive::kPrimNot) && (to_type != Primitive::kPrimNot)) {
+ // Throws a ClassCastException if we're unable to convert a primitive value.
+ return ConvertPrimitiveValue(false, from_type, to_type, from_value, to_value);
+ } else if ((from_type == Primitive::kPrimNot) && (to_type == Primitive::kPrimNot)) {
+ // They're both reference types. If "from" is null, we can pass it
+ // through unchanged. If not, we must generate a cast exception if
+ // |to| is not assignable from the dynamic type of |ref|.
+ mirror::Object* const ref = from_value.GetL();
+ if (ref == nullptr || to->IsAssignableFrom(ref->GetClass())) {
+ to_value->SetL(ref);
+ return true;
+ } else {
+ ThrowClassCastException(to.Get(), ref->GetClass());
+ return false;
+ }
+ } else {
+ // Precisely one of the source or the destination are reference types.
+ // We must box or unbox.
+ if (to_type == Primitive::kPrimNot) {
+ // The target type is a reference, we must box.
+ Primitive::Type type;
+ // TODO(narayan): This is a CHECK for now. There might be a few corner cases
+ // here that we might not have handled yet. For example, if |to| is java/lang/Number;,
+ // we will need to box this "naturally".
+ CHECK(GetPrimitiveType(to.Get(), &type));
+ // First perform a primitive conversion to the unboxed equivalent of the target,
+ // if necessary. This should be for the rarer cases like (int->Long) etc.
+ if (UNLIKELY(from_type != type)) {
+ if (!ConvertPrimitiveValue(false, from_type, type, from_value, to_value)) {
+ return false;
+ }
+ } else {
+ *to_value = from_value;
+ }
+
+ // Then perform the actual boxing, and then set the reference.
+ ObjPtr<mirror::Object> boxed = BoxPrimitive(type, from_value);
+ to_value->SetL(boxed.Ptr());
+ return true;
+ } else {
+ // The target type is a primitive, we must unbox.
+ ObjPtr<mirror::Object> ref(from_value.GetL());
+
+ // Note that UnboxPrimitiveForResult already performs all of the type
+ // conversions that we want, based on |to|.
+ JValue unboxed_value;
+ return UnboxPrimitiveForResult(ref, to.Get(), to_value);
+ }
+ }
+
+ return true;
+}
+
template <bool is_range>
bool PerformArgumentConversions(Thread* self,
Handle<mirror::MethodType> callsite_type,
@@ -105,8 +169,7 @@
uint32_t first_src_reg,
uint32_t first_dest_reg,
const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- ShadowFrame* callee_frame,
- JValue* result) {
+ ShadowFrame* callee_frame) {
StackHandleScope<4> hs(self);
Handle<mirror::ObjectArray<mirror::Class>> from_types(hs.NewHandle(callsite_type->GetPTypes()));
Handle<mirror::ObjectArray<mirror::Class>> to_types(hs.NewHandle(callee_type->GetPTypes()));
@@ -114,7 +177,6 @@
const int32_t num_method_params = from_types->GetLength();
if (to_types->GetLength() != num_method_params) {
ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
- result->SetJ(0);
return false;
}
@@ -149,106 +211,33 @@
}
continue;
- } else if ((from_type != Primitive::kPrimNot) && (to_type != Primitive::kPrimNot)) {
- // They are both primitive types - we should perform any widening or
- // narrowing conversions as applicable.
+ } else {
JValue from_value;
JValue to_value;
if (Primitive::Is64BitType(from_type)) {
from_value.SetJ(caller_frame.GetVRegLong(input_args.NextPair()));
+ } else if (from_type == Primitive::kPrimNot) {
+ from_value.SetL(caller_frame.GetVRegReference(input_args.Next()));
} else {
from_value.SetI(caller_frame.GetVReg(input_args.Next()));
}
- // Throws a ClassCastException if we're unable to convert a primitive value.
- if (!ConvertPrimitiveValue(false, from_type, to_type, from_value, &to_value)) {
+ if (!ConvertJValue(from, to, from_value, &to_value)) {
DCHECK(self->IsExceptionPending());
- result->SetL(0);
return false;
}
if (Primitive::Is64BitType(to_type)) {
callee_frame->SetVRegLong(first_dest_reg + to_arg_index, to_value.GetJ());
to_arg_index += 2;
+ } else if (to_type == Primitive::kPrimNot) {
+ callee_frame->SetVRegReference(first_dest_reg + to_arg_index, to_value.GetL());
+ ++to_arg_index;
} else {
callee_frame->SetVReg(first_dest_reg + to_arg_index, to_value.GetI());
++to_arg_index;
}
- } else if ((from_type == Primitive::kPrimNot) && (to_type == Primitive::kPrimNot)) {
- // They're both reference types. If "from" is null, we can pass it
- // through unchanged. If not, we must generate a cast exception if
- // |to| is not assignable from the dynamic type of |ref|.
- const size_t next_arg_reg = input_args.Next();
- mirror::Object* const ref = caller_frame.GetVRegReference(next_arg_reg);
- if (ref == nullptr || to->IsAssignableFrom(ref->GetClass())) {
- interpreter::AssignRegister(callee_frame,
- caller_frame,
- first_dest_reg + to_arg_index,
- next_arg_reg);
- ++to_arg_index;
- } else {
- ThrowClassCastException(to.Get(), ref->GetClass());
- result->SetL(0);
- return false;
- }
- } else {
- // Precisely one of the source or the destination are reference types.
- // We must box or unbox.
- if (to_type == Primitive::kPrimNot) {
- // The target type is a reference, we must box.
- Primitive::Type type;
- // TODO(narayan): This is a CHECK for now. There might be a few corner cases
- // here that we might not have handled yet. For exmple, if |to| is java/lang/Number;,
- // we will need to box this "naturally".
- CHECK(GetPrimitiveType(to.Get(), &type));
-
- JValue from_value;
- JValue to_value;
-
- if (Primitive::Is64BitType(from_type)) {
- from_value.SetJ(caller_frame.GetVRegLong(input_args.NextPair()));
- } else {
- from_value.SetI(caller_frame.GetVReg(input_args.Next()));
- }
-
- // First perform a primitive conversion to the unboxed equivalent of the target,
- // if necessary. This should be for the rarer cases like (int->Long) etc.
- if (UNLIKELY(from_type != type)) {
- if (!ConvertPrimitiveValue(false, from_type, type, from_value, &to_value)) {
- DCHECK(self->IsExceptionPending());
- result->SetL(0);
- return false;
- }
- } else {
- to_value = from_value;
- }
-
- // Then perform the actual boxing, and then set the reference.
- ObjPtr<mirror::Object> boxed = BoxPrimitive(type, to_value);
- callee_frame->SetVRegReference(first_dest_reg + to_arg_index, boxed.Ptr());
- ++to_arg_index;
- } else {
- // The target type is a primitive, we must unbox.
- ObjPtr<mirror::Object> ref(caller_frame.GetVRegReference(input_args.Next()));
-
- // Note that UnboxPrimitiveForResult already performs all of the type
- // conversions that we want, based on |to|.
- JValue unboxed_value;
- if (!UnboxPrimitiveForResult(ref, to.Get(), &unboxed_value)) {
- DCHECK(self->IsExceptionPending());
- result->SetL(0);
- return false;
- }
-
- if (Primitive::Is64BitType(to_type)) {
- callee_frame->SetVRegLong(first_dest_reg + to_arg_index, unboxed_value.GetJ());
- to_arg_index += 2;
- } else {
- callee_frame->SetVReg(first_dest_reg + to_arg_index, unboxed_value.GetI());
- ++to_arg_index;
- }
- }
}
}
diff --git a/runtime/method_handles.h b/runtime/method_handles.h
index a36b66d..26a29b3 100644
--- a/runtime/method_handles.h
+++ b/runtime/method_handles.h
@@ -55,6 +55,14 @@
return handle_kind <= kLastInvokeKind;
}
+// Performs a single argument conversion from type |from| to a distinct
+// type |to|. Returns true on success, false otherwise.
+REQUIRES_SHARED(Locks::mutator_lock_)
+bool ConvertJValue(Handle<mirror::Class> from,
+ Handle<mirror::Class> to,
+ const JValue& from_value,
+ JValue* to_value) ALWAYS_INLINE;
+
// Perform argument conversions between |callsite_type| (the type of the
// incoming arguments) and |callee_type| (the type of the method being
// invoked). These include widening and narrowing conversions as well as
@@ -68,8 +76,7 @@
uint32_t first_src_reg,
uint32_t first_dest_reg,
const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- ShadowFrame* callee_frame,
- JValue* result);
+ ShadowFrame* callee_frame);
} // namespace art
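The new ConvertJValue helper centralizes the per-argument logic that PerformArgumentConversions previously open-coded: widen or narrow when both types are primitive, check assignability when both are references, and box or unbox otherwise. A minimal standalone sketch of that decision tree follows; it is plain C++ with illustrative names (Kind, Value, Convert), not ART code, and it models only a single widening rule.

#include <cassert>
#include <cstdint>

enum class Kind { kInt, kLong, kReference };

struct Value {
  Kind kind;
  int64_t bits;  // primitive payload; also carried through the box/unbox cases
};

// Returns false where the runtime would throw (e.g. a disallowed narrowing).
bool Convert(Kind from, Kind to, const Value& in, Value* out) {
  if (from != Kind::kReference && to != Kind::kReference) {
    // Both primitive: perform a primitive conversion; only int -> long is modeled.
    if (from == to || (from == Kind::kInt && to == Kind::kLong)) {
      *out = Value{to, in.bits};
      return true;
    }
    return false;
  }
  if (from == Kind::kReference && to == Kind::kReference) {
    // Both references: pass through (the assignability check is elided here).
    *out = in;
    return true;
  }
  if (to == Kind::kReference) {
    // Primitive -> reference: convert to the unboxed target first, then "box".
    *out = Value{Kind::kReference, in.bits};
    return true;
  }
  // Reference -> primitive: "unbox" into the requested primitive kind.
  *out = Value{to, in.bits};
  return true;
}

int main() {
  Value out{};
  assert(Convert(Kind::kInt, Kind::kLong, Value{Kind::kInt, 42}, &out));
  assert(out.kind == Kind::kLong && out.bits == 42);
  assert(!Convert(Kind::kLong, Kind::kInt, Value{Kind::kLong, 1}, &out));  // narrowing rejected
  return 0;
}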
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index b99bcb5..63a0e14 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -301,6 +301,10 @@
// error and sets found to false.
static OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found);
+ VdexFile* GetVdexFile() const {
+ return vdex_.get();
+ }
+
protected:
OatFile(const std::string& filename, bool executable);
diff --git a/runtime/openjdkjvmti/events.cc b/runtime/openjdkjvmti/events.cc
index 59e01ea..12692a1 100644
--- a/runtime/openjdkjvmti/events.cc
+++ b/runtime/openjdkjvmti/events.cc
@@ -34,6 +34,8 @@
#include "art_jvmti.h"
#include "base/logging.h"
#include "gc/allocation_listener.h"
+#include "gc/gc_pause_listener.h"
+#include "gc/heap.h"
#include "instrumentation.h"
#include "jni_env_ext-inl.h"
#include "mirror/class.h"
@@ -131,7 +133,7 @@
explicit JvmtiAllocationListener(EventHandler* handler) : handler_(handler) {}
void ObjectAllocated(art::Thread* self, art::ObjPtr<art::mirror::Object>* obj, size_t byte_count)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
DCHECK_EQ(self, art::Thread::Current());
if (handler_->IsEventEnabledAnywhere(JVMTI_EVENT_VM_OBJECT_ALLOC)) {
@@ -185,11 +187,74 @@
}
}
+// Report GC pauses (see spec) as GARBAGE_COLLECTION_START and GARBAGE_COLLECTION_END.
+class JvmtiGcPauseListener : public art::gc::GcPauseListener {
+ public:
+ explicit JvmtiGcPauseListener(EventHandler* handler)
+ : handler_(handler),
+ start_enabled_(false),
+ finish_enabled_(false) {}
+
+ void StartPause() OVERRIDE {
+ handler_->DispatchEvent(nullptr, JVMTI_EVENT_GARBAGE_COLLECTION_START);
+ }
+
+ void EndPause() OVERRIDE {
+ handler_->DispatchEvent(nullptr, JVMTI_EVENT_GARBAGE_COLLECTION_FINISH);
+ }
+
+ bool IsEnabled() {
+ return start_enabled_ || finish_enabled_;
+ }
+
+ void SetStartEnabled(bool e) {
+ start_enabled_ = e;
+ }
+
+ void SetFinishEnabled(bool e) {
+ finish_enabled_ = e;
+ }
+
+ private:
+ EventHandler* handler_;
+ bool start_enabled_;
+ bool finish_enabled_;
+};
+
+static void SetupGcPauseTracking(JvmtiGcPauseListener* listener, jvmtiEvent event, bool enable) {
+ bool old_state = listener->IsEnabled();
+
+ if (event == JVMTI_EVENT_GARBAGE_COLLECTION_START) {
+ listener->SetStartEnabled(enable);
+ } else {
+ listener->SetFinishEnabled(enable);
+ }
+
+ bool new_state = listener->IsEnabled();
+
+ if (old_state != new_state) {
+ if (new_state) {
+ art::Runtime::Current()->GetHeap()->SetGcPauseListener(listener);
+ } else {
+ art::Runtime::Current()->GetHeap()->RemoveGcPauseListener();
+ }
+ }
+}
+
// Handle special work for the given event type, if necessary.
void EventHandler::HandleEventType(jvmtiEvent event, bool enable) {
- if (event == JVMTI_EVENT_VM_OBJECT_ALLOC) {
- SetupObjectAllocationTracking(alloc_listener_.get(), enable);
- return;
+ switch (event) {
+ case JVMTI_EVENT_VM_OBJECT_ALLOC:
+ SetupObjectAllocationTracking(alloc_listener_.get(), enable);
+ return;
+
+ case JVMTI_EVENT_GARBAGE_COLLECTION_START:
+ case JVMTI_EVENT_GARBAGE_COLLECTION_FINISH:
+ SetupGcPauseTracking(gc_pause_listener_.get(), event, enable);
+ return;
+
+ default:
+ break;
}
}
@@ -253,6 +318,7 @@
EventHandler::EventHandler() {
alloc_listener_.reset(new JvmtiAllocationListener(this));
+ gc_pause_listener_.reset(new JvmtiGcPauseListener(this));
}
EventHandler::~EventHandler() {
diff --git a/runtime/openjdkjvmti/events.h b/runtime/openjdkjvmti/events.h
index 3212b12..07d6bfd 100644
--- a/runtime/openjdkjvmti/events.h
+++ b/runtime/openjdkjvmti/events.h
@@ -28,6 +28,7 @@
struct ArtJvmTiEnv;
class JvmtiAllocationListener;
+class JvmtiGcPauseListener;
struct EventMask {
static constexpr size_t kEventsSize = JVMTI_MAX_EVENT_TYPE_VAL - JVMTI_MIN_EVENT_TYPE_VAL + 1;
@@ -103,6 +104,7 @@
EventMask global_mask;
std::unique_ptr<JvmtiAllocationListener> alloc_listener_;
+ std::unique_ptr<JvmtiGcPauseListener> gc_pause_listener_;
};
} // namespace openjdkjvmti
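On the JVMTI side, both garbage-collection events are funneled through one GcPauseListener: the listener remembers which of START/FINISH is enabled and is installed on (or removed from) the heap only when the combined enabled state flips. A small standalone sketch of that edge-triggered registration, using illustrative stand-ins for the heap and the event ids rather than the real Heap or JVMTI API:

#include <cassert>

struct Listener {
  bool start_enabled = false;
  bool finish_enabled = false;
  bool IsEnabled() const { return start_enabled || finish_enabled; }
};

struct Heap {
  Listener* installed = nullptr;  // stands in for SetGcPauseListener/RemoveGcPauseListener
};

enum class Event { kGcStart, kGcFinish };

void SetupGcPauseTracking(Heap* heap, Listener* l, Event event, bool enable) {
  const bool old_state = l->IsEnabled();
  if (event == Event::kGcStart) {
    l->start_enabled = enable;
  } else {
    l->finish_enabled = enable;
  }
  const bool new_state = l->IsEnabled();
  if (old_state != new_state) {  // install or remove only on the edge
    heap->installed = new_state ? l : nullptr;
  }
}

int main() {
  Heap heap;
  Listener l;
  SetupGcPauseTracking(&heap, &l, Event::kGcStart, true);
  assert(heap.installed == &l);
  SetupGcPauseTracking(&heap, &l, Event::kGcStart, false);  // finish is also disabled
  assert(heap.installed == nullptr);
  SetupGcPauseTracking(&heap, &l, Event::kGcFinish, true);
  assert(heap.installed == &l);
  return 0;
}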
diff --git a/runtime/openjdkjvmti/object_tagging.cc b/runtime/openjdkjvmti/object_tagging.cc
index 9ea14a2..f16b023 100644
--- a/runtime/openjdkjvmti/object_tagging.cc
+++ b/runtime/openjdkjvmti/object_tagging.cc
@@ -46,7 +46,9 @@
namespace openjdkjvmti {
-void ObjectTagTable::UpdateTable() {
+void ObjectTagTable::UpdateTableWithReadBarrier() {
+ update_since_last_sweep_ = true;
+
auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -57,7 +59,11 @@
}
bool ObjectTagTable::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, jlong* result) {
- UpdateTable();
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+ UpdateTableWithReadBarrier();
return GetTagLocked(self, obj, result);
}
@@ -84,9 +90,14 @@
return true;
}
- if (art::kUseReadBarrier && self->GetIsGcMarking()) {
+ if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+
// Update the table.
- UpdateTable();
+ UpdateTableWithReadBarrier();
// And try again.
return RemoveLocked(self, obj, tag);
@@ -111,9 +122,14 @@
return true;
}
- if (art::kUseReadBarrier && self->GetIsGcMarking()) {
+ if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+
// Update the table.
- UpdateTable();
+ UpdateTableWithReadBarrier();
// And try again.
return SetLocked(self, obj, new_tag);
@@ -131,6 +147,14 @@
} else {
SweepImpl<false>(visitor);
}
+
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. We explicitly update the table then
+ // to ensure we compare against to-space pointers. But we want to do this only once. Once
+ // sweeping is done, we know all objects are to-space pointers until the next GC cycle,
+ // so we re-enable the explicit update for the next marking.
+ update_since_last_sweep_ = false;
}
template <bool kHandleNull>
diff --git a/runtime/openjdkjvmti/object_tagging.h b/runtime/openjdkjvmti/object_tagging.h
index 90c40f6..579dc22 100644
--- a/runtime/openjdkjvmti/object_tagging.h
+++ b/runtime/openjdkjvmti/object_tagging.h
@@ -34,6 +34,7 @@
public:
explicit ObjectTagTable(EventHandler* event_handler)
: art::gc::SystemWeakHolder(art::LockLevel::kAllocTrackerLock),
+ update_since_last_sweep_(false),
event_handler_(event_handler) {
}
@@ -83,7 +84,8 @@
if (art::kUseReadBarrier &&
self != nullptr &&
- self->GetIsGcMarking()) {
+ self->GetIsGcMarking() &&
+ !update_since_last_sweep_) {
return GetTagSlowPath(self, obj, result);
}
@@ -96,7 +98,9 @@
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
- void UpdateTable()
+ // Update the table by doing read barriers on each element, ensuring that to-space pointers
+ // are stored.
+ void UpdateTableWithReadBarrier()
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
@@ -138,6 +142,8 @@
EqGcRoot> tagged_objects_
GUARDED_BY(allow_disallow_lock_)
GUARDED_BY(art::Locks::mutator_lock_);
+ // To avoid repeatedly scanning the whole table, remember if we did that since the last sweep.
+ bool update_since_last_sweep_;
EventHandler* event_handler_;
};
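The object tag table now follows a "refresh at most once per GC cycle" protocol: during concurrent marking, the first slow-path lookup (or Remove/Set retry) runs every entry through a read barrier so the table holds to-space pointers, and Sweep() clears the flag so the next cycle does it again. A self-contained sketch of that protocol, with a forwarding map standing in for the read barrier; the types and names are illustrative, not ART code.

#include <cassert>
#include <map>

struct TagTable {
  std::map<int, long> tags;       // key stands in for an object address
  std::map<int, int> forwarding;  // "from-space" address -> "to-space" address
  bool gc_marking = false;
  bool update_since_last_sweep = false;

  void UpdateTableWithReadBarrier() {
    update_since_last_sweep = true;
    std::map<int, long> updated;
    for (const auto& entry : tags) {
      auto it = forwarding.find(entry.first);
      updated[it == forwarding.end() ? entry.first : it->second] = entry.second;
    }
    tags = std::move(updated);
  }

  bool GetTag(int obj, long* result) {
    auto it = tags.find(obj);
    if (it != tags.end()) {
      *result = it->second;
      return true;
    }
    if (gc_marking && !update_since_last_sweep) {
      UpdateTableWithReadBarrier();  // refresh the whole table, but only once per cycle
      return GetTag(obj, result);
    }
    return false;
  }

  void Sweep() { update_since_last_sweep = false; }  // re-arm for the next marking phase
};

int main() {
  TagTable t;
  t.tags[1] = 42;       // tagged through what becomes a from-space pointer
  t.gc_marking = true;
  t.forwarding[1] = 7;  // the object moved; 7 is its to-space address
  long tag = 0;
  assert(t.GetTag(7, &tag) && tag == 42);  // first miss triggers the one-time update
  assert(t.update_since_last_sweep);
  t.Sweep();
  assert(!t.update_since_last_sweep);
  return 0;
}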
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 5557d5f..6ed54f7 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1124,370 +1124,6 @@
return PrettyDescriptor(Primitive::Descriptor(type));
}
-static void DumpMethodCFGImpl(const DexFile* dex_file,
- uint32_t dex_method_idx,
- const DexFile::CodeItem* code_item,
- std::ostream& os) {
- os << "digraph {\n";
- os << " # /* " << dex_file->PrettyMethod(dex_method_idx, true) << " */\n";
-
- std::set<uint32_t> dex_pc_is_branch_target;
- {
- // Go and populate.
- const Instruction* inst = Instruction::At(code_item->insns_);
- for (uint32_t dex_pc = 0;
- dex_pc < code_item->insns_size_in_code_units_;
- dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
- if (inst->IsBranch()) {
- dex_pc_is_branch_target.insert(dex_pc + inst->GetTargetOffset());
- } else if (inst->IsSwitch()) {
- const uint16_t* insns = code_item->insns_ + dex_pc;
- int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
- const uint16_t* switch_insns = insns + switch_offset;
- uint32_t switch_count = switch_insns[1];
- int32_t targets_offset;
- if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
- /* 0=sig, 1=count, 2/3=firstKey */
- targets_offset = 4;
- } else {
- /* 0=sig, 1=count, 2..count*2 = keys */
- targets_offset = 2 + 2 * switch_count;
- }
- for (uint32_t targ = 0; targ < switch_count; targ++) {
- int32_t offset =
- static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
- static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
- dex_pc_is_branch_target.insert(dex_pc + offset);
- }
- }
- }
- }
-
- // Create nodes for "basic blocks."
- std::map<uint32_t, uint32_t> dex_pc_to_node_id; // This only has entries for block starts.
- std::map<uint32_t, uint32_t> dex_pc_to_incl_id; // This has entries for all dex pcs.
-
- {
- const Instruction* inst = Instruction::At(code_item->insns_);
- bool first_in_block = true;
- bool force_new_block = false;
- for (uint32_t dex_pc = 0;
- dex_pc < code_item->insns_size_in_code_units_;
- dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
- if (dex_pc == 0 ||
- (dex_pc_is_branch_target.find(dex_pc) != dex_pc_is_branch_target.end()) ||
- force_new_block) {
- uint32_t id = dex_pc_to_node_id.size();
- if (id > 0) {
- // End last node.
- os << "}\"];\n";
- }
- // Start next node.
- os << " node" << id << " [shape=record,label=\"{";
- dex_pc_to_node_id.insert(std::make_pair(dex_pc, id));
- first_in_block = true;
- force_new_block = false;
- }
-
- // Register instruction.
- dex_pc_to_incl_id.insert(std::make_pair(dex_pc, dex_pc_to_node_id.size() - 1));
-
- // Print instruction.
- if (!first_in_block) {
- os << " | ";
- } else {
- first_in_block = false;
- }
-
- // Dump the instruction. Need to escape '"', '<', '>', '{' and '}'.
- os << "<" << "p" << dex_pc << ">";
- os << " 0x" << std::hex << dex_pc << std::dec << ": ";
- std::string inst_str = inst->DumpString(dex_file);
- size_t cur_start = 0; // It's OK to start at zero, instruction dumps don't start with chars
- // we need to escape.
- while (cur_start != std::string::npos) {
- size_t next_escape = inst_str.find_first_of("\"{}<>", cur_start + 1);
- if (next_escape == std::string::npos) {
- os << inst_str.substr(cur_start, inst_str.size() - cur_start);
- break;
- } else {
- os << inst_str.substr(cur_start, next_escape - cur_start);
- // Escape all necessary characters.
- while (next_escape < inst_str.size()) {
- char c = inst_str.at(next_escape);
- if (c == '"' || c == '{' || c == '}' || c == '<' || c == '>') {
- os << '\\' << c;
- } else {
- break;
- }
- next_escape++;
- }
- if (next_escape >= inst_str.size()) {
- next_escape = std::string::npos;
- }
- cur_start = next_escape;
- }
- }
-
- // Force a new block for some fall-throughs and some instructions that terminate the "local"
- // control flow.
- force_new_block = inst->IsSwitch() || inst->IsBasicBlockEnd();
- }
- // Close last node.
- if (dex_pc_to_node_id.size() > 0) {
- os << "}\"];\n";
- }
- }
-
- // Create edges between them.
- {
- std::ostringstream regular_edges;
- std::ostringstream taken_edges;
- std::ostringstream exception_edges;
-
- // Common set of exception edges.
- std::set<uint32_t> exception_targets;
-
- // These blocks (given by the first dex pc) need exception per dex-pc handling in a second
- // pass. In the first pass we try and see whether we can use a common set of edges.
- std::set<uint32_t> blocks_with_detailed_exceptions;
-
- {
- uint32_t last_node_id = std::numeric_limits<uint32_t>::max();
- uint32_t old_dex_pc = 0;
- uint32_t block_start_dex_pc = std::numeric_limits<uint32_t>::max();
- const Instruction* inst = Instruction::At(code_item->insns_);
- for (uint32_t dex_pc = 0;
- dex_pc < code_item->insns_size_in_code_units_;
- old_dex_pc = dex_pc, dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
- {
- auto it = dex_pc_to_node_id.find(dex_pc);
- if (it != dex_pc_to_node_id.end()) {
- if (!exception_targets.empty()) {
- // It seems the last block had common exception handlers. Add the exception edges now.
- uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
- for (uint32_t handler_pc : exception_targets) {
- auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
- if (node_id_it != dex_pc_to_incl_id.end()) {
- exception_edges << " node" << node_id
- << " -> node" << node_id_it->second << ":p" << handler_pc
- << ";\n";
- }
- }
- exception_targets.clear();
- }
-
- block_start_dex_pc = dex_pc;
-
- // Seems to be a fall-through, connect to last_node_id. May be spurious edges for things
- // like switch data.
- uint32_t old_last = last_node_id;
- last_node_id = it->second;
- if (old_last != std::numeric_limits<uint32_t>::max()) {
- regular_edges << " node" << old_last << ":p" << old_dex_pc
- << " -> node" << last_node_id << ":p" << dex_pc
- << ";\n";
- }
- }
-
- // Look at the exceptions of the first entry.
- CatchHandlerIterator catch_it(*code_item, dex_pc);
- for (; catch_it.HasNext(); catch_it.Next()) {
- exception_targets.insert(catch_it.GetHandlerAddress());
- }
- }
-
- // Handle instruction.
-
- // Branch: something with at most two targets.
- if (inst->IsBranch()) {
- const int32_t offset = inst->GetTargetOffset();
- const bool conditional = !inst->IsUnconditional();
-
- auto target_it = dex_pc_to_node_id.find(dex_pc + offset);
- if (target_it != dex_pc_to_node_id.end()) {
- taken_edges << " node" << last_node_id << ":p" << dex_pc
- << " -> node" << target_it->second << ":p" << (dex_pc + offset)
- << ";\n";
- }
- if (!conditional) {
- // No fall-through.
- last_node_id = std::numeric_limits<uint32_t>::max();
- }
- } else if (inst->IsSwitch()) {
- // TODO: Iterate through all switch targets.
- const uint16_t* insns = code_item->insns_ + dex_pc;
- /* make sure the start of the switch is in range */
- int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
- /* offset to switch table is a relative branch-style offset */
- const uint16_t* switch_insns = insns + switch_offset;
- uint32_t switch_count = switch_insns[1];
- int32_t targets_offset;
- if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
- /* 0=sig, 1=count, 2/3=firstKey */
- targets_offset = 4;
- } else {
- /* 0=sig, 1=count, 2..count*2 = keys */
- targets_offset = 2 + 2 * switch_count;
- }
- /* make sure the end of the switch is in range */
- /* verify each switch target */
- for (uint32_t targ = 0; targ < switch_count; targ++) {
- int32_t offset =
- static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
- static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
- int32_t abs_offset = dex_pc + offset;
- auto target_it = dex_pc_to_node_id.find(abs_offset);
- if (target_it != dex_pc_to_node_id.end()) {
- // TODO: value label.
- taken_edges << " node" << last_node_id << ":p" << dex_pc
- << " -> node" << target_it->second << ":p" << (abs_offset)
- << ";\n";
- }
- }
- }
-
- // Exception edges. If this is not the first instruction in the block
- if (block_start_dex_pc != dex_pc) {
- std::set<uint32_t> current_handler_pcs;
- CatchHandlerIterator catch_it(*code_item, dex_pc);
- for (; catch_it.HasNext(); catch_it.Next()) {
- current_handler_pcs.insert(catch_it.GetHandlerAddress());
- }
- if (current_handler_pcs != exception_targets) {
- exception_targets.clear(); // Clear so we don't do something at the end.
- blocks_with_detailed_exceptions.insert(block_start_dex_pc);
- }
- }
-
- if (inst->IsReturn() ||
- (inst->Opcode() == Instruction::THROW) ||
- (inst->IsBranch() && inst->IsUnconditional())) {
- // No fall-through.
- last_node_id = std::numeric_limits<uint32_t>::max();
- }
- }
- // Finish up the last block, if it had common exceptions.
- if (!exception_targets.empty()) {
- // It seems the last block had common exception handlers. Add the exception edges now.
- uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
- for (uint32_t handler_pc : exception_targets) {
- auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
- if (node_id_it != dex_pc_to_incl_id.end()) {
- exception_edges << " node" << node_id
- << " -> node" << node_id_it->second << ":p" << handler_pc
- << ";\n";
- }
- }
- exception_targets.clear();
- }
- }
-
- // Second pass for detailed exception blocks.
- // TODO
- // Exception edges. If this is not the first instruction in the block
- for (uint32_t dex_pc : blocks_with_detailed_exceptions) {
- const Instruction* inst = Instruction::At(&code_item->insns_[dex_pc]);
- uint32_t this_node_id = dex_pc_to_incl_id.find(dex_pc)->second;
- while (true) {
- CatchHandlerIterator catch_it(*code_item, dex_pc);
- if (catch_it.HasNext()) {
- std::set<uint32_t> handled_targets;
- for (; catch_it.HasNext(); catch_it.Next()) {
- uint32_t handler_pc = catch_it.GetHandlerAddress();
- auto it = handled_targets.find(handler_pc);
- if (it == handled_targets.end()) {
- auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
- if (node_id_it != dex_pc_to_incl_id.end()) {
- exception_edges << " node" << this_node_id << ":p" << dex_pc
- << " -> node" << node_id_it->second << ":p" << handler_pc
- << ";\n";
- }
-
- // Mark as done.
- handled_targets.insert(handler_pc);
- }
- }
- }
- if (inst->IsBasicBlockEnd()) {
- break;
- }
-
- // Loop update. Have a break-out if the next instruction is a branch target and thus in
- // another block.
- dex_pc += inst->SizeInCodeUnits();
- if (dex_pc >= code_item->insns_size_in_code_units_) {
- break;
- }
- if (dex_pc_to_node_id.find(dex_pc) != dex_pc_to_node_id.end()) {
- break;
- }
- inst = inst->Next();
- }
- }
-
- // Write out the sub-graphs to make edges styled.
- os << "\n";
- os << " subgraph regular_edges {\n";
- os << " edge [color=\"#000000\",weight=.3,len=3];\n\n";
- os << " " << regular_edges.str() << "\n";
- os << " }\n\n";
-
- os << " subgraph taken_edges {\n";
- os << " edge [color=\"#00FF00\",weight=.3,len=3];\n\n";
- os << " " << taken_edges.str() << "\n";
- os << " }\n\n";
-
- os << " subgraph exception_edges {\n";
- os << " edge [color=\"#FF0000\",weight=.3,len=3];\n\n";
- os << " " << exception_edges.str() << "\n";
- os << " }\n\n";
- }
-
- os << "}\n";
-}
-
-void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os) {
- // This is painful, we need to find the code item. That means finding the class, and then
- // iterating the table.
- if (dex_method_idx >= dex_file->NumMethodIds()) {
- os << "Could not find method-idx.";
- return;
- }
- const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx);
-
- const DexFile::ClassDef* class_def = dex_file->FindClassDef(method_id.class_idx_);
- if (class_def == nullptr) {
- os << "Could not find class-def.";
- return;
- }
-
- const uint8_t* class_data = dex_file->GetClassData(*class_def);
- if (class_data == nullptr) {
- os << "No class data.";
- return;
- }
-
- ClassDataItemIterator it(*dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField() || it.HasNextInstanceField()) {
- it.Next();
- }
-
- // Find method, and dump it.
- while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
- uint32_t method_idx = it.GetMemberIndex();
- if (method_idx == dex_method_idx) {
- DumpMethodCFGImpl(dex_file, dex_method_idx, it.GetMethodCodeItem(), os);
- return;
- }
- it.Next();
- }
-
- // Otherwise complain.
- os << "Something went wrong, didn't find the method in the class data.";
-}
-
static void ParseStringAfterChar(const std::string& s,
char c,
std::string* parsed_value,
diff --git a/runtime/utils.h b/runtime/utils.h
index f96ddd7..94738d2 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -36,12 +36,8 @@
#include "obj_ptr.h"
#include "primitive.h"
-class BacktraceMap;
-
namespace art {
-class DexFile;
-
template <typename T>
bool ParseUint(const char *in, T* out) {
char* end;
@@ -274,8 +270,6 @@
return pointer_size == 4 || pointer_size == 8;
}
-void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
-
static inline const void* EntryPointToCodePointer(const void* entry_point) {
uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
// TODO: Make this Thumb2 specific. It is benign on other architectures as code is always at
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 5ca7684..c7875b5 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -70,10 +70,7 @@
}
inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
- // App image patching relies on having enough room for a forwarding pointer in the types array.
- // See FixupArtMethodArrayVisitor and ClassLinker::AddImageSpace.
- return std::max(ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements),
- static_cast<size_t>(pointer_size_));
+ return ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements);
}
inline size_t DexCacheArraysLayout::TypesAlignment() const {
@@ -85,8 +82,7 @@
}
inline size_t DexCacheArraysLayout::MethodsSize(size_t num_elements) const {
- // App image patching relies on having enough room for a forwarding pointer in the methods array.
- return std::max(ArraySize(pointer_size_, num_elements), static_cast<size_t>(pointer_size_));
+ return ArraySize(pointer_size_, num_elements);
}
inline size_t DexCacheArraysLayout::MethodsAlignment() const {
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 9fbf875..b3dab58 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -34,9 +34,12 @@
return (memcmp(version_, kVdexVersion, sizeof(kVdexVersion)) == 0);
}
-VdexFile::Header::Header(uint32_t dex_size, uint32_t verifier_deps_size)
+VdexFile::Header::Header(uint32_t dex_size,
+ uint32_t verifier_deps_size,
+ uint32_t quickening_info_size)
: dex_size_(dex_size),
- verifier_deps_size_(verifier_deps_size) {
+ verifier_deps_size_(verifier_deps_size),
+ quickening_info_size_(quickening_info_size) {
memcpy(magic_, kVdexMagic, sizeof(kVdexMagic));
memcpy(version_, kVdexVersion, sizeof(kVdexVersion));
DCHECK(IsMagicValid());
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 6bea153..28f9bb3 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -42,13 +42,14 @@
public:
struct Header {
public:
- Header(uint32_t dex_size, uint32_t verifier_deps_size);
+ Header(uint32_t dex_size, uint32_t verifier_deps_size, uint32_t quickening_info_size);
bool IsMagicValid() const;
bool IsVersionValid() const;
uint32_t GetDexSize() const { return dex_size_; }
uint32_t GetVerifierDepsSize() const { return verifier_deps_size_; }
+ uint32_t GetQuickeningInfoSize() const { return quickening_info_size_; }
private:
static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
@@ -58,6 +59,7 @@
uint8_t version_[4];
uint32_t dex_size_;
uint32_t verifier_deps_size_;
+ uint32_t quickening_info_size_;
};
static VdexFile* Open(const std::string& vdex_filename,
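With quickening_info_size_ added next to the dex and verifier-deps sizes, a reader can locate all three vdex sections from the header alone. A standalone sketch of that arithmetic, under the assumption that the sections follow the header contiguously in declaration order (dex, verifier deps, quickening info) and ignoring any alignment padding; this is not the real VdexFile API.

#include <cstdint>
#include <cstdio>

struct VdexSectionSizes {
  uint32_t dex_size;
  uint32_t verifier_deps_size;
  uint32_t quickening_info_size;
};

struct VdexLayout {
  uint32_t dex_offset;
  uint32_t verifier_deps_offset;
  uint32_t quickening_info_offset;
  uint32_t total_size;
};

VdexLayout ComputeLayout(const VdexSectionSizes& sizes, uint32_t header_size) {
  VdexLayout layout;
  layout.dex_offset = header_size;
  layout.verifier_deps_offset = layout.dex_offset + sizes.dex_size;
  layout.quickening_info_offset = layout.verifier_deps_offset + sizes.verifier_deps_size;
  layout.total_size = layout.quickening_info_offset + sizes.quickening_info_size;
  return layout;
}

int main() {
  // 4-byte magic + 4-byte version + three 4-byte sizes = 20 bytes, matching the struct above.
  VdexLayout layout = ComputeLayout(VdexSectionSizes{4096, 512, 256}, 20);
  std::printf("dex@%u deps@%u quickening@%u total=%u\n",
              layout.dex_offset, layout.verifier_deps_offset,
              layout.quickening_info_offset, layout.total_size);
  return 0;
}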
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index ff6ccd4..3c053cf 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -284,29 +284,28 @@
/// CHECK-START: java.lang.String Main.$noinline$getNonBootImageString() sharpening (before)
/// CHECK: LoadString load_kind:DexCacheViaMethod
- // FIXME: Disabled because of BSS root visiting issues. Bug: 32124939
- // CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_x86 (after)
- // CHECK-DAG: X86ComputeBaseMethodAddress
- // CHECK-DAG: LoadString load_kind:BssEntry
+ /// CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_x86 (after)
+ /// CHECK-DAG: X86ComputeBaseMethodAddress
+ /// CHECK-DAG: LoadString load_kind:BssEntry
- // CHECK-START-X86_64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-X86_64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-ARM: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-ARM: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-ARM64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-ARM64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_mips (after)
- // CHECK-DAG: MipsComputeBaseMethodAddress
- // CHECK-DAG: LoadString load_kind:BssEntry
+ /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_mips (after)
+ /// CHECK-DAG: MipsComputeBaseMethodAddress
+ /// CHECK-DAG: LoadString load_kind:BssEntry
public static String $noinline$getNonBootImageString() {
// Prevent inlining to avoid the string comparison being optimized away.
diff --git a/test/621-checker-new-instance/expected.txt b/test/621-checker-new-instance/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/621-checker-new-instance/expected.txt
diff --git a/test/621-checker-new-instance/info.txt b/test/621-checker-new-instance/info.txt
new file mode 100644
index 0000000..c27c45c
--- /dev/null
+++ b/test/621-checker-new-instance/info.txt
@@ -0,0 +1 @@
+Tests for removing a useless LoadClass before a NewInstance.
diff --git a/test/621-checker-new-instance/src/Main.java b/test/621-checker-new-instance/src/Main.java
new file mode 100644
index 0000000..68a4644
--- /dev/null
+++ b/test/621-checker-new-instance/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (before)
+ /// CHECK: LoadClass
+ /// CHECK: NewInstance
+
+ /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (after)
+ /// CHECK-NOT: LoadClass
+ /// CHECK: NewInstance
+ public static Object newObject() {
+ return new Object();
+ }
+
+ /// CHECK-START: java.lang.Object Main.newFinalizableMayThrow() prepare_for_register_allocation (after)
+ /// CHECK: LoadClass
+ /// CHECK: NewInstance
+ public static Object newFinalizableMayThrow() {
+ return $inline$newFinalizableMayThrow();
+ }
+
+ public static Object $inline$newFinalizableMayThrow() {
+ return new FinalizableMayThrow();
+ }
+
+ public static void main(String[] args) {
+ newFinalizableMayThrow();
+ newObject();
+ }
+}
+
+class FinalizableMayThrow {
+ // clinit may throw OOME.
+ static Object o = new Object();
+ static String s;
+ public void finalize() {
+ s = "Test";
+ }
+}
diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc
index e752bcb..9ceefcb 100644
--- a/test/907-get-loaded-classes/get_loaded_classes.cc
+++ b/test/907-get-loaded-classes/get_loaded_classes.cc
@@ -77,5 +77,16 @@
return ret;
}
+// OnLoad: only caches the jvmti env; nothing else is set up here.
+jint OnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ return 0;
+}
+
} // namespace Test907GetLoadedClasses
} // namespace art
diff --git a/test/907-get-loaded-classes/run b/test/907-get-loaded-classes/run
index 3e135a3..3f5a059 100755
--- a/test/907-get-loaded-classes/run
+++ b/test/907-get-loaded-classes/run
@@ -37,7 +37,7 @@
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=906-iterate-heap,${arg} \
+ --runtime-option -agentpath:${agent}=907-get-loaded-classes,${arg} \
--android-runtime-option -Xplugin:${plugin} \
${other_args} \
--args ${lib}
diff --git a/test/908-gc-start-finish/build b/test/908-gc-start-finish/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/908-gc-start-finish/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/908-gc-start-finish/expected.txt b/test/908-gc-start-finish/expected.txt
new file mode 100644
index 0000000..45f89dc
--- /dev/null
+++ b/test/908-gc-start-finish/expected.txt
@@ -0,0 +1,12 @@
+---
+true true
+---
+true true
+---
+true true
+---
+false false
+---
+false false
+---
+false false
diff --git a/test/908-gc-start-finish/gc_callbacks.cc b/test/908-gc-start-finish/gc_callbacks.cc
new file mode 100644
index 0000000..d546513
--- /dev/null
+++ b/test/908-gc-start-finish/gc_callbacks.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc_callbacks.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test908GcStartFinish {
+
+static size_t starts = 0;
+static size_t finishes = 0;
+
+static void JNICALL GarbageCollectionFinish(jvmtiEnv* ti_env ATTRIBUTE_UNUSED) {
+ finishes++;
+}
+
+static void JNICALL GarbageCollectionStart(jvmtiEnv* ti_env ATTRIBUTE_UNUSED) {
+ starts++;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_setupGcCallback(
+ JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED) {
+ jvmtiEventCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
+ callbacks.GarbageCollectionFinish = GarbageCollectionFinish;
+ callbacks.GarbageCollectionStart = GarbageCollectionStart;
+
+ jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(ret, &err);
+ printf("Error setting callbacks: %s\n", err);
+ }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_enableGcTracking(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED,
+ jboolean enable) {
+ jvmtiError ret = jvmti_env->SetEventNotificationMode(
+ enable ? JVMTI_ENABLE : JVMTI_DISABLE,
+ JVMTI_EVENT_GARBAGE_COLLECTION_START,
+ nullptr);
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(ret, &err);
+ printf("Error enabling/disabling gc callbacks: %s\n", err);
+ }
+ ret = jvmti_env->SetEventNotificationMode(
+ enable ? JVMTI_ENABLE : JVMTI_DISABLE,
+ JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
+ nullptr);
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(ret, &err);
+ printf("Error enabling/disabling gc callbacks: %s\n", err);
+ }
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_getGcStarts(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ jint result = static_cast<jint>(starts);
+ starts = 0;
+ return result;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_getGcFinishes(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ jint result = static_cast<jint>(finishes);
+ finishes = 0;
+ return result;
+}
+
+// OnLoad: only caches the jvmti env; the GC callbacks are registered later via setupGcCallback.
+jint OnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ return 0;
+}
+
+} // namespace Test908GcStartFinish
+} // namespace art
diff --git a/test/908-gc-start-finish/gc_callbacks.h b/test/908-gc-start-finish/gc_callbacks.h
new file mode 100644
index 0000000..177a4eb
--- /dev/null
+++ b/test/908-gc-start-finish/gc_callbacks.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
+#define ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test908GcStartFinish {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test908GcStartFinish
+} // namespace art
+
+#endif // ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
diff --git a/test/908-gc-start-finish/info.txt b/test/908-gc-start-finish/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/908-gc-start-finish/info.txt
@@ -0,0 +1 @@
+Tests the JVMTI garbage collection start/finish callbacks.
diff --git a/test/908-gc-start-finish/run b/test/908-gc-start-finish/run
new file mode 100755
index 0000000..2fc35f0
--- /dev/null
+++ b/test/908-gc-start-finish/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+else
+ other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=908-gc-start-finish,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/908-gc-start-finish/src/Main.java b/test/908-gc-start-finish/src/Main.java
new file mode 100644
index 0000000..2be0eea
--- /dev/null
+++ b/test/908-gc-start-finish/src/Main.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ // Use a list to ensure objects must be allocated.
+ ArrayList<Object> l = new ArrayList<>(100);
+
+ setupGcCallback();
+
+ enableGcTracking(true);
+ run(l);
+
+ enableGcTracking(false);
+ run(l);
+ }
+
+ private static void run(ArrayList<Object> l) {
+ allocate(l, 1);
+ l.clear();
+
+ Runtime.getRuntime().gc();
+
+ printStats();
+
+    // Note: the output only records whether any start/finish events were seen, so it does not
+    // depend on how many GCs actually run or on the heap layout (which could be unstable).
+ for (int i = 10; i <= 1000; i *= 10) {
+ allocate(l, i);
+ }
+ l.clear();
+
+ Runtime.getRuntime().gc();
+
+ printStats();
+
+ Runtime.getRuntime().gc();
+
+ printStats();
+ }
+
+ private static void allocate(ArrayList<Object> l, long tag) {
+ Object obj = new Object();
+ l.add(obj);
+ }
+
+ private static void printStats() {
+ System.out.println("---");
+ int s = getGcStarts();
+ int f = getGcFinishes();
+ System.out.println((s > 0) + " " + (f > 0));
+ }
+
+ private static native void setupGcCallback();
+ private static native void enableGcTracking(boolean enable);
+ private static native int getGcStarts();
+ private static native int getGcFinishes();
+}
diff --git a/test/Android.bp b/test/Android.bp
index c303616..aaa1343 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -251,6 +251,7 @@
"905-object-free/tracking_free.cc",
"906-iterate-heap/iterate_heap.cc",
"907-get-loaded-classes/get_loaded_classes.cc",
+ "908-gc-start-finish/gc_callbacks.cc",
],
shared_libs: [
"libbase",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index c99510a..d962472 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -263,17 +263,20 @@
# 147-stripped-dex-fallback isn't supported on device because --strip-dex
# requires the zip command.
# 569-checker-pattern-replacement tests behaviour present only on host.
-# 90{2,3,4,5,6,7} are not supported in current form due to linker
-# restrictions. See b/31681198
TEST_ART_BROKEN_TARGET_TESTS := \
147-stripped-dex-fallback \
- 569-checker-pattern-replacement \
+ 569-checker-pattern-replacement
+
+# These 9** tests are not supported in current form due to linker
+# restrictions. See b/31681198
+TEST_ART_BROKEN_TARGET_TESTS += \
902-hello-transformation \
903-hello-tagging \
904-object-allocation \
905-object-free \
906-iterate-heap \
907-get-loaded-classes \
+ 908-gc-start-finish \
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index c412636..79b41ec 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -30,6 +30,8 @@
#include "904-object-allocation/tracking.h"
#include "905-object-free/tracking_free.h"
#include "906-iterate-heap/iterate_heap.h"
+#include "907-get-loaded-classes/get_loaded_classes.h"
+#include "908-gc-start-finish/gc_callbacks.h"
namespace art {
@@ -52,6 +54,8 @@
{ "904-object-allocation", Test904ObjectAllocation::OnLoad, nullptr },
{ "905-object-free", Test905ObjectFree::OnLoad, nullptr },
{ "906-iterate-heap", Test906IterateHeap::OnLoad, nullptr },
+ { "907-get-loaded-classes", Test907GetLoadedClasses::OnLoad, nullptr },
+ { "908-gc-start-finish", Test908GcStartFinish::OnLoad, nullptr },
};
static AgentLib* FindAgent(char* name) {