Merge "ART: Refactor imgdiag internals"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index f67da3f..8309bd8 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -195,7 +195,6 @@
runtime/entrypoints/math_entrypoints_test.cc \
runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc \
runtime/entrypoints_order_test.cc \
- runtime/exception_test.cc \
runtime/gc/accounting/card_table_test.cc \
runtime/gc/accounting/mod_union_table_test.cc \
runtime/gc/accounting/space_bitmap_test.cc \
@@ -251,6 +250,7 @@
compiler/driver/compiled_method_storage_test.cc \
compiler/driver/compiler_driver_test.cc \
compiler/elf_writer_test.cc \
+ compiler/exception_test.cc \
compiler/image_test.cc \
compiler/jni/jni_compiler_test.cc \
compiler/linker/multi_oat_relative_patcher_test.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 6483ef6..f75a252 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -59,36 +59,20 @@
ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable();
uint32_t vmap_table_offset = vmap_table.empty() ? 0u
: sizeof(OatQuickMethodHeader) + vmap_table.size();
- ArrayRef<const uint8_t> mapping_table = compiled_method->GetMappingTable();
- bool mapping_table_used = !mapping_table.empty();
- size_t mapping_table_size = mapping_table.size();
- uint32_t mapping_table_offset = !mapping_table_used ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size;
- ArrayRef<const uint8_t> gc_map = compiled_method->GetGcMap();
- bool gc_map_used = !gc_map.empty();
- size_t gc_map_size = gc_map.size();
- uint32_t gc_map_offset = !gc_map_used ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size + gc_map_size;
- OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset, gc_map_offset,
+ OatQuickMethodHeader method_header(vmap_table_offset,
compiled_method->GetFrameSizeInBytes(),
compiled_method->GetCoreSpillMask(),
- compiled_method->GetFpSpillMask(), code_size);
+ compiled_method->GetFpSpillMask(),
+ code_size);
header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet());
- const size_t size =
- gc_map_size + mapping_table_size + vmap_table.size() + sizeof(method_header) + code_size;
+ const size_t size = vmap_table.size() + sizeof(method_header) + code_size;
chunk->reserve(size + max_padding);
chunk->resize(sizeof(method_header));
memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
- if (mapping_table_used) {
- chunk->insert(chunk->begin(), mapping_table.begin(), mapping_table.end());
- }
- if (gc_map_used) {
- chunk->insert(chunk->begin(), gc_map.begin(), gc_map.end());
- }
chunk->insert(chunk->end(), code.begin(), code.end());
CHECK_EQ(chunk->size(), size);
const void* unaligned_code_ptr = chunk->data() + (size - code_size);
@@ -301,7 +285,7 @@
MemMap::Init();
image_reservation_.reset(MemMap::MapAnonymous("image reservation",
reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
- (size_t)100 * 1024 * 1024, // 100MB
+ (size_t)120 * 1024 * 1024, // 120MB
PROT_NONE,
false /* no need for 4gb flag with fixed mmap */,
false /* not reusing existing reservation */,
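
(Aside: after this change the test chunk is just [vmap table][OatQuickMethodHeader][code]. A minimal sketch of that layout, using plain std::vector stand-ins for the real tables; names here are illustrative only:)

    // Sketch of the simplified chunk layout built by the test helper above.
    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> BuildChunk(const std::vector<uint8_t>& vmap_table,
                                    const std::vector<uint8_t>& header_bytes,
                                    const std::vector<uint8_t>& code) {
      std::vector<uint8_t> chunk;
      // Header first, then prepend the vmap table, then append the code,
      // matching the insert order in the hunk above.
      chunk.insert(chunk.end(), header_bytes.begin(), header_bytes.end());
      chunk.insert(chunk.begin(), vmap_table.begin(), vmap_table.end());
      chunk.insert(chunk.end(), code.begin(), code.end());
      // Final layout: [vmap table][method header][code]; the code entry point
      // is chunk.data() + (chunk.size() - code.size()).
      return chunk;
    }
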
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 7c2c844..2d139eb 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -115,13 +115,6 @@
std::list<std::vector<uint8_t>> header_code_and_maps_chunks_;
};
-// TODO: When heap reference poisoning works with all compilers in use, get rid of this.
-#define TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK() \
- if (kPoisonHeapReferences && GetCompilerKind() == Compiler::kQuick) { \
- printf("WARNING: TEST DISABLED FOR HEAP REFERENCE POISONING WITH QUICK\n"); \
- return; \
- }
-
// TODO: When read barrier works with all tests, get rid of this.
#define TEST_DISABLED_FOR_READ_BARRIER() \
if (kUseReadBarrier) { \
@@ -129,13 +122,6 @@
return; \
}
-// TODO: When read barrier works with all compilers in use, get rid of this.
-#define TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK() \
- if (kUseReadBarrier && GetCompilerKind() == Compiler::kQuick) { \
- printf("WARNING: TEST DISABLED FOR READ BARRIER WITH QUICK\n"); \
- return; \
- }
-
// TODO: When read barrier works with all Optimizing back ends, get rid of this.
#define TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS() \
if (kUseReadBarrier && GetCompilerKind() == Compiler::kOptimizing) { \
@@ -155,13 +141,6 @@
} \
}
-// TODO: When non-PIC works with all compilers in use, get rid of this.
-#define TEST_DISABLED_FOR_NON_PIC_COMPILING_WITH_OPTIMIZING() \
- if (GetCompilerKind() == Compiler::kOptimizing) { \
- printf("WARNING: TEST DISABLED FOR NON-PIC COMPILING WITH OPTIMIZING\n"); \
- return; \
- }
-
} // namespace art
#endif // ART_COMPILER_COMMON_COMPILER_TEST_H_
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 9551d22..f06d90c 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -106,9 +106,7 @@
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
const ArrayRef<const SrcMapElem>& src_mapping_table,
- const ArrayRef<const uint8_t>& mapping_table,
const ArrayRef<const uint8_t>& vmap_table,
- const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const LinkerPatch>& patches)
: CompiledCode(driver, instruction_set, quick_code),
@@ -116,9 +114,7 @@
fp_spill_mask_(fp_spill_mask),
src_mapping_table_(
driver->GetCompiledMethodStorage()->DeduplicateSrcMappingTable(src_mapping_table)),
- mapping_table_(driver->GetCompiledMethodStorage()->DeduplicateMappingTable(mapping_table)),
vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)),
- gc_map_(driver->GetCompiledMethodStorage()->DeduplicateGCMap(native_gc_map)),
cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)),
patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) {
}
@@ -131,15 +127,20 @@
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
const ArrayRef<const SrcMapElem>& src_mapping_table,
- const ArrayRef<const uint8_t>& mapping_table,
const ArrayRef<const uint8_t>& vmap_table,
- const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const LinkerPatch>& patches) {
SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator());
CompiledMethod* ret = alloc.allocate(1);
- alloc.construct(ret, driver, instruction_set, quick_code, frame_size_in_bytes, core_spill_mask,
- fp_spill_mask, src_mapping_table, mapping_table, vmap_table, native_gc_map,
+ alloc.construct(ret,
+ driver,
+ instruction_set,
+ quick_code,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ src_mapping_table,
+ vmap_table,
cfi_info, patches);
return ret;
}
@@ -154,9 +155,7 @@
CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage();
storage->ReleaseLinkerPatches(patches_);
storage->ReleaseCFIInfo(cfi_info_);
- storage->ReleaseGCMap(gc_map_);
storage->ReleaseVMapTable(vmap_table_);
- storage->ReleaseMappingTable(mapping_table_);
storage->ReleaseSrcMappingTable(src_mapping_table_);
}
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 70161eb..9479ff3 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -377,9 +377,7 @@
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
const ArrayRef<const SrcMapElem>& src_mapping_table,
- const ArrayRef<const uint8_t>& mapping_table,
const ArrayRef<const uint8_t>& vmap_table,
- const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const LinkerPatch>& patches);
@@ -393,9 +391,7 @@
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
const ArrayRef<const SrcMapElem>& src_mapping_table,
- const ArrayRef<const uint8_t>& mapping_table,
const ArrayRef<const uint8_t>& vmap_table,
- const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const LinkerPatch>& patches);
@@ -417,18 +413,10 @@
return GetArray(src_mapping_table_);
}
- ArrayRef<const uint8_t> GetMappingTable() const {
- return GetArray(mapping_table_);
- }
-
ArrayRef<const uint8_t> GetVmapTable() const {
return GetArray(vmap_table_);
}
- ArrayRef<const uint8_t> GetGcMap() const {
- return GetArray(gc_map_);
- }
-
ArrayRef<const uint8_t> GetCFIInfo() const {
return GetArray(cfi_info_);
}
@@ -446,14 +434,8 @@
const uint32_t fp_spill_mask_;
// For quick code, a set of pairs (PC, DEX) mapping from native PC offset to DEX offset.
const LengthPrefixedArray<SrcMapElem>* const src_mapping_table_;
- // For quick code, a uleb128 encoded map from native PC offset to dex PC aswell as dex PC to
- // native PC offset. Size prefixed.
- const LengthPrefixedArray<uint8_t>* const mapping_table_;
// For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed.
const LengthPrefixedArray<uint8_t>* const vmap_table_;
- // For quick code, a map keyed by native PC indices to bitmaps describing what dalvik registers
- // are live.
- const LengthPrefixedArray<uint8_t>* const gc_map_;
// For quick code, a FDE entry for the debug_frame section.
const LengthPrefixedArray<uint8_t>* const cfi_info_;
// For quick code, linker patches needed by the method.
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index efddeba..3ce786e 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -363,9 +363,7 @@
0,
0,
ArrayRef<const SrcMapElem>(), // src_mapping_table
- ArrayRef<const uint8_t>(), // mapping_table
ArrayRef<const uint8_t>(builder.GetData()), // vmap_table
- ArrayRef<const uint8_t>(), // gc_map
ArrayRef<const uint8_t>(), // cfi data
ArrayRef<const LinkerPatch>());
}
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index 510613e..a0a8f81 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -174,11 +174,8 @@
dedupe_code_("dedupe code", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
dedupe_src_mapping_table_("dedupe source mapping table",
LengthPrefixedArrayAlloc<SrcMapElem>(swap_space_.get())),
- dedupe_mapping_table_("dedupe mapping table",
- LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
dedupe_vmap_table_("dedupe vmap table",
LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
- dedupe_gc_map_("dedupe gc map", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
dedupe_linker_patches_("dedupe linker patches",
LengthPrefixedArrayAlloc<LinkerPatch>(swap_space_.get())) {
@@ -196,9 +193,7 @@
if (extended) {
Thread* self = Thread::Current();
os << "\nCode dedupe: " << dedupe_code_.DumpStats(self);
- os << "\nMapping table dedupe: " << dedupe_mapping_table_.DumpStats(self);
os << "\nVmap table dedupe: " << dedupe_vmap_table_.DumpStats(self);
- os << "\nGC map dedupe: " << dedupe_gc_map_.DumpStats(self);
os << "\nCFI info dedupe: " << dedupe_cfi_info_.DumpStats(self);
}
}
@@ -221,15 +216,6 @@
ReleaseArrayIfNotDeduplicated(src_map);
}
-const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateMappingTable(
- const ArrayRef<const uint8_t>& table) {
- return AllocateOrDeduplicateArray(table, &dedupe_mapping_table_);
-}
-
-void CompiledMethodStorage::ReleaseMappingTable(const LengthPrefixedArray<uint8_t>* table) {
- ReleaseArrayIfNotDeduplicated(table);
-}
-
const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateVMapTable(
const ArrayRef<const uint8_t>& table) {
return AllocateOrDeduplicateArray(table, &dedupe_vmap_table_);
@@ -239,15 +225,6 @@
ReleaseArrayIfNotDeduplicated(table);
}
-const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateGCMap(
- const ArrayRef<const uint8_t>& gc_map) {
- return AllocateOrDeduplicateArray(gc_map, &dedupe_gc_map_);
-}
-
-void CompiledMethodStorage::ReleaseGCMap(const LengthPrefixedArray<uint8_t>* gc_map) {
- ReleaseArrayIfNotDeduplicated(gc_map);
-}
-
const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateCFIInfo(
const ArrayRef<const uint8_t>& cfi_info) {
return AllocateOrDeduplicateArray(cfi_info, &dedupe_cfi_info_);
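
(Aside: each Deduplicate*/Release* pair shares one stored array per unique content and frees it on the last release. A rough use-counted sketch of that contract; the real AllocateOrDeduplicateArray/ReleaseArrayIfNotDeduplicated and the swap-space allocator are not shown in this hunk, so the details below are assumptions:)

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <vector>

    class ByteArrayDedupe {
     public:
      // Returns a shared copy of `data`, bumping a use count for identical contents.
      const std::vector<uint8_t>* Deduplicate(const std::vector<uint8_t>& data) {
        auto it = storage_.emplace(data, 0u).first;
        ++it->second;
        return &it->first;  // std::map nodes are stable, so the pointer stays valid.
      }
      // Pairs with Deduplicate(); frees the entry on the last release.
      void Release(const std::vector<uint8_t>* data) {
        auto it = storage_.find(*data);
        if (it != storage_.end() && --it->second == 0u) {
          storage_.erase(it);
        }
      }
     private:
      std::map<std::vector<uint8_t>, size_t> storage_;  // contents -> use count
    };
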
diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h
index d6961a0..8674abf 100644
--- a/compiler/driver/compiled_method_storage.h
+++ b/compiler/driver/compiled_method_storage.h
@@ -56,15 +56,9 @@
const ArrayRef<const SrcMapElem>& src_map);
void ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map);
- const LengthPrefixedArray<uint8_t>* DeduplicateMappingTable(const ArrayRef<const uint8_t>& table);
- void ReleaseMappingTable(const LengthPrefixedArray<uint8_t>* table);
-
const LengthPrefixedArray<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& table);
void ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table);
- const LengthPrefixedArray<uint8_t>* DeduplicateGCMap(const ArrayRef<const uint8_t>& gc_map);
- void ReleaseGCMap(const LengthPrefixedArray<uint8_t>* gc_map);
-
const LengthPrefixedArray<uint8_t>* DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info);
void ReleaseCFIInfo(const LengthPrefixedArray<uint8_t>* cfi_info);
@@ -103,9 +97,7 @@
ArrayDedupeSet<uint8_t> dedupe_code_;
ArrayDedupeSet<SrcMapElem> dedupe_src_mapping_table_;
- ArrayDedupeSet<uint8_t> dedupe_mapping_table_;
ArrayDedupeSet<uint8_t> dedupe_vmap_table_;
- ArrayDedupeSet<uint8_t> dedupe_gc_map_;
ArrayDedupeSet<uint8_t> dedupe_cfi_info_;
ArrayDedupeSet<LinkerPatch> dedupe_linker_patches_;
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 0695cb5..9e0c22c 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -61,24 +61,12 @@
ArrayRef<const SrcMapElem>(raw_src_map1),
ArrayRef<const SrcMapElem>(raw_src_map2),
};
- const uint8_t raw_mapping_table1[] = { 5, 6, 7 };
- const uint8_t raw_mapping_table2[] = { 7, 6, 5, 4 };
- ArrayRef<const uint8_t> mapping_table[] = {
- ArrayRef<const uint8_t>(raw_mapping_table1),
- ArrayRef<const uint8_t>(raw_mapping_table2),
- };
const uint8_t raw_vmap_table1[] = { 2, 4, 6 };
const uint8_t raw_vmap_table2[] = { 7, 5, 3, 1 };
ArrayRef<const uint8_t> vmap_table[] = {
ArrayRef<const uint8_t>(raw_vmap_table1),
ArrayRef<const uint8_t>(raw_vmap_table2),
};
- const uint8_t raw_gc_map1[] = { 9, 8, 7 };
- const uint8_t raw_gc_map2[] = { 6, 7, 8, 9 };
- ArrayRef<const uint8_t> gc_map[] = {
- ArrayRef<const uint8_t>(raw_gc_map1),
- ArrayRef<const uint8_t>(raw_gc_map2),
- };
const uint8_t raw_cfi_info1[] = { 1, 3, 5 };
const uint8_t raw_cfi_info2[] = { 8, 6, 4, 2 };
ArrayRef<const uint8_t> cfi_info[] = {
@@ -102,49 +90,37 @@
compiled_methods.reserve(1u << 7);
for (auto&& c : code) {
for (auto&& s : src_map) {
- for (auto&& m : mapping_table) {
- for (auto&& v : vmap_table) {
- for (auto&& g : gc_map) {
- for (auto&& f : cfi_info) {
- for (auto&& p : patches) {
- compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
- &driver, kNone, c, 0u, 0u, 0u, s, m, v, g, f, p));
- }
- }
+ for (auto&& v : vmap_table) {
+ for (auto&& f : cfi_info) {
+ for (auto&& p : patches) {
+ compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
+ &driver, kNone, c, 0u, 0u, 0u, s, v, f, p));
}
}
}
}
}
- constexpr size_t code_bit = 1u << 6;
- constexpr size_t src_map_bit = 1u << 5;
- constexpr size_t mapping_table_bit = 1u << 4;
- constexpr size_t vmap_table_bit = 1u << 3;
- constexpr size_t gc_map_bit = 1u << 2;
+ constexpr size_t code_bit = 1u << 4;
+ constexpr size_t src_map_bit = 1u << 3;
+ constexpr size_t vmap_table_bit = 1u << 2;
constexpr size_t cfi_info_bit = 1u << 1;
constexpr size_t patches_bit = 1u << 0;
- CHECK_EQ(compiled_methods.size(), 1u << 7);
+ CHECK_EQ(compiled_methods.size(), 1u << 5);
for (size_t i = 0; i != compiled_methods.size(); ++i) {
for (size_t j = 0; j != compiled_methods.size(); ++j) {
CompiledMethod* lhs = compiled_methods[i];
CompiledMethod* rhs = compiled_methods[j];
bool same_code = ((i ^ j) & code_bit) == 0u;
bool same_src_map = ((i ^ j) & src_map_bit) == 0u;
- bool same_mapping_table = ((i ^ j) & mapping_table_bit) == 0u;
bool same_vmap_table = ((i ^ j) & vmap_table_bit) == 0u;
- bool same_gc_map = ((i ^ j) & gc_map_bit) == 0u;
bool same_cfi_info = ((i ^ j) & cfi_info_bit) == 0u;
bool same_patches = ((i ^ j) & patches_bit) == 0u;
ASSERT_EQ(same_code, lhs->GetQuickCode().data() == rhs->GetQuickCode().data())
<< i << " " << j;
ASSERT_EQ(same_src_map, lhs->GetSrcMappingTable().data() == rhs->GetSrcMappingTable().data())
<< i << " " << j;
- ASSERT_EQ(same_mapping_table, lhs->GetMappingTable().data() == rhs->GetMappingTable().data())
- << i << " " << j;
ASSERT_EQ(same_vmap_table, lhs->GetVmapTable().data() == rhs->GetVmapTable().data())
<< i << " " << j;
- ASSERT_EQ(same_gc_map, lhs->GetGcMap().data() == rhs->GetGcMap().data())
- << i << " " << j;
ASSERT_EQ(same_cfi_info, lhs->GetCFIInfo().data() == rhs->GetCFIInfo().data())
<< i << " " << j;
ASSERT_EQ(same_patches, lhs->GetPatches().data() == rhs->GetPatches().data())
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index d29d528..5294068 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -357,7 +357,7 @@
compiler_kind_(compiler_kind),
instruction_set_(instruction_set),
instruction_set_features_(instruction_set_features),
- freezing_constructor_lock_("freezing constructor lock"),
+ requires_constructor_barrier_lock_("constructor barrier lock"),
compiled_classes_lock_("compiled classes lock"),
compiled_methods_lock_("compiled method lock"),
compiled_methods_(MethodTable::key_compare()),
@@ -2006,6 +2006,28 @@
self->ClearException();
}
+bool CompilerDriver::RequiresConstructorBarrier(const DexFile& dex_file,
+ uint16_t class_def_idx) const {
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ // Empty class such as a marker interface.
+ return false;
+ }
+ ClassDataItemIterator it(dex_file, class_data);
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ // We require a constructor barrier if there are final instance fields.
+ while (it.HasNextInstanceField()) {
+ if (it.MemberIsFinal()) {
+ return true;
+ }
+ it.Next();
+ }
+ return false;
+}
+
class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
public:
explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
@@ -2110,9 +2132,10 @@
DCHECK(!it.HasNext());
}
}
- if (requires_constructor_barrier) {
- manager_->GetCompiler()->AddRequiresConstructorBarrier(self, &dex_file, class_def_index);
- }
+ manager_->GetCompiler()->SetRequiresConstructorBarrier(self,
+ &dex_file,
+ class_def_index,
+ requires_constructor_barrier);
}
private:
@@ -2486,6 +2509,20 @@
context.ForAll(0, dex_file.NumClassDefs(), &visitor, init_thread_count);
}
+class InitializeArrayClassVisitor : public ClassVisitor {
+ public:
+ virtual bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (klass->IsArrayClass()) {
+ StackHandleScope<1> hs(Thread::Current());
+ Runtime::Current()->GetClassLinker()->EnsureInitialized(hs.Self(),
+ hs.NewHandle(klass),
+ true,
+ true);
+ }
+ return true;
+ }
+};
+
void CompilerDriver::InitializeClasses(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
@@ -2494,6 +2531,14 @@
CHECK(dex_file != nullptr);
InitializeClasses(class_loader, *dex_file, dex_files, timings);
}
+ {
+ // Make sure that we call EnsureInitialized on all the array classes to call
+ // SetVerificationAttempted so that the access flags are set. If we do not do this, they get
+ // changed at runtime, resulting in more dirty image pages.
+ ScopedObjectAccess soa(Thread::Current());
+ InitializeArrayClassVisitor visitor;
+ Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
+ }
if (IsBootImage()) {
// Prune garbage objects created during aborted transactions.
Runtime::Current()->GetHeap()->CollectGarbage(true);
@@ -2747,16 +2792,29 @@
return non_relative_linker_patch_count_;
}
-void CompilerDriver::AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
- uint16_t class_def_index) {
- WriterMutexLock mu(self, freezing_constructor_lock_);
- freezing_constructor_classes_.insert(ClassReference(dex_file, class_def_index));
+void CompilerDriver::SetRequiresConstructorBarrier(Thread* self,
+ const DexFile* dex_file,
+ uint16_t class_def_index,
+ bool requires) {
+ WriterMutexLock mu(self, requires_constructor_barrier_lock_);
+ requires_constructor_barrier_.emplace(ClassReference(dex_file, class_def_index), requires);
}
-bool CompilerDriver::RequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
- uint16_t class_def_index) const {
- ReaderMutexLock mu(self, freezing_constructor_lock_);
- return freezing_constructor_classes_.count(ClassReference(dex_file, class_def_index)) != 0;
+bool CompilerDriver::RequiresConstructorBarrier(Thread* self,
+ const DexFile* dex_file,
+ uint16_t class_def_index) {
+ ClassReference class_ref(dex_file, class_def_index);
+ {
+ ReaderMutexLock mu(self, requires_constructor_barrier_lock_);
+ auto it = requires_constructor_barrier_.find(class_ref);
+ if (it != requires_constructor_barrier_.end()) {
+ return it->second;
+ }
+ }
+ WriterMutexLock mu(self, requires_constructor_barrier_lock_);
+ const bool requires = RequiresConstructorBarrier(*dex_file, class_def_index);
+ requires_constructor_barrier_.emplace(class_ref, requires);
+ return requires;
}
std::string CompilerDriver::GetMemoryUsageString(bool extended) const {
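
(Aside: the rewritten RequiresConstructorBarrier(self, ...) is a memoized lookup: a reader-lock fast path over the map, then a writer-lock slow path that computes and caches. A minimal standalone sketch of the pattern, with std::shared_mutex standing in for ART's ReaderWriterMutex and an int key in place of ClassReference; both substitutions are assumptions for brevity:)

    #include <map>
    #include <mutex>
    #include <shared_mutex>

    class BarrierCache {
     public:
      bool RequiresBarrier(int class_ref) {
        {
          std::shared_lock<std::shared_mutex> reader(lock_);
          auto it = cache_.find(class_ref);
          if (it != cache_.end()) {
            return it->second;  // Fast path: result already memoized.
          }
        }
        std::unique_lock<std::shared_mutex> writer(lock_);
        // A racing thread may have inserted between the locks; emplace keeps
        // the first value, and recomputing is harmless (it is deterministic).
        bool requires = Compute(class_ref);
        return cache_.emplace(class_ref, requires).first->second;
      }
     private:
      bool Compute(int class_ref) { return (class_ref % 2) != 0; }  // Placeholder.
      std::shared_mutex lock_;
      std::map<int, bool> cache_;
    };
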
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 64a06a2..905f84d 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -183,12 +183,15 @@
// Remove and delete a compiled method.
void RemoveCompiledMethod(const MethodReference& method_ref) REQUIRES(!compiled_methods_lock_);
- void AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
- uint16_t class_def_index)
- REQUIRES(!freezing_constructor_lock_);
- bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
- uint16_t class_def_index) const
- REQUIRES(!freezing_constructor_lock_);
+ void SetRequiresConstructorBarrier(Thread* self,
+ const DexFile* dex_file,
+ uint16_t class_def_index,
+ bool requires)
+ REQUIRES(!requires_constructor_barrier_lock_);
+ bool RequiresConstructorBarrier(Thread* self,
+ const DexFile* dex_file,
+ uint16_t class_def_index)
+ REQUIRES(!requires_constructor_barrier_lock_);
// Callbacks from compiler to see what runtime checks must be generated.
@@ -619,6 +622,8 @@
void FreeThreadPools();
void CheckThreadPools();
+ bool RequiresConstructorBarrier(const DexFile& dex_file, uint16_t class_def_idx) const;
+
const CompilerOptions* const compiler_options_;
VerificationResults* const verification_results_;
DexFileToMethodInlinerMap* const method_inliner_map_;
@@ -629,9 +634,11 @@
const InstructionSet instruction_set_;
const InstructionSetFeatures* const instruction_set_features_;
- // All class references that require
- mutable ReaderWriterMutex freezing_constructor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::set<ClassReference> freezing_constructor_classes_ GUARDED_BY(freezing_constructor_lock_);
+ // Per-class-reference memoized results of RequiresConstructorBarrier. If a class reference is
+ // not in the map, the result has not yet been computed.
+ mutable ReaderWriterMutex requires_constructor_barrier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::map<ClassReference, bool> requires_constructor_barrier_
+ GUARDED_BY(requires_constructor_barrier_lock_);
typedef SafeMap<const ClassReference, CompiledClass*> ClassTable;
// All class references that this compiler has compiled.
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 0037564..b9a5a78 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -143,21 +143,11 @@
// TODO: check that all Method::GetCode() values are non-null
}
-TEST_F(CompilerDriverTest, DISABLED_AbstractMethodErrorStub) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK();
- TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK();
+TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
jobject class_loader;
{
ScopedObjectAccess soa(Thread::Current());
- CompileVirtualMethod(ScopedNullHandle<mirror::ClassLoader>(),
- "java.lang.Class",
- "isFinalizable",
- "()Z");
- CompileDirectMethod(ScopedNullHandle<mirror::ClassLoader>(),
- "java.lang.Object",
- "<init>",
- "()V");
class_loader = LoadDex("AbstractMethod");
}
ASSERT_TRUE(class_loader != nullptr);
@@ -197,8 +187,6 @@
};
TEST_F(CompilerDriverMethodsTest, Selection) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK();
- TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK();
TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
Thread* self = Thread::Current();
jobject class_loader;
@@ -303,8 +291,6 @@
};
TEST_F(CompilerDriverProfileTest, ProfileGuidedCompilation) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK();
- TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK();
TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
Thread* self = Thread::Current();
jobject class_loader;
diff --git a/runtime/exception_test.cc b/compiler/exception_test.cc
similarity index 76%
rename from runtime/exception_test.cc
rename to compiler/exception_test.cc
index 18ccd08..38ac052 100644
--- a/runtime/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -16,6 +16,7 @@
#include <memory>
+#include "base/arena_allocator.h"
#include "class_linker.h"
#include "common_runtime_test.h"
#include "dex_file.h"
@@ -27,11 +28,11 @@
#include "mirror/object-inl.h"
#include "mirror/stack_trace_element.h"
#include "oat_quick_method_header.h"
+#include "optimizing/stack_map_stream.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
#include "thread.h"
-#include "vmap_table.h"
namespace art {
@@ -57,40 +58,27 @@
fake_code_.push_back(0x70 | i);
}
- fake_mapping_data_.PushBackUnsigned(4); // first element is count
- fake_mapping_data_.PushBackUnsigned(4); // total (non-length) elements
- fake_mapping_data_.PushBackUnsigned(2); // count of pc to dex elements
- // --- pc to dex table
- fake_mapping_data_.PushBackUnsigned(3 - 0); // offset 3
- fake_mapping_data_.PushBackSigned(3 - 0); // maps to dex offset 3
- // --- dex to pc table
- fake_mapping_data_.PushBackUnsigned(3 - 0); // offset 3
- fake_mapping_data_.PushBackSigned(3 - 0); // maps to dex offset 3
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ StackMapStream stack_maps(&allocator);
+ stack_maps.BeginStackMapEntry(/* dex_pc */ 3u,
+ /* native_pc_offset */ 3u,
+ /* register_mask */ 0u,
+ /* sp_mask */ nullptr,
+ /* num_dex_registers */ 0u,
+ /* inlining_depth */ 0u);
+ stack_maps.EndStackMapEntry();
+ size_t stack_maps_size = stack_maps.PrepareForFillIn();
+ size_t stack_maps_offset = stack_maps_size + sizeof(OatQuickMethodHeader);
- fake_vmap_table_data_.PushBackUnsigned(0 + VmapTable::kEntryAdjustment);
-
- fake_gc_map_.push_back(0); // 0 bytes to encode references and native pc offsets.
- fake_gc_map_.push_back(0);
- fake_gc_map_.push_back(0); // 0 entries.
- fake_gc_map_.push_back(0);
-
- const std::vector<uint8_t>& fake_vmap_table_data = fake_vmap_table_data_.GetData();
- const std::vector<uint8_t>& fake_mapping_data = fake_mapping_data_.GetData();
- uint32_t vmap_table_offset = sizeof(OatQuickMethodHeader) + fake_vmap_table_data.size();
- uint32_t mapping_table_offset = vmap_table_offset + fake_mapping_data.size();
- uint32_t gc_map_offset = mapping_table_offset + fake_gc_map_.size();
- OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset, gc_map_offset,
- 4 * sizeof(void*), 0u, 0u, code_size);
- fake_header_code_and_maps_.resize(sizeof(method_header));
- memcpy(&fake_header_code_and_maps_[0], &method_header, sizeof(method_header));
- fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(),
- fake_vmap_table_data.begin(), fake_vmap_table_data.end());
- fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(),
- fake_mapping_data.begin(), fake_mapping_data.end());
- fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(),
- fake_gc_map_.begin(), fake_gc_map_.end());
- fake_header_code_and_maps_.insert(fake_header_code_and_maps_.end(),
- fake_code_.begin(), fake_code_.end());
+ fake_header_code_and_maps_.resize(stack_maps_offset + fake_code_.size());
+ MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size);
+ stack_maps.FillIn(stack_maps_region);
+ OatQuickMethodHeader method_header(stack_maps_offset, 4 * sizeof(void*), 0u, 0u, code_size);
+ memcpy(&fake_header_code_and_maps_[stack_maps_size], &method_header, sizeof(method_header));
+ std::copy(fake_code_.begin(),
+ fake_code_.end(),
+ fake_header_code_and_maps_.begin() + stack_maps_offset);
// Align the code.
const size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
@@ -109,7 +97,7 @@
if (kRuntimeISA == kArm) {
// Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer().
- CHECK_ALIGNED(mapping_table_offset, 2);
+ CHECK_ALIGNED(stack_maps_offset, 2);
}
method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*));
@@ -124,9 +112,6 @@
const DexFile* dex_;
std::vector<uint8_t> fake_code_;
- Leb128EncodingVector<> fake_mapping_data_;
- Leb128EncodingVector<> fake_vmap_table_data_;
- std::vector<uint8_t> fake_gc_map_;
std::vector<uint8_t> fake_header_code_and_maps_;
ArtMethod* method_f_;
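
(Note on the naming above: stack_maps_offset is really the size of the whole prefix, i.e. the offset of the code within the buffer, since the layout is [stack maps][OatQuickMethodHeader][code]. With illustrative sizes:)

    #include <cassert>
    #include <cstddef>

    int main() {
      // Hypothetical sizes; after this change sizeof(OatQuickMethodHeader) is 20.
      const size_t stack_maps_size = 12;   // from PrepareForFillIn()
      const size_t header_size = 20;       // sizeof(OatQuickMethodHeader)
      const size_t code_size = 16;
      // Layout: [stack maps][method header][code].
      const size_t stack_maps_offset = stack_maps_size + header_size;  // code starts here
      const size_t total_size = stack_maps_offset + code_size;
      assert(stack_maps_offset == 32);
      assert(total_size == 48);
      return 0;
    }
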
diff --git a/compiler/gc_map_builder.h b/compiler/gc_map_builder.h
deleted file mode 100644
index 2ef7f1a..0000000
--- a/compiler/gc_map_builder.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_GC_MAP_BUILDER_H_
-#define ART_COMPILER_GC_MAP_BUILDER_H_
-
-#include <vector>
-
-#include "base/bit_utils.h"
-#include "gc_map.h"
-
-namespace art {
-
-class GcMapBuilder {
- public:
- template <typename Vector>
- GcMapBuilder(Vector* table, size_t entries, uint32_t max_native_offset,
- size_t references_width)
- : entries_(entries), references_width_(entries != 0u ? references_width : 0u),
- native_offset_width_(entries != 0 && max_native_offset != 0
- ? sizeof(max_native_offset) - CLZ(max_native_offset) / 8u
- : 0u),
- in_use_(entries) {
- static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
-
- // Resize table and set up header.
- table->resize((EntryWidth() * entries) + sizeof(uint32_t));
- table_ = table->data();
- CHECK_LT(native_offset_width_, 1U << 3);
- (*table)[0] = native_offset_width_ & 7;
- CHECK_LT(references_width_, 1U << 13);
- (*table)[0] |= (references_width_ << 3) & 0xFF;
- (*table)[1] = (references_width_ >> 5) & 0xFF;
- CHECK_LT(entries, 1U << 16);
- (*table)[2] = entries & 0xFF;
- (*table)[3] = (entries >> 8) & 0xFF;
- }
-
- void AddEntry(uint32_t native_offset, const uint8_t* references) {
- size_t table_index = TableIndex(native_offset);
- while (in_use_[table_index]) {
- table_index = (table_index + 1) % entries_;
- }
- in_use_[table_index] = true;
- SetCodeOffset(table_index, native_offset);
- DCHECK_EQ(native_offset, GetCodeOffset(table_index));
- SetReferences(table_index, references);
- }
-
- private:
- size_t TableIndex(uint32_t native_offset) {
- return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
- }
-
- uint32_t GetCodeOffset(size_t table_index) {
- uint32_t native_offset = 0;
- size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
- for (size_t i = 0; i < native_offset_width_; i++) {
- native_offset |= table_[table_offset + i] << (i * 8);
- }
- return native_offset;
- }
-
- void SetCodeOffset(size_t table_index, uint32_t native_offset) {
- size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
- for (size_t i = 0; i < native_offset_width_; i++) {
- table_[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
- }
- }
-
- void SetReferences(size_t table_index, const uint8_t* references) {
- size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
- memcpy(&table_[table_offset + native_offset_width_], references, references_width_);
- }
-
- size_t EntryWidth() const {
- return native_offset_width_ + references_width_;
- }
-
- // Number of entries in the table.
- const size_t entries_;
- // Number of bytes used to encode the reference bitmap.
- const size_t references_width_;
- // Number of bytes used to encode a native offset.
- const size_t native_offset_width_;
- // Entries that are in use.
- std::vector<bool> in_use_;
- // The table we're building.
- uint8_t* table_;
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_GC_MAP_BUILDER_H_
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 7779e44..91579e9 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -288,17 +288,14 @@
}
TEST_F(ImageTest, WriteReadUncompressed) {
- TEST_DISABLED_FOR_READ_BARRIER(); // b/27578460
TestWriteRead(ImageHeader::kStorageModeUncompressed);
}
TEST_F(ImageTest, WriteReadLZ4) {
- TEST_DISABLED_FOR_READ_BARRIER(); // b/27578460
TestWriteRead(ImageHeader::kStorageModeLZ4);
}
TEST_F(ImageTest, WriteReadLZ4HC) {
- TEST_DISABLED_FOR_READ_BARRIER(); // b/27578460
TestWriteRead(ImageHeader::kStorageModeLZ4HC);
}
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index e920460..b8cda24 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -488,9 +488,7 @@
main_jni_conv->CoreSpillMask(),
main_jni_conv->FpSpillMask(),
ArrayRef<const SrcMapElem>(),
- ArrayRef<const uint8_t>(), // mapping_table.
ArrayRef<const uint8_t>(), // vmap_table.
- ArrayRef<const uint8_t>(), // native_gc_map.
ArrayRef<const uint8_t>(*jni_asm->cfi().data()),
ArrayRef<const LinkerPatch>());
}
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index bf61ea0..c07de79 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -85,9 +85,15 @@
const ArrayRef<const LinkerPatch>& patches) {
compiled_method_refs_.push_back(method_ref);
compiled_methods_.emplace_back(new CompiledMethod(
- &driver_, instruction_set_, code,
- 0u, 0u, 0u, ArrayRef<const SrcMapElem>(), ArrayRef<const uint8_t>(),
- ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(),
+ &driver_,
+ instruction_set_,
+ code,
+ /* frame_size_in_bytes */ 0u,
+ /* core_spill_mask */ 0u,
+ /* fp_spill_mask */ 0u,
+ /* src_mapping_table */ ArrayRef<const SrcMapElem>(),
+ /* vmap_table */ ArrayRef<const uint8_t>(),
+ /* cfi_info */ ArrayRef<const uint8_t>(),
patches));
}
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index eaf0e17..73b16d5 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -442,7 +442,7 @@
// it is time to update OatHeader::kOatVersion
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
- EXPECT_EQ(28U, sizeof(OatQuickMethodHeader));
+ EXPECT_EQ(20U, sizeof(OatQuickMethodHeader));
EXPECT_EQ(132 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
}
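
(The drop from 28 to 20 bytes matches the two removed uint32_t fields, mapping_table_offset_ and gc_map_offset_, exactly. A layout sketch consistent with the new five-argument constructor; the actual declarations in oat_quick_method_header.h are an assumption here:)

    #include <cstdint>

    struct QuickMethodFrameInfoSketch {
      uint32_t frame_size_in_bytes_;
      uint32_t core_spill_mask_;
      uint32_t fp_spill_mask_;
    };

    struct OatQuickMethodHeaderSketch {
      uint32_t vmap_table_offset_;  // mapping_table_offset_ and gc_map_offset_
                                    // (2 * 4 bytes) were removed: 28 - 8 = 20.
      QuickMethodFrameInfoSketch frame_info_;
      uint32_t code_size_;
    };

    static_assert(sizeof(OatQuickMethodHeaderSketch) == 20,
                  "five uint32_t fields, matching the oat_test expectation");
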
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 3a67b1e..25c671e 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -275,9 +275,7 @@
size_code_alignment_(0),
size_relative_call_thunks_(0),
size_misc_thunks_(0),
- size_mapping_table_(0),
size_vmap_table_(0),
- size_gc_map_(0),
size_oat_dex_file_location_size_(0),
size_oat_dex_file_location_data_(0),
size_oat_dex_file_location_checksum_(0),
@@ -498,72 +496,6 @@
OatWriter::~OatWriter() {
}
-struct OatWriter::GcMapDataAccess {
- static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
- return compiled_method->GetGcMap();
- }
-
- static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE {
- uint32_t offset = oat_class->method_headers_[method_offsets_index].gc_map_offset_;
- return offset == 0u ? 0u :
- (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset;
- }
-
- static void SetOffset(OatClass* oat_class, size_t method_offsets_index, uint32_t offset)
- ALWAYS_INLINE {
- oat_class->method_headers_[method_offsets_index].gc_map_offset_ =
- (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset;
- }
-
- static const char* Name() {
- return "GC map";
- }
-};
-
-struct OatWriter::MappingTableDataAccess {
- static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
- return compiled_method->GetMappingTable();
- }
-
- static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE {
- uint32_t offset = oat_class->method_headers_[method_offsets_index].mapping_table_offset_;
- return offset == 0u ? 0u :
- (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset;
- }
-
- static void SetOffset(OatClass* oat_class, size_t method_offsets_index, uint32_t offset)
- ALWAYS_INLINE {
- oat_class->method_headers_[method_offsets_index].mapping_table_offset_ =
- (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset;
- }
-
- static const char* Name() {
- return "mapping table";
- }
-};
-
-struct OatWriter::VmapTableDataAccess {
- static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
- return compiled_method->GetVmapTable();
- }
-
- static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE {
- uint32_t offset = oat_class->method_headers_[method_offsets_index].vmap_table_offset_;
- return offset == 0u ? 0u :
- (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset;
- }
-
- static void SetOffset(OatClass* oat_class, size_t method_offsets_index, uint32_t offset)
- ALWAYS_INLINE {
- oat_class->method_headers_[method_offsets_index].vmap_table_offset_ =
- (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset;
- }
-
- static const char* Name() {
- return "vmap table";
- }
-};
-
class OatWriter::DexMethodVisitor {
public:
DexMethodVisitor(OatWriter* writer, size_t offset)
@@ -726,26 +658,24 @@
uint32_t thumb_offset = compiled_method->CodeDelta();
// Deduplicate code arrays if we are not producing debuggable code.
- bool deduped = false;
+ bool deduped = true;
MethodReference method_ref(dex_file_, it.GetMemberIndex());
if (debuggable_) {
quick_code_offset = writer_->relative_patcher_->GetOffset(method_ref);
if (quick_code_offset != 0u) {
// Duplicate methods, we want the same code for both of them so that the oat writer puts
// the same code in both ArtMethods so that we do not get different oat code at runtime.
- deduped = true;
} else {
quick_code_offset = NewQuickCodeOffset(compiled_method, it, thumb_offset);
+ deduped = false;
}
} else {
- auto lb = dedupe_map_.lower_bound(compiled_method);
- if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(compiled_method, lb->first)) {
- quick_code_offset = lb->second;
- deduped = true;
- } else {
- quick_code_offset = NewQuickCodeOffset(compiled_method, it, thumb_offset);
- dedupe_map_.PutBefore(lb, compiled_method, quick_code_offset);
- }
+ quick_code_offset = dedupe_map_.GetOrCreate(
+ compiled_method,
+ [this, &deduped, compiled_method, &it, thumb_offset]() {
+ deduped = false;
+ return NewQuickCodeOffset(compiled_method, it, thumb_offset);
+ });
}
if (code_size != 0) {
@@ -763,33 +693,25 @@
// Update quick method header.
DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
- uint32_t mapping_table_offset = method_header->mapping_table_offset_;
uint32_t vmap_table_offset = method_header->vmap_table_offset_;
// If we don't have quick code, then we must have a vmap, as that is how the dex2dex
// compiler records its transformations.
DCHECK(!quick_code.empty() || vmap_table_offset != 0);
- uint32_t gc_map_offset = method_header->gc_map_offset_;
// The code offset was 0 when the mapping/vmap table offset was set, so it's set
// to 0-offset and we need to adjust it by code_offset.
uint32_t code_offset = quick_code_offset - thumb_offset;
- if (mapping_table_offset != 0u && code_offset != 0u) {
- mapping_table_offset += code_offset;
- DCHECK_LT(mapping_table_offset, code_offset) << "Overflow in oat offsets";
- }
if (vmap_table_offset != 0u && code_offset != 0u) {
vmap_table_offset += code_offset;
DCHECK_LT(vmap_table_offset, code_offset) << "Overflow in oat offsets";
}
- if (gc_map_offset != 0u && code_offset != 0u) {
- gc_map_offset += code_offset;
- DCHECK_LT(gc_map_offset, code_offset) << "Overflow in oat offsets";
- }
uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
uint32_t fp_spill_mask = compiled_method->GetFpSpillMask();
- *method_header = OatQuickMethodHeader(mapping_table_offset, vmap_table_offset,
- gc_map_offset, frame_size_in_bytes, core_spill_mask,
- fp_spill_mask, code_size);
+ *method_header = OatQuickMethodHeader(vmap_table_offset,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code_size);
if (!deduped) {
// Update offsets. (Checksum is updated when writing.)
@@ -831,30 +753,6 @@
writer_->method_info_.push_back(info);
}
- if (kIsDebugBuild) {
- // We expect GC maps except when the class hasn't been verified or the method is native.
- const CompilerDriver* compiler_driver = writer_->compiler_driver_;
- ClassReference class_ref(dex_file_, class_def_index_);
- CompiledClass* compiled_class = compiler_driver->GetCompiledClass(class_ref);
- mirror::Class::Status status;
- if (compiled_class != nullptr) {
- status = compiled_class->GetStatus();
- } else if (compiler_driver->GetVerificationResults()->IsClassRejected(class_ref)) {
- status = mirror::Class::kStatusError;
- } else {
- status = mirror::Class::kStatusNotReady;
- }
- ArrayRef<const uint8_t> gc_map = compiled_method->GetGcMap();
- if (!gc_map.empty()) {
- size_t gc_map_size = gc_map.size() * sizeof(gc_map[0]);
- bool is_native = it.MemberIsNative();
- CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
- << gc_map_size << " " << (is_native ? "true" : "false") << " "
- << (status < mirror::Class::kStatusVerified) << " " << status << " "
- << PrettyMethod(it.GetMemberIndex(), *dex_file_);
- }
- }
-
DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
OatMethodOffsets* offsets = &oat_class->method_offsets_[method_offsets_index_];
offsets->code_offset_ = quick_code_offset;
@@ -872,15 +770,9 @@
return lhs->GetQuickCode().data() < rhs->GetQuickCode().data();
}
// If the code is the same, all other fields are likely to be the same as well.
- if (UNLIKELY(lhs->GetMappingTable().data() != rhs->GetMappingTable().data())) {
- return lhs->GetMappingTable().data() < rhs->GetMappingTable().data();
- }
if (UNLIKELY(lhs->GetVmapTable().data() != rhs->GetVmapTable().data())) {
return lhs->GetVmapTable().data() < rhs->GetVmapTable().data();
}
- if (UNLIKELY(lhs->GetGcMap().data() != rhs->GetGcMap().data())) {
- return lhs->GetGcMap().data() < rhs->GetGcMap().data();
- }
if (UNLIKELY(lhs->GetPatches().data() != rhs->GetPatches().data())) {
return lhs->GetPatches().data() < rhs->GetPatches().data();
}
@@ -907,7 +799,6 @@
const bool debuggable_;
};
-template <typename DataAccess>
class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
public:
InitMapMethodVisitor(OatWriter* writer, size_t offset)
@@ -921,19 +812,21 @@
if (compiled_method != nullptr) {
DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
- DCHECK_EQ(DataAccess::GetOffset(oat_class, method_offsets_index_), 0u);
+ DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].vmap_table_offset_, 0u);
- ArrayRef<const uint8_t> map = DataAccess::GetData(compiled_method);
+ ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
uint32_t map_size = map.size() * sizeof(map[0]);
if (map_size != 0u) {
- auto lb = dedupe_map_.lower_bound(map.data());
- if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(map.data(), lb->first)) {
- DataAccess::SetOffset(oat_class, method_offsets_index_, lb->second);
- } else {
- DataAccess::SetOffset(oat_class, method_offsets_index_, offset_);
- dedupe_map_.PutBefore(lb, map.data(), offset_);
- offset_ += map_size;
- }
+ size_t offset = dedupe_map_.GetOrCreate(
+ map.data(),
+ [this, map_size]() {
+ uint32_t new_offset = offset_;
+ offset_ += map_size;
+ return new_offset;
+ });
+ // Code offset is not initialized yet, so set the map offset to 0u-offset.
+ DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
+ oat_class->method_headers_[method_offsets_index_].vmap_table_offset_ = 0u - offset;
}
++method_offsets_index_;
}
@@ -1342,10 +1235,11 @@
}
};
-template <typename DataAccess>
class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor {
public:
- WriteMapMethodVisitor(OatWriter* writer, OutputStream* out, const size_t file_offset,
+ WriteMapMethodVisitor(OatWriter* writer,
+ OutputStream* out,
+ const size_t file_offset,
size_t relative_offset)
: OatDexMethodVisitor(writer, relative_offset),
out_(out),
@@ -1360,22 +1254,31 @@
size_t file_offset = file_offset_;
OutputStream* out = out_;
- uint32_t map_offset = DataAccess::GetOffset(oat_class, method_offsets_index_);
+ uint32_t map_offset = oat_class->method_headers_[method_offsets_index_].vmap_table_offset_;
+ uint32_t code_offset = oat_class->method_offsets_[method_offsets_index_].code_offset_;
++method_offsets_index_;
- // Write deduplicated map.
- ArrayRef<const uint8_t> map = DataAccess::GetData(compiled_method);
- size_t map_size = map.size() * sizeof(map[0]);
- DCHECK((map_size == 0u && map_offset == 0u) ||
- (map_size != 0u && map_offset != 0u && map_offset <= offset_))
- << map_size << " " << map_offset << " " << offset_ << " "
- << PrettyMethod(it.GetMemberIndex(), *dex_file_) << " for " << DataAccess::Name();
- if (map_size != 0u && map_offset == offset_) {
- if (UNLIKELY(!writer_->WriteData(out, map.data(), map_size))) {
- ReportWriteFailure(it);
- return false;
+ DCHECK((compiled_method->GetVmapTable().size() == 0u && map_offset == 0u) ||
+ (compiled_method->GetVmapTable().size() != 0u && map_offset != 0u))
+ << compiled_method->GetVmapTable().size() << " " << map_offset << " "
+ << PrettyMethod(it.GetMemberIndex(), *dex_file_);
+
+ if (map_offset != 0u) {
+ // Transform map_offset to actual oat data offset.
+ map_offset = (code_offset - compiled_method->CodeDelta()) - map_offset;
+ DCHECK_NE(map_offset, 0u);
+ DCHECK_LE(map_offset, offset_) << PrettyMethod(it.GetMemberIndex(), *dex_file_);
+
+ ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
+ size_t map_size = map.size() * sizeof(map[0]);
+ if (map_offset == offset_) {
+ // Write deduplicated map (code info for Optimizing or transformation info for dex2dex).
+ if (UNLIKELY(!writer_->WriteData(out, map.data(), map_size))) {
+ ReportWriteFailure(it);
+ return false;
+ }
+ offset_ += map_size;
}
- offset_ += map_size;
}
DCHECK_OFFSET_();
}
@@ -1388,7 +1291,7 @@
size_t const file_offset_;
void ReportWriteFailure(const ClassDataItemIterator& it) {
- PLOG(ERROR) << "Failed to write " << DataAccess::Name() << " for "
+ PLOG(ERROR) << "Failed to write map for "
<< PrettyMethod(it.GetMemberIndex(), *dex_file_) << " to " << out_->GetLocation();
}
};
@@ -1481,19 +1384,10 @@
}
size_t OatWriter::InitOatMaps(size_t offset) {
- #define VISIT(VisitorType) \
- do { \
- VisitorType visitor(this, offset); \
- bool success = VisitDexMethods(&visitor); \
- DCHECK(success); \
- offset = visitor.GetOffset(); \
- } while (false)
-
- VISIT(InitMapMethodVisitor<GcMapDataAccess>);
- VISIT(InitMapMethodVisitor<MappingTableDataAccess>);
- VISIT(InitMapMethodVisitor<VmapTableDataAccess>);
-
- #undef VISIT
+ InitMapMethodVisitor visitor(this, offset);
+ bool success = VisitDexMethods(&visitor);
+ DCHECK(success);
+ offset = visitor.GetOffset();
return offset;
}
@@ -1647,9 +1541,7 @@
DO_STAT(size_code_alignment_);
DO_STAT(size_relative_call_thunks_);
DO_STAT(size_misc_thunks_);
- DO_STAT(size_mapping_table_);
DO_STAT(size_vmap_table_);
- DO_STAT(size_gc_map_);
DO_STAT(size_oat_dex_file_location_size_);
DO_STAT(size_oat_dex_file_location_data_);
DO_STAT(size_oat_dex_file_location_checksum_);
@@ -1764,29 +1656,14 @@
}
size_t OatWriter::WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset) {
- #define VISIT(VisitorType) \
- do { \
- VisitorType visitor(this, out, file_offset, relative_offset); \
- if (UNLIKELY(!VisitDexMethods(&visitor))) { \
- return 0; \
- } \
- relative_offset = visitor.GetOffset(); \
- } while (false)
-
- size_t gc_maps_offset = relative_offset;
- VISIT(WriteMapMethodVisitor<GcMapDataAccess>);
- size_gc_map_ = relative_offset - gc_maps_offset;
-
- size_t mapping_tables_offset = relative_offset;
- VISIT(WriteMapMethodVisitor<MappingTableDataAccess>);
- size_mapping_table_ = relative_offset - mapping_tables_offset;
-
size_t vmap_tables_offset = relative_offset;
- VISIT(WriteMapMethodVisitor<VmapTableDataAccess>);
+ WriteMapMethodVisitor visitor(this, out, file_offset, relative_offset);
+ if (UNLIKELY(!VisitDexMethods(&visitor))) {
+ return 0;
+ }
+ relative_offset = visitor.GetOffset();
size_vmap_table_ = relative_offset - vmap_tables_offset;
- #undef VISIT
-
return relative_offset;
}
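
(Aside: both rewritten visitors replace the manual lower_bound()/PutBefore() sequence with SafeMap::GetOrCreate(key, create_fn). A hypothetical free-function equivalent over std::map, plus usage shaped like InitCodeMethodVisitor above:)

    #include <cstdint>
    #include <map>

    template <typename K, typename V, typename CreateFn>
    V GetOrCreate(std::map<K, V>* map, const K& key, CreateFn create) {
      auto lb = map->lower_bound(key);
      if (lb != map->end() && !map->key_comp()(key, lb->first)) {
        return lb->second;  // Existing entry: reuse the deduplicated value.
      }
      V value = create();  // Invoked only on first insertion.
      map->emplace_hint(lb, key, value);
      return value;
    }

    // Usage shaped like the code-offset dedupe above (names assumed):
    //   bool deduped = true;
    //   uint32_t offset = GetOrCreate(&dedupe_map, code_key, [&]() {
    //     deduped = false;
    //     return NewQuickCodeOffset(...);
    //   });
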
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 5e7a4a3..3862798 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -219,13 +219,6 @@
class OatClass;
class OatDexFile;
- // The DataAccess classes are helper classes that provide access to members related to
- // a given map, i.e. GC map, mapping table or vmap table. By abstracting these away
- // we can share a lot of code for processing the maps with template classes below.
- struct GcMapDataAccess;
- struct MappingTableDataAccess;
- struct VmapTableDataAccess;
-
// The function VisitDexMethods() below iterates through all the methods in all
// the compiled dex files in order of their definitions. The method visitor
// classes provide individual bits of processing for each of the passes we need to
@@ -235,11 +228,9 @@
class OatDexMethodVisitor;
class InitOatClassesMethodVisitor;
class InitCodeMethodVisitor;
- template <typename DataAccess>
class InitMapMethodVisitor;
class InitImageMethodVisitor;
class WriteCodeMethodVisitor;
- template <typename DataAccess>
class WriteMapMethodVisitor;
// Visit all the methods in all the compiled dex files in their definition order
@@ -354,9 +345,7 @@
uint32_t size_code_alignment_;
uint32_t size_relative_call_thunks_;
uint32_t size_misc_thunks_;
- uint32_t size_mapping_table_;
uint32_t size_vmap_table_;
- uint32_t size_gc_map_;
uint32_t size_oat_dex_file_location_size_;
uint32_t size_oat_dex_file_location_data_;
uint32_t size_oat_dex_file_location_checksum_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 65e5c3a..953c0ae 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -44,18 +44,15 @@
#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/compiler_driver.h"
-#include "gc_map_builder.h"
#include "graph_visualizer.h"
#include "intrinsics.h"
#include "leb128.h"
-#include "mapping_table.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "parallel_move_resolver.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
-#include "vmap_table.h"
namespace art {
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a0c1412..3049128 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -6373,8 +6373,9 @@
reg,
method_reg,
ArtMethod::DexCacheResolvedMethodsOffset(kArmPointerSize).Int32Value());
- // temp = temp[index_in_cache]
- uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
+ // temp = temp[index_in_cache];
+ // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
+ uint32_t index_in_cache = invoke->GetDexMethodIndex();
__ LoadFromOffset(kLoadWord, reg, reg, CodeGenerator::GetCachePointerOffset(index_in_cache));
break;
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 7699ddd..c978aaa 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3680,7 +3680,8 @@
MemOperand(method_reg.X(),
ArtMethod::DexCacheResolvedMethodsOffset(kArm64WordSize).Int32Value()));
// temp = temp[index_in_cache];
- uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
+ // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
+ uint32_t index_in_cache = invoke->GetDexMethodIndex();
__ Ldr(reg.X(), MemOperand(reg.X(), GetCachePointerOffset(index_in_cache)));
break;
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 2df37cd..185397c 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3871,8 +3871,9 @@
reg,
method_reg,
ArtMethod::DexCacheResolvedMethodsOffset(kMipsPointerSize).Int32Value());
- // temp = temp[index_in_cache]
- uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
+ // temp = temp[index_in_cache];
+ // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
+ uint32_t index_in_cache = invoke->GetDexMethodIndex();
__ LoadFromOffset(kLoadWord,
reg,
reg,
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index cc1f372..246f5b7 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3085,8 +3085,9 @@
reg,
method_reg,
ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
- // temp = temp[index_in_cache]
- uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
+ // temp = temp[index_in_cache];
+ // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
+ uint32_t index_in_cache = invoke->GetDexMethodIndex();
__ LoadFromOffset(kLoadDoubleword,
reg,
reg,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 94d2f0c..304cf08 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4366,8 +4366,9 @@
// /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
__ movl(reg, Address(method_reg,
ArtMethod::DexCacheResolvedMethodsOffset(kX86PointerSize).Int32Value()));
- // temp = temp[index_in_cache]
- uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
+ // temp = temp[index_in_cache];
+ // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
+ uint32_t index_in_cache = invoke->GetDexMethodIndex();
__ movl(reg, Address(reg, CodeGenerator::GetCachePointerOffset(index_in_cache)));
break;
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index da126e4..056b69b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -808,8 +808,9 @@
__ movq(reg,
Address(CpuRegister(method_reg),
ArtMethod::DexCacheResolvedMethodsOffset(kX86_64PointerSize).SizeValue()));
- // temp = temp[index_in_cache]
- uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
+ // temp = temp[index_in_cache];
+ // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
+ uint32_t index_in_cache = invoke->GetDexMethodIndex();
__ movq(reg, Address(reg, CodeGenerator::GetCachePointerOffset(index_in_cache)));
break;
}
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 33803c1..77e0cbc 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1117,41 +1117,10 @@
}
}
- // Run simple optimizations on the graph.
- HDeadCodeElimination dce(callee_graph, stats_);
- HConstantFolding fold(callee_graph);
- HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
- InstructionSimplifier simplify(callee_graph, stats_);
- IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_, stats_);
-
- HOptimization* optimizations[] = {
- &intrinsics,
- &sharpening,
- &simplify,
- &fold,
- &dce,
- };
-
- for (size_t i = 0; i < arraysize(optimizations); ++i) {
- HOptimization* optimization = optimizations[i];
- optimization->Run();
- }
-
size_t number_of_instructions_budget = kMaximumNumberOfHInstructions;
- if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) {
- HInliner inliner(callee_graph,
- outermost_graph_,
- codegen_,
- outer_compilation_unit_,
- dex_compilation_unit,
- compiler_driver_,
- handles_,
- stats_,
- total_number_of_dex_registers_ + code_item->registers_size_,
- depth_ + 1);
- inliner.Run();
- number_of_instructions_budget += inliner.number_of_inlined_instructions_;
- }
+ size_t number_of_inlined_instructions =
+ RunOptimizations(callee_graph, code_item, dex_compilation_unit);
+ number_of_instructions_budget += number_of_inlined_instructions;
// TODO: We should abort only if all predecessors throw. However,
// HGraph::InlineInto currently does not handle an exit block with
@@ -1197,7 +1166,7 @@
for (HInstructionIterator instr_it(block->GetInstructions());
!instr_it.Done();
instr_it.Advance()) {
- if (number_of_instructions++ == number_of_instructions_budget) {
+ if (number_of_instructions++ == number_of_instructions_budget) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " is not inlined because its caller has reached"
<< " its instruction budget limit.";
@@ -1278,6 +1247,47 @@
return true;
}
+size_t HInliner::RunOptimizations(HGraph* callee_graph,
+ const DexFile::CodeItem* code_item,
+ const DexCompilationUnit& dex_compilation_unit) {
+ HDeadCodeElimination dce(callee_graph, stats_);
+ HConstantFolding fold(callee_graph);
+ HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
+ InstructionSimplifier simplify(callee_graph, stats_);
+ IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_, stats_);
+
+ HOptimization* optimizations[] = {
+ &intrinsics,
+ &sharpening,
+ &simplify,
+ &fold,
+ &dce,
+ };
+
+ for (size_t i = 0; i < arraysize(optimizations); ++i) {
+ HOptimization* optimization = optimizations[i];
+ optimization->Run();
+ }
+
+ size_t number_of_inlined_instructions = 0u;
+ if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) {
+ HInliner inliner(callee_graph,
+ outermost_graph_,
+ codegen_,
+ outer_compilation_unit_,
+ dex_compilation_unit,
+ compiler_driver_,
+ handles_,
+ stats_,
+ total_number_of_dex_registers_ + code_item->registers_size_,
+ depth_ + 1);
+ inliner.Run();
+ number_of_inlined_instructions += inliner.number_of_inlined_instructions_;
+ }
+
+ return number_of_inlined_instructions;
+}
+
void HInliner::FixUpReturnReferenceType(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
HInstruction* return_replacement,
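To make the budget arithmetic above concrete: if the base allowance kMaximumNumberOfHInstructions were, say, 32 and the recursive inliner run inside RunOptimizations reported 10 instructions inlined into the callee, the per-instruction check further down would compare against a budget of 42. Nested inlining thus earns the callee extra headroom instead of silently eating the caller's allowance (32 is an illustrative value; the actual constant is defined elsewhere in the inliner).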
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index cdb2167..7cf1424 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -76,6 +76,12 @@
bool same_dex_file,
HInstruction** return_replacement);
+ // Run simple optimizations on `callee_graph`.
+ // Returns the number of inlined instructions.
+ size_t RunOptimizations(HGraph* callee_graph,
+ const DexFile::CodeItem* code_item,
+ const DexCompilationUnit& dex_compilation_unit);
+
// Try to recognize known simple patterns and replace invoke call with appropriate instructions.
bool TryPatternSubstitution(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index c5f2342..06b3968 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -567,10 +567,10 @@
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
-static bool RequiresConstructorBarrier(const DexCompilationUnit* cu, const CompilerDriver& driver) {
+static bool RequiresConstructorBarrier(const DexCompilationUnit* cu, CompilerDriver* driver) {
Thread* self = Thread::Current();
return cu->IsConstructor()
- && driver.RequiresConstructorBarrier(self, cu->GetDexFile(), cu->GetClassDefIndex());
+ && driver->RequiresConstructorBarrier(self, cu->GetDexFile(), cu->GetClassDefIndex());
}
// Returns true if `block` has only one successor which starts at the next
@@ -616,7 +616,7 @@
if (graph_->ShouldGenerateConstructorBarrier()) {
// The compilation unit is null during testing.
if (dex_compilation_unit_ != nullptr) {
- DCHECK(RequiresConstructorBarrier(dex_compilation_unit_, *compiler_driver_))
+ DCHECK(RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_))
<< "Inconsistent use of ShouldGenerateConstructorBarrier. Should not generate a barrier.";
}
AppendInstruction(new (arena_) HMemoryBarrier(kStoreStore, dex_pc));
@@ -1687,9 +1687,29 @@
uint16_t HInstructionBuilder::LookupQuickenedInfo(uint32_t dex_pc) {
DCHECK(interpreter_metadata_ != nullptr);
- uint32_t dex_pc_in_map = DecodeUnsignedLeb128(&interpreter_metadata_);
- DCHECK_EQ(dex_pc, dex_pc_in_map);
- return DecodeUnsignedLeb128(&interpreter_metadata_);
+
+ // First check if the info has already been decoded from `interpreter_metadata_`.
+ auto it = skipped_interpreter_metadata_.find(dex_pc);
+ if (it != skipped_interpreter_metadata_.end()) {
+ // Remove the entry from the map and return the parsed info.
+ uint16_t value_in_map = it->second;
+ skipped_interpreter_metadata_.erase(it);
+ return value_in_map;
+ }
+
+ // Otherwise start parsing `interpreter_metadata_` until the slot for `dex_pc`
+ // is found. Store skipped values in the `skipped_interpreter_metadata_` map.
+ while (true) {
+ uint32_t dex_pc_in_map = DecodeUnsignedLeb128(&interpreter_metadata_);
+ uint16_t value_in_map = DecodeUnsignedLeb128(&interpreter_metadata_);
+ DCHECK_LE(dex_pc_in_map, dex_pc);
+
+ if (dex_pc_in_map == dex_pc) {
+ return value_in_map;
+ } else {
+ skipped_interpreter_metadata_.Put(dex_pc_in_map, value_in_map);
+ }
+ }
}
bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, uint32_t dex_pc) {
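The rewritten LookupQuickenedInfo is a one-pass decoder with a parking lot for out-of-order queries: entries skipped while scanning forward to the requested dex_pc are stashed in a map and served from there later, so each pair is decoded exactly once. A self-contained sketch of the same pattern, with the LEB128 stream replaced by a plain array for brevity:

    #include <cstdint>
    #include <map>
    #include <utility>

    // Entries form a stream of (dex_pc, value) pairs sorted by dex_pc.
    class QuickenInfoSketch {
     public:
      explicit QuickenInfoSketch(const std::pair<uint32_t, uint16_t>* stream)
          : cursor_(stream) {}

      uint16_t Lookup(uint32_t dex_pc) {
        auto it = skipped_.find(dex_pc);
        if (it != skipped_.end()) {  // Already decoded on an earlier scan.
          uint16_t value = it->second;
          skipped_.erase(it);
          return value;
        }
        while (true) {  // Scan forward, parking entries we pass over.
          uint32_t pc = cursor_->first;
          uint16_t value = cursor_->second;
          ++cursor_;
          if (pc == dex_pc) {
            return value;
          }
          skipped_.emplace(pc, value);
        }
      }

     private:
      const std::pair<uint32_t, uint16_t>* cursor_;  // Stands in for the LEB128 data.
      std::map<uint32_t, uint16_t> skipped_;
    };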
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 612594f..f480b70 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -59,6 +59,8 @@
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
interpreter_metadata_(interpreter_metadata),
+ skipped_interpreter_metadata_(std::less<uint32_t>(),
+ arena_->Adapter(kArenaAllocGraphBuilder)),
compilation_stats_(compiler_stats),
dex_cache_(dex_cache),
loop_headers_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)) {
@@ -287,7 +289,15 @@
// methods.
const DexCompilationUnit* const outer_compilation_unit_;
+ // Original values kept after instruction quickening. This is a data buffer
+ // of Leb128-encoded (dex_pc, value) pairs sorted by dex_pc.
const uint8_t* interpreter_metadata_;
+
+ // InstructionBuilder does not parse instructions in dex_pc order. Quickening
+ // info for out-of-order dex_pcs is stored in a map until the positions
+ // are eventually visited.
+ ArenaSafeMap<uint32_t, uint16_t> skipped_interpreter_metadata_;
+
OptimizingCompilerStats* compilation_stats_;
Handle<mirror::DexCache> dex_cache_;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index a589ef0..927e2ec 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1708,7 +1708,7 @@
// Mirrors ARRAYCOPY_SHORT_CHAR_ARRAY_THRESHOLD in libcore, so we can choose to use the native
// implementation there for longer copy lengths.
-static constexpr int32_t kSystemArrayCopyThreshold = 32;
+static constexpr int32_t kSystemArrayCopyCharThreshold = 32;
static void SetSystemArrayCopyLocationRequires(LocationSummary* locations,
uint32_t at,
@@ -1739,7 +1739,7 @@
HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
if (length != nullptr) {
int32_t len = length->GetValue();
- if (len < 0 || len > kSystemArrayCopyThreshold) {
+ if (len < 0 || len > kSystemArrayCopyCharThreshold) {
// Just call as normal.
return;
}
@@ -1882,7 +1882,7 @@
// If the length is negative, bail out.
__ Tbnz(WRegisterFrom(length), kWRegSize - 1, slow_path->GetEntryLabel());
// If the length > 32 then (currently) prefer libcore's native implementation.
- __ Cmp(WRegisterFrom(length), kSystemArrayCopyThreshold);
+ __ Cmp(WRegisterFrom(length), kSystemArrayCopyCharThreshold);
__ B(slow_path->GetEntryLabel(), gt);
} else {
// We have already checked in the LocationsBuilder for the constant case.
@@ -1943,7 +1943,271 @@
__ Bind(slow_path->GetExitLabel());
}
-UNIMPLEMENTED_INTRINSIC(ARM64, SystemArrayCopy)
+// For longer copy lengths we can choose to use libcore's native implementation.
+static constexpr int32_t kSystemArrayCopyThreshold = 128;
+
+// CodeGenerator::CreateSystemArrayCopyLocationSummary uses three temporary registers.
+// We want to use two temporary registers in order to reduce the register pressure on arm64,
+// so we don't use CodeGenerator::CreateSystemArrayCopyLocationSummary.
+void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) {
+ // Check to see if we have known failures that will cause us to have to bail out
+ // to the runtime, and just generate the runtime call directly.
+ HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
+ HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
+
+ // The positions must be non-negative.
+ if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
+ (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
+ // We will have to fail anyways.
+ return;
+ }
+
+ // The length must be >= 0.
+ HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
+ if (length != nullptr) {
+ int32_t len = length->GetValue();
+ if (len < 0 || len >= kSystemArrayCopyThreshold) {
+ // Just call as normal.
+ return;
+ }
+ }
+
+ SystemArrayCopyOptimizations optimizations(invoke);
+
+ if (optimizations.GetDestinationIsSource()) {
+ if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
+ // We only support backward copying if source and destination are the same.
+ return;
+ }
+ }
+
+ if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
+ // We currently don't intrinsify primitive copying.
+ return;
+ }
+
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+ LocationSummary* locations = new (allocator) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
+ locations->SetInAt(0, Location::RequiresRegister());
+ SetSystemArrayCopyLocationRequires(locations, 1, invoke->InputAt(1));
+ locations->SetInAt(2, Location::RequiresRegister());
+ SetSystemArrayCopyLocationRequires(locations, 3, invoke->InputAt(3));
+ SetSystemArrayCopyLocationRequires(locations, 4, invoke->InputAt(4));
+
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
+ vixl::MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+
+ Register src = XRegisterFrom(locations->InAt(0));
+ Location src_pos = locations->InAt(1);
+ Register dest = XRegisterFrom(locations->InAt(2));
+ Location dest_pos = locations->InAt(3);
+ Location length = locations->InAt(4);
+ Register temp1 = WRegisterFrom(locations->GetTemp(0));
+ Register temp2 = WRegisterFrom(locations->GetTemp(1));
+
+ SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ vixl::Label conditions_on_positions_validated;
+ SystemArrayCopyOptimizations optimizations(invoke);
+
+ if (!optimizations.GetDestinationIsSource() &&
+ (!src_pos.IsConstant() || !dest_pos.IsConstant())) {
+ __ Cmp(src, dest);
+ }
+ // If source and destination are the same, we go to slow path if we need to do
+ // forward copying.
+ if (src_pos.IsConstant()) {
+ int32_t src_pos_constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ if (dest_pos.IsConstant()) {
+ // Checked when building locations.
+ DCHECK(!optimizations.GetDestinationIsSource()
+ || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ if (!optimizations.GetDestinationIsSource()) {
+ __ B(&conditions_on_positions_validated, ne);
+ }
+ __ Cmp(WRegisterFrom(dest_pos), src_pos_constant);
+ __ B(slow_path->GetEntryLabel(), gt);
+ }
+ } else {
+ if (!optimizations.GetDestinationIsSource()) {
+ __ B(&conditions_on_positions_validated, ne);
+ }
+ __ Cmp(RegisterFrom(src_pos, invoke->InputAt(1)->GetType()),
+ OperandFrom(dest_pos, invoke->InputAt(3)->GetType()));
+ __ B(slow_path->GetEntryLabel(), lt);
+ }
+
+ __ Bind(&conditions_on_positions_validated);
+
+ if (!optimizations.GetSourceIsNotNull()) {
+ // Bail out if the source is null.
+ __ Cbz(src, slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
+ // Bail out if the destination is null.
+ __ Cbz(dest, slow_path->GetEntryLabel());
+ }
+
+ // We have already checked in the LocationsBuilder for the constant case.
+ if (!length.IsConstant() &&
+ !optimizations.GetCountIsSourceLength() &&
+ !optimizations.GetCountIsDestinationLength()) {
+ // If the length is negative, bail out.
+ __ Tbnz(WRegisterFrom(length), kWRegSize - 1, slow_path->GetEntryLabel());
+ // If the length >= 128 then (currently) prefer native implementation.
+ __ Cmp(WRegisterFrom(length), kSystemArrayCopyThreshold);
+ __ B(slow_path->GetEntryLabel(), ge);
+ }
+ // Validity checks: source.
+ CheckSystemArrayCopyPosition(masm,
+ src_pos,
+ src,
+ length,
+ slow_path,
+ temp1,
+ temp2,
+ optimizations.GetCountIsSourceLength());
+
+ // Validity checks: dest.
+ CheckSystemArrayCopyPosition(masm,
+ dest_pos,
+ dest,
+ length,
+ slow_path,
+ temp1,
+ temp2,
+ optimizations.GetCountIsDestinationLength());
+ {
+ // We use a block to end the scratch scope before the write barrier, thus
+ // freeing the temporary registers so they can be used in `MarkGCCard`.
+ UseScratchRegisterScope temps(masm);
+ Register temp3 = temps.AcquireW();
+ if (!optimizations.GetDoesNotNeedTypeCheck()) {
+ // Check whether all elements of the source array are assignable to the component
+ // type of the destination array. We do two checks: the classes are the same,
+ // or the destination is Object[]. If none of these checks succeed, we go to the
+ // slow path.
+ __ Ldr(temp1, MemOperand(dest, class_offset));
+ __ Ldr(temp2, MemOperand(src, class_offset));
+ bool did_unpoison = false;
+ if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
+ !optimizations.GetSourceIsNonPrimitiveArray()) {
+ // One or two of the references need to be unpoisoned. Unpoison them
+ // both to make the identity check valid.
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp2);
+ did_unpoison = true;
+ }
+
+ if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+ // Bail out if the destination is not a non primitive array.
+ // /* HeapReference<Class> */ temp3 = temp1->component_type_
+ __ Ldr(temp3, HeapOperand(temp1, component_offset));
+ __ Cbz(temp3, slow_path->GetEntryLabel());
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp3);
+ __ Ldrh(temp3, HeapOperand(temp3, primitive_offset));
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Cbnz(temp3, slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ // Bail out if the source is not a non primitive array.
+ // /* HeapReference<Class> */ temp3 = temp2->component_type_
+ __ Ldr(temp3, HeapOperand(temp2, component_offset));
+ __ Cbz(temp3, slow_path->GetEntryLabel());
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp3);
+ __ Ldrh(temp3, HeapOperand(temp3, primitive_offset));
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Cbnz(temp3, slow_path->GetEntryLabel());
+ }
+
+ __ Cmp(temp1, temp2);
+
+ if (optimizations.GetDestinationIsTypedObjectArray()) {
+ vixl::Label do_copy;
+ __ B(&do_copy, eq);
+ if (!did_unpoison) {
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+ }
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ __ Ldr(temp1, HeapOperand(temp1, component_offset));
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
+ __ Ldr(temp1, HeapOperand(temp1, super_offset));
+ // No need to unpoison the result, we're comparing against null.
+ __ Cbnz(temp1, slow_path->GetEntryLabel());
+ __ Bind(&do_copy);
+ } else {
+ __ B(slow_path->GetEntryLabel(), ne);
+ }
+ } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
+ // Bail out if the source is not a non primitive array.
+ // /* HeapReference<Class> */ temp1 = src->klass_
+ __ Ldr(temp1, HeapOperand(src.W(), class_offset));
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ temp3 = temp1->component_type_
+ __ Ldr(temp3, HeapOperand(temp1, component_offset));
+ __ Cbz(temp3, slow_path->GetEntryLabel());
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp3);
+ __ Ldrh(temp3, HeapOperand(temp3, primitive_offset));
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Cbnz(temp3, slow_path->GetEntryLabel());
+ }
+
+ Register src_curr_addr = temp1.X();
+ Register dst_curr_addr = temp2.X();
+ Register src_stop_addr = temp3.X();
+
+ GenSystemArrayCopyAddresses(masm,
+ Primitive::kPrimNot,
+ src,
+ src_pos,
+ dest,
+ dest_pos,
+ length,
+ src_curr_addr,
+ dst_curr_addr,
+ src_stop_addr);
+
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison, nor do any read barrier as the next uses of the destination
+ // array will do it.
+ vixl::Label loop, done;
+ const int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
+ __ Bind(&loop);
+ __ Cmp(src_curr_addr, src_stop_addr);
+ __ B(&done, eq);
+ {
+ Register tmp = temps.AcquireW();
+ __ Ldr(tmp, MemOperand(src_curr_addr, element_size, vixl::PostIndex));
+ __ Str(tmp, MemOperand(dst_curr_addr, element_size, vixl::PostIndex));
+ }
+ __ B(&loop);
+ __ Bind(&done);
+ }
+ // We only need one card marking on the destination array.
+ codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null */ false);
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARM64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(ARM64, DoubleIsInfinite)
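Past the type checks, the fast path emitted above reduces to a raw word-by-word copy followed by a single card mark. A rough C++ equivalent, assuming 32-bit heap references and no read barrier (MarkGCCard is a no-op stand-in for the codegen call):

    #include <cstdint>

    void MarkGCCard(uint32_t* /* dest_array */) {}  // Stand-in; the real one dirties a card.

    void CopyRefsRaw(uint32_t* src_base, int32_t src_pos,
                     uint32_t* dst_base, int32_t dest_pos, int32_t length) {
      uint32_t* src_cur = src_base + src_pos;
      uint32_t* src_stop = src_cur + length;
      uint32_t* dst_cur = dst_base + dest_pos;
      while (src_cur != src_stop) {
        *dst_cur++ = *src_cur++;  // Copy raw reference bits; poisoning is preserved.
      }
      MarkGCCard(dst_base);  // One card mark covers the whole destination array.
    }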
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 3d6bf62..cad94c7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -597,9 +597,7 @@
codegen->GetCoreSpillMask(),
codegen->GetFpuSpillMask(),
ArrayRef<const SrcMapElem>(),
- ArrayRef<const uint8_t>(), // mapping_table.
ArrayRef<const uint8_t>(stack_map),
- ArrayRef<const uint8_t>(), // native_gc_map.
ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
ArrayRef<const LinkerPatch>(linker_patches));
@@ -916,9 +914,7 @@
const void* code = code_cache->CommitCode(
self,
method,
- nullptr,
stack_map_data,
- nullptr,
codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
codegen->GetCoreSpillMask(),
codegen->GetFpuSpillMask(),
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0889098..370583e 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -68,6 +68,7 @@
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "oat_file_assistant.h"
#include "oat_writer.h"
#include "os.h"
#include "runtime.h"
@@ -1325,7 +1326,7 @@
TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
std::vector<gc::space::ImageSpace*> image_spaces =
Runtime::Current()->GetHeap()->GetBootImageSpaces();
- image_file_location_oat_checksum_ = image_spaces[0]->GetImageHeader().GetOatChecksum();
+ image_file_location_oat_checksum_ = OatFileAssistant::CalculateCombinedImageChecksum();
image_file_location_oat_data_begin_ =
reinterpret_cast<uintptr_t>(image_spaces[0]->GetImageHeader().GetOatDataBegin());
image_patch_delta_ = image_spaces[0]->GetImageHeader().GetPatchDelta();
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 0e709eb..77efb6b 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -776,7 +776,7 @@
if (imm5 == 0) {
args << "rrx";
} else {
- args << "ror";
+ args << "ror #" << imm5;
}
break;
}
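With this fix a register-shift operand with, for example, imm5 == 3 now disassembles as "ror #3" instead of a bare "ror", so the rotate amount is no longer silently dropped from the output.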
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index da7f43c..c2a812e 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -36,7 +36,6 @@
#include "image.h"
#include "scoped_thread_state_change.h"
#include "os.h"
-#include "gc_map.h"
#include "cmdline.h"
#include "backtrace/BacktraceMap.h"
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 9a3bb02..b673eff 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -38,7 +38,6 @@
#include "dex_instruction.h"
#include "disassembler.h"
#include "elf_builder.h"
-#include "gc_map.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
@@ -46,7 +45,6 @@
#include "indenter.h"
#include "linker/buffered_output_stream.h"
#include "linker/file_output_stream.h"
-#include "mapping_table.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
@@ -62,7 +60,6 @@
#include "ScopedLocalRef.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"
-#include "vmap_table.h"
#include "well_known_classes.h"
#include <sys/stat.h>
@@ -282,9 +279,7 @@
class OatDumperOptions {
public:
- OatDumperOptions(bool dump_raw_mapping_table,
- bool dump_raw_gc_map,
- bool dump_vmap,
+ OatDumperOptions(bool dump_vmap,
bool dump_code_info_stack_maps,
bool disassemble_code,
bool absolute_addresses,
@@ -297,9 +292,7 @@
const char* app_image,
const char* app_oat,
uint32_t addr2instr)
- : dump_raw_mapping_table_(dump_raw_mapping_table),
- dump_raw_gc_map_(dump_raw_gc_map),
- dump_vmap_(dump_vmap),
+ : dump_vmap_(dump_vmap),
dump_code_info_stack_maps_(dump_code_info_stack_maps),
disassemble_code_(disassemble_code),
absolute_addresses_(absolute_addresses),
@@ -314,8 +307,6 @@
addr2instr_(addr2instr),
class_loader_(nullptr) {}
- const bool dump_raw_mapping_table_;
- const bool dump_raw_gc_map_;
const bool dump_vmap_;
const bool dump_code_info_stack_maps_;
const bool disassemble_code_;
@@ -572,9 +563,7 @@
code_offset &= ~0x1;
}
offsets_.insert(code_offset);
- offsets_.insert(oat_method.GetMappingTableOffset());
offsets_.insert(oat_method.GetVmapTableOffset());
- offsets_.insert(oat_method.GetGcMapOffset());
}
bool DumpOatDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) {
@@ -843,22 +832,6 @@
success = false;
}
vios->Stream() << "\n";
-
- vios->Stream() << "gc_map: ";
- if (options_.absolute_addresses_) {
- vios->Stream() << StringPrintf("%p ", oat_method.GetGcMap());
- }
- uint32_t gc_map_offset = oat_method.GetGcMapOffset();
- vios->Stream() << StringPrintf("(offset=0x%08x)\n", gc_map_offset);
- if (gc_map_offset > oat_file_.Size()) {
- vios->Stream() << StringPrintf("WARNING: "
- "gc map table offset 0x%08x is past end of file 0x%08zx.\n",
- gc_map_offset, oat_file_.Size());
- success = false;
- } else if (options_.dump_raw_gc_map_) {
- ScopedIndentation indent3(vios);
- DumpGcMap(vios->Stream(), oat_method, code_item);
- }
}
{
vios->Stream() << "OatQuickMethodHeader ";
@@ -879,24 +852,6 @@
}
ScopedIndentation indent2(vios);
- vios->Stream() << "mapping_table: ";
- if (options_.absolute_addresses_) {
- vios->Stream() << StringPrintf("%p ", oat_method.GetMappingTable());
- }
- uint32_t mapping_table_offset = oat_method.GetMappingTableOffset();
- vios->Stream() << StringPrintf("(offset=0x%08x)\n", oat_method.GetMappingTableOffset());
- if (mapping_table_offset > oat_file_.Size()) {
- vios->Stream() << StringPrintf("WARNING: "
- "mapping table offset 0x%08x is past end of file 0x%08zx. "
- "mapping table offset was loaded from offset 0x%08x.\n",
- mapping_table_offset, oat_file_.Size(),
- oat_method.GetMappingTableOffsetOffset());
- success = false;
- } else if (options_.dump_raw_mapping_table_) {
- ScopedIndentation indent3(vios);
- DumpMappingTable(vios, oat_method);
- }
-
vios->Stream() << "vmap_table: ";
if (options_.absolute_addresses_) {
vios->Stream() << StringPrintf("%p ", oat_method.GetVmapTable());
@@ -973,7 +928,7 @@
success = false;
if (options_.disassemble_code_) {
if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
- DumpCode(vios, verifier.get(), oat_method, code_item, true, kPrologueBytes);
+ DumpCode(vios, oat_method, code_item, true, kPrologueBytes);
}
}
} else if (code_size > kMaxCodeSize) {
@@ -986,11 +941,11 @@
success = false;
if (options_.disassemble_code_) {
if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
- DumpCode(vios, verifier.get(), oat_method, code_item, true, kPrologueBytes);
+ DumpCode(vios, oat_method, code_item, true, kPrologueBytes);
}
}
} else if (options_.disassemble_code_) {
- DumpCode(vios, verifier.get(), oat_method, code_item, !success, 0);
+ DumpCode(vios, oat_method, code_item, !success, 0);
}
}
}
@@ -1040,12 +995,7 @@
ScopedIndentation indent(vios);
vios->Stream() << "quickened data\n";
} else {
- // Otherwise, display the vmap table.
- const uint8_t* raw_table = oat_method.GetVmapTable();
- if (raw_table != nullptr) {
- VmapTable vmap_table(raw_table);
- DumpVmapTable(vios->Stream(), oat_method, vmap_table);
- }
+ // Otherwise, there is nothing to display.
}
}
@@ -1060,32 +1010,6 @@
options_.dump_code_info_stack_maps_);
}
- // Display a vmap table.
- void DumpVmapTable(std::ostream& os,
- const OatFile::OatMethod& oat_method,
- const VmapTable& vmap_table) {
- bool first = true;
- bool processing_fp = false;
- uint32_t spill_mask = oat_method.GetCoreSpillMask();
- for (size_t i = 0; i < vmap_table.Size(); i++) {
- uint16_t dex_reg = vmap_table[i];
- uint32_t cpu_reg = vmap_table.ComputeRegister(spill_mask, i,
- processing_fp ? kFloatVReg : kIntVReg);
- os << (first ? "v" : ", v") << dex_reg;
- if (!processing_fp) {
- os << "/r" << cpu_reg;
- } else {
- os << "/fr" << cpu_reg;
- }
- first = false;
- if (!processing_fp && dex_reg == 0xFFFF) {
- processing_fp = true;
- spill_mask = oat_method.GetFpSpillMask();
- }
- }
- os << "\n";
- }
-
void DumpVregLocations(std::ostream& os, const OatFile::OatMethod& oat_method,
const DexFile::CodeItem* code_item) {
if (code_item != nullptr) {
@@ -1128,203 +1052,18 @@
}
}
- void DescribeVReg(std::ostream& os, const OatFile::OatMethod& oat_method,
- const DexFile::CodeItem* code_item, size_t reg, VRegKind kind) {
- const uint8_t* raw_table = oat_method.GetVmapTable();
- if (raw_table != nullptr) {
- const VmapTable vmap_table(raw_table);
- uint32_t vmap_offset;
- if (vmap_table.IsInContext(reg, kind, &vmap_offset)) {
- bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- uint32_t spill_mask = is_float ? oat_method.GetFpSpillMask()
- : oat_method.GetCoreSpillMask();
- os << (is_float ? "fr" : "r") << vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
- } else {
- uint32_t offset = StackVisitor::GetVRegOffsetFromQuickCode(
- code_item,
- oat_method.GetCoreSpillMask(),
- oat_method.GetFpSpillMask(),
- oat_method.GetFrameSizeInBytes(),
- reg,
- GetInstructionSet());
- os << "[sp + #" << offset << "]";
- }
- }
- }
-
- void DumpGcMapRegisters(std::ostream& os, const OatFile::OatMethod& oat_method,
- const DexFile::CodeItem* code_item,
- size_t num_regs, const uint8_t* reg_bitmap) {
- bool first = true;
- for (size_t reg = 0; reg < num_regs; reg++) {
- if (((reg_bitmap[reg / 8] >> (reg % 8)) & 0x01) != 0) {
- if (first) {
- os << " v" << reg << " (";
- DescribeVReg(os, oat_method, code_item, reg, kReferenceVReg);
- os << ")";
- first = false;
- } else {
- os << ", v" << reg << " (";
- DescribeVReg(os, oat_method, code_item, reg, kReferenceVReg);
- os << ")";
- }
- }
- }
- if (first) {
- os << "No registers in GC map\n";
- } else {
- os << "\n";
- }
- }
- void DumpGcMap(std::ostream& os, const OatFile::OatMethod& oat_method,
- const DexFile::CodeItem* code_item) {
- const uint8_t* gc_map_raw = oat_method.GetGcMap();
- if (gc_map_raw == nullptr) {
- return; // No GC map.
- }
- const void* quick_code = oat_method.GetQuickCode();
- NativePcOffsetToReferenceMap map(gc_map_raw);
- for (size_t entry = 0; entry < map.NumEntries(); entry++) {
- const uint8_t* native_pc = reinterpret_cast<const uint8_t*>(quick_code) +
- map.GetNativePcOffset(entry);
- os << StringPrintf("%p", native_pc);
- DumpGcMapRegisters(os, oat_method, code_item, map.RegWidth() * 8, map.GetBitMap(entry));
- }
- }
-
- void DumpMappingTable(VariableIndentationOutputStream* vios,
- const OatFile::OatMethod& oat_method) {
- const void* quick_code = oat_method.GetQuickCode();
- if (quick_code == nullptr) {
+ void DumpInformationAtOffset(VariableIndentationOutputStream* vios,
+ const OatFile::OatMethod& oat_method,
+ const DexFile::CodeItem* code_item,
+ size_t offset,
+ bool suspend_point_mapping) {
+ if (!IsMethodGeneratedByOptimizingCompiler(oat_method, code_item)) {
+ // Native method.
return;
}
- MappingTable table(oat_method.GetMappingTable());
- if (table.TotalSize() != 0) {
- if (table.PcToDexSize() != 0) {
- typedef MappingTable::PcToDexIterator It;
- vios->Stream() << "suspend point mappings {\n";
- for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- ScopedIndentation indent1(vios);
- vios->Stream() << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc());
- }
- vios->Stream() << "}\n";
- }
- if (table.DexToPcSize() != 0) {
- typedef MappingTable::DexToPcIterator It;
- vios->Stream() << "catch entry mappings {\n";
- for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- ScopedIndentation indent1(vios);
- vios->Stream() << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc());
- }
- vios->Stream() << "}\n";
- }
- }
- }
-
- uint32_t DumpInformationAtOffset(VariableIndentationOutputStream* vios,
- const OatFile::OatMethod& oat_method,
- const DexFile::CodeItem* code_item,
- size_t offset,
- bool suspend_point_mapping) {
- if (IsMethodGeneratedByOptimizingCompiler(oat_method, code_item)) {
- if (suspend_point_mapping) {
- ScopedIndentation indent1(vios);
- DumpDexRegisterMapAtOffset(vios, oat_method, code_item, offset);
- }
- // The return value is not used in the case of a method compiled
- // with the optimizing compiler.
- return DexFile::kDexNoIndex;
- } else {
- return DumpMappingAtOffset(vios->Stream(), oat_method, offset, suspend_point_mapping);
- }
- }
-
- uint32_t DumpMappingAtOffset(std::ostream& os, const OatFile::OatMethod& oat_method,
- size_t offset, bool suspend_point_mapping) {
- MappingTable table(oat_method.GetMappingTable());
- if (suspend_point_mapping && table.PcToDexSize() > 0) {
- typedef MappingTable::PcToDexIterator It;
- for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (offset == cur.NativePcOffset()) {
- os << StringPrintf("suspend point dex PC: 0x%04x\n", cur.DexPc());
- return cur.DexPc();
- }
- }
- } else if (!suspend_point_mapping && table.DexToPcSize() > 0) {
- typedef MappingTable::DexToPcIterator It;
- for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (offset == cur.NativePcOffset()) {
- os << StringPrintf("catch entry dex PC: 0x%04x\n", cur.DexPc());
- return cur.DexPc();
- }
- }
- }
- return DexFile::kDexNoIndex;
- }
-
- void DumpGcMapAtNativePcOffset(std::ostream& os, const OatFile::OatMethod& oat_method,
- const DexFile::CodeItem* code_item, size_t native_pc_offset) {
- const uint8_t* gc_map_raw = oat_method.GetGcMap();
- if (gc_map_raw != nullptr) {
- NativePcOffsetToReferenceMap map(gc_map_raw);
- if (map.HasEntry(native_pc_offset)) {
- size_t num_regs = map.RegWidth() * 8;
- const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
- bool first = true;
- for (size_t reg = 0; reg < num_regs; reg++) {
- if (((reg_bitmap[reg / 8] >> (reg % 8)) & 0x01) != 0) {
- if (first) {
- os << "GC map objects: v" << reg << " (";
- DescribeVReg(os, oat_method, code_item, reg, kReferenceVReg);
- os << ")";
- first = false;
- } else {
- os << ", v" << reg << " (";
- DescribeVReg(os, oat_method, code_item, reg, kReferenceVReg);
- os << ")";
- }
- }
- }
- if (!first) {
- os << "\n";
- }
- }
- }
- }
-
- void DumpVRegsAtDexPc(std::ostream& os, verifier::MethodVerifier* verifier,
- const OatFile::OatMethod& oat_method,
- const DexFile::CodeItem* code_item, uint32_t dex_pc) {
- DCHECK(verifier != nullptr);
- std::vector<int32_t> kinds = verifier->DescribeVRegs(dex_pc);
- bool first = true;
- for (size_t reg = 0; reg < code_item->registers_size_; reg++) {
- VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
- if (kind != kUndefined) {
- if (first) {
- os << "VRegs: v";
- first = false;
- } else {
- os << ", v";
- }
- os << reg << " (";
- switch (kind) {
- case kImpreciseConstant:
- os << "Imprecise Constant: " << kinds.at((reg * 2) + 1) << ", ";
- DescribeVReg(os, oat_method, code_item, reg, kind);
- break;
- case kConstant:
- os << "Constant: " << kinds.at((reg * 2) + 1);
- break;
- default:
- DescribeVReg(os, oat_method, code_item, reg, kind);
- break;
- }
- os << ")";
- }
- }
- if (!first) {
- os << "\n";
+ if (suspend_point_mapping) {
+ ScopedIndentation indent1(vios);
+ DumpDexRegisterMapAtOffset(vios, oat_method, code_item, offset);
}
}
@@ -1349,7 +1088,7 @@
// null, then this method has been compiled with the optimizing
// compiler.
return oat_method.GetQuickCode() != nullptr &&
- oat_method.GetGcMap() == nullptr &&
+ oat_method.GetVmapTable() != nullptr &&
code_item != nullptr;
}
@@ -1409,7 +1148,6 @@
}
void DumpCode(VariableIndentationOutputStream* vios,
- verifier::MethodVerifier* verifier,
const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item,
bool bad_input, size_t code_size) {
const void* quick_code = oat_method.GetQuickCode();
@@ -1429,14 +1167,7 @@
}
offset += disassembler_->Dump(vios->Stream(), quick_native_pc + offset);
if (!bad_input) {
- uint32_t dex_pc =
- DumpInformationAtOffset(vios, oat_method, code_item, offset, true);
- if (dex_pc != DexFile::kDexNoIndex) {
- DumpGcMapAtNativePcOffset(vios->Stream(), oat_method, code_item, offset);
- if (verifier != nullptr) {
- DumpVRegsAtDexPc(vios->Stream(), verifier, oat_method, code_item, dex_pc);
- }
- }
+ DumpInformationAtOffset(vios, oat_method, code_item, offset, true);
}
}
}
@@ -1986,10 +1717,6 @@
OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader));
if (method->IsNative()) {
- if (!Runtime::Current()->GetClassLinker()->IsQuickGenericJniStub(quick_oat_code_begin)) {
- DCHECK(method_header->GetNativeGcMap() == nullptr) << PrettyMethod(method);
- DCHECK(method_header->GetMappingTable() == nullptr) << PrettyMethod(method);
- }
bool first_occurrence;
uint32_t quick_oat_code_size = GetQuickOatCodeSize(method);
ComputeOatSize(quick_oat_code_begin, &first_occurrence);
@@ -2013,17 +1740,6 @@
stats_.dex_instruction_bytes += dex_instruction_bytes;
bool first_occurrence;
- size_t gc_map_bytes = ComputeOatSize(method_header->GetNativeGcMap(), &first_occurrence);
- if (first_occurrence) {
- stats_.gc_map_bytes += gc_map_bytes;
- }
-
- size_t pc_mapping_table_bytes = ComputeOatSize(
- method_header->GetMappingTable(), &first_occurrence);
- if (first_occurrence) {
- stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
- }
-
size_t vmap_table_bytes = 0u;
if (!method_header->IsOptimized()) {
// Methods compiled with the optimizing compiler have no vmap table.
@@ -2052,11 +1768,12 @@
uint32_t method_access_flags = method->GetAccessFlags();
indent_os << StringPrintf("OAT CODE: %p-%p\n", quick_oat_code_begin, quick_oat_code_end);
- indent_os << StringPrintf("SIZE: Dex Instructions=%zd GC=%zd Mapping=%zd AccessFlags=0x%x\n",
- dex_instruction_bytes, gc_map_bytes, pc_mapping_table_bytes,
+ indent_os << StringPrintf("SIZE: Dex Instructions=%zd StackMaps=%zd AccessFlags=0x%x\n",
+ dex_instruction_bytes,
+ vmap_table_bytes,
method_access_flags);
- size_t total_size = dex_instruction_bytes + gc_map_bytes + pc_mapping_table_bytes +
+ size_t total_size = dex_instruction_bytes +
vmap_table_bytes + quick_oat_code_size + ArtMethod::Size(image_header_.GetPointerSize());
double expansion =
@@ -2101,8 +1818,6 @@
size_t large_initializer_code_bytes;
size_t large_method_code_bytes;
- size_t gc_map_bytes;
- size_t pc_mapping_table_bytes;
size_t vmap_table_bytes;
size_t dex_instruction_bytes;
@@ -2131,8 +1846,6 @@
class_initializer_code_bytes(0),
large_initializer_code_bytes(0),
large_method_code_bytes(0),
- gc_map_bytes(0),
- pc_mapping_table_bytes(0),
vmap_table_bytes(0),
dex_instruction_bytes(0) {}
@@ -2351,11 +2064,7 @@
PercentOfOatBytes(oat_dex_file_size.second));
}
- os << "\n" << StringPrintf("gc_map_bytes = %7zd (%2.0f%% of oat file bytes)\n"
- "pc_mapping_table_bytes = %7zd (%2.0f%% of oat file bytes)\n"
- "vmap_table_bytes = %7zd (%2.0f%% of oat file bytes)\n\n",
- gc_map_bytes, PercentOfOatBytes(gc_map_bytes),
- pc_mapping_table_bytes, PercentOfOatBytes(pc_mapping_table_bytes),
+ os << "\n" << StringPrintf("vmap_table_bytes = %7zd (%2.0f%% of oat file bytes)\n\n",
vmap_table_bytes, PercentOfOatBytes(vmap_table_bytes))
<< std::flush;
@@ -2590,10 +2299,6 @@
oat_filename_ = option.substr(strlen("--oat-file=")).data();
} else if (option.starts_with("--image=")) {
image_location_ = option.substr(strlen("--image=")).data();
- } else if (option == "--dump:raw_mapping_table") {
- dump_raw_mapping_table_ = true;
- } else if (option == "--dump:raw_gc_map") {
- dump_raw_gc_map_ = true;
} else if (option == "--no-dump:vmap") {
dump_vmap_ = false;
} else if (option =="--dump:code_info_stack_maps") {
@@ -2683,12 +2388,6 @@
usage += Base::GetUsage();
usage += // Optional.
- " --dump:raw_mapping_table enables dumping of the mapping table.\n"
- " Example: --dump:raw_mapping_table\n"
- "\n"
- " --dump:raw_gc_map enables dumping of the GC map.\n"
- " Example: --dump:raw_gc_map\n"
- "\n"
" --no-dump:vmap may be used to disable vmap dumping.\n"
" Example: --no-dump:vmap\n"
"\n"
@@ -2739,8 +2438,6 @@
const char* method_filter_ = "";
const char* image_location_ = nullptr;
std::string elf_filename_prefix_;
- bool dump_raw_mapping_table_ = false;
- bool dump_raw_gc_map_ = false;
bool dump_vmap_ = true;
bool dump_code_info_stack_maps_ = false;
bool disassemble_code_ = true;
@@ -2763,8 +2460,6 @@
bool absolute_addresses = (args_->oat_filename_ == nullptr);
oat_dumper_options_.reset(new OatDumperOptions(
- args_->dump_raw_mapping_table_,
- args_->dump_raw_gc_map_,
args_->dump_vmap_,
args_->dump_code_info_stack_maps_,
args_->disassemble_code_,
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index bf062ed..c7ced8a 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -97,16 +97,6 @@
ASSERT_TRUE(Exec(kModeOat, {}, &error_msg)) << error_msg;
}
-TEST_F(OatDumpTest, TestDumpRawMappingTable) {
- std::string error_msg;
- ASSERT_TRUE(Exec(kModeArt, {"--dump:raw_mapping_table"}, &error_msg)) << error_msg;
-}
-
-TEST_F(OatDumpTest, TestDumpRawGcMap) {
- std::string error_msg;
- ASSERT_TRUE(Exec(kModeArt, {"--dump:raw_gc_map"}, &error_msg)) << error_msg;
-}
-
TEST_F(OatDumpTest, TestNoDumpVmap) {
std::string error_msg;
ASSERT_TRUE(Exec(kModeArt, {"--no-dump:vmap"}, &error_msg)) << error_msg;
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 542a2c4..c859079 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -149,6 +149,7 @@
native/java_lang_VMClassLoader.cc \
native/java_lang_ref_FinalizerReference.cc \
native/java_lang_ref_Reference.cc \
+ native/java_lang_reflect_AbstractMethod.cc \
native/java_lang_reflect_Array.cc \
native/java_lang_reflect_Constructor.cc \
native/java_lang_reflect_Field.cc \
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 969a038..75d9073 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1934,7 +1934,12 @@
TestFields(self, this, Primitive::Type::kPrimLong);
}
-TEST_F(StubTest, IMT) {
+// Disabled, b/27991555.
+// FIXME: Hacking the entry point to point to art_quick_to_interpreter_bridge is broken.
+// The bridge calls through to GetCalleeSaveMethodCaller() which looks up the pre-header
+// and gets a bogus OatQuickMethodHeader* pointing into our assembly code just before
+// the bridge and uses that to check for inlined frames, crashing in the process.
+TEST_F(StubTest, DISABLED_IMT) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index f97ad51..34d19d1 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -31,7 +31,6 @@
#include "jit/jit_code_cache.h"
#include "jit/profiling_info.h"
#include "jni_internal.h"
-#include "mapping_table.h"
#include "mirror/abstract_method.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index fc1a52f..12d3be7 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -140,7 +140,7 @@
HashSet() : HashSet(kDefaultMinLoadFactor, kDefaultMaxLoadFactor) {}
- HashSet(double min_load_factor, double max_load_factor)
+ HashSet(double min_load_factor, double max_load_factor) noexcept
: num_elements_(0u),
num_buckets_(0u),
elements_until_expand_(0u),
@@ -152,7 +152,7 @@
DCHECK_LT(max_load_factor, 1.0);
}
- explicit HashSet(const allocator_type& alloc)
+ explicit HashSet(const allocator_type& alloc) noexcept
: allocfn_(alloc),
hashfn_(),
emptyfn_(),
@@ -166,7 +166,7 @@
max_load_factor_(kDefaultMaxLoadFactor) {
}
- HashSet(const HashSet& other)
+ HashSet(const HashSet& other) noexcept
: allocfn_(other.allocfn_),
hashfn_(other.hashfn_),
emptyfn_(other.emptyfn_),
@@ -184,7 +184,9 @@
}
}
- HashSet(HashSet&& other)
+ // noexcept is required so that containers use the move constructor instead of the copy constructor.
+ // b/27860101
+ HashSet(HashSet&& other) noexcept
: allocfn_(std::move(other.allocfn_)),
hashfn_(std::move(other.hashfn_)),
emptyfn_(std::move(other.emptyfn_)),
@@ -206,7 +208,7 @@
// Construct from existing data.
// Read from a block of memory; if make_copy_of_data is false, then data_ points into the
// passed-in ptr_.
- HashSet(const uint8_t* ptr, bool make_copy_of_data, size_t* read_count) {
+ HashSet(const uint8_t* ptr, bool make_copy_of_data, size_t* read_count) noexcept {
uint64_t temp;
size_t offset = 0;
offset = ReadFromBytes(ptr, offset, &temp);
@@ -256,12 +258,12 @@
DeallocateStorage();
}
- HashSet& operator=(HashSet&& other) {
+ HashSet& operator=(HashSet&& other) noexcept {
HashSet(std::move(other)).swap(*this);
return *this;
}
- HashSet& operator=(const HashSet& other) {
+ HashSet& operator=(const HashSet& other) noexcept {
HashSet(other).swap(*this); // NOLINT(runtime/explicit) - a case of lint gone mad.
return *this;
}
@@ -298,6 +300,11 @@
return Size() == 0;
}
+ // Return true if the hash set has ownership of the underlying data.
+ bool OwnsData() const {
+ return owns_data_;
+ }
+
// Erase algorithm:
// Make an empty slot where the iterator is pointing.
// Scan forwards until we hit another empty slot.
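The noexcept annotations matter because standard containers only move elements during reallocation when the move constructor cannot throw; otherwise std::move_if_noexcept falls back to copying, which is the behavior the b/27860101 comment above refers to. A minimal illustration with a stand-in type:

    #include <type_traits>

    struct TableSketch {
      TableSketch() = default;
      TableSketch(const TableSketch&) = default;
      TableSketch(TableSketch&&) noexcept = default;  // Drop noexcept and
                                                      // vector growth copies instead.
    };

    // std::vector<TableSketch> moves elements on reallocation precisely
    // because this property holds:
    static_assert(std::is_nothrow_move_constructible<TableSketch>::value,
                  "growth would copy, not move");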
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 293451c..17e0339 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -88,6 +88,8 @@
kOatFileManagerLock,
kTracingUniqueMethodsLock,
kTracingStreamingLock,
+ kDeoptimizedMethodsLock,
+ kClassLoaderClassesLock,
kDefaultMutexLevel,
kMarkSweepLargeObjectLock,
kPinTableLock,
@@ -96,7 +98,7 @@
kAllocatedThreadIdsLock,
kMonitorPoolLock,
kMethodVerifiersLock,
- kClassLinkerClassesLock,
+ kClassLinkerClassesLock, // TODO rename.
kBreakpointLock,
kMonitorLock,
kMonitorListLock,
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 7595d14..0e2f9f2 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -18,7 +18,6 @@
#define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
#include "art_method-inl.h"
-#include "gc_map.h"
#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "stack_map.h"
@@ -54,11 +53,8 @@
void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- if (GetCurrentOatQuickMethodHeader()->IsOptimized()) {
- CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
- } else {
- CheckQuickMethod(registers, number_of_references, native_pc_offset);
- }
+ CHECK(GetCurrentOatQuickMethodHeader()->IsOptimized());
+ CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
}
private:
@@ -104,20 +100,6 @@
}
}
}
-
- void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- NativePcOffsetToReferenceMap map(GetCurrentOatQuickMethodHeader()->GetNativeGcMap());
- const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
- CHECK(ref_bitmap);
- for (int i = 0; i < number_of_references; ++i) {
- int reg = registers[i];
- CHECK(reg < m->GetCodeItem()->registers_size_);
- CHECK((*((ref_bitmap) + reg / 8) >> (reg % 8) ) & 0x01)
- << "Error: Reg @" << i << " is not in GC map";
- }
- }
};
} // namespace art
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 7060593..18def2d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3786,7 +3786,9 @@
while (old_status == mirror::Class::kStatusVerifying ||
old_status == mirror::Class::kStatusVerifyingAtRuntime) {
lock.WaitIgnoringInterrupts();
- CHECK_GT(klass->GetStatus(), old_status);
+ CHECK(klass->IsErroneous() || (klass->GetStatus() > old_status))
+ << "Class '" << PrettyClass(klass.Get()) << "' performed an illegal verification state "
+ << "transition from " << old_status << " to " << klass->GetStatus();
old_status = klass->GetStatus();
}
@@ -5874,9 +5876,14 @@
!target_name_comparator.HasSameNameAndSignature(
current_method->GetInterfaceMethodIfProxy(image_pointer_size_))) {
continue;
+ } else if (!current_method->IsPublic()) {
+ // The verifier should have caught the non-public method for dex version 37. Just warn and
+ // skip it since this is from before default-methods so we don't really need to care that it
+ // has code.
+ LOG(WARNING) << "Interface method " << PrettyMethod(current_method) << " is not public! "
+ << "This will be a fatal error in subsequent versions of android. "
+ << "Continuing anyway.";
}
- // The verifier should have caught the non-public method.
- DCHECK(current_method->IsPublic()) << "Interface method is not public!";
if (UNLIKELY(chosen_iface.Get() != nullptr)) {
// We have multiple default impls of the same method. This is a potential default conflict.
// We need to check if this possibly conflicting method is either a superclass of the chosen
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index e512906..42e320a 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -23,6 +23,7 @@
template<class Visitor>
void ClassTable::VisitRoots(Visitor& visitor) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
for (GcRoot<mirror::Class>& root : class_set) {
visitor.VisitRoot(root.AddressWithoutBarrier());
@@ -35,6 +36,7 @@
template<class Visitor>
void ClassTable::VisitRoots(const Visitor& visitor) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
for (GcRoot<mirror::Class>& root : class_set) {
visitor.VisitRoot(root.AddressWithoutBarrier());
@@ -47,6 +49,7 @@
template <typename Visitor>
bool ClassTable::Visit(Visitor& visitor) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
for (GcRoot<mirror::Class>& root : class_set) {
if (!visitor(root.Read())) {
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index d815b1a..8267c68 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -20,17 +20,19 @@
namespace art {
-ClassTable::ClassTable() {
+ClassTable::ClassTable() : lock_("Class loader classes", kClassLoaderClassesLock) {
Runtime* const runtime = Runtime::Current();
classes_.push_back(ClassSet(runtime->GetHashTableMinLoadFactor(),
runtime->GetHashTableMaxLoadFactor()));
}
void ClassTable::FreezeSnapshot() {
+ WriterMutexLock mu(Thread::Current(), lock_);
classes_.push_back(ClassSet());
}
bool ClassTable::Contains(mirror::Class* klass) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
auto it = class_set.Find(GcRoot<mirror::Class>(klass));
if (it != class_set.end()) {
@@ -41,6 +43,7 @@
}
mirror::Class* ClassTable::LookupByDescriptor(mirror::Class* klass) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
auto it = class_set.Find(GcRoot<mirror::Class>(klass));
if (it != class_set.end()) {
@@ -51,6 +54,7 @@
}
mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) {
+ WriterMutexLock mu(Thread::Current(), lock_);
// Should only be updating latest table.
auto existing_it = classes_.back().FindWithHash(descriptor, hash);
if (kIsDebugBuild && existing_it == classes_.back().end()) {
@@ -74,6 +78,7 @@
}
size_t ClassTable::NumZygoteClasses() const {
+ ReaderMutexLock mu(Thread::Current(), lock_);
size_t sum = 0;
for (size_t i = 0; i < classes_.size() - 1; ++i) {
sum += classes_[i].Size();
@@ -82,10 +87,12 @@
}
size_t ClassTable::NumNonZygoteClasses() const {
+ ReaderMutexLock mu(Thread::Current(), lock_);
return classes_.back().Size();
}
mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
auto it = class_set.FindWithHash(descriptor, hash);
if (it != class_set.end()) {
@@ -96,14 +103,17 @@
}
void ClassTable::Insert(mirror::Class* klass) {
+ WriterMutexLock mu(Thread::Current(), lock_);
classes_.back().Insert(GcRoot<mirror::Class>(klass));
}
void ClassTable::InsertWithHash(mirror::Class* klass, size_t hash) {
+ WriterMutexLock mu(Thread::Current(), lock_);
classes_.back().InsertWithHash(GcRoot<mirror::Class>(klass), hash);
}
bool ClassTable::Remove(const char* descriptor) {
+ WriterMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
auto it = class_set.Find(descriptor);
if (it != class_set.end()) {
@@ -137,6 +147,7 @@
}
bool ClassTable::InsertDexFile(mirror::Object* dex_file) {
+ WriterMutexLock mu(Thread::Current(), lock_);
DCHECK(dex_file != nullptr);
for (GcRoot<mirror::Object>& root : dex_files_) {
if (root.Read() == dex_file) {
@@ -148,6 +159,7 @@
}
size_t ClassTable::WriteToMemory(uint8_t* ptr) const {
+ ReaderMutexLock mu(Thread::Current(), lock_);
ClassSet combined;
// Combine all the class sets in case there are multiple, also adjusts load factor back to
// default in case classes were pruned.
@@ -173,6 +185,7 @@
}
void ClassTable::AddClassSet(ClassSet&& set) {
+ WriterMutexLock mu(Thread::Current(), lock_);
classes_.insert(classes_.begin(), std::move(set));
}
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 0e0e860..eb784b5 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -71,87 +71,96 @@
// Used by image writer for checking.
bool Contains(mirror::Class* klass)
- REQUIRES(Locks::classlinker_classes_lock_)
+ REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Freeze the current class tables by allocating a new table and never updating or modifying the
// existing table. This helps prevent dirty pages caused by inserting after the zygote fork.
void FreezeSnapshot()
- REQUIRES(Locks::classlinker_classes_lock_)
+ REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of classes in previous snapshots.
- size_t NumZygoteClasses() const SHARED_REQUIRES(Locks::classlinker_classes_lock_);
+ size_t NumZygoteClasses() const REQUIRES(!lock_);
// Returns all of the classes in the latest snapshot.
- size_t NumNonZygoteClasses() const SHARED_REQUIRES(Locks::classlinker_classes_lock_);
+ size_t NumNonZygoteClasses() const REQUIRES(!lock_);
// Update a class in the table with the new class. Returns the existing class which was replaced.
mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash)
- REQUIRES(Locks::classlinker_classes_lock_)
+ REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS for object marking requiring heap bitmap lock.
template<class Visitor>
void VisitRoots(Visitor& visitor)
NO_THREAD_SAFETY_ANALYSIS
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
template<class Visitor>
void VisitRoots(const Visitor& visitor)
NO_THREAD_SAFETY_ANALYSIS
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Stops visit if the visitor returns false.
template <typename Visitor>
bool Visit(Visitor& visitor)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Return the first class that matches the descriptor. Returns null if there are none.
mirror::Class* Lookup(const char* descriptor, size_t hash)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Return the first class that matches the descriptor of klass. Returns null if there are none.
mirror::Class* LookupByDescriptor(mirror::Class* klass)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void Insert(mirror::Class* klass)
- REQUIRES(Locks::classlinker_classes_lock_)
+ REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+
void InsertWithHash(mirror::Class* klass, size_t hash)
- REQUIRES(Locks::classlinker_classes_lock_)
+ REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the class was found and removed, false otherwise.
bool Remove(const char* descriptor)
- REQUIRES(Locks::classlinker_classes_lock_)
+ REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Return true if we inserted the dex file, false if it already exists.
bool InsertDexFile(mirror::Object* dex_file)
- REQUIRES(Locks::classlinker_classes_lock_)
+ REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Combines all of the tables into one class set.
size_t WriteToMemory(uint8_t* ptr) const
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Read a table from ptr and put it at the front of the class set.
size_t ReadFromMemory(uint8_t* ptr)
- REQUIRES(Locks::classlinker_classes_lock_)
+ REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Add a class set to the front of classes.
void AddClassSet(ClassSet&& set)
- REQUIRES(Locks::classlinker_classes_lock_)
+ REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
private:
- // TODO: shard lock to have one per class loader.
+ // Lock to guard inserting and removing.
+ mutable ReaderWriterMutex lock_;
// We have a vector of sets so that FreezeSnapshot can prevent dirty pages after the zygote forks.
- std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
+ std::vector<ClassSet> classes_ GUARDED_BY(lock_);
// Dex files used by the class loader which may not be owned by the class loader. We keep these
// live so that we do not have issues closing any of the dex files.
- std::vector<GcRoot<mirror::Object>> dex_files_ GUARDED_BY(Locks::classlinker_classes_lock_);
+ std::vector<GcRoot<mirror::Object>> dex_files_ GUARDED_BY(lock_);
};
} // namespace art
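The ClassTable hunk above swaps the global Locks::classlinker_classes_lock_ for a ReaderWriterMutex owned by the table itself, and flips the annotations from REQUIRES(lock) to REQUIRES(!lock_): callers must now arrive lock-free, and each method acquires lock_ internally. A minimal sketch of that contract, assuming clang's -Wthread-safety attributes (ART's REQUIRES/GUARDED_BY macros expand to these; the Lock wrapper below is illustrative, not ART's base/mutex.h):

    #include <mutex>
    #include <vector>

    #define CAPABILITY(x)  __attribute__((capability(x)))
    #define GUARDED_BY(x)  __attribute__((guarded_by(x)))
    #define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
    #define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Lock {
     public:
      void Acquire() ACQUIRE() { mu_.lock(); }
      void Release() RELEASE() { mu_.unlock(); }
     private:
      std::mutex mu_;
    };

    class Table {
     public:
      // REQUIRES(!lock_): the caller must NOT hold lock_; unlike the old
      // contract, Insert() takes and drops the lock by itself.
      void Insert(int klass) REQUIRES(!lock_) {
        lock_.Acquire();
        classes_.push_back(klass);
        lock_.Release();
      }
     private:
      Lock lock_;
      std::vector<int> classes_ GUARDED_BY(lock_);
    };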
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index e63eaa2..63f3f08 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -1351,6 +1351,17 @@
return ProcessAnnotationSetRefList(method_class, set_ref_list, size);
}
+mirror::ObjectArray<mirror::String>* DexFile::GetSignatureAnnotationForMethod(ArtMethod* method)
+ const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
+ return GetSignatureValue(method_class, annotation_set);
+}
+
bool DexFile::IsMethodAnnotationPresent(ArtMethod* method, Handle<mirror::Class> annotation_class)
const {
const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
@@ -1548,6 +1559,15 @@
return true;
}
+mirror::ObjectArray<mirror::String>* DexFile::GetSignatureAnnotationForClass(
+ Handle<mirror::Class> klass) const {
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ return GetSignatureValue(klass, annotation_set);
+}
+
bool DexFile::IsClassAnnotationPresent(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class) const {
const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 1456636..3a28422 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -991,6 +991,8 @@
SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) const
SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod* method) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool IsMethodAnnotationPresent(ArtMethod* method, Handle<mirror::Class> annotation_class) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1013,6 +1015,8 @@
SHARED_REQUIRES(Locks::mutator_lock_);
bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) const
SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirror::Class> klass)
+ const SHARED_REQUIRES(Locks::mutator_lock_);
bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class)
const SHARED_REQUIRES(Locks::mutator_lock_);
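The two accessors declared above expose the dalvik.annotation.Signature system annotation, which stores a generic signature chopped into a String[] (pieces let the dex string pool share common substrings); consumers concatenate the pieces back into one descriptor. A hedged sketch of that join, with std::string standing in for the mirror::String array:

    #include <string>
    #include <vector>

    std::string JoinSignature(const std::vector<std::string>& pieces) {
      std::string signature;
      for (const std::string& piece : pieces) {
        signature += piece;
      }
      return signature;  // e.g. "Ljava/util/List<Ljava/lang/String;>;"
    }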
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 811e763..681c5f9 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -2626,6 +2626,23 @@
// From here on out it is easier to mask out the bits we're supposed to ignore.
method_access_flags &= kMethodAccessFlags;
+ // Interfaces are special.
+ if ((class_access_flags & kAccInterface) != 0) {
+ // Non-static interface methods must be public.
+ if ((method_access_flags & (kAccPublic | kAccStatic)) == 0) {
+ *error_msg = StringPrintf("Interface virtual method %" PRIu32 "(%s) is not public",
+ method_index,
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
+ if (header_->GetVersion() >= 37) {
+ return false;
+ } else {
+ // Allow in older versions, but warn.
+ LOG(WARNING) << "This dex file is invalid and will be rejected in the future. Error is: "
+ << *error_msg;
+ }
+ }
+ }
+
// If there aren't any instructions, make sure that's expected.
if (!has_code) {
// Only native or abstract methods may not have code.
@@ -2664,7 +2681,7 @@
}
// Interfaces are special.
if ((class_access_flags & kAccInterface) != 0) {
- // Interface methods must be public and abstract.
+ // Interface methods without code must be abstract.
if ((method_access_flags & (kAccPublic | kAccAbstract)) != (kAccPublic | kAccAbstract)) {
*error_msg = StringPrintf("Interface method %" PRIu32 "(%s) is not public and abstract",
method_index,
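The early check added above fires for interface methods that are neither public nor static, and only hard-fails for dex version 037 and up; older files get a warning. A standalone sketch of the mask logic, using the real dex access-flag values:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kAccPublic = 0x0001;
    constexpr uint32_t kAccStatic = 0x0008;

    // Neither bit set => a non-public virtual interface method.
    bool IsBadVirtualInterfaceMethod(uint32_t flags) {
      return (flags & (kAccPublic | kAccStatic)) == 0;
    }

    int main() {
      printf("%d\n", IsBadVirtualInterfaceMethod(0x0002));      // private -> 1
      printf("%d\n", IsBadVirtualInterfaceMethod(kAccPublic));  // public  -> 0
    }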
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 4a5ed5d..344d186 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -767,7 +767,7 @@
ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
},
- "Interface method 1(LInterfaceMethodFlags;.foo) is not public and abstract");
+ "Interface virtual method 1(LInterfaceMethodFlags;.foo) is not public");
VerifyModification(
kMethodFlagsInterface,
@@ -817,7 +817,7 @@
ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
},
- "Interface method 1(LInterfaceMethodFlags;.foo) is not public and abstract");
+ "Interface virtual method 1(LInterfaceMethodFlags;.foo) is not public");
VerifyModification(
kMethodFlagsInterface,
@@ -839,7 +839,7 @@
ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
OrMaskToMethodFlags(dex_file, "foo", kAccProtected);
},
- "Interface method 1(LInterfaceMethodFlags;.foo) is not public and abstract");
+ "Interface virtual method 1(LInterfaceMethodFlags;.foo) is not public");
constexpr uint32_t kAllMethodFlags =
kAccPublic |
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index e46576e..197caa1 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -272,19 +272,19 @@
if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
if (outer_method != nullptr) {
const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
- if (current_code->IsOptimized()) {
- uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
- CodeInfo code_info = current_code->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
- DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
- caller = GetResolvedMethod(outer_method,
- inline_info,
- encoding.inline_info_encoding,
- inline_info.GetDepth(encoding.inline_info_encoding) - 1);
- }
+ DCHECK(current_code != nullptr);
+ DCHECK(current_code->IsOptimized());
+ uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
+ CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ DCHECK(stack_map.IsValid());
+ if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+ caller = GetResolvedMethod(outer_method,
+ inline_info,
+ encoding.inline_info_encoding,
+ inline_info.GetDepth(encoding.inline_info_encoding) - 1);
}
}
if (kIsDebugBuild && do_caller_check) {
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index d16afd9..4e40aea 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -210,7 +210,11 @@
if (mod_union_table_->ShouldAddReference(root->AsMirrorPtr())) {
*has_target_reference_ = true;
// TODO: Add MarkCompressedReference callback here.
- root->Assign(visitor_->MarkObject(root->AsMirrorPtr()));
+ mirror::Object* old_ref = root->AsMirrorPtr();
+ mirror::Object* new_ref = visitor_->MarkObject(old_ref);
+ if (old_ref != new_ref) {
+ root->Assign(new_ref);
+ }
}
}
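The mod-union rewrite above is a write-avoidance pattern: MarkObject usually hands back the same pointer, and skipping the redundant Assign keeps shared zygote pages clean. The same idea in isolation:

    // Store back only when the reference actually moved; no store means the
    // containing page is not dirtied.
    template <typename T, typename MarkFn>
    void UpdateIfChanged(T** slot, MarkFn mark) {
      T* old_ref = *slot;
      T* new_ref = mark(old_ref);
      if (old_ref != new_ref) {
        *slot = new_ref;
      }
    }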
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index b61bef7..c19107a 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -231,7 +231,7 @@
protected:
// Returns object if the object is marked in the heap bitmap, otherwise null.
virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void MarkObjectNonNull(mirror::Object* obj,
mirror::Object* holder = nullptr,
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 01db90a..c2f772f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1304,6 +1304,13 @@
}
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
+ // If we're in a stack overflow, do not create a new exception. It would require running the
+ // constructor, which will of course still be in a stack overflow.
+ if (self->IsHandlingStackOverflow()) {
+ self->SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
+ return;
+ }
+
std::ostringstream oss;
size_t total_bytes_free = GetFreeMemory();
oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 22bf5f9..a84b366 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -407,7 +407,7 @@
&is_global_cache);
}
- if (Runtime::Current()->IsZygote() && !secondary_image) {
+ if (is_zygote && !secondary_image) {
MarkZygoteStart(image_isa, Runtime::Current()->GetZygoteMaxFailedBoots());
}
@@ -444,7 +444,7 @@
// Whether we can write to the cache.
success = false;
} else if (secondary_image) {
- if (Runtime::Current()->IsZygote()) {
+ if (is_zygote) {
// Secondary image is out of date. Clear cache and exit to let it retry from scratch.
LOG(ERROR) << "Cannot patch secondary image '" << image_location
<< "', clearing dalvik_cache and restarting zygote.";
@@ -503,7 +503,16 @@
// descriptor (and the associated exclusive lock) to be released when
// we leave Create.
ScopedFlock image_lock;
- image_lock.Init(image_filename->c_str(), error_msg);
+ // Should this be an RDWR lock? This is only a defensive measure, as at
+ // this point the image should exist.
+ // However, only the zygote can write into the global dalvik-cache, so
+ // restrict the RDWR lock to zygote processes and to any process that isn't
+ // using /data/dalvik-cache (such a process may write to its own cache).
+ const bool rw_lock = is_zygote || !is_global_cache;
+ image_lock.Init(image_filename->c_str(),
+ rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
+ true /* block */,
+ error_msg);
VLOG(startup) << "Using image file " << image_filename->c_str() << " for image location "
<< image_location;
// If we are in /system we can assume the image is good. We can also
diff --git a/runtime/gc/space/image_space_fs.h b/runtime/gc/space/image_space_fs.h
index 5237466..eac52f7 100644
--- a/runtime/gc/space/image_space_fs.h
+++ b/runtime/gc/space/image_space_fs.h
@@ -62,7 +62,7 @@
if (recurse) {
DeleteDirectoryContents(file, recurse);
// Try to rmdir the directory.
- if (TEMP_FAILURE_RETRY(rmdir(file.c_str())) != 0) {
+ if (rmdir(file.c_str()) != 0) {
PLOG(ERROR) << "Unable to rmdir " << file;
}
}
@@ -71,12 +71,12 @@
}
} else {
// Try to unlink the file.
- if (TEMP_FAILURE_RETRY(unlink(file.c_str())) != 0) {
+ if (unlink(file.c_str()) != 0) {
PLOG(ERROR) << "Unable to unlink " << file;
}
}
}
- CHECK_EQ(0, TEMP_FAILURE_RETRY(closedir(c_dir))) << "Unable to close directory.";
+ CHECK_EQ(0, closedir(c_dir)) << "Unable to close directory.";
}
static bool HasContent(const char* dir) {
@@ -95,10 +95,10 @@
continue;
}
// Something here.
- CHECK_EQ(0, TEMP_FAILURE_RETRY(closedir(c_dir))) << "Unable to close directory.";
+ CHECK_EQ(0, closedir(c_dir)) << "Unable to close directory.";
return true;
}
- CHECK_EQ(0, TEMP_FAILURE_RETRY(closedir(c_dir))) << "Unable to close directory.";
+ CHECK_EQ(0, closedir(c_dir)) << "Unable to close directory.";
return false;
}
@@ -115,7 +115,7 @@
}
}
if (OS::DirectoryExists(dir.c_str())) {
- if (TEMP_FAILURE_RETRY(rmdir(dir.c_str())) != 0) {
+ if (rmdir(dir.c_str()) != 0) {
PLOG(ERROR) << "Unable to rmdir " << dir;
return;
}
@@ -136,7 +136,7 @@
return;
}
- if (TEMP_FAILURE_RETRY(rename(src, trg)) != 0) {
+ if (rename(src, trg) != 0) {
PLOG(ERROR) << "Could not rename OTA cache " << src << " to target " << trg;
}
}
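The TEMP_FAILURE_RETRY removals in this file (and in oat_file_assistant.cc below) drop retry loops that buy nothing here: the macro simply re-issues a call while it fails with EINTR, unlink/rmdir/rename do not ordinarily fail with EINTR on Linux, and retrying a close-family call such as closedir is actively risky, since by the time EINTR is reported the descriptor may already be released and a retry can close an unrelated, freshly opened one. For reference, glibc defines the macro essentially as:

    #include <cerrno>

    #define TEMP_FAILURE_RETRY(expression)           \
      (__extension__({                               \
        long int _result;                            \
        do {                                         \
          _result = (long int)(expression);          \
        } while (_result == -1L && errno == EINTR);  \
        _result;                                     \
      }))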
diff --git a/runtime/gc_map.h b/runtime/gc_map.h
deleted file mode 100644
index b4ccdd6..0000000
--- a/runtime/gc_map.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_GC_MAP_H_
-#define ART_RUNTIME_GC_MAP_H_
-
-#include <stdint.h>
-
-#include "base/logging.h"
-#include "base/macros.h"
-
-namespace art {
-
-// Lightweight wrapper for native PC offset to reference bit maps.
-class NativePcOffsetToReferenceMap {
- public:
- explicit NativePcOffsetToReferenceMap(const uint8_t* data) : data_(data) {
- CHECK(data_ != nullptr);
- }
-
- // The number of entries in the table.
- size_t NumEntries() const {
- return data_[2] | (data_[3] << 8);
- }
-
- // Return address of bitmap encoding what are live references.
- const uint8_t* GetBitMap(size_t index) const {
- size_t entry_offset = index * EntryWidth();
- return &Table()[entry_offset + NativeOffsetWidth()];
- }
-
- // Get the native PC encoded in the table at the given index.
- uintptr_t GetNativePcOffset(size_t index) const {
- size_t entry_offset = index * EntryWidth();
- uintptr_t result = 0;
- for (size_t i = 0; i < NativeOffsetWidth(); ++i) {
- result |= Table()[entry_offset + i] << (i * 8);
- }
- return result;
- }
-
- // Does the given offset have an entry?
- bool HasEntry(uintptr_t native_pc_offset) {
- for (size_t i = 0; i < NumEntries(); ++i) {
- if (GetNativePcOffset(i) == native_pc_offset) {
- return true;
- }
- }
- return false;
- }
-
- // Finds the bitmap associated with the native pc offset.
- const uint8_t* FindBitMap(uintptr_t native_pc_offset) {
- size_t num_entries = NumEntries();
- size_t index = Hash(native_pc_offset) % num_entries;
- size_t misses = 0;
- while (GetNativePcOffset(index) != native_pc_offset) {
- index = (index + 1) % num_entries;
- misses++;
- DCHECK_LT(misses, num_entries) << "Failed to find offset: " << native_pc_offset;
- }
- return GetBitMap(index);
- }
-
- static uint32_t Hash(uint32_t native_offset) {
- uint32_t hash = native_offset;
- hash ^= (hash >> 20) ^ (hash >> 12);
- hash ^= (hash >> 7) ^ (hash >> 4);
- return hash;
- }
-
- // The number of bytes used to encode registers.
- size_t RegWidth() const {
- return (static_cast<size_t>(data_[0]) | (static_cast<size_t>(data_[1]) << 8)) >> 3;
- }
-
- private:
- // Skip the size information at the beginning of data.
- const uint8_t* Table() const {
- return data_ + 4;
- }
-
- // Number of bytes used to encode a native offset.
- size_t NativeOffsetWidth() const {
- return data_[0] & 7;
- }
-
- // The width of an entry in the table.
- size_t EntryWidth() const {
- return NativeOffsetWidth() + RegWidth();
- }
-
- const uint8_t* const data_; // The header and table data
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_GC_MAP_H_
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index a0c6bfb..34bc458 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -80,7 +80,7 @@
have_exception_caught_listeners_(false),
have_branch_listeners_(false),
have_invoke_virtual_or_interface_listeners_(false),
- deoptimized_methods_lock_("deoptimized methods lock"),
+ deoptimized_methods_lock_("deoptimized methods lock", kDeoptimizedMethodsLock),
deoptimization_enabled_(false),
interpreter_handler_table_(kMainHandlerTable),
quick_alloc_entry_points_instrumentation_counter_(0),
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 79f24a8..eceb593 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -393,6 +393,10 @@
size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
size_t read_count = 0;
UnorderedSet set(ptr, /*make copy*/false, &read_count);
+ if (set.Empty()) {
+ // Avoid inserting empty sets.
+ return read_count;
+ }
// TODO: Disable this for app images if app images have intern tables.
static constexpr bool kCheckDuplicates = true;
if (kCheckDuplicates) {
@@ -400,7 +404,7 @@
CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
}
}
- // Insert at the front since we insert into the back.
+ // Insert at the front since we add new interns into the back.
tables_.insert(tables_.begin(), std::move(set));
return read_count;
}
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index b55312f..12d6fdc 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -277,6 +277,7 @@
JValue result;
self->AllowThreadSuspension();
HANDLE_MONITOR_CHECKS();
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
@@ -291,6 +292,7 @@
JValue result;
self->AllowThreadSuspension();
HANDLE_MONITOR_CHECKS();
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
@@ -306,6 +308,7 @@
result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
HANDLE_MONITOR_CHECKS();
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
@@ -320,6 +323,7 @@
result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
HANDLE_MONITOR_CHECKS();
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
@@ -355,6 +359,7 @@
}
}
result.SetL(obj_result);
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
@@ -2553,6 +2558,7 @@
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
}
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame, dex_pc,
instrumentation);
if (found_dex_pc == DexFile::kDexNoIndex) {
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 0488dbf..43889c6 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -37,6 +37,7 @@
shadow_frame.GetLockCountData(). \
CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self); \
if (interpret_one_instruction) { \
+ /* Signal mterp to return to caller */ \
shadow_frame.SetDexPC(DexFile::kDexNoIndex); \
} \
return JValue(); /* Handled in caller. */ \
@@ -76,6 +77,10 @@
instrumentation->Branch(self, method, dex_pc, offset); \
JValue result; \
if (jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, &result)) { \
+ if (interpret_one_instruction) { \
+ /* OSR has completed execution of the method. Signal mterp to return to caller */ \
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex); \
+ } \
return result; \
} \
} while (false)
@@ -205,6 +210,7 @@
result);
}
if (interpret_one_instruction) {
+ /* Signal mterp to return to caller */
shadow_frame.SetDexPC(DexFile::kDexNoIndex);
}
return result;
@@ -221,6 +227,7 @@
result);
}
if (interpret_one_instruction) {
+ /* Signal mterp to return to caller */
shadow_frame.SetDexPC(DexFile::kDexNoIndex);
}
return result;
@@ -238,6 +245,7 @@
result);
}
if (interpret_one_instruction) {
+ /* Signal mterp to return to caller */
shadow_frame.SetDexPC(DexFile::kDexNoIndex);
}
return result;
@@ -254,6 +262,7 @@
result);
}
if (interpret_one_instruction) {
+ /* Signal mterp to return to caller */
shadow_frame.SetDexPC(DexFile::kDexNoIndex);
}
return result;
@@ -292,6 +301,7 @@
result);
}
if (interpret_one_instruction) {
+ /* Signal mterp to return to caller */
shadow_frame.SetDexPC(DexFile::kDexNoIndex);
}
return result;
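Every /* Signal mterp to return to caller */ comment above documents one sentinel convention: when mterp asks the C++ interpreter to run a single instruction, a dex pc of kDexNoIndex in the shadow frame on return means "the method finished, unwind to the caller" rather than "suspended at this pc". Reduced to its core:

    #include <cstdint>

    constexpr uint32_t kDexNoIndex = 0xFFFFFFFF;  // matches DexFile::kDexNoIndex

    struct ShadowFrame {
      uint32_t dex_pc = 0;
    };

    // After single-stepping, the caller checks for the sentinel.
    bool MethodCompleted(const ShadowFrame& frame) {
      return frame.dex_pc == kDexNoIndex;
    }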
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 81be959..4615ec9 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -16,7 +16,11 @@
#include "unstarted_runtime.h"
+#include <errno.h>
+#include <stdlib.h>
+
#include <cmath>
+#include <limits>
#include <unordered_map>
#include "ScopedLocalRef.h"
@@ -282,6 +286,23 @@
}
}
+// Special managed code cut-out to allow constructor lookup in an unstarted runtime.
+void UnstartedRuntime::UnstartedClassGetDeclaredConstructor(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+ if (klass == nullptr) {
+ ThrowNullPointerExceptionForMethodAccess(shadow_frame->GetMethod(), InvokeType::kVirtual);
+ return;
+ }
+ mirror::ObjectArray<mirror::Class>* args =
+ shadow_frame->GetVRegReference(arg_offset + 1)->AsObjectArray<mirror::Class>();
+ if (Runtime::Current()->IsActiveTransaction()) {
+ result->SetL(mirror::Class::GetDeclaredConstructorInternal<true>(self, klass, args));
+ } else {
+ result->SetL(mirror::Class::GetDeclaredConstructorInternal<false>(self, klass, args));
+ }
+}
+
void UnstartedRuntime::UnstartedClassGetEnclosingClass(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
StackHandleScope<1> hs(self);
@@ -427,6 +448,8 @@
dst_pos, src, src_pos, length, true /* throw_exception */);
}
}
+ } else if (src_type->IsPrimitiveByte()) {
+ PrimitiveArrayCopy<uint8_t>(self, src_array, src_pos, dst_array, dst_pos, length);
} else if (src_type->IsPrimitiveChar()) {
PrimitiveArrayCopy<uint16_t>(self, src_array, src_pos, dst_array, dst_pos, length);
} else if (src_type->IsPrimitiveInt()) {
@@ -437,6 +460,12 @@
}
}
+void UnstartedRuntime::UnstartedSystemArraycopyByte(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ // Just forward.
+ UnstartedRuntime::UnstartedSystemArraycopy(self, shadow_frame, result, arg_offset);
+}
+
void UnstartedRuntime::UnstartedSystemArraycopyChar(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
// Just forward.
@@ -1025,6 +1054,24 @@
result->SetL(value);
}
+void UnstartedRuntime::UnstartedUnsafePutObjectVolatile(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ // Argument 0 is the Unsafe instance, skip.
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+ if (obj == nullptr) {
+ AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+ return;
+ }
+ int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+ mirror::Object* value = shadow_frame->GetVRegReference(arg_offset + 4);
+ if (Runtime::Current()->IsActiveTransaction()) {
+ obj->SetFieldObjectVolatile<true>(MemberOffset(offset), value);
+ } else {
+ obj->SetFieldObjectVolatile<false>(MemberOffset(offset), value);
+ }
+}
+
void UnstartedRuntime::UnstartedUnsafePutOrderedObject(
Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -1044,6 +1091,93 @@
}
}
+// A cutout for Integer.parseInt(String). Note: this code is conservative and will bail on
+// corner cases instead of handling them all correctly.
+void UnstartedRuntime::UnstartedIntegerParseInt(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
+ if (obj == nullptr) {
+ AbortTransactionOrFail(self, "Cannot parse null string, retry at runtime.");
+ return;
+ }
+
+ std::string string_value = obj->AsString()->ToModifiedUtf8();
+ if (string_value.empty()) {
+ AbortTransactionOrFail(self, "Cannot parse empty string, retry at runtime.");
+ return;
+ }
+
+ const char* c_str = string_value.c_str();
+ char *end;
+ // Can we set errno to 0? Is this always a variable, and not a macro?
+ // Worst case, we'll incorrectly fail a transaction. Seems OK.
+ int64_t l = strtol(c_str, &end, 10);
+
+ if ((errno == ERANGE && l == LONG_MAX) || l > std::numeric_limits<int32_t>::max() ||
+ (errno == ERANGE && l == LONG_MIN) || l < std::numeric_limits<int32_t>::min()) {
+ AbortTransactionOrFail(self, "Cannot parse string %s, retry at runtime.", c_str);
+ return;
+ }
+ if (l == 0) {
+ // Check whether the string wasn't exactly zero.
+ if (string_value != "0") {
+ AbortTransactionOrFail(self, "Cannot parse string %s, retry at runtime.", c_str);
+ return;
+ }
+ } else if (*end != '\0') {
+ AbortTransactionOrFail(self, "Cannot parse string %s, retry at runtime.", c_str);
+ return;
+ }
+
+ result->SetI(static_cast<int32_t>(l));
+}
+
+// A cutout for Long.parseLong.
+//
+// Note: for now use code equivalent to Integer.parseInt, as the full 64-bit range may not be
+// supported well.
+void UnstartedRuntime::UnstartedLongParseLong(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
+ if (obj == nullptr) {
+ AbortTransactionOrFail(self, "Cannot parse null string, retry at runtime.");
+ return;
+ }
+
+ std::string string_value = obj->AsString()->ToModifiedUtf8();
+ if (string_value.empty()) {
+ AbortTransactionOrFail(self, "Cannot parse empty string, retry at runtime.");
+ return;
+ }
+
+ const char* c_str = string_value.c_str();
+ char *end;
+ // Can we set errno to 0? Is this always a variable, and not a macro?
+ // Worst case, we'll incorrectly fail a transaction. Seems OK.
+ int64_t l = strtol(c_str, &end, 10);
+
+ // Note: comparing against int32_t min/max is intentional here.
+ if ((errno == ERANGE && l == LONG_MAX) || l > std::numeric_limits<int32_t>::max() ||
+ (errno == ERANGE && l == LONG_MIN) || l < std::numeric_limits<int32_t>::min()) {
+ AbortTransactionOrFail(self, "Cannot parse string %s, retry at runtime.", c_str);
+ return;
+ }
+ if (l == 0) {
+ // Check whether the string wasn't exactly zero.
+ if (string_value != "0") {
+ AbortTransactionOrFail(self, "Cannot parse string %s, retry at runtime.", c_str);
+ return;
+ }
+ } else if (*end != '\0') {
+ AbortTransactionOrFail(self, "Cannot parse string %s, retry at runtime.", c_str);
+ return;
+ }
+
+ result->SetJ(l);
+}
+
void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
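On the errno questions left in the two parse cut-outs above: the C standard requires errno to be a macro expanding to a modifiable lvalue, so errno = 0 before strtol() is legal and is the documented way to tell ERANGE apart from a legitimately parsed LONG_MIN or LONG_MAX. A minimal sketch of that usage:

    #include <cerrno>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      errno = 0;  // Clear any stale value before the call.
      char* end = nullptr;
      long value = strtol("2147483646", &end, 10);
      if (errno == ERANGE || *end != '\0') {
        puts("reject: out of range or trailing characters");
      } else {
        printf("parsed %ld\n", value);
      }
      return 0;
    }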
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index d669b75..a3ed558 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -25,10 +25,12 @@
V(ClassNewInstance, "java.lang.Object java.lang.Class.newInstance()") \
V(ClassGetDeclaredField, "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") \
V(ClassGetDeclaredMethod, "java.lang.reflect.Method java.lang.Class.getDeclaredMethodInternal(java.lang.String, java.lang.Class[])") \
+ V(ClassGetDeclaredConstructor, "java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructorInternal(java.lang.Class[])") \
V(ClassGetEnclosingClass, "java.lang.Class java.lang.Class.getEnclosingClass()") \
V(VmClassLoaderFindLoadedClass, "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") \
V(VoidLookupType, "java.lang.Class java.lang.Void.lookupType()") \
V(SystemArraycopy, "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)") \
+ V(SystemArraycopyByte, "void java.lang.System.arraycopy(byte[], int, byte[], int, int)") \
V(SystemArraycopyChar, "void java.lang.System.arraycopy(char[], int, char[], int, int)") \
V(SystemArraycopyInt, "void java.lang.System.arraycopy(int[], int, int[], int, int)") \
V(SystemGetSecurityManager, "java.lang.SecurityManager java.lang.System.getSecurityManager()") \
@@ -55,7 +57,10 @@
V(UnsafeCompareAndSwapLong, "boolean sun.misc.Unsafe.compareAndSwapLong(java.lang.Object, long, long, long)") \
V(UnsafeCompareAndSwapObject, "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") \
V(UnsafeGetObjectVolatile, "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") \
- V(UnsafePutOrderedObject, "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)")
+ V(UnsafePutObjectVolatile, "void sun.misc.Unsafe.putObjectVolatile(java.lang.Object, long, java.lang.Object)") \
+ V(UnsafePutOrderedObject, "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)") \
+ V(IntegerParseInt, "int java.lang.Integer.parseInt(java.lang.String)") \
+ V(LongParseLong, "long java.lang.Long.parseLong(java.lang.String)")
// Methods that are native.
#define UNSTARTED_RUNTIME_JNI_LIST(V) \
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index fb53b1d..f40e4e3 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -508,5 +508,100 @@
ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
+TEST_F(UnstartedRuntimeTest, IntegerParseIntTest) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ // Test string. Should be valid, and every suffix of it should parse to a value comfortably
+ // between LONG_MIN and LONG_MAX.
+ constexpr const char* test_string = "-2147483646";
+ constexpr int32_t test_values[] = {
+ 6,
+ 46,
+ 646,
+ 3646,
+ 83646,
+ 483646,
+ 7483646,
+ 47483646,
+ 147483646,
+ 2147483646,
+ -2147483646
+ };
+
+ static_assert(arraysize(test_values) == 11U, "test_values");
+ CHECK_EQ(strlen(test_string), 11U);
+
+ for (size_t i = 0; i <= 10; ++i) {
+ const char* test_value = &test_string[10 - i];
+
+ StackHandleScope<1> hs_str(self);
+ Handle<mirror::String> h_str(
+ hs_str.NewHandle(mirror::String::AllocFromModifiedUtf8(self, test_value)));
+ ASSERT_NE(h_str.Get(), nullptr);
+ ASSERT_FALSE(self->IsExceptionPending());
+
+ tmp->SetVRegReference(0, h_str.Get());
+
+ JValue result;
+ UnstartedIntegerParseInt(self, tmp, &result, 0);
+
+ ASSERT_FALSE(self->IsExceptionPending());
+ EXPECT_EQ(result.GetI(), test_values[i]);
+ }
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
+// Right now this is the same test as for Integer.parseInt.
+TEST_F(UnstartedRuntimeTest, LongParseLongTest) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ // Test string. Should be valid, and every suffix of it should parse to a value comfortably
+ // between LONG_MIN and LONG_MAX.
+ constexpr const char* test_string = "-2147483646";
+ constexpr int64_t test_values[] = {
+ 6,
+ 46,
+ 646,
+ 3646,
+ 83646,
+ 483646,
+ 7483646,
+ 47483646,
+ 147483646,
+ 2147483646,
+ -2147483646
+ };
+
+ static_assert(arraysize(test_values) == 11U, "test_values");
+ CHECK_EQ(strlen(test_string), 11U);
+
+ for (size_t i = 0; i <= 10; ++i) {
+ const char* test_value = &test_string[10 - i];
+
+ StackHandleScope<1> hs_str(self);
+ Handle<mirror::String> h_str(
+ hs_str.NewHandle(mirror::String::AllocFromModifiedUtf8(self, test_value)));
+ ASSERT_NE(h_str.Get(), nullptr);
+ ASSERT_FALSE(self->IsExceptionPending());
+
+ tmp->SetVRegReference(0, h_str.Get());
+
+ JValue result;
+ UnstartedLongParseLong(self, tmp, &result, 0);
+
+ ASSERT_FALSE(self->IsExceptionPending());
+ EXPECT_EQ(result.GetJ(), test_values[i]);
+ }
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 73aaf04..0254727 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -208,7 +208,7 @@
return false;
}
bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
- code_cache_->DoneCompiling(method_to_compile, self);
+ code_cache_->DoneCompiling(method_to_compile, self, osr);
return success;
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 53d645c..820ae6a 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -195,9 +195,7 @@
uint8_t* JitCodeCache::CommitCode(Thread* self,
ArtMethod* method,
- const uint8_t* mapping_table,
const uint8_t* vmap_table,
- const uint8_t* gc_map,
size_t frame_size_in_bytes,
size_t core_spill_mask,
size_t fp_spill_mask,
@@ -206,9 +204,7 @@
bool osr) {
uint8_t* result = CommitCodeInternal(self,
method,
- mapping_table,
vmap_table,
- gc_map,
frame_size_in_bytes,
core_spill_mask,
fp_spill_mask,
@@ -220,9 +216,7 @@
GarbageCollectCache(self);
result = CommitCodeInternal(self,
method,
- mapping_table,
vmap_table,
- gc_map,
frame_size_in_bytes,
core_spill_mask,
fp_spill_mask,
@@ -254,8 +248,6 @@
// It does nothing if we are not using native debugger.
DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
- FreeData(const_cast<uint8_t*>(method_header->GetNativeGcMap()));
- FreeData(const_cast<uint8_t*>(method_header->GetMappingTable()));
// Use the offset directly to avoid the sanity check that the method is
// compiled with the optimizing compiler.
// TODO(ngeoffray): Clean up.
@@ -314,9 +306,7 @@
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
ArtMethod* method,
- const uint8_t* mapping_table,
const uint8_t* vmap_table,
- const uint8_t* gc_map,
size_t frame_size_in_bytes,
size_t core_spill_mask,
size_t fp_spill_mask,
@@ -346,9 +336,7 @@
std::copy(code, code + code_size, code_ptr);
method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
new (method_header) OatQuickMethodHeader(
- (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
(vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
- (gc_map == nullptr) ? 0 : code_ptr - gc_map,
frame_size_in_bytes,
core_spill_mask,
fp_spill_mask,
@@ -939,12 +927,12 @@
return false;
}
- if (info->IsMethodBeingCompiled()) {
+ if (info->IsMethodBeingCompiled(osr)) {
VLOG(jit) << PrettyMethod(method) << " is already being compiled";
return false;
}
- info->SetIsMethodBeingCompiled(true);
+ info->SetIsMethodBeingCompiled(true, osr);
return true;
}
@@ -964,10 +952,10 @@
info->DecrementInlineUse();
}
-void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED) {
+void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED, bool osr) {
ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
- DCHECK(info->IsMethodBeingCompiled());
- info->SetIsMethodBeingCompiled(false);
+ DCHECK(info->IsMethodBeingCompiled(osr));
+ info->SetIsMethodBeingCompiled(false, osr);
}
size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a54f04f..9f18c70 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -80,7 +80,7 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
- void DoneCompiling(ArtMethod* method, Thread* self)
+ void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
@@ -91,9 +91,7 @@
// Allocate and write code and its metadata to the code cache.
uint8_t* CommitCode(Thread* self,
ArtMethod* method,
- const uint8_t* mapping_table,
const uint8_t* vmap_table,
- const uint8_t* gc_map,
size_t frame_size_in_bytes,
size_t core_spill_mask,
size_t fp_spill_mask,
@@ -201,9 +199,7 @@
// allocation fails. Return null if the allocation fails.
uint8_t* CommitCodeInternal(Thread* self,
ArtMethod* method,
- const uint8_t* mapping_table,
const uint8_t* vmap_table,
- const uint8_t* gc_map,
size_t frame_size_in_bytes,
size_t core_spill_mask,
size_t fp_spill_mask,
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 55d627a..3a71bba 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -119,12 +119,18 @@
InlineCache* GetInlineCache(uint32_t dex_pc);
- bool IsMethodBeingCompiled() const {
- return is_method_being_compiled_;
+ bool IsMethodBeingCompiled(bool osr) const {
+ return osr
+ ? is_osr_method_being_compiled_
+ : is_method_being_compiled_;
}
- void SetIsMethodBeingCompiled(bool value) {
- is_method_being_compiled_ = value;
+ void SetIsMethodBeingCompiled(bool value, bool osr) {
+ if (osr) {
+ is_osr_method_being_compiled_ = value;
+ } else {
+ is_method_being_compiled_ = value;
+ }
}
void SetSavedEntryPoint(const void* entry_point) {
@@ -155,7 +161,8 @@
}
bool IsInUseByCompiler() const {
- return IsMethodBeingCompiled() || (current_inline_uses_ > 0);
+ return IsMethodBeingCompiled(/*osr*/ true) || IsMethodBeingCompiled(/*osr*/ false) ||
+ (current_inline_uses_ > 0);
}
private:
@@ -181,6 +188,7 @@
// is implicitly guarded by the JIT code cache lock.
// TODO: Make the JIT code cache lock global.
bool is_method_being_compiled_;
+ bool is_osr_method_being_compiled_;
// When the compiler inlines the method associated to this ProfilingInfo,
// it updates this counter so that the GC does not try to clear the inline caches.
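The ProfilingInfo change above splits a single in-flight flag in two, so a standard compilation and an OSR compilation of the same method are tracked independently and IsInUseByCompiler() must consult both. The bookkeeping in miniature:

    struct CompilationFlags {
      bool compiling = false;      // standard JIT compilation in flight
      bool compiling_osr = false;  // on-stack-replacement compilation in flight

      bool IsBeingCompiled(bool osr) const {
        return osr ? compiling_osr : compiling;
      }
      void SetBeingCompiled(bool value, bool osr) {
        (osr ? compiling_osr : compiling) = value;
      }
      bool InUse() const { return compiling || compiling_osr; }
    };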
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 421641c..5d89c21 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -426,7 +426,9 @@
if (error_msg != nullptr) {
auto saved_errno = errno;
- PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
+ if (kIsDebugBuild || VLOG_IS_ON(oat)) {
+ PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
+ }
*error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
") of file '%s' failed: %s. See process maps in the log.",
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 7900eac..42f003d 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -880,9 +880,10 @@
DCHECK(!IsInterface());
mirror::Class* common_super_class = this;
while (!common_super_class->IsAssignableFrom(klass.Get())) {
- common_super_class = common_super_class->GetSuperClass();
+ mirror::Class* old_common = common_super_class;
+ common_super_class = old_common->GetSuperClass();
+ DCHECK(common_super_class != nullptr) << PrettyClass(old_common);
}
- DCHECK(common_super_class != nullptr);
return common_super_class;
}
@@ -1023,8 +1024,8 @@
// TODO: Move this to java_lang_Class.cc?
ArtMethod* Class::GetDeclaredConstructor(
- Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args) {
- for (auto& m : GetDirectMethods(sizeof(void*))) {
+ Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, size_t pointer_size) {
+ for (auto& m : GetDirectMethods(pointer_size)) {
// Skip <clinit> which is a static constructor, as well as non constructors.
if (m.IsStatic() || !m.IsConstructor()) {
continue;
@@ -1138,5 +1139,31 @@
mirror::String* name,
mirror::ObjectArray<mirror::Class>* args);
+template <bool kTransactionActive>
+mirror::Constructor* Class::GetDeclaredConstructorInternal(
+ Thread* self,
+ mirror::Class* klass,
+ mirror::ObjectArray<mirror::Class>* args) {
+ StackHandleScope<1> hs(self);
+ const size_t pointer_size = kTransactionActive
+ ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
+ : sizeof(void*);
+ ArtMethod* result = klass->GetDeclaredConstructor(self, hs.NewHandle(args), pointer_size);
+ return result != nullptr
+ ? mirror::Constructor::CreateFromArtMethod<kTransactionActive>(self, result)
+ : nullptr;
+}
+
+template mirror::Constructor* Class::GetDeclaredConstructorInternal<false>(
+ Thread* self,
+ mirror::Class* klass,
+ mirror::ObjectArray<mirror::Class>* args);
+template mirror::Constructor* Class::GetDeclaredConstructorInternal<true>(
+ Thread* self,
+ mirror::Class* klass,
+ mirror::ObjectArray<mirror::Class>* args);
+
} // namespace mirror
} // namespace art
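GetDeclaredConstructorInternal above uses the explicit-instantiation idiom: the template definition stays in the .cc file, and the only two instantiations callers may link against (kTransactionActive = true/false) are spelled out at the end. The idiom in isolation:

    // twice.h -- declaration only; no body in the header.
    template <bool kFlag> int Twice(int x);

    // twice.cc -- definition plus the two permitted instantiations.
    template <bool kFlag> int Twice(int x) { return kFlag ? 2 * x : x; }
    template int Twice<true>(int);
    template int Twice<false>(int);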
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 7082c88..57c3590 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -299,7 +299,9 @@
// Mutually exclusive from whether or not each method is allowed to skip access checks.
void SetVerificationAttempted() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
- SetAccessFlags(flags | kAccVerificationAttempted);
+ if ((flags & kAccVerificationAttempted) == 0) {
+ SetAccessFlags(flags | kAccVerificationAttempted);
+ }
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -766,6 +768,11 @@
mirror::String* name,
mirror::ObjectArray<mirror::Class>* args)
SHARED_REQUIRES(Locks::mutator_lock_);
+ template <bool kTransactionActive = false>
+ static Constructor* GetDeclaredConstructorInternal(Thread* self,
+ mirror::Class* klass,
+ mirror::ObjectArray<mirror::Class>* args)
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(size_t pointer_size)
@@ -1213,7 +1220,7 @@
// May cause thread suspension due to EqualParameters.
ArtMethod* GetDeclaredConstructor(
- Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args)
+ Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
diff --git a/runtime/mirror/class_loader-inl.h b/runtime/mirror/class_loader-inl.h
index 84fa80f..cc910b0 100644
--- a/runtime/mirror/class_loader-inl.h
+++ b/runtime/mirror/class_loader-inl.h
@@ -34,7 +34,6 @@
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
if (kVisitClasses) {
// Visit classes loaded after.
- ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
ClassTable* const class_table = GetClassTable();
if (class_table != nullptr) {
class_table->VisitRoots(visitor);
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index 97973e6..9838b71 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -96,14 +96,18 @@
array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
+template <bool kTransactionActive>
Constructor* Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(method->IsConstructor()) << PrettyMethod(method);
auto* ret = down_cast<Constructor*>(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
- static_cast<AbstractMethod*>(ret)->CreateFromArtMethod(method);
+ static_cast<AbstractMethod*>(ret)->CreateFromArtMethod<kTransactionActive>(method);
}
return ret;
}
+template Constructor* Constructor::CreateFromArtMethod<false>(Thread* self, ArtMethod* method);
+template Constructor* Constructor::CreateFromArtMethod<true>(Thread* self, ArtMethod* method);
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index 12a72fe..0b56964 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -60,6 +60,7 @@
// C++ mirror of java.lang.reflect.Constructor.
class MANAGED Constructor: public AbstractMethod {
public:
+ template <bool kTransactionActive = false>
static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index bf24de5..c1899af 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -322,15 +322,11 @@
static jobject Class_getDeclaredConstructorInternal(
JNIEnv* env, jobject javaThis, jobjectArray args) {
ScopedFastNativeObjectAccess soa(env);
- auto* klass = DecodeClass(soa, javaThis);
- auto* params = soa.Decode<mirror::ObjectArray<mirror::Class>*>(args);
- StackHandleScope<1> hs(soa.Self());
- auto* declared_constructor = klass->GetDeclaredConstructor(soa.Self(), hs.NewHandle(params));
- if (declared_constructor != nullptr) {
- return soa.AddLocalReference<jobject>(
- mirror::Constructor::CreateFromArtMethod(soa.Self(), declared_constructor));
- }
- return nullptr;
+ mirror::Constructor* result = mirror::Class::GetDeclaredConstructorInternal(
+ soa.Self(),
+ DecodeClass(soa, javaThis),
+ soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
+ return soa.AddLocalReference<jobject>(result);
}
static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool public_only)
@@ -545,6 +541,17 @@
return soa.AddLocalReference<jstring>(class_name);
}
+static jobjectArray Class_getSignatureAnnotation(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ return nullptr;
+ }
+ return soa.AddLocalReference<jobjectArray>(
+ klass->GetDexFile().GetSignatureAnnotationForClass(klass));
+}
+
static jboolean Class_isAnonymousClass(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
@@ -608,7 +615,8 @@
}
auto* constructor = klass->GetDeclaredConstructor(
soa.Self(),
- ScopedNullHandle<mirror::ObjectArray<mirror::Class>>());
+ ScopedNullHandle<mirror::ObjectArray<mirror::Class>>(),
+ sizeof(void*));
if (UNLIKELY(constructor == nullptr)) {
soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
"%s has no zero argument constructor",
@@ -692,6 +700,7 @@
NATIVE_METHOD(Class, getNameNative, "!()Ljava/lang/String;"),
NATIVE_METHOD(Class, getProxyInterfaces, "!()[Ljava/lang/Class;"),
NATIVE_METHOD(Class, getPublicDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getSignatureAnnotation, "!()[Ljava/lang/String;"),
NATIVE_METHOD(Class, isAnonymousClass, "!()Z"),
NATIVE_METHOD(Class, isDeclaredAnnotationPresent, "!(Ljava/lang/Class;)Z"),
NATIVE_METHOD(Class, newInstance, "!()Ljava/lang/Object;"),
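The registration tables in this file and in the new AbstractMethod file below rely on ART's NATIVE_METHOD macro, which builds a JNINativeMethod entry; the leading '!' on each signature is an ART-internal marker for the fast-native call path (the runtime strips it before registering). A sketch of the expanded form, with a made-up method:

    #include <jni.h>

    static jboolean Demo_isReady(JNIEnv*, jobject) {
      return JNI_TRUE;
    }

    static const JNINativeMethod gDemoMethods[] = {
        // Signature "!()Z": '!' requests the fast-native path in ART.
        {"isReady", "!()Z", reinterpret_cast<void*>(Demo_isReady)},
    };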
diff --git a/runtime/native/java_lang_reflect_AbstractMethod.cc b/runtime/native/java_lang_reflect_AbstractMethod.cc
new file mode 100644
index 0000000..7e11c11
--- /dev/null
+++ b/runtime/native/java_lang_reflect_AbstractMethod.cc
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "java_lang_reflect_AbstractMethod.h"
+
+#include "art_method-inl.h"
+#include "jni_internal.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "reflection.h"
+#include "scoped_fast_native_object_access.h"
+#include "well_known_classes.h"
+
+namespace art {
+
+static jobjectArray AbstractMethod_getDeclaredAnnotations(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->GetDeclaringClass()->IsProxyClass()) {
+ // Return an empty array instead of a null pointer.
+ mirror::Class* annotation_array_class =
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
+ mirror::ObjectArray<mirror::Object>* empty_array =
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0);
+ return soa.AddLocalReference<jobjectArray>(empty_array);
+ }
+ return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetAnnotationsForMethod(method));
+}
+
+static jobjectArray AbstractMethod_getSignatureAnnotation(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->GetDeclaringClass()->IsProxyClass()) {
+ return nullptr;
+ }
+ StackHandleScope<1> hs(soa.Self());
+ return soa.AddLocalReference<jobjectArray>(
+ method->GetDexFile()->GetSignatureAnnotationForMethod(method));
+}
+
+
+static jboolean AbstractMethod_isAnnotationPresentNative(JNIEnv* env, jobject javaMethod,
+ jclass annotationType) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->GetDeclaringClass()->IsProxyClass()) {
+ return false;
+ }
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return method->GetDexFile()->IsMethodAnnotationPresent(method, klass);
+}
+
+static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(AbstractMethod, getDeclaredAnnotations, "!()[Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(AbstractMethod, getSignatureAnnotation, "!()[Ljava/lang/String;"),
+ NATIVE_METHOD(AbstractMethod, isAnnotationPresentNative, "!(Ljava/lang/Class;)Z"),
+};
+
+void register_java_lang_reflect_AbstractMethod(JNIEnv* env) {
+ REGISTER_NATIVE_METHODS("java/lang/reflect/AbstractMethod");
+}
+
+} // namespace art
diff --git a/runtime/native/java_lang_reflect_AbstractMethod.h b/runtime/native/java_lang_reflect_AbstractMethod.h
new file mode 100644
index 0000000..222e5a0
--- /dev/null
+++ b/runtime/native/java_lang_reflect_AbstractMethod.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ABSTRACTMETHOD_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ABSTRACTMETHOD_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_reflect_AbstractMethod(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ABSTRACTMETHOD_H_
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index d7cf62e..78999c2 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -41,20 +41,6 @@
method->GetDexFile()->GetAnnotationForMethod(method, klass));
}
-static jobjectArray Method_getDeclaredAnnotations(JNIEnv* env, jobject javaMethod) {
- ScopedFastNativeObjectAccess soa(env);
- ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- if (method->GetDeclaringClass()->IsProxyClass()) {
- // Return an empty array instead of a null pointer.
- mirror::Class* annotation_array_class =
- soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
- mirror::ObjectArray<mirror::Object>* empty_array =
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0);
- return soa.AddLocalReference<jobjectArray>(empty_array);
- }
- return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetAnnotationsForMethod(method));
-}
-
static jobject Method_getDefaultValue(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
@@ -116,27 +102,13 @@
return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs);
}
-static jboolean Method_isAnnotationPresentNative(JNIEnv* env, jobject javaMethod,
- jclass annotationType) {
- ScopedFastNativeObjectAccess soa(env);
- ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- if (method->GetDeclaringClass()->IsProxyClass()) {
- return false;
- }
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
- return method->GetDexFile()->IsMethodAnnotationPresent(method, klass);
-}
-
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Method, getAnnotationNative,
"!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
- NATIVE_METHOD(Method, getDeclaredAnnotations, "!()[Ljava/lang/annotation/Annotation;"),
NATIVE_METHOD(Method, getDefaultValue, "!()Ljava/lang/Object;"),
NATIVE_METHOD(Method, getExceptionTypes, "!()[Ljava/lang/Class;"),
NATIVE_METHOD(Method, getParameterAnnotationsNative, "!()[[Ljava/lang/annotation/Annotation;"),
NATIVE_METHOD(Method, invoke, "!(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;"),
- NATIVE_METHOD(Method, isAnnotationPresentNative, "!(Ljava/lang/Class;)Z"),
};
void register_java_lang_reflect_Method(JNIEnv* env) {
diff --git a/runtime/oat.h b/runtime/oat.h
index 469a65f..543d99f 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '7', '8', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '7', '9', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
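The version bump above (078 to 079) accompanies the OatQuickMethodHeader layout change: with the mapping-table and GC-map fields gone, oat files written by an older build are binary-incompatible, and the version bytes force them to be rejected and recompiled. A sketch of the check a loader performs, assuming a four-byte version field:

    #include <cstdint>
    #include <cstring>

    bool IsOatVersionCompatible(const uint8_t (&file_version)[4]) {
      static constexpr uint8_t kOatVersion[] = {'0', '7', '9', '\0'};
      return memcmp(file_version, kOatVersion, sizeof(kOatVersion)) == 0;
    }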
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 7b92120..d7d0c4f 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -71,44 +71,6 @@
return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.FpSpillMask();
}
-inline const uint8_t* OatFile::OatMethod::GetGcMap() const {
- const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
- if (code == nullptr) {
- return nullptr;
- }
- uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].gc_map_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code) - offset;
-}
-
-inline uint32_t OatFile::OatMethod::GetGcMapOffset() const {
- const uint8_t* gc_map = GetGcMap();
- return static_cast<uint32_t>(gc_map != nullptr ? gc_map - begin_ : 0u);
-}
-
-inline uint32_t OatFile::OatMethod::GetGcMapOffsetOffset() const {
- const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
- if (method_header == nullptr) {
- return 0u;
- }
- return reinterpret_cast<const uint8_t*>(&method_header->gc_map_offset_) - begin_;
-}
-
-inline uint32_t OatFile::OatMethod::GetMappingTableOffset() const {
- const uint8_t* mapping_table = GetMappingTable();
- return static_cast<uint32_t>(mapping_table != nullptr ? mapping_table - begin_ : 0u);
-}
-
-inline uint32_t OatFile::OatMethod::GetMappingTableOffsetOffset() const {
- const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
- if (method_header == nullptr) {
- return 0u;
- }
- return reinterpret_cast<const uint8_t*>(&method_header->mapping_table_offset_) - begin_;
-}
-
inline uint32_t OatFile::OatMethod::GetVmapTableOffset() const {
const uint8_t* vmap_table = GetVmapTable();
return static_cast<uint32_t>(vmap_table != nullptr ? vmap_table - begin_ : 0u);
@@ -122,18 +84,6 @@
return reinterpret_cast<const uint8_t*>(&method_header->vmap_table_offset_) - begin_;
}
-inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
- const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
- if (code == nullptr) {
- return nullptr;
- }
- uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].mapping_table_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code) - offset;
-}
-
inline const uint8_t* OatFile::OatMethod::GetVmapTable() const {
const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
if (code == nullptr) {
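These accessors all rely on the layout contract that an OatQuickMethodHeader sits immediately before the compiled code: the header is recovered by indexing [-1] off the code pointer, and each table by subtracting its offset from the same pointer. A self-contained toy of that layout (simplified types, not the real ART definitions):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Stand-in for OatQuickMethodHeader, reduced to the one surviving offset.
    struct MethodHeader {
      uint32_t vmap_table_offset;  // bytes from code start back to the table
    };

    int main() {
      // Memory layout: [vmap table][header][code ...]
      std::vector<uint8_t> chunk;
      const uint8_t vmap_table[] = {1, 2, 3, 4};
      chunk.insert(chunk.end(), vmap_table, vmap_table + sizeof(vmap_table));
      MethodHeader header{
          static_cast<uint32_t>(sizeof(MethodHeader) + sizeof(vmap_table))};
      const uint8_t* raw = reinterpret_cast<const uint8_t*>(&header);
      chunk.insert(chunk.end(), raw, raw + sizeof(header));
      chunk.push_back(0x90);  // one byte of "code"

      const uint8_t* code = chunk.data() + sizeof(vmap_table) + sizeof(header);
      // Recover the header via negative indexing, as GetFpSpillMask() does.
      const MethodHeader* found =
          reinterpret_cast<const MethodHeader*>(code) - 1;
      // Walk back from the code pointer to the table, as GetVmapTable() does.
      const uint8_t* table = code - found->vmap_table_offset;
      assert(table[0] == 1 && table[3] == 4);
      return 0;
    }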
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 7c83715..ccb8b29 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -50,7 +50,6 @@
#include "type_lookup_table.h"
#include "utils.h"
#include "utils/dex_cache_arrays_layout-inl.h"
-#include "vmap_table.h"
namespace art {
@@ -173,7 +172,7 @@
}
if (requested_base != nullptr && begin_ != requested_base) {
// Host can fail this check. Do not dump there to avoid polluting the output.
- if (kIsTargetBuild) {
+ if (kIsTargetBuild && (kIsDebugBuild || VLOG_IS_ON(oat))) {
PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
}
*error_msg = StringPrintf("Failed to find oatdata symbol at expected address: "
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 705ba0d..11a9d76 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -123,18 +123,10 @@
uint32_t GetCoreSpillMask() const;
uint32_t GetFpSpillMask() const;
- const uint8_t* GetMappingTable() const;
- uint32_t GetMappingTableOffset() const;
- uint32_t GetMappingTableOffsetOffset() const;
-
const uint8_t* GetVmapTable() const;
uint32_t GetVmapTableOffset() const;
uint32_t GetVmapTableOffsetOffset() const;
- const uint8_t* GetGcMap() const;
- uint32_t GetGcMapOffset() const;
- uint32_t GetGcMapOffsetOffset() const;
-
// Create an OatMethod with offsets relative to the given base address
OatMethod(const uint8_t* base, const uint32_t code_offset)
: begin_(base), code_offset_(code_offset) {
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index ce892f3..78e372a 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -77,7 +77,7 @@
OatFileAssistant::~OatFileAssistant() {
// Clean up the lock file.
if (flock_.HasFile()) {
- TEMP_FAILURE_RETRY(unlink(flock_.GetFile()->GetPath().c_str()));
+ unlink(flock_.GetFile()->GetPath().c_str());
}
}
@@ -109,7 +109,7 @@
std::string lock_file_name = *OatFileName() + ".flock";
if (!flock_.Init(lock_file_name.c_str(), error_msg)) {
- TEMP_FAILURE_RETRY(unlink(lock_file_name.c_str()));
+ unlink(lock_file_name.c_str());
return false;
}
return true;
@@ -495,7 +495,7 @@
return true;
}
- if (file.GetOatHeader().GetImageFileLocationOatChecksum() != image_info->oat_checksum) {
+ if (file.GetOatHeader().GetImageFileLocationOatChecksum() != GetCombinedImageChecksum()) {
VLOG(oat) << "Oat image checksum does not match image checksum.";
return true;
}
@@ -613,7 +613,7 @@
if (!Exec(argv, error_msg)) {
// Manually delete the file. This ensures there is no garbage left over if
// the process unexpectedly died.
- TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
+ unlink(oat_file_name.c_str());
return kUpdateFailed;
}
@@ -673,13 +673,13 @@
// Manually delete the file. This ensures there is no garbage left over if
// the process unexpectedly died.
oat_file->Erase();
- TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
+ unlink(oat_file_name.c_str());
return kUpdateFailed;
}
if (oat_file->FlushCloseOrErase() != 0) {
*error_msg = "Unable to close oat file " + oat_file_name;
- TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
+ unlink(oat_file_name.c_str());
return kUpdateFailed;
}
@@ -931,8 +931,7 @@
cached_image_info_.patch_delta = image_header.GetPatchDelta();
} else {
std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeaderOrDie(
- cached_image_info_.location.c_str(), isa_));
+ gc::space::ImageSpace::ReadImageHeaderOrDie(cached_image_info_.location.c_str(), isa_));
cached_image_info_.oat_checksum = image_header->GetOatChecksum();
cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
image_header->GetOatDataBegin());
@@ -940,10 +939,39 @@
}
}
image_info_load_succeeded_ = (!image_spaces.empty());
+
+ combined_image_checksum_ = CalculateCombinedImageChecksum(isa_);
}
return image_info_load_succeeded_ ? &cached_image_info_ : nullptr;
}
+// TODO: Use something better than xor.
+uint32_t OatFileAssistant::CalculateCombinedImageChecksum(InstructionSet isa) {
+ uint32_t checksum = 0;
+ std::vector<gc::space::ImageSpace*> image_spaces =
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ if (isa == kRuntimeISA) {
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ checksum ^= image_space->GetImageHeader().GetOatChecksum();
+ }
+ } else {
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ std::string location = image_space->GetImageLocation();
+ std::unique_ptr<ImageHeader> image_header(
+ gc::space::ImageSpace::ReadImageHeaderOrDie(location.c_str(), isa));
+ checksum ^= image_header->GetOatChecksum();
+ }
+ }
+ return checksum;
+}
+
+uint32_t OatFileAssistant::GetCombinedImageChecksum() {
+ if (!image_info_load_attempted_) {
+ GetImageInfo();
+ }
+ return combined_image_checksum_;
+}
+
gc::space::ImageSpace* OatFileAssistant::OpenImageSpace(const OatFile* oat_file) {
DCHECK(oat_file != nullptr);
std::string art_file = ArtFileName(oat_file);
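The combined checksum added here is a plain XOR fold over the oat checksum of each boot image space, as the TODO concedes. A standalone illustration with hypothetical checksum values, including the weakness the TODO points at:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Mirrors the fold in CalculateCombinedImageChecksum above. XOR is
    // order-independent and self-inverse, so swapped or duplicated image
    // checksums go undetected, hence the TODO.
    uint32_t CombineChecksums(const std::vector<uint32_t>& checksums) {
      uint32_t combined = 0;
      for (uint32_t c : checksums) {
        combined ^= c;
      }
      return combined;
    }

    int main() {
      assert(CombineChecksums({0xCAFEBABEu, 0xDEADBEEFu}) ==
             (0xCAFEBABEu ^ 0xDEADBEEFu));
      // Two identical checksums cancel to zero: the weakness.
      assert(CombineChecksums({0x1234u, 0x1234u}) == 0u);
      return 0;
    }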
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 17f72fe..d3228de 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -279,6 +279,8 @@
static bool DexFilenameToOdexFilename(const std::string& location,
InstructionSet isa, std::string* odex_filename, std::string* error_msg);
+ static uint32_t CalculateCombinedImageChecksum(InstructionSet isa = kRuntimeISA);
+
private:
struct ImageInfo {
uint32_t oat_checksum = 0;
@@ -352,6 +354,8 @@
// The caller shouldn't clean up or free the returned pointer.
const ImageInfo* GetImageInfo();
+ uint32_t GetCombinedImageChecksum();
+
// To implement Lock(), we lock a dummy file where the oat file would go
// (adding ".flock" to the target file name) and retain the lock for the
// remaining lifetime of the OatFileAssistant object.
@@ -423,6 +427,7 @@
bool image_info_load_attempted_ = false;
bool image_info_load_succeeded_ = false;
ImageInfo cached_image_info_;
+ uint32_t combined_image_checksum_ = 0;
// For debugging only.
// If this flag is set, the oat or odex file has been released to the user
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index bddfa4f..f50d1cb 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -239,7 +239,8 @@
ASSERT_TRUE(!image_spaces.empty() && image_spaces[0] != nullptr);
const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
const OatHeader& oat_header = odex_file->GetOatHeader();
- EXPECT_EQ(image_header.GetOatChecksum(), oat_header.GetImageFileLocationOatChecksum());
+ uint32_t combined_checksum = OatFileAssistant::CalculateCombinedImageChecksum();
+ EXPECT_EQ(combined_checksum, oat_header.GetImageFileLocationOatChecksum());
EXPECT_NE(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()),
oat_header.GetImageFileLocationOatDataBegin());
EXPECT_NE(image_header.GetPatchDelta(), oat_header.GetImagePatchDelta());
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 07a112f..0ab2bfe 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -17,23 +17,18 @@
#include "oat_quick_method_header.h"
#include "art_method.h"
-#include "mapping_table.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
namespace art {
OatQuickMethodHeader::OatQuickMethodHeader(
- uint32_t mapping_table_offset,
uint32_t vmap_table_offset,
- uint32_t gc_map_offset,
uint32_t frame_size_in_bytes,
uint32_t core_spill_mask,
uint32_t fp_spill_mask,
uint32_t code_size)
- : mapping_table_offset_(mapping_table_offset),
- vmap_table_offset_(vmap_table_offset),
- gc_map_offset_(gc_map_offset),
+ : vmap_table_offset_(vmap_table_offset),
frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask),
code_size_(code_size) {}
@@ -52,28 +47,8 @@
return stack_map.GetDexPc(encoding.stack_map_encoding);
}
} else {
- MappingTable table(GetMappingTable());
- // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
- // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
- if (table.TotalSize() == 0) {
- DCHECK(method->IsNative());
- return DexFile::kDexNoIndex;
- }
-
- // Assume the caller wants a pc-to-dex mapping so check here first.
- typedef MappingTable::PcToDexIterator It;
- for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (cur.NativePcOffset() == sought_offset) {
- return cur.DexPc();
- }
- }
- // Now check dex-to-pc mappings.
- typedef MappingTable::DexToPcIterator It2;
- for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (cur.NativePcOffset() == sought_offset) {
- return cur.DexPc();
- }
- }
+ DCHECK(method->IsNative());
+ return DexFile::kDexNoIndex;
}
if (abort_on_failure) {
ScopedObjectAccess soa(Thread::Current());
@@ -91,44 +66,22 @@
bool is_for_catch_handler,
bool abort_on_failure) const {
const void* entry_point = GetEntryPoint();
- if (IsOptimized()) {
- // Optimized code does not have a mapping table. Search for the dex-to-pc
- // mapping in stack maps.
- CodeInfo code_info = GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ DCHECK(!method->IsNative());
+ DCHECK(IsOptimized());
+ // Search for the dex-to-pc mapping in stack maps.
+ CodeInfo code_info = GetOptimizedCodeInfo();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
- // All stack maps are stored in the same CodeItem section, safepoint stack
- // maps first, then catch stack maps. We use `is_for_catch_handler` to select
- // the order of iteration.
- StackMap stack_map =
- LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
- : code_info.GetStackMapForDexPc(dex_pc, encoding);
- if (stack_map.IsValid()) {
- return reinterpret_cast<uintptr_t>(entry_point) +
- stack_map.GetNativePcOffset(encoding.stack_map_encoding);
- }
- } else {
- MappingTable table(GetMappingTable());
- if (table.TotalSize() == 0) {
- DCHECK_EQ(dex_pc, 0U);
- return 0; // Special no mapping/pc == 0 case
- }
- // Assume the caller wants a dex-to-pc mapping so check here first.
- typedef MappingTable::DexToPcIterator It;
- for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
- }
- }
- // Now check pc-to-dex mappings.
- typedef MappingTable::PcToDexIterator It2;
- for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
- }
- }
+ // All stack maps are stored in the same CodeItem section, safepoint stack
+ // maps first, then catch stack maps. We use `is_for_catch_handler` to select
+ // the order of iteration.
+ StackMap stack_map =
+ LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
+ : code_info.GetStackMapForDexPc(dex_pc, encoding);
+ if (stack_map.IsValid()) {
+ return reinterpret_cast<uintptr_t>(entry_point) +
+ stack_map.GetNativePcOffset(encoding.stack_map_encoding);
}
-
if (abort_on_failure) {
ScopedObjectAccess soa(Thread::Current());
LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
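With the quick mapping tables gone, both directions of pc translation go through the optimizing compiler's stack maps, and the comment above carries the key invariant: safepoint maps are stored before catch maps, so is_for_catch_handler selects which group to search. A toy sketch of that two-group lookup (simplified structures; the real StackMap is bit-packed):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct ToyStackMap {
      uint32_t dex_pc;
      uint32_t native_pc_offset;
      bool is_catch;  // catch maps are stored after all safepoint maps
    };

    // Mirrors the GetStackMapForDexPc / GetCatchStackMapForDexPc split.
    const ToyStackMap* FindForDexPc(const std::vector<ToyStackMap>& maps,
                                    uint32_t dex_pc,
                                    bool for_catch_handler) {
      for (const ToyStackMap& map : maps) {
        if (map.is_catch == for_catch_handler && map.dex_pc == dex_pc) {
          return &map;
        }
      }
      return nullptr;
    }

    int main() {
      std::vector<ToyStackMap> maps = {
          {0x06, 0x20, false},  // safepoint map
          {0x06, 0x48, true},   // catch map for the same dex pc
      };
      assert(FindForDexPc(maps, 0x06, false)->native_pc_offset == 0x20);
      assert(FindForDexPc(maps, 0x06, true)->native_pc_offset == 0x48);
      return 0;
    }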
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index daabc6e..abddc6d 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -30,9 +30,7 @@
// OatQuickMethodHeader precedes the raw code chunk generated by the compiler.
class PACKED(4) OatQuickMethodHeader {
public:
- OatQuickMethodHeader(uint32_t mapping_table_offset = 0U,
- uint32_t vmap_table_offset = 0U,
- uint32_t gc_map_offset = 0U,
+ OatQuickMethodHeader(uint32_t vmap_table_offset = 0U,
uint32_t frame_size_in_bytes = 0U,
uint32_t core_spill_mask = 0U,
uint32_t fp_spill_mask = 0U,
@@ -60,7 +58,7 @@
}
bool IsOptimized() const {
- return gc_map_offset_ == 0 && vmap_table_offset_ != 0;
+ return code_size_ != 0 && vmap_table_offset_ != 0;
}
const void* GetOptimizedCodeInfoPtr() const {
@@ -81,14 +79,6 @@
return code_size_;
}
- const uint8_t* GetNativeGcMap() const {
- return (gc_map_offset_ == 0) ? nullptr : code_ - gc_map_offset_;
- }
-
- const uint8_t* GetMappingTable() const {
- return (mapping_table_offset_ == 0) ? nullptr : code_ - mapping_table_offset_;
- }
-
const uint8_t* GetVmapTable() const {
CHECK(!IsOptimized()) << "Unimplemented vmap table for optimizing compiler";
return (vmap_table_offset_ == 0) ? nullptr : code_ - vmap_table_offset_;
@@ -135,12 +125,8 @@
uint32_t ToDexPc(ArtMethod* method, const uintptr_t pc, bool abort_on_failure = true) const;
- // The offset in bytes from the start of the mapping table to the end of the header.
- uint32_t mapping_table_offset_;
// The offset in bytes from the start of the vmap table to the end of the header.
uint32_t vmap_table_offset_;
- // The offset in bytes from the start of the gc map to the end of the header.
- uint32_t gc_map_offset_;
// The stack frame information.
QuickMethodFrameInfo frame_info_;
// The code size in bytes.
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 6234720..a098bf0 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -506,8 +506,6 @@
};
TEST_F(ReflectionTest, StaticMainMethod) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK();
- TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK();
TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Main");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 630d101..d3454e8 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -103,6 +103,7 @@
#include "native/java_lang_VMClassLoader.h"
#include "native/java_lang_ref_FinalizerReference.h"
#include "native/java_lang_ref_Reference.h"
+#include "native/java_lang_reflect_AbstractMethod.h"
#include "native/java_lang_reflect_Array.h"
#include "native/java_lang_reflect_Constructor.h"
#include "native/java_lang_reflect_Field.h"
@@ -1349,6 +1350,7 @@
register_java_lang_DexCache(env);
register_java_lang_Object(env);
register_java_lang_ref_FinalizerReference(env);
+ register_java_lang_reflect_AbstractMethod(env);
register_java_lang_reflect_Array(env);
register_java_lang_reflect_Constructor(env);
register_java_lang_reflect_Field(env);
diff --git a/runtime/stack.cc b/runtime/stack.cc
index c22eb92..56ef5aa 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -21,7 +21,6 @@
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
-#include "gc_map.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "jit/jit.h"
@@ -36,7 +35,6 @@
#include "thread.h"
#include "thread_list.h"
#include "verify_object-inl.h"
-#include "vmap_table.h"
namespace art {
@@ -215,33 +213,6 @@
return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
}
-bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
- DCHECK_EQ(m, GetMethod());
- // Process register map (which native and runtime methods don't have)
- if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
- return false;
- }
- const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- if (method_header->IsOptimized()) {
- return true; // TODO: Implement.
- }
- const uint8_t* native_gc_map = method_header->GetNativeGcMap();
- CHECK(native_gc_map != nullptr) << PrettyMethod(m);
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- // Can't be null or how would we compile its instructions?
- DCHECK(code_item != nullptr) << PrettyMethod(m);
- NativePcOffsetToReferenceMap map(native_gc_map);
- size_t num_regs = std::min(map.RegWidth() * 8, static_cast<size_t>(code_item->registers_size_));
- const uint8_t* reg_bitmap = nullptr;
- if (num_regs > 0) {
- uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
- reg_bitmap = map.FindBitMap(native_pc_offset);
- DCHECK(reg_bitmap != nullptr);
- }
- // Does this register hold a reference?
- return vreg < num_regs && TestBitmap(vreg, reg_bitmap);
-}
-
bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg,
VRegKind kind,
uint32_t* val) const {
@@ -273,11 +244,8 @@
if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
return true;
}
- if (cur_oat_quick_method_header_->IsOptimized()) {
- return GetVRegFromOptimizedCode(m, vreg, kind, val);
- } else {
- return GetVRegFromQuickCode(m, vreg, kind, val);
- }
+ DCHECK(cur_oat_quick_method_header_->IsOptimized());
+ return GetVRegFromOptimizedCode(m, vreg, kind, val);
} else {
DCHECK(cur_shadow_frame_ != nullptr);
if (kind == kReferenceVReg) {
@@ -290,29 +258,6 @@
}
}
-bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
- uint32_t* val) const {
- DCHECK_EQ(m, GetMethod());
- const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
- const VmapTable vmap_table(method_header->GetVmapTable());
- uint32_t vmap_offset;
- // TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
- bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
- uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
- return GetRegisterIfAccessible(reg, kind, val);
- } else {
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
- // its instructions?
- *val = *GetVRegAddrFromQuickCode(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
- frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
- return true;
- }
-}
-
bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
DCHECK_EQ(m, GetMethod());
@@ -432,11 +377,8 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- if (cur_oat_quick_method_header_->IsOptimized()) {
- return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
- } else {
- return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
- }
+ DCHECK(cur_oat_quick_method_header_->IsOptimized());
+ return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
} else {
DCHECK(cur_shadow_frame_ != nullptr);
*val = cur_shadow_frame_->GetVRegLong(vreg);
@@ -444,33 +386,6 @@
}
}
-bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
- VRegKind kind_hi, uint64_t* val) const {
- DCHECK_EQ(m, GetMethod());
- const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
- const VmapTable vmap_table(method_header->GetVmapTable());
- uint32_t vmap_offset_lo, vmap_offset_hi;
- // TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
- vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
- bool is_float = (kind_lo == kDoubleLoVReg);
- uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
- uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
- uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
- return GetRegisterPairIfAccessible(reg_lo, reg_hi, kind_lo, val);
- } else {
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
- // its instructions?
- uint32_t* addr = GetVRegAddrFromQuickCode(
- cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
- frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
- *val = *reinterpret_cast<uint64_t*>(addr);
- return true;
- }
-}
-
bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const {
diff --git a/runtime/stack.h b/runtime/stack.h
index 3659560..51f7d63 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -634,9 +634,6 @@
bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsReferenceVReg(ArtMethod* m, uint16_t vreg)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -798,9 +795,6 @@
bool GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const
SHARED_REQUIRES(Locks::mutator_lock_);
- bool GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
- uint32_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -808,9 +802,6 @@
bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
SHARED_REQUIRES(Locks::mutator_lock_);
- bool GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
- VRegKind kind_hi, uint64_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3ecb041..57ccabc 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -42,7 +42,6 @@
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
-#include "gc_map.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/allocator/rosalloc.h"
@@ -72,7 +71,6 @@
#include "utils.h"
#include "verifier/method_verifier.h"
#include "verify_object-inl.h"
-#include "vmap_table.h"
#include "well_known_classes.h"
#include "interpreter/interpreter.h"
@@ -2765,83 +2763,36 @@
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- if (method_header->IsOptimized()) {
- auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
- reinterpret_cast<uintptr_t>(cur_quick_frame));
- uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
- CodeInfo code_info = method_header->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
- DCHECK(map.IsValid());
- // Visit stack entries that hold pointers.
- size_t number_of_bits = map.GetNumberOfStackMaskBits(encoding.stack_map_encoding);
- for (size_t i = 0; i < number_of_bits; ++i) {
- if (map.GetStackMaskBit(encoding.stack_map_encoding, i)) {
- auto* ref_addr = vreg_base + i;
- mirror::Object* ref = ref_addr->AsMirrorPtr();
- if (ref != nullptr) {
- mirror::Object* new_ref = ref;
- visitor_(&new_ref, -1, this);
- if (ref != new_ref) {
- ref_addr->Assign(new_ref);
- }
+ DCHECK(method_header->IsOptimized());
+ auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
+ reinterpret_cast<uintptr_t>(cur_quick_frame));
+ uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
+ CodeInfo code_info = method_header->GetOptimizedCodeInfo();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ DCHECK(map.IsValid());
+ // Visit stack entries that hold pointers.
+ size_t number_of_bits = map.GetNumberOfStackMaskBits(encoding.stack_map_encoding);
+ for (size_t i = 0; i < number_of_bits; ++i) {
+ if (map.GetStackMaskBit(encoding.stack_map_encoding, i)) {
+ auto* ref_addr = vreg_base + i;
+ mirror::Object* ref = ref_addr->AsMirrorPtr();
+ if (ref != nullptr) {
+ mirror::Object* new_ref = ref;
+ visitor_(&new_ref, -1, this);
+ if (ref != new_ref) {
+ ref_addr->Assign(new_ref);
}
}
}
- // Visit callee-save registers that hold pointers.
- uint32_t register_mask = map.GetRegisterMask(encoding.stack_map_encoding);
- for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
- if (register_mask & (1 << i)) {
- mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
- if (*ref_addr != nullptr) {
- visitor_(ref_addr, -1, this);
- }
- }
- }
- } else {
- const uint8_t* native_gc_map = method_header->GetNativeGcMap();
- CHECK(native_gc_map != nullptr) << PrettyMethod(m);
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- // Can't be null or how would we compile its instructions?
- DCHECK(code_item != nullptr) << PrettyMethod(m);
- NativePcOffsetToReferenceMap map(native_gc_map);
- size_t num_regs = map.RegWidth() * 8;
- if (num_regs > 0) {
- uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
- const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
- DCHECK(reg_bitmap != nullptr);
- const VmapTable vmap_table(method_header->GetVmapTable());
- QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
- // For all dex registers in the bitmap
- DCHECK(cur_quick_frame != nullptr);
- for (size_t reg = 0; reg < num_regs; ++reg) {
- // Does this register hold a reference?
- if (TestBitmap(reg, reg_bitmap)) {
- uint32_t vmap_offset;
- if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
- int vmap_reg = vmap_table.ComputeRegister(frame_info.CoreSpillMask(), vmap_offset,
- kReferenceVReg);
- // This is sound as spilled GPRs will be word sized (i.e. 32- or 64-bit).
- mirror::Object** ref_addr =
- reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
- if (*ref_addr != nullptr) {
- visitor_(ref_addr, reg, this);
- }
- } else {
- StackReference<mirror::Object>* ref_addr =
- reinterpret_cast<StackReference<mirror::Object>*>(GetVRegAddrFromQuickCode(
- cur_quick_frame, code_item, frame_info.CoreSpillMask(),
- frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg));
- mirror::Object* ref = ref_addr->AsMirrorPtr();
- if (ref != nullptr) {
- mirror::Object* new_ref = ref;
- visitor_(&new_ref, reg, this);
- if (ref != new_ref) {
- ref_addr->Assign(new_ref);
- }
- }
- }
- }
+ }
+ // Visit callee-save registers that hold pointers.
+ uint32_t register_mask = map.GetRegisterMask(encoding.stack_map_encoding);
+ for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
+ if (register_mask & (1 << i)) {
+ mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
+ if (*ref_addr != nullptr) {
+ visitor_(ref_addr, -1, this);
}
}
}
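The surviving root-visiting path reads two bitmaps out of the stack map: a stack mask (one bit per stack slot that may hold a reference) and a register mask (one bit per callee-save register). The bit-walking shape is the same for both; here it is in isolation, with a plain integer standing in for the encoded register mask:

    #include <cstdint>
    #include <iostream>

    // Same loop shape as the callee-save visit above: test each bit and
    // treat set bits as registers that may hold heap references.
    void VisitMaskedRegisters(uint32_t register_mask) {
      for (uint32_t i = 0; i < 32; ++i) {
        if (register_mask & (1u << i)) {
          std::cout << "r" << i << " may hold a reference\n";
        }
      }
    }

    int main() {
      // Hypothetical mask: r4, r5 and r11 are live reference holders.
      VisitMaskedRegisters((1u << 4) | (1u << 5) | (1u << 11));
      return 0;
    }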
diff --git a/runtime/vmap_table.h b/runtime/vmap_table.h
deleted file mode 100644
index db9e1ea..0000000
--- a/runtime/vmap_table.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_VMAP_TABLE_H_
-#define ART_RUNTIME_VMAP_TABLE_H_
-
-#include "base/logging.h"
-#include "leb128.h"
-#include "stack.h"
-
-namespace art {
-
-class VmapTable {
- public:
- // For efficient encoding of special values, entries are adjusted by 2.
- static constexpr uint16_t kEntryAdjustment = 2u;
- static constexpr uint16_t kAdjustedFpMarker = static_cast<uint16_t>(0xffffu + kEntryAdjustment);
-
- explicit VmapTable(const uint8_t* table) : table_(table) {
- }
-
- // Look up nth entry, not called from performance critical code.
- uint16_t operator[](size_t n) const {
- const uint8_t* table = table_;
- size_t size = DecodeUnsignedLeb128(&table);
- CHECK_LT(n, size);
- uint16_t adjusted_entry = DecodeUnsignedLeb128(&table);
- for (size_t i = 0; i < n; ++i) {
- adjusted_entry = DecodeUnsignedLeb128(&table);
- }
- return adjusted_entry - kEntryAdjustment;
- }
-
- size_t Size() const {
- const uint8_t* table = table_;
- return DecodeUnsignedLeb128(&table);
- }
-
- // Is the dex register 'vreg' in the context or on the stack? Should not be called when the
- // 'kind' is unknown or constant.
- bool IsInContext(size_t vreg, VRegKind kind, uint32_t* vmap_offset) const {
- DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
- kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
- kind == kDoubleHiVReg || kind == kImpreciseConstant);
- *vmap_offset = 0xEBAD0FF5;
- // TODO: take advantage of the registers being ordered
- // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
- // are never promoted to floating point registers.
- bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- bool in_floats = false;
- const uint8_t* table = table_;
- uint16_t adjusted_vreg = vreg + kEntryAdjustment;
- size_t end = DecodeUnsignedLeb128(&table);
- bool high_reg = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
- bool target64 = (kRuntimeISA == kArm64) || (kRuntimeISA == kX86_64) || (kRuntimeISA == kMips64);
- if (target64 && high_reg) {
- // Wide promoted registers are associated with the sreg of the low portion.
- adjusted_vreg--;
- }
- for (size_t i = 0; i < end; ++i) {
- // Stop if we find what we are looking for.
- uint16_t adjusted_entry = DecodeUnsignedLeb128(&table);
- if ((adjusted_entry == adjusted_vreg) && (in_floats == is_float)) {
- *vmap_offset = i;
- return true;
- }
- // 0xffff is the marker for LR (return PC on x86), following it are spilled float registers.
- if (adjusted_entry == kAdjustedFpMarker) {
- in_floats = true;
- }
- }
- return false;
- }
-
- // Compute the register number that corresponds to the entry in the vmap (vmap_offset, computed
- // by IsInContext above). If the kind is floating point then the result will be a floating point
- // register number, otherwise it will be an integer register number.
- uint32_t ComputeRegister(uint32_t spill_mask, uint32_t vmap_offset, VRegKind kind) const {
- // Compute the register we need to load from the context.
- DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
- kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
- kind == kDoubleHiVReg || kind == kImpreciseConstant);
- // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
- // are never promoted to floating point registers.
- bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- uint32_t matches = 0;
- if (UNLIKELY(is_float)) {
- const uint8_t* table = table_;
- DecodeUnsignedLeb128(&table); // Skip size.
- while (DecodeUnsignedLeb128(&table) != kAdjustedFpMarker) {
- matches++;
- }
- matches++;
- }
- CHECK_LT(vmap_offset - matches, static_cast<uint32_t>(POPCOUNT(spill_mask)));
- uint32_t spill_shifts = 0;
- while (matches != (vmap_offset + 1)) {
- DCHECK_NE(spill_mask, 0u);
- matches += spill_mask & 1; // Add 1 if the low bit is set
- spill_mask >>= 1;
- spill_shifts++;
- }
- spill_shifts--; // wind back one as we want the last match
- return spill_shifts;
- }
-
- private:
- const uint8_t* const table_;
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_VMAP_TABLE_H_
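The deleted VmapTable shows why kEntryAdjustment exists: entries are stored ULEB128-encoded, and shifting by 2 makes the 0xffff LR/FP marker wrap (as a uint16_t) to 1, which encodes in a single byte instead of three. A standalone check of that arithmetic, with a minimal ULEB128 size helper:

    #include <cassert>
    #include <cstdint>

    // Number of bytes a value occupies in unsigned LEB128 encoding.
    int Uleb128Size(uint32_t value) {
      int size = 1;
      while (value >= 0x80) {
        value >>= 7;
        ++size;
      }
      return size;
    }

    int main() {
      constexpr uint16_t kEntryAdjustment = 2u;
      constexpr uint16_t kFpMarker = 0xffffu;
      constexpr uint16_t kAdjusted =
          static_cast<uint16_t>(kFpMarker + kEntryAdjustment);
      assert(kAdjusted == 1u);              // wraps around uint16_t
      assert(Uleb128Size(kFpMarker) == 3);  // unadjusted marker costs 3 bytes
      assert(Uleb128Size(kAdjusted) == 1);  // adjusted marker costs 1 byte
      return 0;
    }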
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 2d26fa1..284e554 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -49,13 +49,7 @@
if (m_name.compare("f") == 0) {
CHECK_REGS_CONTAIN_REFS(0x03U, true, 8); // v8: this
CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1); // v8: this, v1: x
- if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
- CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x
- }
CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1); // v8: this, v3: y, v1: x
- if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
- CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x
- }
CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1); // v8: this, v3: y, v1: x
// v2 is added because of the instruction at DexPC 0024. Object merges with 0 is Object. See:
// 0024: move-object v3, v2
@@ -68,15 +62,6 @@
CHECK_REGS_CONTAIN_REFS(0x14U, false, 2); // v2: y
// Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
- if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
- // v8: this, v4: x[1], v2: y, v1: x (dead v0: ex)
- CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 4, 2, 1);
- // v8: this, v4: x[1], v2: y, v1: x (dead v0: ex)
- CHECK_REGS_CONTAIN_REFS(0x1eU, true, 8, 4, 2, 1);
- // v4 is removed from the root set because there is a "merge" operation.
- // See 0016: if-nez v2, 0020.
- CHECK_REGS_CONTAIN_REFS(0x20U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
- }
CHECK_REGS_CONTAIN_REFS(0x22U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
diff --git a/test/005-annotations/expected.txt b/test/005-annotations/expected.txt
index 3d9fd8b..ee5b0c7 100644
--- a/test/005-annotations/expected.txt
+++ b/test/005-annotations/expected.txt
@@ -93,6 +93,7 @@
--> nombre is 'fubar'
SimplyNoted.get(AnnoSimpleType) = @android.test.anno.AnnoSimpleType()
+SimplyNoted.get(AnnoSimpleTypeInvis) = null
SubNoted.get(AnnoSimpleType) = @android.test.anno.AnnoSimpleType()
Package annotations:
diff --git a/test/005-annotations/src/android/test/anno/TestAnnotations.java b/test/005-annotations/src/android/test/anno/TestAnnotations.java
index bc89f16..d36d43e 100644
--- a/test/005-annotations/src/android/test/anno/TestAnnotations.java
+++ b/test/005-annotations/src/android/test/anno/TestAnnotations.java
@@ -185,6 +185,9 @@
// this is expected to be non-null
Annotation anno = SimplyNoted.class.getAnnotation(AnnoSimpleType.class);
System.out.println("SimplyNoted.get(AnnoSimpleType) = " + anno);
+ // this is expected to be null
+ anno = SimplyNoted.class.getAnnotation(AnnoSimpleTypeInvis.class);
+ System.out.println("SimplyNoted.get(AnnoSimpleTypeInvis) = " + anno);
// this is non-null if the @Inherited tag is present
anno = SubNoted.class.getAnnotation(AnnoSimpleType.class);
System.out.println("SubNoted.get(AnnoSimpleType) = " + anno);
diff --git a/test/031-class-attributes/src/ClassAttrs.java b/test/031-class-attributes/src/ClassAttrs.java
index c2e41c5..38bd525 100644
--- a/test/031-class-attributes/src/ClassAttrs.java
+++ b/test/031-class-attributes/src/ClassAttrs.java
@@ -1,6 +1,7 @@
import otherpackage.OtherPackageClass;
import java.io.Serializable;
+import java.lang.reflect.AbstractMethod;
import java.lang.reflect.AccessibleObject;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
@@ -221,8 +222,11 @@
public static String getSignatureAttribute(Object obj) {
Method method;
try {
- Class c = Class.forName("libcore.reflect.AnnotationAccess");
- method = c.getDeclaredMethod("getSignature", java.lang.reflect.AnnotatedElement.class);
+ Class c = obj.getClass();
+ if (c == Method.class || c == Constructor.class) {
+ c = AbstractMethod.class;
+ }
+ method = c.getDeclaredMethod("getSignatureAttribute");
method.setAccessible(true);
} catch (Exception ex) {
ex.printStackTrace();
@@ -230,7 +234,7 @@
}
try {
- return (String) method.invoke(null, obj);
+ return (String) method.invoke(obj);
} catch (IllegalAccessException ex) {
throw new RuntimeException(ex);
} catch (InvocationTargetException ex) {
diff --git a/test/570-checker-osr/src/Main.java b/test/570-checker-osr/src/Main.java
index 1142d49..6514334 100644
--- a/test/570-checker-osr/src/Main.java
+++ b/test/570-checker-osr/src/Main.java
@@ -16,8 +16,28 @@
public class Main {
public static void main(String[] args) {
- new SubMain();
System.loadLibrary(args[0]);
+ Thread testThread = new Thread() {
+ public void run() {
+ performTest();
+ }
+ };
+ testThread.start();
+ try {
+ testThread.join(20 * 1000); // 20s timeout.
+ } catch (InterruptedException ie) {
+ System.out.println("Interrupted.");
+ System.exit(1);
+ }
+ Thread.State state = testThread.getState();
+ if (state != Thread.State.TERMINATED) {
+ System.out.println("Test timed out, current state: " + state);
+ System.exit(1);
+ }
+ }
+
+ public static void performTest() {
+ new SubMain();
if ($noinline$returnInt() != 53) {
throw new Error("Unexpected return value");
}
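The rewritten test runs the body on a watchdog thread: join with a 20 second timeout, then check the thread state to tell completion from a hang. Java's Thread.join(millis) gives this directly; a hedged C++ analogue of the same pattern (std::thread has no timed join, so a condition variable stands in):

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <cstdlib>
    #include <mutex>
    #include <thread>

    // Runs work() on a separate thread and aborts the process if it has
    // not finished within the timeout, mirroring the 20s guard above.
    void RunWithTimeout(void (*work)(), std::chrono::seconds timeout) {
      std::mutex m;
      std::condition_variable cv;
      bool done = false;
      std::thread worker([&] {
        work();
        std::lock_guard<std::mutex> lock(m);
        done = true;
        cv.notify_one();
      });
      std::unique_lock<std::mutex> lock(m);
      if (!cv.wait_for(lock, timeout, [&] { return done; })) {
        std::fprintf(stderr, "Test timed out\n");
        std::exit(1);  // exiting also tears down the stuck worker thread
      }
      lock.unlock();
      worker.join();
    }

    int main() {
      RunWithTimeout([] { /* test body would run here */ },
                     std::chrono::seconds(20));
      return 0;
    }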
diff --git a/tools/checker/match/file.py b/tools/checker/match/file.py
index 6ff19d5..520c4ae 100644
--- a/tools/checker/match/file.py
+++ b/tools/checker/match/file.py
@@ -172,8 +172,8 @@
# match a check group against the first output group of the same name.
c1Pass = c1File.findPass(testCase.name)
if c1Pass is None:
- Logger.fail("Test case \"{}\" not found in the CFG file".format(testCase.name),
- testCase.fileName, testCase.startLineNo)
+ Logger.fail("Test case not found in the CFG file",
+ testCase.fileName, testCase.startLineNo, testCase.name)
Logger.startTest(testCase.name)
try:
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 2533ce2..38b6ea6 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -267,6 +267,24 @@
"libcore.util.NativeAllocationRegistryTest#testNullArguments"]
},
{
+ description: "libnativehelper_compat_libc++.so not found by dlopen on ARM64",
+ result: EXEC_FAILED,
+ modes: [device],
+ bug: 28082914,
+ names: ["libcore.java.lang.ThreadTest#testContextClassLoaderIsInherited",
+ "libcore.java.lang.ThreadTest#testContextClassLoaderIsNotNull",
+ "libcore.java.lang.ThreadTest#testGetAllStackTracesIncludesAllGroups",
+ "libcore.java.lang.ThreadTest#testGetStackTrace",
+ "libcore.java.lang.ThreadTest#testJavaContextClassLoader",
+ "libcore.java.lang.ThreadTest#testLeakingStartedThreads",
+ "libcore.java.lang.ThreadTest#testLeakingUnstartedThreads",
+ "libcore.java.lang.ThreadTest#testNativeThreadNames",
+ "libcore.java.lang.ThreadTest#testThreadInterrupted",
+ "libcore.java.lang.ThreadTest#testThreadSleep",
+ "libcore.java.lang.ThreadTest#testThreadSleepIllegalArguments",
+ "libcore.java.lang.ThreadTest#testThreadWakeup"]
+},
+{
description: "Only work with --mode=activity",
result: EXEC_FAILED,
names: [ "libcore.java.io.FileTest#testJavaIoTmpdirMutable" ]