Revert "Revert "Write conflict tables in image""
Added test.
Bug: 27906566
This reverts commit 8e2478d23e89a7022c93ddc608dcbba7b29b91e6.
(cherry picked from commit cdca476bf3394ce9d97a369e84e701b427009318)
Change-Id: Ia94fc7acc7ae3879921c4e8d0a02b156037ac286
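
This change stores IMT conflict tables in the boot/app image: ImtConflictTable becomes pointer-size-agnostic so the image writer can lay tables out for either target word size, new kBinIMTConflictTable and kBinRuntimeMethod bins get matching image sections, and patchoat plus the image-space loader learn to relocate the table entries. As a hedged, standalone sketch (mirroring the arithmetic in the art_method.h hunk below, not code from this patch), the size of a table works out as:

#include <cstddef>
#include <cstdio>
using std::size_t;

// Each entry is an <interface method, implementation method> pair of
// target-width words; a null pair terminates the table.
size_t EntrySize(size_t pointer_size) {
  return pointer_size * 2;
}

size_t ComputeSize(size_t num_entries, size_t pointer_size) {
  return (num_entries + 1) * EntrySize(pointer_size);  // +1 for the null marker.
}

int main() {
  // Three conflicts: 32 bytes on a 32-bit target, 64 bytes on a 64-bit one.
  std::printf("%zu %zu\n", ComputeSize(3, 4u), ComputeSize(3, 8u));
  return 0;
}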
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index f75a252..bf29e1c 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -180,6 +180,7 @@
isa,
instruction_set_features_.get(),
/* boot_image */ true,
+ /* app_image */ false,
GetImageClasses(),
GetCompiledClasses(),
GetCompiledMethods(),
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 9e0c22c..6863f42 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -36,6 +36,7 @@
/* instruction_set_ */ kNone,
/* instruction_set_features */ nullptr,
/* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index be82956..c4cd7b9 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -341,6 +341,7 @@
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
bool boot_image,
+ bool app_image,
std::unordered_set<std::string>* image_classes,
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
@@ -363,6 +364,7 @@
compiled_methods_(MethodTable::key_compare()),
non_relative_linker_patch_count_(0u),
boot_image_(boot_image),
+ app_image_(app_image),
image_classes_(image_classes),
classes_to_compile_(compiled_classes),
methods_to_compile_(compiled_methods),
@@ -2440,9 +2442,12 @@
context.ForAll(0, dex_file.NumClassDefs(), &visitor, init_thread_count);
}
-class InitializeArrayClassVisitor : public ClassVisitor {
+class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor {
public:
virtual bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
+ return true;
+ }
if (klass->IsArrayClass()) {
StackHandleScope<1> hs(Thread::Current());
Runtime::Current()->GetClassLinker()->EnsureInitialized(hs.Self(),
@@ -2450,6 +2455,10 @@
true,
true);
}
+ // Create the conflict tables.
+ if (!klass->IsTemp() && klass->ShouldHaveEmbeddedImtAndVTable()) {
+ Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass);
+ }
return true;
}
};
@@ -2462,13 +2471,15 @@
CHECK(dex_file != nullptr);
InitializeClasses(class_loader, *dex_file, dex_files, timings);
}
- {
+ if (boot_image_ || app_image_) {
// Make sure that we call EnsureInitialized on all the array classes to call
// SetVerificationAttempted so that the access flags are set. If we do not do this they get
// changed at runtime resulting in more dirty image pages.
+ // Also create conflict tables.
+ // Only useful if we are compiling an image (image_classes_ is not null).
ScopedObjectAccess soa(Thread::Current());
- InitializeArrayClassVisitor visitor;
- Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
+ InitializeArrayClassesAndCreateConflictTablesVisitor visitor;
+ Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&visitor);
}
if (IsBootImage()) {
// Prune garbage objects created during aborted transactions.
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index d63dffa..19a1ecc 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -92,6 +92,7 @@
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
bool boot_image,
+ bool app_image,
std::unordered_set<std::string>* image_classes,
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
@@ -652,6 +653,7 @@
size_t non_relative_linker_patch_count_ GUARDED_BY(compiled_methods_lock_);
const bool boot_image_;
+ const bool app_image_;
// If image_ is true, specifies the classes that will be included in the image.
// Note if image_classes_ is null, all classes are included in the image.
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 8bb462c..00ff522 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -653,8 +653,7 @@
for (ImageInfo& image_info : image_infos_) {
ImageSection unused_sections[ImageHeader::kSectionCount];
const size_t length = RoundUp(
- image_info.CreateImageSections(target_ptr_size_, unused_sections),
- kPageSize);
+ image_info.CreateImageSections(unused_sections), kPageSize);
std::string error_msg;
image_info.image_.reset(MemMap::MapAnonymous("image writer image",
@@ -1214,6 +1213,20 @@
AssignMethodOffset(&m, type, oat_index);
}
(any_dirty ? dirty_methods_ : clean_methods_) += num_methods;
+
+ // Assign offsets for all runtime methods in the IMT since these may hold conflict tables
+ // live.
+ if (as_klass->ShouldHaveEmbeddedImtAndVTable()) {
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ ArtMethod* imt_method = as_klass->GetEmbeddedImTableEntry(i, target_ptr_size_);
+ DCHECK(imt_method != nullptr);
+ if (imt_method->IsRuntimeMethod() &&
+ !IsInBootImage(imt_method) &&
+ !NativeRelocationAssigned(imt_method)) {
+ AssignMethodOffset(imt_method, kNativeObjectRelocationTypeRuntimeMethod, oat_index);
+ }
+ }
+ }
}
} else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
@@ -1237,13 +1250,37 @@
}
}
+bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
+ return native_object_relocations_.find(ptr) != native_object_relocations_.end();
+}
+
+void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
+ // No offset, or already assigned.
+ if (table == nullptr || NativeRelocationAssigned(table)) {
+ return;
+ }
+ CHECK(!IsInBootImage(table));
+ // If the method is a conflict method we also want to assign the conflict table offset.
+ ImageInfo& image_info = GetImageInfo(oat_index);
+ const size_t size = table->ComputeSize(target_ptr_size_);
+ native_object_relocations_.emplace(
+ table,
+ NativeObjectRelocation {
+ oat_index,
+ image_info.bin_slot_sizes_[kBinIMTConflictTable],
+ kNativeObjectRelocationTypeIMTConflictTable});
+ image_info.bin_slot_sizes_[kBinIMTConflictTable] += size;
+}
+
void ImageWriter::AssignMethodOffset(ArtMethod* method,
NativeObjectRelocationType type,
size_t oat_index) {
DCHECK(!IsInBootImage(method));
- auto it = native_object_relocations_.find(method);
- CHECK(it == native_object_relocations_.end()) << "Method " << method << " already assigned "
+ CHECK(!NativeRelocationAssigned(method)) << "Method " << method << " already assigned "
<< PrettyMethod(method);
+ if (method->IsRuntimeMethod()) {
+ TryAssignConflictTableOffset(method->GetImtConflictTable(target_ptr_size_), oat_index);
+ }
ImageInfo& image_info = GetImageInfo(oat_index);
size_t& offset = image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(type)];
native_object_relocations_.emplace(method, NativeObjectRelocation { oat_index, offset, type });
@@ -1292,8 +1329,7 @@
// know where image_roots is going to end up
image_objects_offset_begin_ = RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment
- // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
- heap->VisitObjects(WalkFieldsCallback, this);
+ const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
// Write the image runtime methods.
image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod();
image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod();
@@ -1303,31 +1339,19 @@
runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
image_methods_[ImageHeader::kRefsAndArgsSaveMethod] =
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
-
- // Add room for fake length prefixed array for holding the image methods.
- const auto image_method_type = kNativeObjectRelocationTypeArtMethodArrayClean;
- auto it = native_object_relocations_.find(&image_method_array_);
- CHECK(it == native_object_relocations_.end());
- ImageInfo& default_image_info = GetImageInfo(GetDefaultOatIndex());
- size_t& offset =
- default_image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
- if (!compile_app_image_) {
- native_object_relocations_.emplace(&image_method_array_,
- NativeObjectRelocation { GetDefaultOatIndex(), offset, image_method_type });
- }
- size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
- const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize(
- 0, ArtMethod::Size(target_ptr_size_), method_alignment);
- CHECK_ALIGNED_PARAM(array_size, method_alignment);
- offset += array_size;
+ // Visit image methods first to have the main runtime methods in the first image.
for (auto* m : image_methods_) {
CHECK(m != nullptr);
CHECK(m->IsRuntimeMethod());
DCHECK_EQ(compile_app_image_, IsInBootImage(m)) << "Trampolines should be in boot image";
if (!IsInBootImage(m)) {
- AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean, GetDefaultOatIndex());
+ AssignMethodOffset(m, kNativeObjectRelocationTypeRuntimeMethod, GetDefaultOatIndex());
}
}
+
+ // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
+ heap->VisitObjects(WalkFieldsCallback, this);
+
// Calculate size of the dex cache arrays slot and prepare offsets.
PrepareDexCacheArraySlots();
@@ -1346,15 +1370,22 @@
for (ImageInfo& image_info : image_infos_) {
size_t bin_offset = image_objects_offset_begin_;
for (size_t i = 0; i != kBinSize; ++i) {
+ switch (i) {
+ case kBinArtMethodClean:
+ case kBinArtMethodDirty: {
+ bin_offset = RoundUp(bin_offset, method_alignment);
+ break;
+ }
+ case kBinIMTConflictTable: {
+ bin_offset = RoundUp(bin_offset, target_ptr_size_);
+ break;
+ }
+ default: {
+ // Normal alignment.
+ }
+ }
image_info.bin_slot_offsets_[i] = bin_offset;
bin_offset += image_info.bin_slot_sizes_[i];
- if (i == kBinArtField) {
- static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields.");
- static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4.");
- DCHECK_ALIGNED(bin_offset, 4u);
- DCHECK(method_alignment == 4u || method_alignment == 8u);
- bin_offset = RoundUp(bin_offset, method_alignment);
- }
}
// NOTE: There may be additional padding between the bin slots and the intern table.
DCHECK_EQ(image_info.image_end_,
@@ -1367,9 +1398,7 @@
image_info.image_begin_ = global_image_begin_ + image_offset;
image_info.image_offset_ = image_offset;
ImageSection unused_sections[ImageHeader::kSectionCount];
- image_info.image_size_ = RoundUp(
- image_info.CreateImageSections(target_ptr_size_, unused_sections),
- kPageSize);
+ image_info.image_size_ = RoundUp(image_info.CreateImageSections(unused_sections), kPageSize);
// There should be no gaps until the next image.
image_offset += image_info.image_size_;
}
@@ -1396,42 +1425,52 @@
// Note that image_info.image_end_ is left at end of used mirror object section.
}
-size_t ImageWriter::ImageInfo::CreateImageSections(size_t target_ptr_size,
- ImageSection* out_sections) const {
+size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const {
DCHECK(out_sections != nullptr);
+
+ // Do not round up any sections here that are represented by the bins, since doing so would
+ // break the bin offsets.
+
// Objects section
- auto* objects_section = &out_sections[ImageHeader::kSectionObjects];
+ ImageSection* objects_section = &out_sections[ImageHeader::kSectionObjects];
*objects_section = ImageSection(0u, image_end_);
- size_t cur_pos = objects_section->End();
+
// Add field section.
- auto* field_section = &out_sections[ImageHeader::kSectionArtFields];
- *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]);
+ ImageSection* field_section = &out_sections[ImageHeader::kSectionArtFields];
+ *field_section = ImageSection(bin_slot_offsets_[kBinArtField], bin_slot_sizes_[kBinArtField]);
CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset());
- cur_pos = field_section->End();
- // Round up to the alignment the required by the method section.
- cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size));
+
// Add method section.
- auto* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
- *methods_section = ImageSection(cur_pos,
- bin_slot_sizes_[kBinArtMethodClean] +
- bin_slot_sizes_[kBinArtMethodDirty]);
- CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
- cur_pos = methods_section->End();
+ ImageSection* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
+ *methods_section = ImageSection(
+ bin_slot_offsets_[kBinArtMethodClean],
+ bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]);
+
+ // Conflict tables section.
+ ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
+ *imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable],
+ bin_slot_sizes_[kBinIMTConflictTable]);
+
+ // Runtime methods section.
+ ImageSection* runtime_methods_section = &out_sections[ImageHeader::kSectionRuntimeMethods];
+ *runtime_methods_section = ImageSection(bin_slot_offsets_[kBinRuntimeMethod],
+ bin_slot_sizes_[kBinRuntimeMethod]);
+
// Add dex cache arrays section.
- auto* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
- *dex_cache_arrays_section = ImageSection(cur_pos, bin_slot_sizes_[kBinDexCacheArray]);
- CHECK_EQ(bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset());
- cur_pos = dex_cache_arrays_section->End();
+ ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
+ *dex_cache_arrays_section = ImageSection(bin_slot_offsets_[kBinDexCacheArray],
+ bin_slot_sizes_[kBinDexCacheArray]);
+
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
- cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
+ size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t));
// Calculate the size of the interned strings.
- auto* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
+ ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
*interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
cur_pos = interned_strings_section->End();
// Round up to the alignment the class table expects. See HashSet::WriteToMemory.
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
// Calculate the size of the class table section.
- auto* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
+ ImageSection* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
*class_table_section = ImageSection(cur_pos, class_table_bytes_);
cur_pos = class_table_section->End();
// Image end goes right before the start of the image bitmap.
@@ -1446,7 +1485,7 @@
// Create the image sections.
ImageSection sections[ImageHeader::kSectionCount];
- const size_t image_end = image_info.CreateImageSections(target_ptr_size_, sections);
+ const size_t image_end = image_info.CreateImageSections(sections);
// Finally bitmap section.
const size_t bitmap_bytes = image_info.image_bitmap_->Size();
@@ -1531,8 +1570,20 @@
ImageWriter* const image_writer_;
};
+void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
+ const size_t count = orig->NumEntries(target_ptr_size_);
+ for (size_t i = 0; i < count; ++i) {
+ ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_);
+ ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_);
+ copy->SetInterfaceMethod(i, target_ptr_size_, NativeLocationInImage(interface_method));
+ copy->SetImplementationMethod(i,
+ target_ptr_size_,
+ NativeLocationInImage(implementation_method));
+ }
+}
+
void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
- ImageInfo& image_info = GetImageInfo(oat_index);
+ const ImageInfo& image_info = GetImageInfo(oat_index);
// Copy ArtFields and methods to their locations and update the array for convenience.
for (auto& pair : native_object_relocations_) {
NativeObjectRelocation& relocation = pair.second;
@@ -1550,6 +1601,7 @@
GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass()));
break;
}
+ case kNativeObjectRelocationTypeRuntimeMethod:
case kNativeObjectRelocationTypeArtMethodClean:
case kNativeObjectRelocationTypeArtMethodDirty: {
CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first),
@@ -1575,26 +1627,22 @@
case kNativeObjectRelocationTypeDexCacheArray:
// Nothing to copy here, everything is done in FixupDexCache().
break;
+ case kNativeObjectRelocationTypeIMTConflictTable: {
+ auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
+ CopyAndFixupImtConflictTable(
+ orig_table,
+ new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_));
+ break;
+ }
}
}
// Fixup the image method roots.
auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
- const ImageSection& methods_section = image_header->GetMethodsSection();
for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
ArtMethod* method = image_methods_[i];
CHECK(method != nullptr);
- // Only place runtime methods in the image of the default oat file.
- if (method->IsRuntimeMethod() && oat_index != GetDefaultOatIndex()) {
- continue;
- }
if (!IsInBootImage(method)) {
- auto it = native_object_relocations_.find(method);
- CHECK(it != native_object_relocations_.end()) << "No forwarding for " << PrettyMethod(method);
- NativeObjectRelocation& relocation = it->second;
- CHECK(methods_section.Contains(relocation.offset)) << relocation.offset << " not in "
- << methods_section;
- CHECK(relocation.IsArtMethodRelocation()) << relocation.type;
- method = reinterpret_cast<ArtMethod*>(global_image_begin_ + it->second.offset);
+ method = NativeLocationInImage(method);
}
image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), method);
}
@@ -2057,24 +2105,28 @@
// The resolution method has a special trampoline to call.
Runtime* runtime = Runtime::Current();
- if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
- copy->SetEntryPointFromQuickCompiledCodePtrSize(
- GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_);
- } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
- orig == runtime->GetImtUnimplementedMethod())) {
- copy->SetEntryPointFromQuickCompiledCodePtrSize(
- GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_);
- } else if (UNLIKELY(orig->IsRuntimeMethod())) {
- bool found_one = false;
- for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) {
- auto idx = static_cast<Runtime::CalleeSaveType>(i);
- if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
- found_one = true;
- break;
+ if (orig->IsRuntimeMethod()) {
+ ImtConflictTable* orig_table = orig->GetImtConflictTable(target_ptr_size_);
+ if (orig_table != nullptr) {
+ // Special IMT conflict method, normal IMT conflict method or unimplemented IMT method.
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(
+ GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_);
+ copy->SetImtConflictTable(NativeLocationInImage(orig_table), target_ptr_size_);
+ } else if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(
+ GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_);
+ } else {
+ bool found_one = false;
+ for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) {
+ auto idx = static_cast<Runtime::CalleeSaveType>(i);
+ if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
+ found_one = true;
+ break;
+ }
}
+ CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig);
+ CHECK(copy->IsRuntimeMethod());
}
- CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig);
- CHECK(copy->IsRuntimeMethod());
} else {
// We assume all methods have code. If they don't currently then we set them to the use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
@@ -2141,6 +2193,10 @@
return kBinArtMethodDirty;
case kNativeObjectRelocationTypeDexCacheArray:
return kBinDexCacheArray;
+ case kNativeObjectRelocationTypeRuntimeMethod:
+ return kBinRuntimeMethod;
+ case kNativeObjectRelocationTypeIMTConflictTable:
+ return kBinIMTConflictTable;
}
UNREACHABLE();
}
@@ -2242,7 +2298,6 @@
compile_app_image_(compile_app_image),
target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
image_infos_(oat_filenames.size()),
- image_method_array_(ImageHeader::kImageMethodsCount),
dirty_methods_(0u),
clean_methods_(0u),
image_storage_mode_(image_storage_mode),
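
The bin-offset loop above replaces the old field/method special case: each bin's offset is rounded up only to the alignment its contents need, and CreateImageSections later reads the sections straight from bin_slot_offsets_ without re-rounding. A minimal standalone sketch of that pass (simplified bin set and a zero base, not ART code):

#include <cstddef>
using std::size_t;

constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

enum Bin { kBinObjects, kBinArtMethodClean, kBinIMTConflictTable, kBinCount };

void AssignBinOffsets(const size_t sizes[kBinCount], size_t offsets[kBinCount],
                      size_t method_alignment, size_t pointer_size) {
  size_t bin_offset = 0;  // image_objects_offset_begin_ in the real code.
  for (size_t i = 0; i != kBinCount; ++i) {
    if (i == kBinArtMethodClean) {
      bin_offset = RoundUp(bin_offset, method_alignment);  // Methods need method alignment.
    } else if (i == kBinIMTConflictTable) {
      bin_offset = RoundUp(bin_offset, pointer_size);  // Tables only need word alignment.
    }
    offsets[i] = bin_offset;
    bin_offset += sizes[i];
  }
}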
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 0cb6aea..51976c5 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -169,6 +169,10 @@
// ArtMethods may be dirty if the class has native methods or a declaring class that isn't
// initialized.
kBinArtMethodDirty,
+ // Conflict tables (clean).
+ kBinIMTConflictTable,
+ // Runtime methods (always clean, do not have a length prefix array).
+ kBinRuntimeMethod,
// Dex cache arrays have a special slot for PC-relative addressing. Since they are
// huge, and as such their dirtiness is not important for the clean/dirty separation,
// we arbitrarily keep them at the end of the native data.
@@ -186,6 +190,8 @@
kNativeObjectRelocationTypeArtMethodArrayClean,
kNativeObjectRelocationTypeArtMethodDirty,
kNativeObjectRelocationTypeArtMethodArrayDirty,
+ kNativeObjectRelocationTypeRuntimeMethod,
+ kNativeObjectRelocationTypeIMTConflictTable,
kNativeObjectRelocationTypeDexCacheArray,
};
friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
@@ -240,7 +246,7 @@
// Create the image sections into the out sections variable, returns the size of the image
// excluding the bitmap.
- size_t CreateImageSections(size_t target_ptr_size, ImageSection* out_sections) const;
+ size_t CreateImageSections(ImageSection* out_sections) const;
std::unique_ptr<MemMap> image_; // Memory mapped for generating the image.
@@ -395,6 +401,8 @@
void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupClass(mirror::Class* orig, mirror::Class* copy)
SHARED_REQUIRES(Locks::mutator_lock_);
void FixupObject(mirror::Object* orig, mirror::Object* copy)
@@ -425,6 +433,11 @@
size_t oat_index)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
+ // relocation.
+ void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Return true if klass is loaded by the boot class loader but not in the boot image.
bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -481,6 +494,9 @@
// remove duplicates in the multi image and app image case.
mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);
+ // Return true if a native relocation has already been assigned for an object.
+ bool NativeRelocationAssigned(void* ptr) const;
+
const CompilerDriver& compiler_driver_;
// Beginning target image address for the first image.
@@ -517,16 +533,14 @@
bool IsArtMethodRelocation() const {
return type == kNativeObjectRelocationTypeArtMethodClean ||
- type == kNativeObjectRelocationTypeArtMethodDirty;
+ type == kNativeObjectRelocationTypeArtMethodDirty ||
+ type == kNativeObjectRelocationTypeRuntimeMethod;
}
};
std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
// Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
- // Fake length prefixed array for image methods. This array does not contain the actual
- // ArtMethods. We only use it for the header and relocation addresses.
- LengthPrefixedArray<ArtMethod> image_method_array_;
// Counters for measurements, used for logging only.
uint64_t dirty_methods_;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 5de9842..c2d7ff7 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -155,7 +155,8 @@
Compiler::kOptimizing,
instruction_set,
instruction_set_features_.get(),
- /* image */ false,
+ /* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index c07de79..ec69107 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -51,6 +51,7 @@
instruction_set,
/* instruction_set_features */ nullptr,
/* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 73b16d5..5b19284 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -112,6 +112,7 @@
insn_set,
insn_features_.get(),
/* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 370583e..be38336 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1526,6 +1526,7 @@
instruction_set_,
instruction_set_features_.get(),
IsBootImage(),
+ IsAppImage(),
image_classes_.release(),
compiled_classes_.release(),
/* compiled_methods */ nullptr,
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 3c6a05d..6ecfc74 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1416,11 +1416,10 @@
indent_os << "\n";
// TODO: Dump fields.
// Dump methods after.
- const auto& methods_section = image_header_.GetMethodsSection();
DumpArtMethodVisitor visitor(this);
- methods_section.VisitPackedArtMethods(&visitor,
- image_space_.Begin(),
- image_header_.GetPointerSize());
+ image_header_.VisitPackedArtMethods(&visitor,
+ image_space_.Begin(),
+ image_header_.GetPointerSize());
// Dump the large objects separately.
heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
indent_os << "\n";
@@ -1779,6 +1778,7 @@
DCHECK(method != nullptr);
const void* quick_oat_code_begin = GetQuickOatCodeBegin(method);
const void* quick_oat_code_end = GetQuickOatCodeEnd(method);
+ const size_t pointer_size = image_header_.GetPointerSize();
OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader));
if (method->IsNative()) {
@@ -1792,13 +1792,16 @@
image_header_.GetPointerSize())) {
indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code_begin);
}
- } else if (method->IsAbstract() ||
- method->IsCalleeSaveMethod() ||
- method->IsResolutionMethod() ||
- (method == Runtime::Current()->GetImtConflictMethod()) ||
- method->IsImtUnimplementedMethod() ||
- method->IsClassInitializer()) {
+ } else if (method->IsAbstract() || method->IsClassInitializer()) {
// Don't print information for these.
+ } else if (method->IsRuntimeMethod()) {
+ ImtConflictTable* table = method->GetImtConflictTable(image_header_.GetPointerSize());
+ if (table != nullptr) {
+ indent_os << "IMT conflict table " << table << " method: ";
+ for (size_t i = 0, count = table->NumEntries(pointer_size); i < count; ++i) {
+ indent_os << PrettyMethod(table->GetImplementationMethod(i, pointer_size)) << " ";
+ }
+ }
} else {
const DexFile::CodeItem* code_item = method->GetCodeItem();
size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 93e40af..0a7ffda 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -472,8 +472,7 @@
void PatchOat::PatchArtFields(const ImageHeader* image_header) {
PatchOatArtFieldVisitor visitor(this);
- const auto& section = image_header->GetImageSection(ImageHeader::kSectionArtFields);
- section.VisitPackedArtFields(&visitor, heap_->Begin());
+ image_header->VisitPackedArtFields(&visitor, heap_->Begin());
}
class PatchOatArtMethodVisitor : public ArtMethodVisitor {
@@ -490,10 +489,20 @@
};
void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
- const auto& section = image_header->GetMethodsSection();
const size_t pointer_size = InstructionSetPointerSize(isa_);
PatchOatArtMethodVisitor visitor(this);
- section.VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
+ image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
+}
+
+void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) {
+ const size_t pointer_size = InstructionSetPointerSize(isa_);
+ // We can safely walk the target image since the conflict tables are independent.
+ image_header->VisitPackedImtConflictTables(
+ [this](ArtMethod* method) {
+ return RelocatedAddressOfPointer(method);
+ },
+ image_->Begin(),
+ pointer_size);
}
class FixupRootVisitor : public RootVisitor {
@@ -627,6 +636,7 @@
PatchArtFields(image_header);
PatchArtMethods(image_header);
+ PatchImtConflictTables(image_header);
PatchInternedStrings(image_header);
PatchClassTable(image_header);
// Patch dex file int/long arrays which point to ArtFields.
@@ -725,6 +735,7 @@
RelocatedAddressOfPointer(object->GetDexCacheResolvedTypes(pointer_size)), pointer_size);
copy->SetEntryPointFromQuickCompiledCodePtrSize(RelocatedAddressOfPointer(
object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)), pointer_size);
+ // No special handling for IMT conflict table since all pointers are moved by the same offset.
copy->SetEntryPointFromJniPtrSize(RelocatedAddressOfPointer(
object->GetEntryPointFromJniPtrSize(pointer_size)), pointer_size);
}
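
PatchImtConflictTables above hands VisitPackedImtConflictTables a lambda that maps each ArtMethod* to its relocated address; ImtConflictTable::Visit writes an entry back only when the value changed. A hedged sketch of that relocation contract with a plain delta (RelocateByDelta is illustrative, not the patchoat API):

#include <cstdint>

struct ArtMethod;  // Opaque for this sketch.

// Relocate a pointer by a fixed image delta, as patchoat's
// RelocatedAddressOfPointer conceptually does.
template <typename T>
T* RelocateByDelta(T* ptr, intptr_t delta) {
  if (ptr == nullptr) {
    return nullptr;  // Null entries (the end marker) must stay null.
  }
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) + delta);
}

// Usage with the image-header visitor (see the image-inl.h hunk later in this diff):
//   image_header->VisitPackedImtConflictTables(
//       [delta](ArtMethod* m) { return RelocateByDelta(m, delta); },
//       image_begin, pointer_size);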
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 510ff1e..3ef837f 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -117,6 +117,8 @@
bool PatchImage(bool primary_image) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
+ void PatchImtConflictTables(const ImageHeader* image_header)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void PatchInternedStrings(const ImageHeader* image_header)
SHARED_REQUIRES(Locks::mutator_lock_);
void PatchClassTable(const ImageHeader* image_header)
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 75d9073..3cdff55 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -2010,14 +2010,14 @@
// that will create it: the runtime stub expects to be called by compiled code.
LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
- static ImtConflictTable::Entry empty_entry = { nullptr, nullptr };
- ImtConflictTable* empty_conflict_table = reinterpret_cast<ImtConflictTable*>(&empty_entry);
+ ImtConflictTable* empty_conflict_table =
+ Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
void* data = linear_alloc->Alloc(
self,
- ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table));
+ ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, sizeof(void*)));
ImtConflictTable* new_table = new (data) ImtConflictTable(
- empty_conflict_table, inf_contains, contains_amethod);
- conflict_method->SetImtConflictTable(new_table);
+ empty_conflict_table, inf_contains, contains_amethod, sizeof(void*));
+ conflict_method->SetImtConflictTable(new_table, sizeof(void*));
size_t result =
Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
diff --git a/runtime/art_method.h b/runtime/art_method.h
index ae60447..d239b42 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -41,6 +41,7 @@
namespace mirror {
class Array;
class Class;
+class IfTable;
class PointerArray;
} // namespace mirror
@@ -50,97 +51,151 @@
// with the last entry being null to make an assembly implementation of a lookup
// faster.
class ImtConflictTable {
+ enum MethodIndex {
+ kMethodInterface,
+ kMethodImplementation,
+ kMethodCount, // Number of elements in enum.
+ };
+
public:
// Build a new table copying `other` and adding the new entry formed of
// the pair { `interface_method`, `implementation_method` }
ImtConflictTable(ImtConflictTable* other,
ArtMethod* interface_method,
- ArtMethod* implementation_method) {
- size_t index = 0;
- while (other->entries_[index].interface_method != nullptr) {
- entries_[index] = other->entries_[index];
- index++;
+ ArtMethod* implementation_method,
+ size_t pointer_size) {
+ const size_t count = other->NumEntries(pointer_size);
+ for (size_t i = 0; i < count; ++i) {
+ SetInterfaceMethod(i, pointer_size, other->GetInterfaceMethod(i, pointer_size));
+ SetImplementationMethod(i, pointer_size, other->GetImplementationMethod(i, pointer_size));
}
- entries_[index].interface_method = interface_method;
- entries_[index].implementation_method = implementation_method;
+ SetInterfaceMethod(count, pointer_size, interface_method);
+ SetImplementationMethod(count, pointer_size, implementation_method);
// Add the null marker.
- entries_[index + 1].interface_method = nullptr;
- entries_[index + 1].implementation_method = nullptr;
+ SetInterfaceMethod(count + 1, pointer_size, nullptr);
+ SetImplementationMethod(count + 1, pointer_size, nullptr);
}
// num_entries excludes the null end marker.
- explicit ImtConflictTable(size_t num_entries) {
- entries_[num_entries].interface_method = nullptr;
- entries_[num_entries].implementation_method = nullptr;
+ ImtConflictTable(size_t num_entries, size_t pointer_size) {
+ SetInterfaceMethod(num_entries, pointer_size, nullptr);
+ SetImplementationMethod(num_entries, pointer_size, nullptr);
}
// Set an entry at an index.
- void SetInterfaceMethod(size_t index, ArtMethod* method) {
- entries_[index].interface_method = method;
+ void SetInterfaceMethod(size_t index, size_t pointer_size, ArtMethod* method) {
+ SetMethod(index * kMethodCount + kMethodInterface, pointer_size, method);
}
- void SetImplementationMethod(size_t index, ArtMethod* method) {
- entries_[index].implementation_method = method;
+ void SetImplementationMethod(size_t index, size_t pointer_size, ArtMethod* method) {
+ SetMethod(index * kMethodCount + kMethodImplementation, pointer_size, method);
}
- ArtMethod* GetInterfaceMethod(size_t index) const {
- return entries_[index].interface_method;
+ ArtMethod* GetInterfaceMethod(size_t index, size_t pointer_size) const {
+ return GetMethod(index * kMethodCount + kMethodInterface, pointer_size);
}
- ArtMethod* GetImplementationMethod(size_t index) const {
- return entries_[index].implementation_method;
+ ArtMethod* GetImplementationMethod(size_t index, size_t pointer_size) const {
+ return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
+ }
+
+ // Visit all of the entries.
+ // NO_THREAD_SAFETY_ANALYSIS for calling with held locks. Visitor is passed a pair of ArtMethod*
+ // and also returns one. The order is <interface, implementation>.
+ template<typename Visitor>
+ void Visit(const Visitor& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS {
+ uint32_t table_index = 0;
+ for (;;) {
+ ArtMethod* interface_method = GetInterfaceMethod(table_index, pointer_size);
+ if (interface_method == nullptr) {
+ break;
+ }
+ ArtMethod* implementation_method = GetImplementationMethod(table_index, pointer_size);
+ auto input = std::make_pair(interface_method, implementation_method);
+ std::pair<ArtMethod*, ArtMethod*> updated = visitor(input);
+ if (input.first != updated.first) {
+ SetInterfaceMethod(table_index, pointer_size, updated.first);
+ }
+ if (input.second != updated.second) {
+ SetImplementationMethod(table_index, pointer_size, updated.second);
+ }
+ ++table_index;
+ }
}
// Lookup the implementation ArtMethod associated with `interface_method`. Return null
// if not found.
- ArtMethod* Lookup(ArtMethod* interface_method) const {
+ ArtMethod* Lookup(ArtMethod* interface_method, size_t pointer_size) const {
uint32_t table_index = 0;
- ArtMethod* current_interface_method;
- while ((current_interface_method = entries_[table_index].interface_method) != nullptr) {
- if (current_interface_method == interface_method) {
- return entries_[table_index].implementation_method;
+ for (;;) {
+ ArtMethod* current_interface_method = GetInterfaceMethod(table_index, pointer_size);
+ if (current_interface_method == nullptr) {
+ break;
}
- table_index++;
+ if (current_interface_method == interface_method) {
+ return GetImplementationMethod(table_index, pointer_size);
+ }
+ ++table_index;
}
return nullptr;
}
// Compute the number of entries in this table.
- size_t NumEntries() const {
+ size_t NumEntries(size_t pointer_size) const {
uint32_t table_index = 0;
- while (entries_[table_index].interface_method != nullptr) {
- table_index++;
+ while (GetInterfaceMethod(table_index, pointer_size) != nullptr) {
+ ++table_index;
}
return table_index;
}
// Compute the size in bytes taken by this table.
- size_t ComputeSize() const {
+ size_t ComputeSize(size_t pointer_size) const {
// Add the end marker.
- return ComputeSize(NumEntries());
+ return ComputeSize(NumEntries(pointer_size), pointer_size);
}
// Compute the size in bytes needed to copy the given `table` and add
// one more entry.
- static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table) {
- return table->ComputeSize() + sizeof(Entry);
+ static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table, size_t pointer_size) {
+ return table->ComputeSize(pointer_size) + EntrySize(pointer_size);
}
// Compute size with a fixed number of entries.
- static size_t ComputeSize(size_t num_entries) {
- return (num_entries + 1) * sizeof(Entry); // Add one for null terminator.
+ static size_t ComputeSize(size_t num_entries, size_t pointer_size) {
+ return (num_entries + 1) * EntrySize(pointer_size); // Add one for null terminator.
}
- struct Entry {
- ArtMethod* interface_method;
- ArtMethod* implementation_method;
- };
+ static size_t EntrySize(size_t pointer_size) {
+ return pointer_size * static_cast<size_t>(kMethodCount);
+ }
private:
+ ArtMethod* GetMethod(size_t index, size_t pointer_size) const {
+ if (pointer_size == 8) {
+ return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
+ } else {
+ DCHECK_EQ(pointer_size, 4u);
+ return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data32_[index]));
+ }
+ }
+
+ void SetMethod(size_t index, size_t pointer_size, ArtMethod* method) {
+ if (pointer_size == 8) {
+ data64_[index] = dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
+ } else {
+ DCHECK_EQ(pointer_size, 4u);
+ data32_[index] = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
+ }
+ }
+
// Array of entries that the assembly stubs will iterate over. Note that this is
// not fixed size, and we allocate data prior to calling the constructor
// of ImtConflictTable.
- Entry entries_[0];
+ union {
+ uint32_t data32_[0];
+ uint64_t data64_[0];
+ };
DISALLOW_COPY_AND_ASSIGN(ImtConflictTable);
};
@@ -382,7 +437,6 @@
// Find the method that this method overrides.
ArtMethod* FindOverriddenMethod(size_t pointer_size)
- REQUIRES(Roles::uninterruptible_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Find the method index for this method within other_dexfile. If this method isn't present then
@@ -448,8 +502,8 @@
return reinterpret_cast<ImtConflictTable*>(GetEntryPointFromJniPtrSize(pointer_size));
}
- ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table) {
- SetEntryPointFromJniPtrSize(table, sizeof(void*));
+ ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, size_t pointer_size) {
+ SetEntryPointFromJniPtrSize(table, pointer_size);
}
ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) {
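
The rewritten ImtConflictTable above drops the fixed Entry struct for flat 32-/64-bit word arrays indexed by GetMethod/SetMethod, so every accessor now takes the target pointer size. A self-contained toy analogue of the storage and lookup scheme (host-width words only, not the ART class):

#include <cstddef>
#include <cstdint>
#include <vector>
using std::size_t;

// <interface, implementation> pairs as consecutive words, terminated by a
// null interface entry, as in the real table.
class FlatPairTable {
 public:
  explicit FlatPairTable(size_t num_entries) : words_((num_entries + 1) * 2, 0u) {}

  void SetInterface(size_t i, uintptr_t m) { words_[i * 2] = m; }
  void SetImplementation(size_t i, uintptr_t m) { words_[i * 2 + 1] = m; }

  // Mirrors ImtConflictTable::Lookup: linear scan until the null marker.
  uintptr_t Lookup(uintptr_t interface_method) const {
    for (size_t i = 0; words_[i * 2] != 0u; ++i) {
      if (words_[i * 2] == interface_method) {
        return words_[i * 2 + 1];
      }
    }
    return 0u;
  }

 private:
  std::vector<uintptr_t> words_;  // The real class overlays 32/64-bit arrays in place.
};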
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index ddb254d..f918606 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -687,6 +687,9 @@
self->AssertNoPendingException();
}
+ // Create conflict tables that depend on the class linker.
+ runtime->FixupConflictTables();
+
FinishInit(self);
VLOG(startup) << "ClassLinker::InitFromCompiler exiting";
@@ -773,9 +776,13 @@
bool contains = false;
for (gc::space::ImageSpace* space : spaces) {
auto& header = space->GetImageHeader();
- auto& methods = header.GetMethodsSection();
- auto offset = reinterpret_cast<uint8_t*>(m) - space->Begin();
- contains |= methods.Contains(offset);
+ size_t offset = reinterpret_cast<uint8_t*>(m) - space->Begin();
+
+ const ImageSection& methods = header.GetMethodsSection();
+ contains = contains || methods.Contains(offset);
+
+ const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
+ contains = contains || runtime_methods.Contains(offset);
}
CHECK(contains) << m << " not found";
}
@@ -1438,29 +1445,14 @@
if (*out_forward_dex_cache_array) {
ScopedTrace timing("Fixup ArtMethod dex cache arrays");
FixupArtMethodArrayVisitor visitor(header);
- header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &visitor,
- space->Begin(),
- sizeof(void*));
+ header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*));
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
}
if (kVerifyArtMethodDeclaringClasses) {
ScopedTrace timing("Verify declaring classes");
ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_);
VerifyDeclaringClassVisitor visitor;
- header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &visitor,
- space->Begin(),
- sizeof(void*));
- }
- if (kVerifyArtMethodDeclaringClasses) {
- ScopedTrace timing("Verify declaring classes");
- ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_);
- VerifyDeclaringClassVisitor visitor;
- header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &visitor,
- space->Begin(),
- sizeof(void*));
+ header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*));
}
return true;
}
@@ -1738,9 +1730,8 @@
// Set entry point to interpreter if in InterpretOnly mode.
if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
- const ImageSection& methods = header.GetMethodsSection();
SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_);
- methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
+ header.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
}
ClassTable* class_table = nullptr;
@@ -1809,10 +1800,7 @@
// This verification needs to happen after the classes have been added to the class loader.
// Since it ensures classes are in the class table.
VerifyClassInTableArtMethodVisitor visitor2(class_table);
- header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &visitor2,
- space->Begin(),
- sizeof(void*));
+ header.VisitPackedArtMethods(&visitor2, space->Begin(), sizeof(void*));
}
VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time);
return true;
@@ -5988,14 +5976,16 @@
// Allocate a new table. Note that we will leak this table at the next conflict,
// but that's a tradeoff compared to making the table fixed size.
void* data = linear_alloc->Alloc(
- Thread::Current(), ImtConflictTable::ComputeSizeWithOneMoreEntry(current_table));
+ Thread::Current(), ImtConflictTable::ComputeSizeWithOneMoreEntry(current_table,
+ image_pointer_size_));
if (data == nullptr) {
LOG(ERROR) << "Failed to allocate conflict table";
return conflict_method;
}
ImtConflictTable* new_table = new (data) ImtConflictTable(current_table,
interface_method,
- method);
+ method,
+ image_pointer_size_);
// Do a fence to ensure threads see the data in the table before it is assigned
// to the conflict method.
@@ -6003,7 +5993,7 @@
// memory from the LinearAlloc, but that's a tradeoff compared to using
// atomic operations.
QuasiAtomic::ThreadFenceRelease();
- new_conflict_method->SetImtConflictTable(new_table);
+ new_conflict_method->SetImtConflictTable(new_table, image_pointer_size_);
return new_conflict_method;
}
@@ -6035,18 +6025,53 @@
}
}
+void ClassLinker::FillIMTAndConflictTables(mirror::Class* klass) {
+ DCHECK(klass->ShouldHaveEmbeddedImtAndVTable()) << PrettyClass(klass);
+ DCHECK(!klass->IsTemp()) << PrettyClass(klass);
+ ArtMethod* imt[mirror::Class::kImtSize];
+ Runtime* const runtime = Runtime::Current();
+ ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod();
+ ArtMethod* const conflict_method = runtime->GetImtConflictMethod();
+ std::fill_n(imt, arraysize(imt), unimplemented_method);
+ if (klass->GetIfTable() != nullptr) {
+ FillIMTFromIfTable(klass->GetIfTable(),
+ unimplemented_method,
+ conflict_method,
+ klass,
+ true,
+ false,
+ &imt[0]);
+ }
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ klass->SetEmbeddedImTableEntry(i, imt[i], image_pointer_size_);
+ }
+}
+
static inline uint32_t GetIMTIndex(ArtMethod* interface_method)
SHARED_REQUIRES(Locks::mutator_lock_) {
return interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
}
-void ClassLinker::ConstructIMTFromIfTable(mirror::IfTable* if_table,
- ArtMethod* unimplemented_method,
- ArtMethod* imt_conflict_method,
- mirror::Class* klass,
- bool create_conflict_tables,
- bool ignore_copied_methods,
- ArtMethod** out_imt) {
+ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count,
+ LinearAlloc* linear_alloc,
+ size_t image_pointer_size) {
+ void* data = linear_alloc->Alloc(Thread::Current(),
+ ImtConflictTable::ComputeSize(count,
+ image_pointer_size));
+ return (data != nullptr) ? new (data) ImtConflictTable(count, image_pointer_size) : nullptr;
+}
+
+ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc) {
+ return CreateImtConflictTable(count, linear_alloc, image_pointer_size_);
+}
+
+void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
+ ArtMethod* unimplemented_method,
+ ArtMethod* imt_conflict_method,
+ mirror::Class* klass,
+ bool create_conflict_tables,
+ bool ignore_copied_methods,
+ ArtMethod** imt) {
uint32_t conflict_counts[mirror::Class::kImtSize] = {};
for (size_t i = 0, length = if_table->Count(); i < length; ++i) {
mirror::Class* interface = if_table->GetInterface(i);
@@ -6089,7 +6114,7 @@
SetIMTRef(unimplemented_method,
imt_conflict_method,
implementation_method,
- /*out*/&out_imt[imt_index]);
+ /*out*/&imt[imt_index]);
}
}
@@ -6098,24 +6123,22 @@
LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader());
for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
size_t conflicts = conflict_counts[i];
- if (conflicts > 1) {
- void* data = linear_alloc->Alloc(Thread::Current(),
- ImtConflictTable::ComputeSize(conflicts));
- if (data != nullptr) {
- ImtConflictTable* new_table = new (data) ImtConflictTable(conflicts);
- ArtMethod* new_conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
- new_conflict_method->SetImtConflictTable(new_table);
- out_imt[i] = new_conflict_method;
+ if (imt[i] == imt_conflict_method) {
+ ImtConflictTable* new_table = CreateImtConflictTable(conflicts, linear_alloc);
+ if (new_table != nullptr) {
+ ArtMethod* new_conflict_method =
+ Runtime::Current()->CreateImtConflictMethod(linear_alloc);
+ new_conflict_method->SetImtConflictTable(new_table, image_pointer_size_);
+ imt[i] = new_conflict_method;
} else {
LOG(ERROR) << "Failed to allocate conflict table";
- out_imt[i] = imt_conflict_method;
+ imt[i] = imt_conflict_method;
}
} else {
- DCHECK_NE(out_imt[i], imt_conflict_method);
+ DCHECK_NE(imt[i], imt_conflict_method);
}
}
- // No imt in the super class, need to reconstruct from the iftable.
for (size_t i = 0, length = if_table->Count(); i < length; ++i) {
mirror::Class* interface = if_table->GetInterface(i);
const size_t method_array_count = if_table->GetMethodArrayCount(i);
@@ -6133,18 +6156,15 @@
DCHECK(implementation_method != nullptr);
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
const uint32_t imt_index = GetIMTIndex(interface_method);
- if (conflict_counts[imt_index] <= 1) {
- continue; // Only care about the conflicts.
+ if (!imt[imt_index]->IsRuntimeMethod() ||
+ imt[imt_index] == unimplemented_method ||
+ imt[imt_index] == imt_conflict_method) {
+ continue;
}
- DCHECK_NE(out_imt[imt_index], unimplemented_method) << PrettyMethod(out_imt[imt_index]);
- DCHECK_NE(out_imt[imt_index], imt_conflict_method) << PrettyMethod(out_imt[imt_index]);
- DCHECK(out_imt[imt_index]->IsRuntimeMethod()) << PrettyMethod(out_imt[imt_index]);
- ImtConflictTable* table = out_imt[imt_index]->GetImtConflictTable(image_pointer_size_);
- // Add to the end of the conflict table.
- const size_t current_count = table->NumEntries();
- CHECK_LT(current_count, conflict_counts[imt_index]);
- table->SetInterfaceMethod(current_count, interface_method);
- table->SetImplementationMethod(current_count, implementation_method);
+ ImtConflictTable* table = imt[imt_index]->GetImtConflictTable(image_pointer_size_);
+ const size_t num_entries = table->NumEntries(image_pointer_size_);
+ table->SetInterfaceMethod(num_entries, image_pointer_size_, interface_method);
+ table->SetImplementationMethod(num_entries, image_pointer_size_, implementation_method);
}
}
}
@@ -6388,25 +6408,25 @@
void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass,
ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
- ArtMethod** out_imt) {
+ ArtMethod** imt) {
DCHECK(klass->HasSuperClass());
mirror::Class* super_class = klass->GetSuperClass();
if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- out_imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_);
+ imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_);
}
} else {
// No imt in the super class, need to reconstruct from the iftable.
mirror::IfTable* if_table = super_class->GetIfTable();
if (if_table != nullptr) {
// Ignore copied methods since we will handle these in LinkInterfaceMethods.
- ConstructIMTFromIfTable(if_table,
- unimplemented_method,
- imt_conflict_method,
- klass.Get(),
- /*create_conflict_table*/false,
- /*ignore_copied_methods*/true,
- out_imt);
+ FillIMTFromIfTable(if_table,
+ unimplemented_method,
+ imt_conflict_method,
+ klass.Get(),
+ /*create_conflict_table*/false,
+ /*ignore_copied_methods*/true,
+ /*out*/imt);
}
}
}
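
GetIMTIndex, used throughout the fill code above, is just the dex method index modulo the embedded IMT size; two interface methods landing in the same slot is what creates a conflict table in the first place. Sketch (the kImtSize value is assumed from this branch):

#include <cstdint>

constexpr uint32_t kImtSize = 64;  // Assumed value of mirror::Class::kImtSize here.

uint32_t GetIMTIndex(uint32_t dex_method_index) {
  return dex_method_index % kImtSize;  // Collisions in this slot become IMT conflicts.
}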
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 2743921..ece171c 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -53,6 +53,7 @@
class StackTraceElement;
} // namespace mirror
+class ImtConflictTable;
template<class T> class Handle;
template<class T> class MutableHandle;
class InternTable;
@@ -617,6 +618,19 @@
bool force_new_conflict_method)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Create a conflict table with a specified capacity.
+ ImtConflictTable* CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc);
+
+ // Static version for when the class linker is not yet created.
+ static ImtConflictTable* CreateImtConflictTable(size_t count,
+ LinearAlloc* linear_alloc,
+ size_t pointer_size);
+
+ // Create the IMT and conflict tables for a class.
+ void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+
struct DexCacheData {
// Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
// not work properly.
@@ -1073,18 +1087,18 @@
ArtMethod* current_method,
/*out*/ArtMethod** imt_ref) SHARED_REQUIRES(Locks::mutator_lock_);
- void ConstructIMTFromIfTable(mirror::IfTable* if_table,
- ArtMethod* unimplemented_method,
- ArtMethod* imt_conflict_method,
- mirror::Class* klass,
- bool create_conflict_tables,
- bool ignore_copied_methods,
- ArtMethod** out_imt) SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillIMTFromIfTable(mirror::IfTable* if_table,
+ ArtMethod* unimplemented_method,
+ ArtMethod* imt_conflict_method,
+ mirror::Class* klass,
+ bool create_conflict_tables,
+ bool ignore_copied_methods,
+ ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
void FillImtFromSuperClass(Handle<mirror::Class> klass,
ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
- ArtMethod** out_imt) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
std::vector<const DexFile*> boot_class_path_;
std::vector<std::unique_ptr<const DexFile>> boot_dex_files_;
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 278c4a3..e9cdbb7 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2174,7 +2174,8 @@
imt_index % mirror::Class::kImtSize, sizeof(void*));
if (LIKELY(conflict_method->IsRuntimeMethod())) {
ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
- method = current_table->Lookup(interface_method);
+ DCHECK(current_table != nullptr);
+ method = current_table->Lookup(interface_method, sizeof(void*));
} else {
// It seems we aren't really a conflict method!
method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*));
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index d386c74..1a33d1f 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -914,10 +914,26 @@
pointer_size_(pointer_size) {}
virtual void Visit(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS {
- if (fixup_heap_objects_) {
- method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this), pointer_size_);
+ // TODO: Separate visitor for runtime vs normal methods.
+ if (UNLIKELY(method->IsRuntimeMethod())) {
+ ImtConflictTable* table = method->GetImtConflictTable(pointer_size_);
+ if (table != nullptr) {
+ ImtConflictTable* new_table = ForwardObject(table);
+ if (table != new_table) {
+ method->SetImtConflictTable(new_table, pointer_size_);
+ }
+ }
+ const void* old_code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
+ const void* new_code = ForwardCode(old_code);
+ if (old_code != new_code) {
+ method->SetEntryPointFromQuickCompiledCodePtrSize(new_code, pointer_size_);
+ }
+ } else {
+ if (fixup_heap_objects_) {
+ method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this), pointer_size_);
+ }
+ method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this), pointer_size_);
}
- method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this), pointer_size_);
}
private:
@@ -1018,6 +1034,7 @@
const ImageSection& objects_section = image_header.GetImageSection(ImageHeader::kSectionObjects);
uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
+ FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat);
if (fixup_image) {
// Two pass approach, fix up all classes first, then fix up non class-objects.
// The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
@@ -1037,7 +1054,6 @@
ScopedObjectAccess soa(Thread::Current());
timing.NewTiming("Fixup objects");
bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
- FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat);
// Fixup image roots.
CHECK(app_image.InSource(reinterpret_cast<uintptr_t>(
image_header.GetImageRoots<kWithoutReadBarrier>())));
@@ -1104,19 +1120,18 @@
boot_oat,
app_image,
app_oat);
- image_header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &method_visitor,
- target_base,
- pointer_size);
+ image_header.VisitPackedArtMethods(&method_visitor, target_base, pointer_size);
}
if (fixup_image) {
{
// Only touches objects in the app image, no need for mutator lock.
TimingLogger::ScopedTiming timing("Fixup fields", &logger);
FixupArtFieldVisitor field_visitor(boot_image, boot_oat, app_image, app_oat);
- image_header.GetImageSection(ImageHeader::kSectionArtFields).VisitPackedArtFields(
- &field_visitor,
- target_base);
+ image_header.VisitPackedArtFields(&field_visitor, target_base);
+ }
+ {
+ TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
+ image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size);
}
// In the app image case, the image methods are actually in the boot image.
image_header.RelocateImageMethods(boot_image.Delta());
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index e3307d8..ea75a62 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -19,6 +19,8 @@
#include "image.h"
+#include "art_method.h"
+
namespace art {
template <ReadBarrierOption kReadBarrierOption>
@@ -42,6 +44,20 @@
return image_roots;
}
+template <typename Visitor>
+inline void ImageHeader::VisitPackedImtConflictTables(const Visitor& visitor,
+ uint8_t* base,
+ size_t pointer_size) const {
+ const ImageSection& section = GetImageSection(kSectionIMTConflictTables);
+ for (size_t pos = 0; pos < section.Size(); ) {
+ auto* table = reinterpret_cast<ImtConflictTable*>(base + section.Offset() + pos);
+ table->Visit([&visitor](const std::pair<ArtMethod*, ArtMethod*>& methods) {
+ return std::make_pair(visitor(methods.first), visitor(methods.second));
+ }, pointer_size);
+ pos += table->ComputeSize(pointer_size);
+ }
+}
+
} // namespace art
#endif // ART_RUNTIME_IMAGE_INL_H_
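
The visitor contract implied by the lambda above is per-row: the visitor receives a pair of method pointers and returns the (possibly forwarded) pair, and the outer loop then advances by each table's self-reported size. A hypothetical caller that relocates every method pointer in the section by a fixed delta (FixupObjectAdapter fills this role in image_space.cc above):

```cpp
// Hypothetical visitor: forward every ArtMethod* by a constant relocation
// delta, leaving null entries (the table sentinels) untouched.
struct ForwardByDelta {
  uintptr_t delta;
  ArtMethod* operator()(ArtMethod* method) const {
    return method == nullptr
        ? nullptr
        : reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t>(method) + delta);
  }
};

// image_header.VisitPackedImtConflictTables(ForwardByDelta{delta},
//                                           target_base,
//                                           sizeof(void*));
```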
diff --git a/runtime/image.cc b/runtime/image.cc
index 1f54e3e..a9552c2 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '7', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '9', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
@@ -147,9 +147,10 @@
return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End();
}
-void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
- for (size_t pos = 0; pos < Size(); ) {
- auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + Offset() + pos);
+void ImageHeader::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
+ const ImageSection& fields = GetFieldsSection();
+ for (size_t pos = 0; pos < fields.Size(); ) {
+ auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + fields.Offset() + pos);
for (size_t i = 0; i < array->size(); ++i) {
visitor->Visit(&array->At(i, sizeof(ArtField)));
}
@@ -157,18 +158,25 @@
}
}
-void ImageSection::VisitPackedArtMethods(ArtMethodVisitor* visitor,
- uint8_t* base,
- size_t pointer_size) const {
+void ImageHeader::VisitPackedArtMethods(ArtMethodVisitor* visitor,
+ uint8_t* base,
+ size_t pointer_size) const {
const size_t method_alignment = ArtMethod::Alignment(pointer_size);
const size_t method_size = ArtMethod::Size(pointer_size);
- for (size_t pos = 0; pos < Size(); ) {
- auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + Offset() + pos);
+ const ImageSection& methods = GetMethodsSection();
+ for (size_t pos = 0; pos < methods.Size(); ) {
+ auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + methods.Offset() + pos);
for (size_t i = 0; i < array->size(); ++i) {
visitor->Visit(&array->At(i, method_size, method_alignment));
}
pos += array->ComputeSize(array->size(), method_size, method_alignment);
}
+ const ImageSection& runtime_methods = GetRuntimeMethodsSection();
+ for (size_t pos = 0; pos < runtime_methods.Size(); ) {
+ auto* method = reinterpret_cast<ArtMethod*>(base + runtime_methods.Offset() + pos);
+ visitor->Visit(method);
+ pos += method_size;
+ }
}
} // namespace art
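
Two layout details are worth noting here: the methods section keeps its LengthPrefixedArray chunking, while the new runtime-methods section is a flat run of ArtMethod objects advanced by method_size alone; and because the section table grows (see the image.h hunk below), the on-disk format changed, which is what the kImageVersion bump from '027' to '029' above reflects. A minimal visitor exercising the widened contract, with the base-class shape assumed from the Visit(ArtMethod*) calls in this change:

```cpp
// Sketch: count every method the header now reports, runtime methods
// included. The ArtMethodVisitor interface is assumed from this diff.
class CountingMethodVisitor : public ArtMethodVisitor {
 public:
  void Visit(ArtMethod* method ATTRIBUTE_UNUSED) OVERRIDE {
    ++count_;
  }
  size_t count_ = 0;
};

// CountingMethodVisitor visitor;
// image_header.VisitPackedArtMethods(&visitor, image_space->Begin(), sizeof(void*));
```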
diff --git a/runtime/image.h b/runtime/image.h
index 8e5dbad..2ea9af7 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -64,12 +64,6 @@
return offset - offset_ < size_;
}
- // Visit ArtMethods in the section starting at base.
- void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const;
-
- // Visit ArtMethods in the section starting at base.
- void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;
-
private:
uint32_t offset_;
uint32_t size_;
@@ -200,6 +194,8 @@
kSectionObjects,
kSectionArtFields,
kSectionArtMethods,
+ kSectionRuntimeMethods,
+ kSectionIMTConflictTables,
kSectionDexCacheArrays,
kSectionInternedStrings,
kSectionClassTable,
@@ -211,10 +207,19 @@
void SetImageMethod(ImageMethod index, ArtMethod* method);
const ImageSection& GetImageSection(ImageSections index) const;
+
const ImageSection& GetMethodsSection() const {
return GetImageSection(kSectionArtMethods);
}
+ const ImageSection& GetRuntimeMethodsSection() const {
+ return GetImageSection(kSectionRuntimeMethods);
+ }
+
+ const ImageSection& GetFieldsSection() const {
+ return GetImageSection(ImageHeader::kSectionArtFields);
+ }
+
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Object* GetImageRoot(ImageRoot image_root) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -265,6 +270,19 @@
return boot_image_size_ != 0u;
}
+ // Visit ArtMethods in the section starting at base. Includes runtime methods.
+ // TODO: Delete base parameter if it is always equal to GetImageBegin.
+ void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const;
+
+  // Visit ArtFields in the section starting at base.
+ // TODO: Delete base parameter if it is always equal to GetImageBegin.
+ void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;
+
+ template <typename Visitor>
+ void VisitPackedImtConflictTables(const Visitor& visitor,
+ uint8_t* base,
+ size_t pointer_size) const;
+
private:
static const uint8_t kImageMagic[4];
static const uint8_t kImageVersion[4];
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a4d31ef..929bb5b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1613,18 +1613,19 @@
}
}
-static ImtConflictTable::Entry empty_entry = { nullptr, nullptr };
-
ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) {
- auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod(linear_alloc);
+ ClassLinker* const class_linker = GetClassLinker();
+ ArtMethod* method = class_linker->CreateRuntimeMethod(linear_alloc);
// When compiling, the code pointer will get set later when the image is loaded.
+ const size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
if (IsAotCompiler()) {
- size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
- method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry));
}
+ // Create empty conflict table.
+ method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc),
+ pointer_size);
return method;
}
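
Rather than aliasing every conflict method to the shared static empty_entry through a reinterpret_cast, each method now receives a real, per-LinearAlloc empty table, and SetImtConflictTable takes the pointer size explicitly. A hedged sketch of what the factory could look like; the three-argument signature matches the ClassLinker::CreateImtConflictTable call in FixupConflictTables below, but the body, the static ComputeSize overload, and the constructor are assumptions:

```cpp
// Sketch, not the real ClassLinker code: carve a zero-entry table (still
// holding its null-sentinel row) out of the given LinearAlloc.
ImtConflictTable* CreateImtConflictTable(size_t count,
                                         LinearAlloc* linear_alloc,
                                         size_t pointer_size) {
  const size_t size = ImtConflictTable::ComputeSize(count, pointer_size);  // assumed overload
  void* data = linear_alloc->Alloc(Thread::Current(), size);
  return new (data) ImtConflictTable(count, pointer_size);  // assumed constructor
}
```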
@@ -1632,9 +1633,6 @@
CHECK(method != nullptr);
CHECK(method->IsRuntimeMethod());
imt_conflict_method_ = method;
- if (!IsAotCompiler()) {
- method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry));
- }
}
ArtMethod* Runtime::CreateResolutionMethod() {
@@ -1944,8 +1942,20 @@
CHECK(method != nullptr);
CHECK(method->IsRuntimeMethod());
imt_unimplemented_method_ = method;
- if (!IsAotCompiler()) {
- method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry));
+}
+
+void Runtime::FixupConflictTables() {
+ // We can only do this after the class linker is created.
+ const size_t pointer_size = GetClassLinker()->GetImagePointerSize();
+ if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
+ imt_unimplemented_method_->SetImtConflictTable(
+ ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+ pointer_size);
+ }
+ if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) {
+ imt_conflict_method_->SetImtConflictTable(
+ ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+ pointer_size);
}
}
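
FixupConflictTables is idempotent by construction: the null checks leave any method that already owns a table, whether built by CreateImtConflictMethod or loaded from an image, untouched, and only table-less runtime methods get a fresh empty table once the class linker (and with it the image pointer size) exists. The post-condition it establishes, as a hypothetical check (accessor names assumed):

```cpp
// Hypothetical invariant check after FixupConflictTables() has run: both
// IMT runtime methods carry a (possibly empty) conflict table.
Runtime* runtime = Runtime::Current();
const size_t ptr_size = runtime->GetClassLinker()->GetImagePointerSize();
DCHECK(runtime->GetImtConflictMethod()->GetImtConflictTable(ptr_size) != nullptr);
DCHECK(runtime->GetImtUnimplementedMethod()->GetImtConflictTable(ptr_size) != nullptr);
```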
diff --git a/runtime/runtime.h b/runtime/runtime.h
index ae25dd1..b6a9125 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -383,6 +383,7 @@
return imt_conflict_method_ != nullptr;
}
+ void FixupConflictTables();
void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 56ef5aa..45aeb92 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -678,8 +678,10 @@
if (space->IsImageSpace()) {
auto* image_space = space->AsImageSpace();
const auto& header = image_space->GetImageHeader();
- const auto* methods = &header.GetMethodsSection();
- if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) {
+ const ImageSection& methods = header.GetMethodsSection();
+ const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
+ const size_t offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
+ if (methods.Contains(offset) || runtime_methods.Contains(offset)) {
in_image = true;
break;
}
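
With runtime methods split into their own section, the in-image test has to probe both method sections, but each probe remains the single unsigned comparison shown in the image.h hunk above: subtracting offset_ first makes offsets below the section start wrap around past size_, so one compare covers both bounds (the signature here is approximated):

```cpp
// From image.h above, signature approximated: one unsigned comparison
// rejects offsets both below and beyond the section.
bool ImageSection::Contains(uint64_t offset) const {
  return offset - offset_ < size_;
}
```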
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7922b60..cdbf995 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3010,7 +3010,6 @@
return count;
}
-
void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
DCHECK_EQ(GetException(), Thread::GetDeoptimizationException());
ClearException();
@@ -3031,4 +3030,11 @@
interpreter::EnterInterpreterFromDeoptimize(this, shadow_frame, from_code, result);
}
+void Thread::SetException(mirror::Throwable* new_exception) {
+ CHECK(new_exception != nullptr);
+ // TODO: DCHECK(!IsExceptionPending());
+ tlsPtr_.exception = new_exception;
+ // LOG(ERROR) << new_exception->Dump();
+}
+
} // namespace art
diff --git a/runtime/thread.h b/runtime/thread.h
index ed42e46..2092feb 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -363,12 +363,7 @@
void AssertNoPendingException() const;
void AssertNoPendingExceptionForNewException(const char* msg) const;
- void SetException(mirror::Throwable* new_exception)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- CHECK(new_exception != nullptr);
- // TODO: DCHECK(!IsExceptionPending());
- tlsPtr_.exception = new_exception;
- }
+ void SetException(mirror::Throwable* new_exception) SHARED_REQUIRES(Locks::mutator_lock_);
void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) {
tlsPtr_.exception = nullptr;
diff --git a/test/803-no-super/expected.txt b/test/803-no-super/expected.txt
new file mode 100644
index 0000000..5036991
--- /dev/null
+++ b/test/803-no-super/expected.txt
@@ -0,0 +1,2 @@
+java.lang.ClassNotFoundException: NoSuper1
+Done!
diff --git a/test/803-no-super/info.txt b/test/803-no-super/info.txt
new file mode 100644
index 0000000..0178a44
--- /dev/null
+++ b/test/803-no-super/info.txt
@@ -0,0 +1,3 @@
+Regression test checking that temp (erroneous) classes do not get conflict tables created.
+
+Obviously needs to run under Dalvik or ART.
diff --git a/test/803-no-super/smali/nosuper1.smali b/test/803-no-super/smali/nosuper1.smali
new file mode 100644
index 0000000..df2eaa5
--- /dev/null
+++ b/test/803-no-super/smali/nosuper1.smali
@@ -0,0 +1,3 @@
+.class public LNoSuper1;
+
+.super LNoClass;
diff --git a/test/803-no-super/src/Main.java b/test/803-no-super/src/Main.java
new file mode 100644
index 0000000..a07e042
--- /dev/null
+++ b/test/803-no-super/src/Main.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Attempt to load class with no superclass.
+ */
+public class Main {
+ public static void main(String[] args) throws Exception {
+ try {
+ Class<?> c = Class.forName("NoSuper1");
+ } catch (Exception e) {
+ System.out.println(e);
+ }
+ System.out.println("Done!");
+ }
+}