| author | 2016-06-15 17:44:14 +0000 |
|---|---|
| committer | 2016-06-15 17:44:14 +0000 |
| commit | abdda2fb52eb7e9a6916899a913f78251d8ed277 (patch) |
| tree | 165a31831ae077827b1e7b973bd5602125b5b142 |
| parent | 2b87a47c09823031c4fe336dcef0867605debfd7 (diff) |
| parent | badee9820fcf5dca5f8c46c3215ae1779ee7736e (diff) |
Merge "Optimize IMT"
27 files changed, 471 insertions(+), 179 deletions(-)
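This merge replaces the interface method table (IMT) that used to be embedded in every instantiable `mirror::Class` with a single pointer to a freestanding `ImTable`, so classes whose IMT is identical to their superclass's can share one table. A minimal standalone sketch of the layout difference; the type names are simplified stand-ins for `mirror::Class` and `ImTable`, and the 64-entry `IMT_SIZE` is an assumption (it is configured at build time):

```cpp
#include <cstddef>
#include <cstdio>

struct ArtMethod;  // Opaque stand-in.

constexpr size_t kImtSize = 64;  // Assumed IMT_SIZE; set by the ART build.

// Before: every instantiable class paid for a full table of method pointers.
struct ClassBefore {
  ArtMethod* embedded_imt[kImtSize];
  // ... embedded vtable and static fields follow ...
};

// After: every class carries one pointer; identical tables are shared with
// the superclass, and the table lives in the class loader's linear alloc.
struct ImTableSketch { ArtMethod* entries[kImtSize]; };
struct ClassAfter {
  ImTableSketch* imt;
  // ... embedded vtable and static fields follow ...
};

int main() {
  std::printf("IMT footprint per class: %zu -> %zu bytes (64-bit)\n",
              sizeof(ClassBefore), sizeof(ClassAfter));
}
```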
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a4b48892fb..131be37a33 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2522,11 +2522,28 @@ class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor
                       true);
     }
     // Create the conflict tables.
-    if (!klass->IsTemp() && klass->ShouldHaveEmbeddedImtAndVTable()) {
+    FillIMTAndConflictTables(klass);
+    return true;
+  }
+
+ private:
+  void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (!klass->ShouldHaveImt()) {
+      return;
+    }
+    if (visited_classes_.find(klass) != visited_classes_.end()) {
+      return;
+    }
+    if (klass->HasSuperClass()) {
+      FillIMTAndConflictTables(klass->GetSuperClass());
+    }
+    if (!klass->IsTemp()) {
       Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass);
     }
-    return true;
+    visited_classes_.insert(klass);
   }
+
+  std::set<mirror::Class*> visited_classes_;
 };

 void CompilerDriver::InitializeClasses(jobject class_loader,
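The visitor change above fills IMTs superclass-first and memoizes classes it has already processed. The same traversal pattern, sketched outside ART (`Node` is a hypothetical stand-in for `mirror::Class`):

```cpp
#include <set>

struct Node {
  Node* super = nullptr;
  bool filled = false;
};

// Ancestors are filled before descendants so that a subclass can later
// compare its freshly computed IMT against a superclass table that already
// exists; the visited set keeps the work linear even when many classes
// share the same root.
void FillRecursive(Node* n, std::set<Node*>* visited) {
  if (n == nullptr || visited->count(n) != 0) {
    return;
  }
  FillRecursive(n->super, visited);
  n->filled = true;  // Stand-in for ClassLinker::FillIMTAndConflictTables().
  visited->insert(n);
}
```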
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index da10568475..063eb11718 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1232,9 +1232,10 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
       }
       // Assign offsets for all runtime methods in the IMT since these may hold conflict tables
       // live.
-      if (as_klass->ShouldHaveEmbeddedImtAndVTable()) {
-        for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
-          ArtMethod* imt_method = as_klass->GetEmbeddedImTableEntry(i, target_ptr_size_);
+      if (as_klass->ShouldHaveImt()) {
+        ImTable* imt = as_klass->GetImt(target_ptr_size_);
+        for (size_t i = 0; i < ImTable::kSize; ++i) {
+          ArtMethod* imt_method = imt->Get(i, target_ptr_size_);
           DCHECK(imt_method != nullptr);
           if (imt_method->IsRuntimeMethod() &&
               !IsInBootImage(imt_method) &&
@@ -1243,6 +1244,11 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
           }
         }
       }
+
+      if (as_klass->ShouldHaveImt()) {
+        ImTable* imt = as_klass->GetImt(target_ptr_size_);
+        TryAssignImTableOffset(imt, oat_index);
+      }
     } else if (h_obj->IsObjectArray()) {
       // Walk elements of an object array.
       int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
@@ -1269,6 +1275,23 @@ bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
   return native_object_relocations_.find(ptr) != native_object_relocations_.end();
 }

+void ImageWriter::TryAssignImTableOffset(ImTable* imt, size_t oat_index) {
+  // No offset, or already assigned.
+  if (imt == nullptr || IsInBootImage(imt) || NativeRelocationAssigned(imt)) {
+    return;
+  }
+  // If the method is a conflict method we also want to assign the conflict table offset.
+  ImageInfo& image_info = GetImageInfo(oat_index);
+  const size_t size = ImTable::SizeInBytes(target_ptr_size_);
+  native_object_relocations_.emplace(
+      imt,
+      NativeObjectRelocation {
+          oat_index,
+          image_info.bin_slot_sizes_[kBinImTable],
+          kNativeObjectRelocationTypeIMTable});
+  image_info.bin_slot_sizes_[kBinImTable] += size;
+}
+
 void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
   // No offset, or already assigned.
   if (table == nullptr || NativeRelocationAssigned(table)) {
@@ -1391,6 +1414,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
         bin_offset = RoundUp(bin_offset, method_alignment);
         break;
       }
+      case kBinImTable:
       case kBinIMTConflictTable: {
         bin_offset = RoundUp(bin_offset, target_ptr_size_);
         break;
@@ -1461,6 +1485,10 @@ size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) c
                    bin_slot_offsets_[kBinArtMethodClean],
                    bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]);

+  // IMT section.
+  ImageSection* imt_section = &out_sections[ImageHeader::kSectionImTables];
+  *imt_section = ImageSection(bin_slot_offsets_[kBinImTable], bin_slot_sizes_[kBinImTable]);
+
   // Conflict tables section.
   ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
   *imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable],
@@ -1585,6 +1613,13 @@ class FixupRootVisitor : public RootVisitor {
   ImageWriter* const image_writer_;
 };

+void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) {
+  for (size_t i = 0; i < ImTable::kSize; ++i) {
+    ArtMethod* method = orig->Get(i, target_ptr_size_);
+    copy->Set(i, NativeLocationInImage(method), target_ptr_size_);
+  }
+}
+
 void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
   const size_t count = orig->NumEntries(target_ptr_size_);
   for (size_t i = 0; i < count; ++i) {
@@ -1642,6 +1677,12 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
       case kNativeObjectRelocationTypeDexCacheArray:
         // Nothing to copy here, everything is done in FixupDexCache().
         break;
+      case kNativeObjectRelocationTypeIMTable: {
+        ImTable* orig_imt = reinterpret_cast<ImTable*>(pair.first);
+        ImTable* dest_imt = reinterpret_cast<ImTable*>(dest);
+        CopyAndFixupImTable(orig_imt, dest_imt);
+        break;
+      }
       case kNativeObjectRelocationTypeIMTConflictTable: {
         auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
         CopyAndFixupImtConflictTable(
@@ -1850,13 +1891,25 @@ uintptr_t ImageWriter::NativeOffsetInImage(void* obj) {
 }

 template <typename T>
+std::string PrettyPrint(T* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+  std::ostringstream oss;
+  oss << ptr;
+  return oss.str();
+}
+
+template <>
+std::string PrettyPrint(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+  return PrettyMethod(method);
+}
+
+template <typename T>
 T* ImageWriter::NativeLocationInImage(T* obj) {
   if (obj == nullptr || IsInBootImage(obj)) {
     return obj;
   } else {
     auto it = native_object_relocations_.find(obj);
-    CHECK(it != native_object_relocations_.end()) << obj << " spaces "
-        << Runtime::Current()->GetHeap()->DumpSpaces();
+    CHECK(it != native_object_relocations_.end()) << obj << " " << PrettyPrint(obj)
+        << " spaces " << Runtime::Current()->GetHeap()->DumpSpaces();
     const NativeObjectRelocation& relocation = it->second;
     ImageInfo& image_info = GetImageInfo(relocation.oat_index);
     return reinterpret_cast<T*>(image_info.image_begin_ + relocation.offset);
@@ -2210,6 +2263,8 @@ ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocat
       return kBinDexCacheArray;
     case kNativeObjectRelocationTypeRuntimeMethod:
       return kBinRuntimeMethod;
+    case kNativeObjectRelocationTypeIMTable:
+      return kBinImTable;
    case kNativeObjectRelocationTypeIMTConflictTable:
       return kBinIMTConflictTable;
   }
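`TryAssignImTableOffset` above follows the image writer's usual bin-slot pattern: each newly discovered native object records the current end of its bin and then grows the bin by its own size; the accumulated total later becomes the `kSectionImTables` section. A self-contained sketch of that bookkeeping, with hypothetical names:

```cpp
#include <cstddef>
#include <map>

std::map<const void*, size_t> relocations;  // native object -> offset in bin
size_t im_table_bin_size = 0;               // becomes the section's total size

// Mirrors the shape of TryAssignImTableOffset: skip null or already-assigned
// tables, then append the table at the current end of the bin.
void TryAssignOffset(const void* table, size_t size_in_bytes) {
  if (table == nullptr || relocations.count(table) != 0) {
    return;
  }
  relocations.emplace(table, im_table_bin_size);
  im_table_bin_size += size_in_bytes;
}
```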
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 51976c511f..1efdc22c0a 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -169,6 +169,8 @@ class ImageWriter FINAL {
     // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
     // initialized.
     kBinArtMethodDirty,
+    // IMT (clean)
+    kBinImTable,
     // Conflict tables (clean).
     kBinIMTConflictTable,
     // Runtime methods (always clean, do not have a length prefix array).
@@ -191,6 +193,7 @@ class ImageWriter FINAL {
     kNativeObjectRelocationTypeArtMethodDirty,
     kNativeObjectRelocationTypeArtMethodArrayDirty,
     kNativeObjectRelocationTypeRuntimeMethod,
+    kNativeObjectRelocationTypeIMTable,
     kNativeObjectRelocationTypeIMTConflictTable,
     kNativeObjectRelocationTypeDexCacheArray,
   };
@@ -401,6 +404,7 @@ class ImageWriter FINAL {
   void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
       SHARED_REQUIRES(Locks::mutator_lock_);
+  void CopyAndFixupImTable(ImTable* orig, ImTable* copy) SHARED_REQUIRES(Locks::mutator_lock_);
   void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
       SHARED_REQUIRES(Locks::mutator_lock_);
   void FixupClass(mirror::Class* orig, mirror::Class* copy)
@@ -433,6 +437,8 @@ class ImageWriter FINAL {
                                     size_t oat_index)
       SHARED_REQUIRES(Locks::mutator_lock_);

+  void TryAssignImTableOffset(ImTable* imt, size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
   // relocation.
   void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 6e74d082e0..4fc3b5434b 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1873,8 +1873,6 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
   LocationSummary* locations = invoke->GetLocations();
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   Register hidden_reg = locations->GetTemp(1).AsRegister<Register>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

@@ -1900,10 +1898,14 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+  __ LoadFromOffset(kLoadWord, temp, temp,
+      mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kArmPointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
+  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   uint32_t entry_point =
       ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value();
-  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   // LR = temp->GetEntryPoint();
   __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
   // LR();
@@ -6777,8 +6779,11 @@ void InstructionCodeGeneratorARM::VisitClassTableGet(HClassTableGet* instruction
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kArmPointerSize).SizeValue();
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
+    __ LoadFromOffset(kLoadWord, locations->Out().AsRegister<Register>(),
+                      locations->InAt(0).AsRegister<Register>(),
+                      mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kArmPointerSize));
   }
   __ LoadFromOffset(kLoadWord, locations->Out().AsRegister<Register>(),
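Across all of the code generators touched in this merge, the invoke-interface fast path gains exactly one extra dependent load: instead of indexing a table embedded in the class object, it first loads the class's `ImTable` pointer and then indexes into that table. The same sequence in C++ terms, with simplified stand-in types and an assumed 64-entry table (the real code works on raw offsets and registers):

```cpp
struct ArtMethod { void (*entry_point)(); };
struct ImTable { ArtMethod* entries[64]; };  // 64 assumed for IMT_SIZE.
struct Class { ImTable* imt; };
struct Object { Class* klass; };

void InvokeInterface(Object* receiver, unsigned imt_index) {
  Class* klass = receiver->klass;               // load receiver's class
  ImTable* imt = klass->imt;                    // NEW: one extra load
  ArtMethod* m = imt->entries[imt_index % 64];  // load the IMT entry
  m->entry_point();                             // jump to the implementation
}
```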
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a37ea1e9a3..b63a3d4c1a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3490,8 +3490,6 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   LocationSummary* locations = invoke->GetLocations();
   Register temp = XRegisterFrom(locations->GetTemp(0));
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   Offset class_offset = mirror::Object::ClassOffset();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
@@ -3521,6 +3519,10 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
+  __ Ldr(temp,
+      MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kArm64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ Ldr(temp, MemOperand(temp, method_offset));
   // lr = temp->GetEntryPoint();
@@ -5148,8 +5150,10 @@ void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instructi
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kArm64PointerSize).SizeValue();
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
+    __ Ldr(XRegisterFrom(locations->Out()), MemOperand(XRegisterFrom(locations->InAt(0)),
+        mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
+    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kArm64PointerSize));
   }
   __ Ldr(XRegisterFrom(locations->Out()),
         MemOperand(XRegisterFrom(locations->InAt(0)), method_offset));
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index ed0767ed52..c8e927d026 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3701,8 +3701,6 @@ void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
 void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
   Location receiver = invoke->GetLocations()->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
@@ -3719,6 +3717,10 @@ void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke
     __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
+  __ LoadFromOffset(kLoadWord, temp, temp,
+      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kMipsPointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   // T9 = temp->GetEntryPoint();
@@ -5162,8 +5164,12 @@ void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instructio
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kMipsPointerSize).SizeValue();
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
+    __ LoadFromOffset(kLoadWord,
+                      locations->Out().AsRegister<Register>(),
+                      locations->InAt(0).AsRegister<Register>(),
+                      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
+    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kMipsPointerSize));
   }
   __ LoadFromOffset(kLoadWord,
                     locations->Out().AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 8c73e350f6..8d5dc84df9 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2935,8 +2935,6 @@ void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
 void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
   Location receiver = invoke->GetLocations()->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
@@ -2953,6 +2951,10 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invo
     __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
+  __ LoadFromOffset(kLoadDoubleword, temp, temp,
+      mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kMips64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
   // T9 = temp->GetEntryPoint();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8c643a05c8..9d0092b674 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2012,8 +2012,6 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
   LocationSummary* locations = invoke->GetLocations();
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   XmmRegister hidden_reg = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

@@ -2040,7 +2038,12 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+  // temp = temp->GetAddressOfIMT()
+  __ movl(temp,
+      Address(temp, mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
   // temp = temp->GetImtEntryAt(method_offset);
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kX86PointerSize));
   __ movl(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
   __ call(Address(temp,
@@ -4060,8 +4063,12 @@ void InstructionCodeGeneratorX86::VisitClassTableGet(HClassTableGet* instruction
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kX86PointerSize).SizeValue();
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
+    __ movl(locations->InAt(0).AsRegister<Register>(),
+            Address(locations->InAt(0).AsRegister<Register>(),
+                    mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
+    // temp = temp->GetImtEntryAt(method_offset);
+    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kX86PointerSize));
   }
   __ movl(locations->Out().AsRegister<Register>(),
          Address(locations->InAt(0).AsRegister<Register>(), method_offset));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 72de3e6e35..a8da5f2ea5 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2228,8 +2228,6 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
   LocationSummary* locations = invoke->GetLocations();
   CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
   CpuRegister hidden_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   size_t class_offset = mirror::Object::ClassOffset().SizeValue();

@@ -2255,6 +2253,12 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+  // temp = temp->GetAddressOfIMT()
+  __ movq(temp,
+      Address(temp, mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
+  // temp = temp->GetImtEntryAt(method_offset);
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kX86_64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ movq(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
@@ -3978,8 +3982,11 @@ void InstructionCodeGeneratorX86_64::VisitClassTableGet(HClassTableGet* instruct
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kX86_64PointerSize).SizeValue();
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
+    __ movq(locations->Out().AsRegister<CpuRegister>(),
+            Address(locations->InAt(0).AsRegister<CpuRegister>(),
+                    mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
+    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kX86_64PointerSize));
   }
   __ movq(locations->Out().AsRegister<CpuRegister>(),
           Address(locations->InAt(0).AsRegister<CpuRegister>(), method_offset));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 59de895182..27b6896150 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -656,8 +656,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
   }
   ArtMethod* new_method = nullptr;
   if (invoke_instruction->IsInvokeInterface()) {
-    new_method = ic.GetTypeAt(i)->GetEmbeddedImTableEntry(
-        method_index % mirror::Class::kImtSize, pointer_size);
+    new_method = ic.GetTypeAt(i)->GetImt(pointer_size)->Get(
+        method_index % ImTable::kSize, pointer_size);
     if (new_method->IsRuntimeMethod()) {
       // Bail out as soon as we see a conflict trampoline in one of the target's
       // interface table.
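The inliner tweak above reads the candidate receiver's IMT slot and refuses to devirtualize when the slot holds a runtime method (a conflict trampoline) rather than a concrete implementation. The guard, distilled with hypothetical simplified types:

```cpp
struct Method {
  bool is_runtime_method;  // true for conflict/unimplemented trampolines
};

// Returns the inlining target, or nullptr to bail out: a runtime method in
// the slot means several interface methods collided there, so the slot does
// not identify a single implementation to inline.
Method* DevirtualizeInterfaceCall(Method* imt_slot) {
  if (imt_slot == nullptr || imt_slot->is_runtime_method) {
    return nullptr;
  }
  return imt_slot;
}
```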
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 0a7ffda3b4..5bb61bb829 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -494,6 +494,17 @@ void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
   image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
 }

+void PatchOat::PatchImTables(const ImageHeader* image_header) {
+  const size_t pointer_size = InstructionSetPointerSize(isa_);
+  // We can safely walk target image since the conflict tables are independent.
+  image_header->VisitPackedImTables(
+      [this](ArtMethod* method) {
+        return RelocatedAddressOfPointer(method);
+      },
+      image_->Begin(),
+      pointer_size);
+}
+
 void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) {
   const size_t pointer_size = InstructionSetPointerSize(isa_);
   // We can safely walk target image since the conflict tables are independent.
@@ -636,6 +647,7 @@ bool PatchOat::PatchImage(bool primary_image) {
   PatchArtFields(image_header);
   PatchArtMethods(image_header);
+  PatchImTables(image_header);
   PatchImtConflictTables(image_header);
   PatchInternedStrings(image_header);
   PatchClassTable(image_header);
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 3ef837fde9..61ec695d83 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -117,6 +117,7 @@ class PatchOat {
   bool PatchImage(bool primary_image) SHARED_REQUIRES(Locks::mutator_lock_);
   void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
   void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
+  void PatchImTables(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
   void PatchImtConflictTables(const ImageHeader* image_header)
       SHARED_REQUIRES(Locks::mutator_lock_);
   void PatchInternedStrings(const ImageHeader* image_header)
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 2b025f8c62..90b2406a1d 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -99,6 +99,22 @@ class ImtConflictTable {
     return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
   }

+  // Return true if two conflict tables are the same.
+  bool Equals(ImtConflictTable* other, size_t pointer_size) const {
+    size_t num = NumEntries(pointer_size);
+    if (num != other->NumEntries(pointer_size)) {
+      return false;
+    }
+    for (size_t i = 0; i < num; ++i) {
+      if (GetInterfaceMethod(i, pointer_size) != other->GetInterfaceMethod(i, pointer_size) ||
+          GetImplementationMethod(i, pointer_size) !=
+              other->GetImplementationMethod(i, pointer_size)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
   // Visit all of the entries.
   // NO_THREAD_SAFETY_ANALYSIS for calling with held locks. Visitor is passed a pair of ArtMethod*
   // and also returns one. The order is <interface, implementation>.
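`ImtConflictTable::Equals`, added above, is what later lets the class linker treat two conflict slots as interchangeable: same entry count and positionally identical <interface, implementation> pairs. Equivalent standalone logic, where `ConflictPairs` is a hypothetical flattened view of the table:

```cpp
#include <cstddef>
#include <utility>
#include <vector>

struct ArtMethod;
using ConflictPairs = std::vector<std::pair<ArtMethod*, ArtMethod*>>;

bool Equals(const ConflictPairs& a, const ConflictPairs& b) {
  if (a.size() != b.size()) {
    return false;  // Different conflict counts can never match.
  }
  for (size_t i = 0; i < a.size(); ++i) {
    if (a[i] != b[i]) {
      return false;  // Pairs must match positionally, not as sets.
    }
  }
  return true;
}
```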
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index fe7448fa25..cb34d8a121 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -857,11 +857,13 @@ static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_
     if (vtable != nullptr) {
       SanityCheckArtMethodPointerArray(vtable, nullptr, pointer_size, image_spaces);
     }
-    if (klass->ShouldHaveEmbeddedImtAndVTable()) {
-      for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
-        SanityCheckArtMethod(
-            klass->GetEmbeddedImTableEntry(i, pointer_size), nullptr, image_spaces);
+    if (klass->ShouldHaveImt()) {
+      ImTable* imt = klass->GetImt(pointer_size);
+      for (size_t i = 0; i < ImTable::kSize; ++i) {
+        SanityCheckArtMethod(imt->Get(i, pointer_size), nullptr, image_spaces);
       }
+    }
+    if (klass->ShouldHaveEmbeddedVTable()) {
       for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) {
         SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr, image_spaces);
       }
@@ -3456,16 +3458,11 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
     new_class->SetClassFlags(mirror::kClassFlagObjectArray);
   }
   mirror::Class::SetStatus(new_class, mirror::Class::kStatusLoaded, self);
-  {
-    ArtMethod* imt[mirror::Class::kImtSize];
-    std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod());
-    new_class->PopulateEmbeddedImtAndVTable(imt, image_pointer_size_);
-  }
+  new_class->PopulateEmbeddedVTable(image_pointer_size_);
   mirror::Class::SetStatus(new_class, mirror::Class::kStatusInitialized, self);
   // don't need to set new_class->SetObjectSize(..)
   // because Object::SizeOf delegates to Array::SizeOf
-
   // All arrays have java/lang/Cloneable and java/io/Serializable as
   // interfaces. We need to set that up here, so that stuff like
   // "instanceof" works right.
@@ -5036,9 +5033,11 @@ bool ClassLinker::LinkClass(Thread* self,
   if (!LinkSuperClass(klass)) {
     return false;
   }
-  ArtMethod* imt[mirror::Class::kImtSize];
-  std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod());
-  if (!LinkMethods(self, klass, interfaces, imt)) {
+  ArtMethod* imt_data[ImTable::kSize];
+  // If there are any new conflicts compared to super class.
+  bool new_conflict = false;
+  std::fill_n(imt_data, arraysize(imt_data), Runtime::Current()->GetImtUnimplementedMethod());
+  if (!LinkMethods(self, klass, interfaces, &new_conflict, imt_data)) {
     return false;
   }
   if (!LinkInstanceFields(self, klass)) {
@@ -5051,15 +5050,45 @@ bool ClassLinker::LinkClass(Thread* self,
   CreateReferenceInstanceOffsets(klass);
   CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());

+  ImTable* imt = nullptr;
+  if (klass->ShouldHaveImt()) {
+    // If there are any new conflicts compared to the super class we can not make a copy. There
+    // can be cases where both will have a conflict method at the same slot without having the same
+    // set of conflicts. In this case, we can not share the IMT since the conflict table slow path
+    // will possibly create a table that is incorrect for either of the classes.
+    // Same IMT with new_conflict does not happen very often.
+    if (!new_conflict && klass->HasSuperClass() && klass->GetSuperClass()->ShouldHaveImt()) {
+      ImTable* super_imt = klass->GetSuperClass()->GetImt(image_pointer_size_);
+      bool imt_equals = true;
+      for (size_t i = 0; i < ImTable::kSize && imt_equals; ++i) {
+        imt_equals = imt_equals && (super_imt->Get(i, image_pointer_size_) == imt_data[i]);
+      }
+      if (imt_equals) {
+        imt = super_imt;
+      }
+    }
+    if (imt == nullptr) {
+      LinearAlloc* allocator = GetAllocatorForClassLoader(klass->GetClassLoader());
+      imt = reinterpret_cast<ImTable*>(
+          allocator->Alloc(self, ImTable::SizeInBytes(image_pointer_size_)));
+      if (imt == nullptr) {
+        return false;
+      }
+      imt->Populate(imt_data, image_pointer_size_);
+    }
+  }
+
   if (!klass->IsTemp() || (!init_done_ && klass->GetClassSize() == class_size)) {
     // We don't need to retire this class as it has no embedded tables or it was created the
     // correct size during class linker initialization.
     CHECK_EQ(klass->GetClassSize(), class_size) << PrettyDescriptor(klass.Get());

-    if (klass->ShouldHaveEmbeddedImtAndVTable()) {
-      klass->PopulateEmbeddedImtAndVTable(imt, image_pointer_size_);
+    if (klass->ShouldHaveEmbeddedVTable()) {
+      klass->PopulateEmbeddedVTable(image_pointer_size_);
+    }
+    if (klass->ShouldHaveImt()) {
+      klass->SetImt(imt, image_pointer_size_);
     }
-
     // This will notify waiters on klass that saw the not yet resolved
     // class in the class_table_ during EnsureResolved.
     mirror::Class::SetStatus(klass, mirror::Class::kStatusResolved, self);
@@ -5451,6 +5480,7 @@ bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) {
 bool ClassLinker::LinkMethods(Thread* self,
                               Handle<mirror::Class> klass,
                               Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+                              bool* out_new_conflict,
                               ArtMethod** out_imt) {
   self->AllowThreadSuspension();
   // A map from vtable indexes to the method they need to be updated to point to. Used because we
@@ -5462,7 +5492,7 @@ bool ClassLinker::LinkMethods(Thread* self,
   // any vtable entries with new default method implementations.
   return SetupInterfaceLookupTable(self, klass, interfaces)
           && LinkVirtualMethods(self, klass, /*out*/ &default_translations)
-          && LinkInterfaceMethods(self, klass, default_translations, out_imt);
+          && LinkInterfaceMethods(self, klass, default_translations, out_new_conflict, out_imt);
 }

 // Comparator for name and signature of a method, used in finding overriding methods. Implementation
@@ -5620,7 +5650,7 @@ bool ClassLinker::LinkVirtualMethods(
     StackHandleScope<2> hs(self);
     Handle<mirror::Class> super_class(hs.NewHandle(klass->GetSuperClass()));
     MutableHandle<mirror::PointerArray> vtable;
-    if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
+    if (super_class->ShouldHaveEmbeddedVTable()) {
       vtable = hs.NewHandle(AllocPointerArray(self, max_count));
       if (UNLIKELY(vtable.Get() == nullptr)) {
         self->AssertPendingOOMException();
@@ -6020,6 +6050,7 @@ ArtMethod* ClassLinker::AddMethodToConflictTable(mirror::Class* klass,
 void ClassLinker::SetIMTRef(ArtMethod* unimplemented_method,
                             ArtMethod* imt_conflict_method,
                             ArtMethod* current_method,
+                            /*out*/bool* new_conflict,
                             /*out*/ArtMethod** imt_ref) {
   // Place method in imt if entry is empty, place conflict otherwise.
   if (*imt_ref == unimplemented_method) {
@@ -6036,40 +6067,82 @@ void ClassLinker::SetIMTRef(ArtMethod* unimplemented_method,
       *imt_ref = current_method;
     } else {
       *imt_ref = imt_conflict_method;
+      *new_conflict = true;
     }
   } else {
     // Place the default conflict method. Note that there may be an existing conflict
     // method in the IMT, but it could be one tailored to the super class, with a
     // specific ImtConflictTable.
     *imt_ref = imt_conflict_method;
+    *new_conflict = true;
   }
 }

 void ClassLinker::FillIMTAndConflictTables(mirror::Class* klass) {
-  DCHECK(klass->ShouldHaveEmbeddedImtAndVTable()) << PrettyClass(klass);
+  DCHECK(klass->ShouldHaveImt()) << PrettyClass(klass);
   DCHECK(!klass->IsTemp()) << PrettyClass(klass);
-  ArtMethod* imt[mirror::Class::kImtSize];
+  ArtMethod* imt_data[ImTable::kSize];
   Runtime* const runtime = Runtime::Current();
   ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod();
   ArtMethod* const conflict_method = runtime->GetImtConflictMethod();
-  std::fill_n(imt, arraysize(imt), unimplemented_method);
+  std::fill_n(imt_data, arraysize(imt_data), unimplemented_method);
   if (klass->GetIfTable() != nullptr) {
+    bool new_conflict = false;
     FillIMTFromIfTable(klass->GetIfTable(),
                        unimplemented_method,
                        conflict_method,
                        klass,
-                       true,
-                       false,
-                       &imt[0]);
+                       /*create_conflict_tables*/true,
+                       /*ignore_copied_methods*/false,
+                       &new_conflict,
+                       &imt_data[0]);
   }
-  for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
-    klass->SetEmbeddedImTableEntry(i, imt[i], image_pointer_size_);
+  if (!klass->ShouldHaveImt()) {
+    return;
+  }
+  // Compare the IMT with the super class including the conflict methods. If they are equivalent,
+  // we can just use the same pointer.
+  ImTable* imt = nullptr;
+  mirror::Class* super_class = klass->GetSuperClass();
+  if (super_class != nullptr && super_class->ShouldHaveImt()) {
+    ImTable* super_imt = super_class->GetImt(image_pointer_size_);
+    bool same = true;
+    for (size_t i = 0; same && i < ImTable::kSize; ++i) {
+      ArtMethod* method = imt_data[i];
+      ArtMethod* super_method = super_imt->Get(i, image_pointer_size_);
+      if (method != super_method) {
+        bool is_conflict_table = method->IsRuntimeMethod() &&
+                                 method != unimplemented_method &&
+                                 method != conflict_method;
+        // Verify conflict contents.
+        bool super_conflict_table = super_method->IsRuntimeMethod() &&
+                                    super_method != unimplemented_method &&
+                                    super_method != conflict_method;
+        if (!is_conflict_table || !super_conflict_table) {
+          same = false;
+        } else {
+          ImtConflictTable* table1 = method->GetImtConflictTable(image_pointer_size_);
+          ImtConflictTable* table2 = super_method->GetImtConflictTable(image_pointer_size_);
+          same = same && table1->Equals(table2, image_pointer_size_);
+        }
+      }
+    }
+    if (same) {
+      imt = super_imt;
+    }
+  }
+  if (imt == nullptr) {
+    imt = klass->GetImt(image_pointer_size_);
+    DCHECK(imt != nullptr);
+    imt->Populate(imt_data, image_pointer_size_);
+  } else {
+    klass->SetImt(imt, image_pointer_size_);
   }
 }

 static inline uint32_t GetIMTIndex(ArtMethod* interface_method)
     SHARED_REQUIRES(Locks::mutator_lock_) {
-  return interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+  return interface_method->GetDexMethodIndex() % ImTable::kSize;
 }

 ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count,
@@ -6091,8 +6164,9 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
                                      mirror::Class* klass,
                                      bool create_conflict_tables,
                                      bool ignore_copied_methods,
-                                     ArtMethod** imt) {
-  uint32_t conflict_counts[mirror::Class::kImtSize] = {};
+                                     /*out*/bool* new_conflict,
+                                     /*out*/ArtMethod** imt) {
+  uint32_t conflict_counts[ImTable::kSize] = {};
   for (size_t i = 0, length = if_table->Count(); i < length; ++i) {
     mirror::Class* interface = if_table->GetInterface(i);
     const size_t num_virtuals = interface->NumVirtualMethods();
@@ -6134,6 +6208,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
       SetIMTRef(unimplemented_method,
                 imt_conflict_method,
                 implementation_method,
+                /*out*/new_conflict,
                 /*out*/&imt[imt_index]);
     }
   }
@@ -6141,7 +6216,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
   if (create_conflict_tables) {
     // Create the conflict tables.
     LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader());
-    for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+    for (size_t i = 0; i < ImTable::kSize; ++i) {
       size_t conflicts = conflict_counts[i];
       if (imt[i] == imt_conflict_method) {
         ImtConflictTable* new_table = CreateImtConflictTable(conflicts, linear_alloc);
@@ -6428,12 +6503,14 @@ static void SanityCheckVTable(Handle<mirror::Class> klass, uint32_t pointer_size
 void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass,
                                         ArtMethod* unimplemented_method,
                                         ArtMethod* imt_conflict_method,
+                                        bool* new_conflict,
                                         ArtMethod** imt) {
   DCHECK(klass->HasSuperClass());
   mirror::Class* super_class = klass->GetSuperClass();
-  if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
-    for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
-      imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_);
+  if (super_class->ShouldHaveImt()) {
+    ImTable* super_imt = super_class->GetImt(image_pointer_size_);
+    for (size_t i = 0; i < ImTable::kSize; ++i) {
+      imt[i] = super_imt->Get(i, image_pointer_size_);
     }
   } else {
     // No imt in the super class, need to reconstruct from the iftable.
@@ -6446,6 +6523,7 @@ void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass,
                        klass.Get(),
                        /*create_conflict_table*/false,
                        /*ignore_copied_methods*/true,
+                       /*out*/new_conflict,
                        /*out*/imt);
   }
 }
@@ -6456,6 +6534,7 @@ bool ClassLinker::LinkInterfaceMethods(
     Thread* self,
     Handle<mirror::Class> klass,
     const std::unordered_map<size_t, ClassLinker::MethodTranslation>& default_translations,
+    bool* out_new_conflict,
     ArtMethod** out_imt) {
   StackHandleScope<3> hs(self);
   Runtime* const runtime = Runtime::Current();
@@ -6491,6 +6570,7 @@ bool ClassLinker::LinkInterfaceMethods(
     FillImtFromSuperClass(klass,
                           unimplemented_method,
                           imt_conflict_method,
+                          out_new_conflict,
                           out_imt);
   }
   // Allocate method arrays before since we don't want miss visiting miranda method roots due to
@@ -6622,6 +6702,7 @@ bool ClassLinker::LinkInterfaceMethods(
             SetIMTRef(unimplemented_method,
                       imt_conflict_method,
                       vtable_method,
+                      /*out*/out_new_conflict,
                       /*out*/imt_ptr);
           }
           break;
@@ -6764,6 +6845,7 @@ bool ClassLinker::LinkInterfaceMethods(
             SetIMTRef(unimplemented_method,
                       imt_conflict_method,
                       current_method,
+                      /*out*/out_new_conflict,
                       /*out*/imt_ptr);
           }
         }
@@ -6963,7 +7045,7 @@ bool ClassLinker::LinkInterfaceMethods(
     }

     // Fix up IMT next
-    for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+    for (size_t i = 0; i < ImTable::kSize; ++i) {
       auto it = move_table.find(out_imt[i]);
       if (it != move_table.end()) {
         out_imt[i] = it->second;
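The heart of the optimization is the sharing decision in `LinkClass` and `FillIMTAndConflictTables` above: if linking produced no new conflict and every computed slot matches the superclass table (with conflict slots compared via `Equals`), the subclass simply points at the superclass's `ImTable`; otherwise a private table is allocated from the class loader's `LinearAlloc`. Distilled into a standalone sketch, with the 64-entry size assumed:

```cpp
#include <array>
#include <cstddef>

struct ArtMethod;
constexpr size_t kImtSize = 64;  // Assumed IMT_SIZE.
using ImtSlots = std::array<ArtMethod*, kImtSize>;

// Share only when nothing new conflicted and the slots are identical;
// fresh_storage stands in for a LinearAlloc allocation in ART.
const ImtSlots* ChooseImt(const ImtSlots& computed,
                          const ImtSlots* super_imt,
                          bool new_conflict,
                          ImtSlots* fresh_storage) {
  if (!new_conflict && super_imt != nullptr && computed == *super_imt) {
    return super_imt;  // Subclass reuses the superclass's table.
  }
  *fresh_storage = computed;  // Materialize a private copy.
  return fresh_storage;
}
```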
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index ca5af19976..d6822c5225 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -833,6 +833,7 @@ class ClassLinker {
   bool LinkMethods(Thread* self,
                    Handle<mirror::Class> klass,
                    Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+                   bool* out_new_conflict,
                    ArtMethod** out_imt)
       SHARED_REQUIRES(Locks::mutator_lock_);

@@ -968,19 +969,20 @@ class ClassLinker {
   // * kDefaultConflict - Conflicting method implementations were found when searching for
   //                      target_method. The value of *out_default_method is null.
   DefaultMethodSearchResult FindDefaultMethodImplementation(
-      Thread* self,
-      ArtMethod* target_method,
-      Handle<mirror::Class> klass,
-      /*out*/ArtMethod** out_default_method) const
+      Thread* self,
+      ArtMethod* target_method,
+      Handle<mirror::Class> klass,
+      /*out*/ArtMethod** out_default_method) const
       SHARED_REQUIRES(Locks::mutator_lock_);

   // Sets the imt entries and fixes up the vtable for the given class by linking all the interface
   // methods. See LinkVirtualMethods for an explanation of what default_translations is.
   bool LinkInterfaceMethods(
-      Thread* self,
-      Handle<mirror::Class> klass,
-      const std::unordered_map<size_t, MethodTranslation>& default_translations,
-      ArtMethod** out_imt)
+      Thread* self,
+      Handle<mirror::Class> klass,
+      const std::unordered_map<size_t, MethodTranslation>& default_translations,
+      bool* out_new_conflict,
+      ArtMethod** out_imt)
       SHARED_REQUIRES(Locks::mutator_lock_);

   bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size)
@@ -1096,6 +1098,7 @@ class ClassLinker {
   void SetIMTRef(ArtMethod* unimplemented_method,
                  ArtMethod* imt_conflict_method,
                  ArtMethod* current_method,
+                 /*out*/bool* new_conflict,
                  /*out*/ArtMethod** imt_ref) SHARED_REQUIRES(Locks::mutator_lock_);

   void FillIMTFromIfTable(mirror::IfTable* if_table,
@@ -1104,11 +1107,13 @@ class ClassLinker {
                           mirror::Class* klass,
                           bool create_conflict_tables,
                           bool ignore_copied_methods,
-                          ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
+                          /*out*/bool* new_conflict,
+                          /*out*/ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);

   void FillImtFromSuperClass(Handle<mirror::Class> klass,
                              ArtMethod* unimplemented_method,
                              ArtMethod* imt_conflict_method,
+                             bool* new_conflict,
                              ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);

   std::vector<const DexFile*> boot_class_path_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 488826b6c4..9b59f2bba6 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -148,7 +148,8 @@ class ClassLinkerTest : public CommonRuntimeTest {
     EXPECT_EQ(0U, array->NumInstanceFields());
     EXPECT_EQ(0U, array->NumStaticFields());
     EXPECT_EQ(2U, array->NumDirectInterfaces());
-    EXPECT_TRUE(array->ShouldHaveEmbeddedImtAndVTable());
+    EXPECT_FALSE(array->ShouldHaveImt());
+    EXPECT_TRUE(array->ShouldHaveEmbeddedVTable());
     EXPECT_EQ(2, array->GetIfTableCount());
     ASSERT_TRUE(array->GetIfTable() != nullptr);
     mirror::Class* direct_interface0 = mirror::Class::GetDirectInterface(self, array, 0);
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index fc6257302a..916ca29319 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -559,9 +559,10 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_
       }
     }
     case kInterface: {
-      uint32_t imt_index = resolved_method->GetDexMethodIndex() % mirror::Class::kImtSize;
-      ArtMethod* imt_method = (*this_object)->GetClass()->GetEmbeddedImTableEntry(
-          imt_index, class_linker->GetImagePointerSize());
+      uint32_t imt_index = resolved_method->GetDexMethodIndex() % ImTable::kSize;
+      size_t pointer_size = class_linker->GetImagePointerSize();
+      ArtMethod* imt_method = (*this_object)->GetClass()->GetImt(pointer_size)->
+          Get(imt_index, pointer_size);
       if (!imt_method->IsRuntimeMethod()) {
         if (kIsDebugBuild) {
           mirror::Class* klass = (*this_object)->GetClass();
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 03771aa80e..7175d5436b 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2169,13 +2169,13 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
                                                     dex_method_idx, sizeof(void*));
   DCHECK(interface_method != nullptr) << dex_method_idx << " " << PrettyMethod(caller_method);
   ArtMethod* method = nullptr;
+  ImTable* imt = cls->GetImt(sizeof(void*));
   if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
     // If the dex cache already resolved the interface method, look whether we have
     // a match in the ImtConflictTable.
     uint32_t imt_index = interface_method->GetDexMethodIndex();
-    ArtMethod* conflict_method = cls->GetEmbeddedImTableEntry(
-        imt_index % mirror::Class::kImtSize, sizeof(void*));
+    ArtMethod* conflict_method = imt->Get(imt_index % ImTable::kSize, sizeof(void*));
     if (LIKELY(conflict_method->IsRuntimeMethod())) {
       ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
       DCHECK(current_table != nullptr);
@@ -2227,8 +2227,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
   // We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
   // We create a new table with the new pair { interface_method, method }.
   uint32_t imt_index = interface_method->GetDexMethodIndex();
-  ArtMethod* conflict_method = cls->GetEmbeddedImTableEntry(
-      imt_index % mirror::Class::kImtSize, sizeof(void*));
+  ArtMethod* conflict_method = imt->Get(imt_index % ImTable::kSize, sizeof(void*));
   if (conflict_method->IsRuntimeMethod()) {
     ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
         cls.Get(),
@@ -2239,9 +2238,9 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
     if (new_conflict_method != conflict_method) {
       // Update the IMT if we create a new conflict method. No fence needed here, as the
       // data is consistent.
-      cls->SetEmbeddedImTableEntry(imt_index % mirror::Class::kImtSize,
-                                   new_conflict_method,
-                                   sizeof(void*));
+      imt->Set(imt_index % ImTable::kSize,
+               new_conflict_method,
+               sizeof(void*));
     }
   }
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 78c570fa99..1ebe5cc47b 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1130,6 +1130,10 @@ static bool RelocateInPlace(ImageHeader& image_header,
     image_header.VisitPackedArtFields(&field_visitor, target_base);
   }
   {
+    TimingLogger::ScopedTiming timing("Fixup imt", &logger);
+    image_header.VisitPackedImTables(fixup_adapter, target_base, pointer_size);
+  }
+  {
     TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
     image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size);
   }
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index ea75a622c7..cd0557a235 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -20,6 +20,7 @@
 #include "image.h"

 #include "art_method.h"
+#include "imtable.h"

 namespace art {

@@ -45,6 +46,24 @@ inline mirror::ObjectArray<mirror::Object>* ImageHeader::GetImageRoots() const {
 }

 template <typename Visitor>
+inline void ImageHeader::VisitPackedImTables(const Visitor& visitor,
+                                             uint8_t* base,
+                                             size_t pointer_size) const {
+  const ImageSection& section = GetImageSection(kSectionImTables);
+  for (size_t pos = 0; pos < section.Size();) {
+    ImTable* imt = reinterpret_cast<ImTable*>(base + section.Offset() + pos);
+    for (size_t i = 0; i < ImTable::kSize; ++i) {
+      ArtMethod* orig = imt->Get(i, pointer_size);
+      ArtMethod* updated = visitor(orig);
+      if (updated != orig) {
+        imt->Set(i, updated, pointer_size);
+      }
+    }
+    pos += ImTable::SizeInBytes(pointer_size);
+  }
+}
+
+template <typename Visitor>
 inline void ImageHeader::VisitPackedImtConflictTables(const Visitor& visitor,
                                                       uint8_t* base,
                                                       size_t pointer_size) const {
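`VisitPackedImTables` above walks the new image section in fixed `ImTable::SizeInBytes` strides and pushes every entry through a visitor; patchoat and the in-place relocation path both reuse this shape with a "relocate this pointer" visitor. A generic sketch of the pattern:

```cpp
#include <cstddef>
#include <cstdint>

struct ArtMethod;

// Walk a packed section of fixed-size pointer tables and rewrite each entry
// through the visitor, writing back only when the pointer actually changed.
template <typename Visitor>
void VisitPackedTables(uint8_t* base, size_t section_offset, size_t section_size,
                       size_t table_bytes, size_t entries_per_table,
                       const Visitor& visitor) {
  for (size_t pos = 0; pos < section_size; pos += table_bytes) {
    ArtMethod** table = reinterpret_cast<ArtMethod**>(base + section_offset + pos);
    for (size_t i = 0; i < entries_per_table; ++i) {
      ArtMethod* updated = visitor(table[i]);
      if (updated != table[i]) {
        table[i] = updated;
      }
    }
  }
}
```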
diff --git a/runtime/image.cc b/runtime/image.cc
index a9552c27d3..2362a92c24 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@ namespace art {

 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '9', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '0', '\0' };

 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index 2ea9af7728..06f06eed0e 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -195,6 +195,7 @@ class PACKED(4) ImageHeader {
     kSectionArtFields,
     kSectionArtMethods,
     kSectionRuntimeMethods,
+    kSectionImTables,
     kSectionIMTConflictTables,
     kSectionDexCacheArrays,
     kSectionInternedStrings,
@@ -279,6 +280,11 @@ class PACKED(4) ImageHeader {
   void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;

   template <typename Visitor>
+  void VisitPackedImTables(const Visitor& visitor,
+                           uint8_t* base,
+                           size_t pointer_size) const;
+
+  template <typename Visitor>
   void VisitPackedImtConflictTables(const Visitor& visitor,
                                     uint8_t* base,
                                     size_t pointer_size) const;
diff --git a/runtime/imtable.h b/runtime/imtable.h
new file mode 100644
index 0000000000..51faf70d14
--- /dev/null
+++ b/runtime/imtable.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_IMTABLE_H_
+#define ART_RUNTIME_IMTABLE_H_
+
+#ifndef IMT_SIZE
+#error IMT_SIZE not defined
+#endif
+
+namespace art {
+
+class ArtMethod;
+
+class ImTable {
+ public:
+  // Interface method table size. Increasing this value reduces the chance of two interface methods
+  // colliding in the interface method table but increases the size of classes that implement
+  // (non-marker) interfaces.
+  static constexpr size_t kSize = IMT_SIZE;
+
+  ArtMethod* Get(size_t index, size_t pointer_size) {
+    DCHECK_LT(index, kSize);
+    uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
+    if (pointer_size == 4) {
+      uint32_t value = *reinterpret_cast<uint32_t*>(ptr);
+      return reinterpret_cast<ArtMethod*>(value);
+    } else {
+      uint64_t value = *reinterpret_cast<uint64_t*>(ptr);
+      return reinterpret_cast<ArtMethod*>(value);
+    }
+  }
+
+  void Set(size_t index, ArtMethod* method, size_t pointer_size) {
+    DCHECK_LT(index, kSize);
+    uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
+    if (pointer_size == 4) {
+      uintptr_t value = reinterpret_cast<uintptr_t>(method);
+      DCHECK_EQ(static_cast<uint32_t>(value), value);  // Check that we dont lose any non 0 bits.
+      *reinterpret_cast<uint32_t*>(ptr) = static_cast<uint32_t>(value);
+    } else {
+      *reinterpret_cast<uint64_t*>(ptr) = reinterpret_cast<uint64_t>(method);
+    }
+  }
+
+  static size_t OffsetOfElement(size_t index, size_t pointer_size) {
+    return index * pointer_size;
+  }
+
+  void Populate(ArtMethod** data, size_t pointer_size) {
+    for (size_t i = 0; i < kSize; ++i) {
+      Set(i, data[i], pointer_size);
+    }
+  }
+
+  constexpr static size_t SizeInBytes(size_t pointer_size) {
+    return kSize * pointer_size;
+  }
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_IMTABLE_H_
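Note that `ImTable` declares no data members: it is a typed view over a raw block of `kSize * pointer_size` bytes, which is why `LinkClass` allocates it from the class loader's `LinearAlloc` and why `Get`/`Set` do their own pointer arithmetic. A usage sketch, assuming ART's build environment (where `IMT_SIZE` and the `DCHECK` macros are defined via the usual include order):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

#include "imtable.h"  // The new header added above.

namespace art { class ArtMethod; }

void Example(art::ArtMethod* filler) {
  const size_t ptr_size = sizeof(void*);
  // Back the table with raw storage, as LinearAlloc does inside the runtime.
  std::vector<uint8_t> storage(art::ImTable::SizeInBytes(ptr_size));
  auto* imt = reinterpret_cast<art::ImTable*>(storage.data());
  art::ArtMethod* slots[art::ImTable::kSize];
  for (size_t i = 0; i < art::ImTable::kSize; ++i) {
    slots[i] = filler;  // e.g. the unimplemented-method trampoline
  }
  imt->Populate(slots, ptr_size);  // Copy the linker-computed slots in.
  (void)imt->Get(0, ptr_size);     // Reads go through raw offsets too.
}
```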
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index cc470f372b..3750b7ad18 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -679,7 +679,7 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
     return false;
   }
   const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
-  CHECK(receiver->GetClass()->ShouldHaveEmbeddedImtAndVTable());
+  CHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
   ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
       vtable_idx, sizeof(void*));
   if (UNLIKELY(called_method == nullptr)) {
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index cefd9f0315..b783a019e7 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -247,38 +247,19 @@ inline void Class::SetVTable(PointerArray* new_vtable) {
   SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable);
 }

-inline MemberOffset Class::EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size) {
-  DCHECK_LT(i, kImtSize);
-  return MemberOffset(
-      EmbeddedImTableOffset(pointer_size).Uint32Value() + i * ImTableEntrySize(pointer_size));
-}
-
-template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline ArtMethod* Class::GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size) {
-  DCHECK((ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()));
-  return GetFieldPtrWithSize<ArtMethod*>(
-      EmbeddedImTableEntryOffset(i, pointer_size), pointer_size);
-}
-
-template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline void Class::SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) {
-  DCHECK((ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()));
-  SetFieldPtrWithSize<false>(EmbeddedImTableEntryOffset(i, pointer_size), method, pointer_size);
-}
-
 inline bool Class::HasVTable() {
-  return GetVTable() != nullptr || ShouldHaveEmbeddedImtAndVTable();
+  return GetVTable() != nullptr || ShouldHaveEmbeddedVTable();
 }

 inline int32_t Class::GetVTableLength() {
-  if (ShouldHaveEmbeddedImtAndVTable()) {
+  if (ShouldHaveEmbeddedVTable()) {
     return GetEmbeddedVTableLength();
   }
   return GetVTable() != nullptr ? GetVTable()->GetLength() : 0;
 }
 inline ArtMethod* Class::GetVTableEntry(uint32_t i, size_t pointer_size) {
-  if (ShouldHaveEmbeddedImtAndVTable()) {
+  if (ShouldHaveEmbeddedVTable()) {
     return GetEmbeddedVTableEntry(i, pointer_size);
   }
   auto* vtable = GetVTable();
@@ -294,6 +275,14 @@ inline void Class::SetEmbeddedVTableLength(int32_t len) {
   SetField32<false>(MemberOffset(EmbeddedVTableLengthOffset()), len);
 }

+inline ImTable* Class::GetImt(size_t pointer_size) {
+  return GetFieldPtrWithSize<ImTable*>(MemberOffset(ImtPtrOffset(pointer_size)), pointer_size);
+}
+
+inline void Class::SetImt(ImTable* imt, size_t pointer_size) {
+  return SetFieldPtrWithSize<false>(MemberOffset(ImtPtrOffset(pointer_size)), imt, pointer_size);
+}
+
 inline MemberOffset Class::EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size) {
   return MemberOffset(
       EmbeddedVTableOffset(pointer_size).Uint32Value() + i * VTableEntrySize(pointer_size));
@@ -541,7 +530,7 @@ template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
 inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(size_t pointer_size) {
   DCHECK(IsResolved());
   uint32_t base = sizeof(mirror::Class);  // Static fields come after the class.
-  if (ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()) {
+  if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
     // Static fields come after the embedded tables.
     base = mirror::Class::ComputeClassSize(
         true, GetEmbeddedVTableLength(), 0, 0, 0, 0, 0, pointer_size);
@@ -552,7 +541,7 @@ inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_siz
 inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size) {
   DCHECK(IsLoaded());
   uint32_t base = sizeof(mirror::Class);  // Static fields come after the class.
-  if (ShouldHaveEmbeddedImtAndVTable()) {
+  if (ShouldHaveEmbeddedVTable()) {
     // Static fields come after the embedded tables.
     base = mirror::Class::ComputeClassSize(true, GetVTableDuringLinking()->GetLength(),
                                            0, 0, 0, 0, 0, pointer_size);
@@ -711,7 +700,7 @@ inline Object* Class::AllocNonMovableObject(Thread* self) {
   return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
 }

-inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
+inline uint32_t Class::ComputeClassSize(bool has_embedded_vtable,
                                         uint32_t num_vtable_entries,
                                         uint32_t num_8bit_static_fields,
                                         uint32_t num_16bit_static_fields,
@@ -722,11 +711,10 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
   // Space used by java.lang.Class and its instance fields.
   uint32_t size = sizeof(Class);
   // Space used by embedded tables.
-  if (has_embedded_tables) {
-    const uint32_t embedded_imt_size = kImtSize * ImTableEntrySize(pointer_size);
-    const uint32_t embedded_vtable_size = num_vtable_entries * VTableEntrySize(pointer_size);
-    size = RoundUp(size + sizeof(uint32_t) /* embedded vtable len */, pointer_size) +
-        embedded_imt_size + embedded_vtable_size;
+  if (has_embedded_vtable) {
+    size = RoundUp(size + sizeof(uint32_t), pointer_size);
+    size += pointer_size;  // size of pointer to IMT
+    size += num_vtable_entries * VTableEntrySize(pointer_size);
   }
   // Space used by reference statics.
@@ -990,18 +978,9 @@ inline IterationRange<StrideIterator<ArtField>> Class::GetSFieldsUnchecked() {
   return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked());
 }

-inline MemberOffset Class::EmbeddedImTableOffset(size_t pointer_size) {
-  CheckPointerSize(pointer_size);
-  // Round up since we want the embedded imt and vtable to be pointer size aligned in case 64 bits.
-  // Add 32 bits for embedded vtable length.
-  return MemberOffset(
-      RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t), pointer_size));
-}
-
 inline MemberOffset Class::EmbeddedVTableOffset(size_t pointer_size) {
   CheckPointerSize(pointer_size);
-  return MemberOffset(EmbeddedImTableOffset(pointer_size).Uint32Value() +
-                      kImtSize * ImTableEntrySize(pointer_size));
+  return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + pointer_size);
 }

 inline void Class::CheckPointerSize(size_t pointer_size) {
@@ -1086,7 +1065,7 @@ inline void Class::FixupNativePointers(mirror::Class* dest,
     dest->SetDexCacheStrings(new_strings);
   }
   // Fix up embedded tables.
-  if (!IsTemp() && ShouldHaveEmbeddedImtAndVTable<kVerifyNone, kReadBarrierOption>()) {
+  if (!IsTemp() && ShouldHaveEmbeddedVTable<kVerifyNone, kReadBarrierOption>()) {
     for (int32_t i = 0, count = GetEmbeddedVTableLength(); i < count; ++i) {
       ArtMethod* method = GetEmbeddedVTableEntry(i, pointer_size);
       ArtMethod* new_method = visitor(method);
@@ -1094,16 +1073,9 @@ inline void Class::FixupNativePointers(mirror::Class* dest,
         dest->SetEmbeddedVTableEntryUnchecked(i, new_method, pointer_size);
       }
     }
-    for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
-      ArtMethod* method = GetEmbeddedImTableEntry<kVerifyFlags, kReadBarrierOption>(i,
-                                                                                    pointer_size);
-      ArtMethod* new_method = visitor(method);
-      if (method != new_method) {
-        dest->SetEmbeddedImTableEntry<kVerifyFlags, kReadBarrierOption>(i,
-                                                                        new_method,
-                                                                        pointer_size);
-      }
-    }
+  }
+  if (!IsTemp() && ShouldHaveImt<kVerifyNone, kReadBarrierOption>()) {
+    dest->SetImt(visitor(GetImt(pointer_size)), pointer_size);
   }
 }
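The `ComputeClassSize` and `EmbeddedVTableOffset` changes shrink every instantiable class: the embedded region is now the rounded-up vtable-length word, one IMT pointer, and the vtable entries, instead of a full `kImtSize`-entry table. Worked numbers, assuming `IMT_SIZE = 64`, 64-bit pointers, and a 10-entry vtable:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t ptr = 8, imt_size = 64, vtable_entries = 10;
  size_t before = imt_size * ptr + vtable_entries * ptr;  // embedded IMT + vtable
  size_t after = ptr + vtable_entries * ptr;              // IMT pointer + vtable
  std::printf("embedded tables: %zu -> %zu bytes, saving %zu per class\n",
              before, after, before - after);  // 592 -> 88, saves 504
}
```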
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index b4a23badba..9c77d3814c 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -914,13 +914,7 @@ const DexFile::TypeList* Class::GetInterfaceTypeList() {
   return GetDexFile().GetInterfacesList(*class_def);
 }
 
-void Class::PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize],
-                                         size_t pointer_size) {
-  for (size_t i = 0; i < kImtSize; i++) {
-    auto method = methods[i];
-    DCHECK(method != nullptr);
-    SetEmbeddedImTableEntry(i, method, pointer_size);
-  }
+void Class::PopulateEmbeddedVTable(size_t pointer_size) {
   PointerArray* table = GetVTableDuringLinking();
   CHECK(table != nullptr) << PrettyClass(this);
   const size_t table_length = table->GetLength();
@@ -967,7 +961,7 @@ class ReadBarrierOnNativeRootsVisitor {
 class CopyClassVisitor {
  public:
   CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig, size_t new_length,
-                   size_t copy_bytes, ArtMethod* const (&imt)[mirror::Class::kImtSize],
+                   size_t copy_bytes, ImTable* imt,
                    size_t pointer_size)
       : self_(self), orig_(orig), new_length_(new_length), copy_bytes_(copy_bytes),
         imt_(imt), pointer_size_(pointer_size) {
@@ -979,7 +973,8 @@ class CopyClassVisitor {
     Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
     mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
     mirror::Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_);
-    h_new_class_obj->PopulateEmbeddedImtAndVTable(imt_, pointer_size_);
+    h_new_class_obj->PopulateEmbeddedVTable(pointer_size_);
+    h_new_class_obj->SetImt(imt_, pointer_size_);
     h_new_class_obj->SetClassSize(new_length_);
     // Visit all of the references to make sure there is no from space references in the native
     // roots.
@@ -992,13 +987,13 @@ class CopyClassVisitor {
   Handle<mirror::Class>* const orig_;
   const size_t new_length_;
   const size_t copy_bytes_;
-  ArtMethod* const (&imt_)[mirror::Class::kImtSize];
+  ImTable* imt_;
   const size_t pointer_size_;
   DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor);
 };
 
 Class* Class::CopyOf(Thread* self, int32_t new_length,
-                     ArtMethod* const (&imt)[mirror::Class::kImtSize], size_t pointer_size) {
+                     ImTable* imt, size_t pointer_size) {
   DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
   // We may get copied by a compacting GC.
   StackHandleScope<1> hs(self);
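Before the class.h diff, a note on the ImTable type that CopyClassVisitor and CopyOf now take: it is defined in the imtable.h header added by this change, which is not part of this excerpt. Judging from its call sites (ImTable::kSize, ImTable::SizeInBytes(pointer_size), imt->Get(i, pointer_size)), it is essentially a bare array of pointer-sized method slots. A sketch under those assumptions follows; the real header may differ in detail.

// Reconstructed sketch of the ImTable interface implied by its call sites in
// this change; not the actual runtime/imtable.h.
#include <cstddef>
#include <cstdint>

class ArtMethod;  // opaque for this sketch

class ImTable {
 public:
  // In the real tree this constant presumably comes from the IMT_SIZE build
  // flag that previously fed mirror::Class::kImtSize; 64 is an assumed value.
  static constexpr size_t kSize = 64;

  static constexpr size_t OffsetOfElement(size_t index, size_t pointer_size) {
    return index * pointer_size;
  }

  static constexpr size_t SizeInBytes(size_t pointer_size) {
    return kSize * pointer_size;
  }

  // Slots are raw 32- or 64-bit method pointers laid out right after `this`.
  ArtMethod* Get(size_t index, size_t pointer_size) {
    uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
    if (pointer_size == 4u) {
      return reinterpret_cast<ArtMethod*>(
          static_cast<uintptr_t>(*reinterpret_cast<uint32_t*>(ptr)));
    }
    return reinterpret_cast<ArtMethod*>(
        static_cast<uintptr_t>(*reinterpret_cast<uint64_t*>(ptr)));
  }

  void Set(size_t index, ArtMethod* method, size_t pointer_size) {
    uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
    if (pointer_size == 4u) {
      *reinterpret_cast<uint32_t*>(ptr) =
          static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
    } else {
      *reinterpret_cast<uint64_t*>(ptr) =
          static_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
    }
  }
};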
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 5235a3e8df..9670accf56 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -22,6 +22,7 @@
 #include "class_flags.h"
 #include "gc_root.h"
 #include "gc/allocator_type.h"
+#include "imtable.h"
 #include "invoke_type.h"
 #include "modifiers.h"
 #include "object.h"
@@ -33,10 +34,6 @@
 #include "thread.h"
 #include "utils.h"
 
-#ifndef IMT_SIZE
-#error IMT_SIZE not defined
-#endif
-
 namespace art {
 
 class ArtField;
@@ -66,11 +63,6 @@ class MANAGED Class FINAL : public Object {
   // 2 ref instance fields.]
   static constexpr uint32_t kClassWalkSuper = 0xC0000000;
 
-  // Interface method table size. Increasing this value reduces the chance of two interface methods
-  // colliding in the interface method table but increases the size of classes that implement
-  // (non-marker) interfaces.
-  static constexpr size_t kImtSize = IMT_SIZE;
-
   // Class Status
   //
   // kStatusRetired: Class that's temporarily used till class linking time
@@ -351,7 +343,7 @@ class MANAGED Class FINAL : public Object {
   // be replaced with a class with the right size for embedded imt/vtable.
   bool IsTemp() SHARED_REQUIRES(Locks::mutator_lock_) {
     Status s = GetStatus();
-    return s < Status::kStatusResolving && ShouldHaveEmbeddedImtAndVTable();
+    return s < Status::kStatusResolving && ShouldHaveEmbeddedVTable();
   }
 
   String* GetName() SHARED_REQUIRES(Locks::mutator_lock_);  // Returns the cached name.
@@ -557,7 +549,7 @@ class MANAGED Class FINAL : public Object {
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Compute how many bytes would be used a class with the given elements.
-  static uint32_t ComputeClassSize(bool has_embedded_tables,
+  static uint32_t ComputeClassSize(bool has_embedded_vtable,
                                    uint32_t num_vtable_entries,
                                    uint32_t num_8bit_static_fields,
                                    uint32_t num_16bit_static_fields,
@@ -830,28 +822,28 @@ class MANAGED Class FINAL : public Object {
     return MemberOffset(sizeof(Class));
   }
 
+  static MemberOffset ImtPtrOffset(size_t pointer_size) {
+    return MemberOffset(
+        RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t), pointer_size));
+  }
+
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool ShouldHaveEmbeddedImtAndVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool ShouldHaveImt() SHARED_REQUIRES(Locks::mutator_lock_) {
+    return ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>() &&
+        GetIfTable() != nullptr && !IsArrayClass();
+  }
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  bool ShouldHaveEmbeddedVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
     return IsInstantiable<kVerifyFlags, kReadBarrierOption>();
   }
 
   bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static MemberOffset EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size);
-
   static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size);
 
-  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ArtMethod* GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
-  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
-            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
   int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size)
@@ -861,6 +853,10 @@ class MANAGED Class FINAL : public Object {
 
   void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_);
 
+  ImTable* GetImt(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  void SetImt(ImTable* imt, size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
   ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -870,7 +866,7 @@ class MANAGED Class FINAL : public Object {
   inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], size_t pointer_size)
+  void PopulateEmbeddedVTable(size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Given a method implemented by this class but potentially from a super class, return the
@@ -1195,7 +1191,7 @@ class MANAGED Class FINAL : public Object {
   void AssertInitializedOrInitializingInThread(Thread* self)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  Class* CopyOf(Thread* self, int32_t new_length, ArtMethod* const (&imt)[mirror::Class::kImtSize],
+  Class* CopyOf(Thread* self, int32_t new_length, ImTable* imt,
                 size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
@@ -1322,10 +1318,7 @@ class MANAGED Class FINAL : public Object {
   // Check that the pointer size matches the one in the class linker.
   ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size);
-
-  static MemberOffset EmbeddedImTableOffset(size_t pointer_size);
   static MemberOffset EmbeddedVTableOffset(size_t pointer_size);
-
   template <bool kVisitNativeRoots,
             VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
             ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
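The excerpt cuts off mid-hunk, but one pairing is worth spelling out: the GetImt/SetImt accessors declared above replace the deleted per-entry GetEmbeddedImTableEntry/SetEmbeddedImTableEntry. Their definitions are not shown here; given ImtPtrOffset they plausibly reduce to a single pointer-sized field access. A sketch, assuming the GetFieldPtrWithSize/SetFieldPtrWithSize helpers on mirror::Object:

// Sketch only -- the real definitions live in class-inl.h; the helper names
// are assumed from mirror::Object's pointer-sized field accessors.
inline ImTable* Class::GetImt(size_t pointer_size) {
  // Read the ImTable* stored at the pointer-aligned slot after the embedded
  // vtable length.
  return GetFieldPtrWithSize<ImTable*>(ImtPtrOffset(pointer_size), pointer_size);
}

inline void Class::SetImt(ImTable* imt, size_t pointer_size) {
  // Non-transactional write of the same slot.
  SetFieldPtrWithSize<false>(ImtPtrOffset(pointer_size), imt, pointer_size);
}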