Diffstat (limited to 'compiler')
 compiler/driver/compiler_driver.cc           | 21
 compiler/image_writer.cc                     | 65
 compiler/image_writer.h                      |  6
 compiler/optimizing/code_generator_arm.cc    | 15
 compiler/optimizing/code_generator_arm64.cc  | 12
 compiler/optimizing/code_generator_mips.cc   | 14
 compiler/optimizing/code_generator_mips64.cc |  6
 compiler/optimizing/code_generator_x86.cc    | 15
 compiler/optimizing/code_generator_x86_64.cc | 15
 compiler/optimizing/inliner.cc               |  4
10 files changed, 141 insertions(+), 32 deletions(-)
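In outline, the change replaces the interface method table (IMT) embedded in every mirror::Class with a single pointer (read via mirror::Class::ImtPtrOffset) to a separately allocated ImTable. A reduced sketch of the before/after layout; type and field names are illustrative, not ART's actual declarations, and the table size of 64 is this era's default kImtSize:

    #include <cstddef>

    namespace before {
    // Every class object carried all IMT slots inline, so dispatch indexed
    // straight into the class at a fixed embedded-table offset.
    struct Class {
      static constexpr size_t kImtSize = 64;
      void* embedded_imt_[kImtSize];
    };
    }  // namespace before

    namespace after {
    struct ImTable {
      static constexpr size_t kSize = 64;
      void* entries_[kSize];
    };
    // The class now holds one pointer; classes whose IMT is unchanged from
    // their superclass can share a table, at the cost of one extra load.
    struct Class {
      ImTable* imt_;
    };
    }  // namespace after

    static_assert(sizeof(after::Class) < sizeof(before::Class),
                  "class objects shrink; dispatch pays one extra load instead");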
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a4b48892fb..131be37a33 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2522,11 +2522,28 @@ class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor
                                      true);
     }
     // Create the conflict tables.
-    if (!klass->IsTemp() && klass->ShouldHaveEmbeddedImtAndVTable()) {
+    FillIMTAndConflictTables(klass);
+    return true;
+  }
+
+ private:
+  void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (!klass->ShouldHaveImt()) {
+      return;
+    }
+    if (visited_classes_.find(klass) != visited_classes_.end()) {
+      return;
+    }
+    if (klass->HasSuperClass()) {
+      FillIMTAndConflictTables(klass->GetSuperClass());
+    }
+    if (!klass->IsTemp()) {
       Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass);
     }
-    return true;
+    visited_classes_.insert(klass);
   }
+
+  std::set<mirror::Class*> visited_classes_;
 };
 
 void CompilerDriver::InitializeClasses(jobject class_loader,
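The new FillIMTAndConflictTables helper is a memoized superclass-first walk: a class's IMT builds on its superclass's, so supers are filled first, and the visited set keeps the visitor from redoing shared ancestors. The same pattern in isolation, as a standalone sketch rather than ART code:

    #include <iostream>
    #include <set>

    // Minimal stand-in for mirror::Class: a superclass link and a name.
    struct Klass {
      const char* name;
      Klass* super;
    };

    class Visitor {
     public:
      // Superclasses are processed before subclasses, each class at most once.
      // The real code additionally skips classes without an IMT and temporary
      // (not fully loaded) classes.
      void Fill(Klass* k) {
        if (k == nullptr || visited_.count(k) != 0) {
          return;
        }
        Fill(k->super);                           // ensure the super's IMT first
        std::cout << "fill " << k->name << "\n";  // stand-in for the real work
        visited_.insert(k);
      }

     private:
      std::set<Klass*> visited_;
    };

    int main() {
      Klass object{"Object", nullptr};
      Klass base{"Base", &object};
      Klass derived{"Derived", &base};
      Visitor v;
      v.Fill(&derived);  // prints Object, Base, Derived, in that order
      v.Fill(&base);     // prints nothing: already visited
    }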
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index da10568475..063eb11718 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1232,9 +1232,10 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
       }
       // Assign offsets for all runtime methods in the IMT since these may hold conflict tables
       // live.
-      if (as_klass->ShouldHaveEmbeddedImtAndVTable()) {
-        for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
-          ArtMethod* imt_method = as_klass->GetEmbeddedImTableEntry(i, target_ptr_size_);
+      if (as_klass->ShouldHaveImt()) {
+        ImTable* imt = as_klass->GetImt(target_ptr_size_);
+        for (size_t i = 0; i < ImTable::kSize; ++i) {
+          ArtMethod* imt_method = imt->Get(i, target_ptr_size_);
           DCHECK(imt_method != nullptr);
           if (imt_method->IsRuntimeMethod() &&
               !IsInBootImage(imt_method) &&
@@ -1243,6 +1244,11 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
           }
         }
       }
+
+      if (as_klass->ShouldHaveImt()) {
+        ImTable* imt = as_klass->GetImt(target_ptr_size_);
+        TryAssignImTableOffset(imt, oat_index);
+      }
     } else if (h_obj->IsObjectArray()) {
       // Walk elements of an object array.
       int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
@@ -1269,6 +1275,23 @@ bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
   return native_object_relocations_.find(ptr) != native_object_relocations_.end();
 }
 
+void ImageWriter::TryAssignImTableOffset(ImTable* imt, size_t oat_index) {
+  // No offset, or already assigned.
+  if (imt == nullptr || IsInBootImage(imt) || NativeRelocationAssigned(imt)) {
+    return;
+  }
+  // If the method is a conflict method we also want to assign the conflict table offset.
+  ImageInfo& image_info = GetImageInfo(oat_index);
+  const size_t size = ImTable::SizeInBytes(target_ptr_size_);
+  native_object_relocations_.emplace(
+      imt,
+      NativeObjectRelocation {
+          oat_index,
+          image_info.bin_slot_sizes_[kBinImTable],
+          kNativeObjectRelocationTypeIMTable});
+  image_info.bin_slot_sizes_[kBinImTable] += size;
+}
+
 void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
   // No offset, or already assigned.
   if (table == nullptr || NativeRelocationAssigned(table)) {
@@ -1391,6 +1414,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
         bin_offset = RoundUp(bin_offset, method_alignment);
         break;
       }
+      case kBinImTable:
       case kBinIMTConflictTable: {
         bin_offset = RoundUp(bin_offset, target_ptr_size_);
         break;
       }
@@ -1461,6 +1485,10 @@ size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) c
                                       bin_slot_offsets_[kBinArtMethodClean],
                                       bin_slot_sizes_[kBinArtMethodClean] +
                                       bin_slot_sizes_[kBinArtMethodDirty]);
+  // IMT section.
+  ImageSection* imt_section = &out_sections[ImageHeader::kSectionImTables];
+  *imt_section = ImageSection(bin_slot_offsets_[kBinImTable], bin_slot_sizes_[kBinImTable]);
+
   // Conflict tables section.
   ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
   *imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable],
@@ -1585,6 +1613,13 @@ class FixupRootVisitor : public RootVisitor {
   ImageWriter* const image_writer_;
 };
 
+void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) {
+  for (size_t i = 0; i < ImTable::kSize; ++i) {
+    ArtMethod* method = orig->Get(i, target_ptr_size_);
+    copy->Set(i, NativeLocationInImage(method), target_ptr_size_);
+  }
+}
+
 void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
   const size_t count = orig->NumEntries(target_ptr_size_);
   for (size_t i = 0; i < count; ++i) {
@@ -1642,6 +1677,12 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
       case kNativeObjectRelocationTypeDexCacheArray:
         // Nothing to copy here, everything is done in FixupDexCache().
        break;
+      case kNativeObjectRelocationTypeIMTable: {
+        ImTable* orig_imt = reinterpret_cast<ImTable*>(pair.first);
+        ImTable* dest_imt = reinterpret_cast<ImTable*>(dest);
+        CopyAndFixupImTable(orig_imt, dest_imt);
+        break;
+      }
       case kNativeObjectRelocationTypeIMTConflictTable: {
         auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
         CopyAndFixupImtConflictTable(
@@ -1850,13 +1891,25 @@ uintptr_t ImageWriter::NativeOffsetInImage(void* obj) {
 }
 
 template <typename T>
+std::string PrettyPrint(T* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+  std::ostringstream oss;
+  oss << ptr;
+  return oss.str();
+}
+
+template <>
+std::string PrettyPrint(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+  return PrettyMethod(method);
+}
+
+template <typename T>
 T* ImageWriter::NativeLocationInImage(T* obj) {
   if (obj == nullptr || IsInBootImage(obj)) {
     return obj;
   } else {
     auto it = native_object_relocations_.find(obj);
-    CHECK(it != native_object_relocations_.end()) << obj << " spaces "
-        << Runtime::Current()->GetHeap()->DumpSpaces();
+    CHECK(it != native_object_relocations_.end()) << obj << " " << PrettyPrint(obj)
+        << " spaces " << Runtime::Current()->GetHeap()->DumpSpaces();
     const NativeObjectRelocation& relocation = it->second;
     ImageInfo& image_info = GetImageInfo(relocation.oat_index);
     return reinterpret_cast<T*>(image_info.image_begin_ + relocation.offset);
@@ -2210,6 +2263,8 @@ ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocat
       return kBinDexCacheArray;
     case kNativeObjectRelocationTypeRuntimeMethod:
       return kBinRuntimeMethod;
+    case kNativeObjectRelocationTypeIMTable:
+      return kBinImTable;
     case kNativeObjectRelocationTypeIMTConflictTable:
       return kBinIMTConflictTable;
   }
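TryAssignImTableOffset follows the image writer's usual bump allocation: an object's offset within its bin is the bin's current size, and the bin then grows by the object's size, exactly like the conflict-table path next to it. The bookkeeping reduced to a standalone sketch, with hypothetical names:

    #include <cstddef>
    #include <unordered_map>

    enum Bin { kBinImTable, kBinCount };

    struct Relocation {
      Bin bin;
      size_t offset;  // offset within the bin at assignment time
    };

    class OffsetAssigner {
     public:
      // Returns false if ptr already has an offset (the real code also skips
      // null pointers and objects already in the boot image).
      bool TryAssign(void* ptr, size_t size, Bin bin) {
        if (ptr == nullptr || relocations_.count(ptr) != 0) {
          return false;
        }
        relocations_[ptr] = Relocation{bin, bin_sizes_[bin]};
        bin_sizes_[bin] += size;  // the next object lands after this one
        return true;
      }

      size_t BinSize(Bin bin) const { return bin_sizes_[bin]; }

     private:
      std::unordered_map<void*, Relocation> relocations_;
      size_t bin_sizes_[kBinCount] = {};
    };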
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 51976c511f..1efdc22c0a 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -169,6 +169,8 @@ class ImageWriter FINAL {
     // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
     // initialized.
     kBinArtMethodDirty,
+    // IMT (clean)
+    kBinImTable,
     // Conflict tables (clean).
     kBinIMTConflictTable,
     // Runtime methods (always clean, do not have a length prefix array).
@@ -191,6 +193,7 @@ class ImageWriter FINAL {
     kNativeObjectRelocationTypeArtMethodDirty,
     kNativeObjectRelocationTypeArtMethodArrayDirty,
     kNativeObjectRelocationTypeRuntimeMethod,
+    kNativeObjectRelocationTypeIMTable,
     kNativeObjectRelocationTypeIMTConflictTable,
     kNativeObjectRelocationTypeDexCacheArray,
   };
@@ -401,6 +404,7 @@ class ImageWriter FINAL {
   void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
       SHARED_REQUIRES(Locks::mutator_lock_);
+  void CopyAndFixupImTable(ImTable* orig, ImTable* copy) SHARED_REQUIRES(Locks::mutator_lock_);
   void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
       SHARED_REQUIRES(Locks::mutator_lock_);
   void FixupClass(mirror::Class* orig, mirror::Class* copy)
@@ -433,6 +437,8 @@ class ImageWriter FINAL {
                                     size_t oat_index)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  void TryAssignImTableOffset(ImTable* imt, size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
   // relocation.
   void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
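The position of kBinImTable in the Bin enum matters: CalculateNewObjectOffsets assigns bin offsets in enum order with per-bin alignment, and CreateImageSections turns each bin's offset and size directly into an image section, so a new bin shifts everything after it. A sketch of that layout computation, with made-up sizes:

    #include <cstddef>
    #include <cstdio>

    // align must be a power of two, as pointer sizes are.
    constexpr size_t RoundUp(size_t x, size_t align) {
      return (x + align - 1) & ~(align - 1);
    }

    int main() {
      constexpr size_t kPointerSize = 8;
      // Hypothetical bin sizes in enum order: methods, IMTs, conflict tables.
      size_t sizes[] = {1003, 512, 256};
      size_t offsets[3] = {};
      size_t bin_offset = 0;
      for (int i = 0; i < 3; ++i) {
        bin_offset = RoundUp(bin_offset, kPointerSize);  // per-bin alignment
        offsets[i] = bin_offset;
        bin_offset += sizes[i];
      }
      // Each section is just its bin's (offset, size); inserting a bin moves
      // every later section by the new bin's aligned size.
      for (int i = 0; i < 3; ++i) {
        std::printf("section %d: offset=%zu size=%zu\n", i, offsets[i], sizes[i]);
      }
    }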
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 6e74d082e0..4fc3b5434b 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1873,8 +1873,6 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
   LocationSummary* locations = invoke->GetLocations();
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   Register hidden_reg = locations->GetTemp(1).AsRegister<Register>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
 
@@ -1900,10 +1898,14 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+  __ LoadFromOffset(kLoadWord, temp, temp,
+        mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kArmPointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
+  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   uint32_t entry_point =
       ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value();
-  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   // LR = temp->GetEntryPoint();
   __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
   // LR();
@@ -6777,8 +6779,11 @@ void InstructionCodeGeneratorARM::VisitClassTableGet(HClassTableGet* instruction
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kArmPointerSize).SizeValue();
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
+    __ LoadFromOffset(kLoadWord, locations->Out().AsRegister<Register>(),
+                      locations->InAt(0).AsRegister<Register>(),
+                      mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kArmPointerSize));
   }
   __ LoadFromOffset(kLoadWord,
                     locations->Out().AsRegister<Register>(),
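Every backend now emits the same logical sequence for invoke-interface, with one more load than before. What the ARM code above does, written as a C-like model with illustrative types rather than ART's runtime structures:

    #include <cstdint>

    struct ArtMethodModel { const void* entry_point; };
    struct ImTableModel { ArtMethodModel* entries[64]; };
    struct ClassModel { ImTableModel* imt; };
    struct ObjectModel { ClassModel* klass; };

    // Steps 2 and 3 are the loads in the hunk above; step 2 is the new one,
    // since the table previously lived inside the class object itself.
    const void* InterfaceDispatch(ObjectModel* receiver, uint32_t imt_index) {
      ClassModel* klass = receiver->klass;               // 1: temp = receiver->klass_
      ImTableModel* imt = klass->imt;                    // 2: temp = temp->imt_
      ArtMethodModel* m = imt->entries[imt_index % 64];  // 3: temp = imt[index]
      return m->entry_point;                             // 4: branch through the entry point
    }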
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a37ea1e9a3..b63a3d4c1a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3490,8 +3490,6 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   LocationSummary* locations = invoke->GetLocations();
   Register temp = XRegisterFrom(locations->GetTemp(0));
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   Offset class_offset = mirror::Object::ClassOffset();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
@@ -3521,6 +3519,10 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
+  __ Ldr(temp,
+      MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kArm64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ Ldr(temp, MemOperand(temp, method_offset));
   // lr = temp->GetEntryPoint();
@@ -5148,8 +5150,10 @@ void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instructi
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kArm64PointerSize).SizeValue();
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
+    __ Ldr(XRegisterFrom(locations->Out()), MemOperand(XRegisterFrom(locations->InAt(0)),
+        mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
+    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kArm64PointerSize));
   }
   __ Ldr(XRegisterFrom(locations->Out()),
          MemOperand(XRegisterFrom(locations->InAt(0)), method_offset));
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index ed0767ed52..c8e927d026 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3701,8 +3701,6 @@ void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
 void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
   Location receiver = invoke->GetLocations()->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
@@ -3719,6 +3717,10 @@ void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke
     __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
+  __ LoadFromOffset(kLoadWord, temp, temp,
+      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kMipsPointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   // T9 = temp->GetEntryPoint();
@@ -5162,8 +5164,12 @@ void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instructio
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kMipsPointerSize).SizeValue();
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
+    __ LoadFromOffset(kLoadWord,
+                      locations->Out().AsRegister<Register>(),
+                      locations->InAt(0).AsRegister<Register>(),
+                      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
+    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kMipsPointerSize));
   }
   __ LoadFromOffset(kLoadWord,
                     locations->Out().AsRegister<Register>(),
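The method offset fed into those loads is still a compile-time constant. Judging from its call sites, ImTable::OffsetOfElement is presumably a plain linear offset, on the order of:

    #include <cstddef>

    // Presumed shape of ImTable::OffsetOfElement: entries form a dense array
    // of native pointers, so element i sits i * pointer_size from the base.
    constexpr size_t OffsetOfElement(size_t index, size_t pointer_size) {
      return index * pointer_size;
    }

    static_assert(OffsetOfElement(0, 8) == 0, "first entry at the table base");
    static_assert(OffsetOfElement(3, 4) == 12, "4-byte stride on 32-bit targets");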
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 8c73e350f6..8d5dc84df9 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2935,8 +2935,6 @@ void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
 void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
   Location receiver = invoke->GetLocations()->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
@@ -2953,6 +2951,10 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invo
     __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
+  __ LoadFromOffset(kLoadDoubleword, temp, temp,
+      mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kMips64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
   // T9 = temp->GetEntryPoint();
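Note the load-width asymmetry in the MIPS64 hunk above: the receiver's class is read with kLoadUnsignedWord while the IMT pointer and method entry use kLoadDoubleword. ART keeps heap references at 32 bits even on 64-bit targets, whereas imt_ is a full-width native pointer. Schematically:

    #include <cstdint>

    // On a 64-bit target, a field holding a heap reference is a 32-bit value
    // (zero-extended on load, hence kLoadUnsignedWord), while a field holding
    // a native pointer such as imt_ is 64 bits wide (kLoadDoubleword).
    struct Object64 {
      uint32_t klass_ref;  // compressed heap reference
    };
    struct Class64 {
      uint64_t imt_ptr;    // native pointer
    };

    static_assert(sizeof(Object64::klass_ref) == 4, "references stay narrow");
    static_assert(sizeof(Class64::imt_ptr) == 8, "native pointers are wide");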
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8c643a05c8..9d0092b674 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2012,8 +2012,6 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
   LocationSummary* locations = invoke->GetLocations();
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   XmmRegister hidden_reg = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
 
@@ -2040,7 +2038,12 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+  // temp = temp->GetAddressOfIMT()
+  __ movl(temp,
+      Address(temp, mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
   // temp = temp->GetImtEntryAt(method_offset);
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kX86PointerSize));
   __ movl(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
   __ call(Address(temp,
@@ -4060,8 +4063,12 @@ void InstructionCodeGeneratorX86::VisitClassTableGet(HClassTableGet* instruction
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kX86PointerSize).SizeValue();
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
+    __ movl(locations->InAt(0).AsRegister<Register>(),
+            Address(locations->InAt(0).AsRegister<Register>(),
+                    mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
+    // temp = temp->GetImtEntryAt(method_offset);
+    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kX86PointerSize));
   }
   __ movl(locations->Out().AsRegister<Register>(),
           Address(locations->InAt(0).AsRegister<Register>(), method_offset));
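One x86-specific wrinkle in VisitClassTableGet above: the IMT pointer is loaded back into the input register, so the final movl indexes off the table rather than the class, where the other backends stage the pointer through the output register. Either way the logic is the vtable/IMT split modeled here with illustrative types and arbitrary table sizes:

    #include <cstddef>

    struct Method { const void* code; };

    // The vtable is still embedded in the class object; only the IMT moved
    // behind a pointer in this change.
    struct Class {
      Method* vtable[8];  // embedded, read with a single load
      Method** imt;       // out of line, needs the extra pointer chase
    };

    Method* ClassTableGet(Class* cls, size_t index, bool is_vtable) {
      if (is_vtable) {
        return cls->vtable[index];  // caller guarantees index is in range
      }
      Method** imt = cls->imt;      // the load added by this change
      return imt[index % 64];
    }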
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 72de3e6e35..a8da5f2ea5 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2228,8 +2228,6 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
   LocationSummary* locations = invoke->GetLocations();
   CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
   CpuRegister hidden_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-      invoke->GetImtIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   size_t class_offset = mirror::Object::ClassOffset().SizeValue();
 
@@ -2255,6 +2253,12 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+  // temp = temp->GetAddressOfIMT()
+  __ movq(temp,
+      Address(temp, mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
+  // temp = temp->GetImtEntryAt(method_offset);
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex() % ImTable::kSize, kX86_64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ movq(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
@@ -3978,8 +3982,11 @@ void InstructionCodeGeneratorX86_64::VisitClassTableGet(HClassTableGet* instruct
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kX86_64PointerSize).SizeValue();
   } else {
-    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
-        instruction->GetIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
+    __ movq(locations->Out().AsRegister<CpuRegister>(),
+            Address(locations->InAt(0).AsRegister<CpuRegister>(),
+                    mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
+    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex() % ImTable::kSize, kX86_64PointerSize));
   }
   __ movq(locations->Out().AsRegister<CpuRegister>(),
           Address(locations->InAt(0).AsRegister<CpuRegister>(), method_offset));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 59de895182..27b6896150 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -656,8 +656,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
     }
     ArtMethod* new_method = nullptr;
     if (invoke_instruction->IsInvokeInterface()) {
-      new_method = ic.GetTypeAt(i)->GetEmbeddedImTableEntry(
-          method_index % mirror::Class::kImtSize, pointer_size);
+      new_method = ic.GetTypeAt(i)->GetImt(pointer_size)->Get(
+          method_index % ImTable::kSize, pointer_size);
       if (new_method->IsRuntimeMethod()) {
         // Bail out as soon as we see a conflict trampoline in one of the target's
         // interface table.
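The inliner sees the same substitution at its one call site: the candidate method now comes from the class's ImTable rather than the embedded table. Its surrounding bail-out logic, sketched standalone with simplified types, rejects a slot holding the conflict trampoline because the concrete target is then unknown:

    #include <cstddef>

    struct Method {
      bool is_runtime_method;  // true for the conflict trampoline
    };

    struct Klass {
      Method* imt[64];
      Method* GetImtEntry(size_t index) { return imt[index % 64]; }
    };

    // Mirrors the shape of TryInlinePolymorphicCallToSameTarget's interface
    // case: every class in the inline cache must resolve the IMT slot to the
    // same concrete method for the devirtualized inline to be sound.
    Method* ResolveSameTarget(Klass* const* cache, size_t count, size_t imt_slot) {
      Method* target = nullptr;
      for (size_t i = 0; i < count; ++i) {
        Method* m = cache[i]->GetImtEntry(imt_slot);
        if (m == nullptr || m->is_runtime_method) {
          return nullptr;  // conflict trampoline: actual target unknown, bail
        }
        if (target == nullptr) {
          target = m;
        } else if (target != m) {
          return nullptr;  // the classes disagree on the target
        }
      }
      return target;
    }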