Diffstat (limited to 'compiler')
| -rw-r--r-- | compiler/driver/compiler_driver.cc | 22 |
| -rw-r--r-- | compiler/image_writer.cc | 65 |
| -rw-r--r-- | compiler/image_writer.h | 6 |
| -rw-r--r-- | compiler/optimizing/code_generator_arm.cc | 15 |
| -rw-r--r-- | compiler/optimizing/code_generator_arm64.cc | 12 |
| -rw-r--r-- | compiler/optimizing/code_generator_mips.cc | 14 |
| -rw-r--r-- | compiler/optimizing/code_generator_mips64.cc | 6 |
| -rw-r--r-- | compiler/optimizing/code_generator_x86.cc | 15 |
| -rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc | 15 |
| -rw-r--r-- | compiler/optimizing/inliner.cc | 4 |
| -rw-r--r-- | compiler/optimizing/instruction_builder.cc | 3 |
| -rw-r--r-- | compiler/optimizing/nodes.h | 4 |
| -rw-r--r-- | compiler/optimizing/optimizing_cfi_test_expected.inc | 22 |
| -rw-r--r-- | compiler/optimizing/prepare_for_register_allocation.cc | 9 |
| -rw-r--r-- | compiler/utils/arm64/assembler_arm64.cc | 16 |
| -rw-r--r-- | compiler/utils/assembler_test.h | 11 |
| -rw-r--r-- | compiler/utils/mips/assembler_mips_test.cc | 20 |
17 files changed, 100 insertions, 159 deletions
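The bulk of this diff moves interface dispatch back from a separate ImTable reached through Class::ImtPtrOffset() to IMT slots embedded directly in mirror::Class: each backend now computes method_offset = EmbeddedImTableEntryOffset(GetImtIndex() % kImtSize, pointer_size) up front and performs a single load from the class. A rough standalone sketch of that embedded-slot lookup follows (hypothetical stand-in types, not ART code; the slot count of 64 is an assumption about the default kImtSize, and the modulo reduction mirrors what the code generators below do):

#include <array>
#include <cstddef>
#include <cstdint>

// Hypothetical stand-ins for ART's ArtMethod and mirror::Class.
struct ArtMethodLike {
  uintptr_t entry_point;  // quick-compiled code entry point
};

struct ClassLike {
  static constexpr size_t kImtSize = 64;  // fixed number of IMT slots (assumed default)

  // With the embedded layout there is no separate table object to chase:
  // the slots live inside the class, so the dispatch offset is simply
  // base + (index % kImtSize) * sizeof(pointer).
  std::array<ArtMethodLike*, kImtSize> embedded_imt;

  ArtMethodLike* GetEmbeddedImTableEntry(uint32_t imt_index) const {
    // Interface methods hash into a fixed slot count; colliding methods share
    // a slot and are resolved elsewhere by a conflict trampoline.
    return embedded_imt[imt_index % kImtSize];
  }
};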
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 131be37a33..e52dda35bb 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -47,7 +47,6 @@
 #include "driver/compiler_options.h"
 #include "jni_internal.h"
 #include "object_lock.h"
-#include "profiler.h"
 #include "runtime.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/accounting/heap_bitmap.h"
@@ -2522,28 +2521,11 @@ class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor
                                                               true);
     }
     // Create the conflict tables.
-    FillIMTAndConflictTables(klass);
-    return true;
-  }
-
- private:
-  void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
-    if (!klass->ShouldHaveImt()) {
-      return;
-    }
-    if (visited_classes_.find(klass) != visited_classes_.end()) {
-      return;
-    }
-    if (klass->HasSuperClass()) {
-      FillIMTAndConflictTables(klass->GetSuperClass());
-    }
-    if (!klass->IsTemp()) {
+    if (!klass->IsTemp() && klass->ShouldHaveEmbeddedImtAndVTable()) {
       Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass);
     }
-    visited_classes_.insert(klass);
+    return true;
   }
-
-  std::set<mirror::Class*> visited_classes_;
 };
 
 void CompilerDriver::InitializeClasses(jobject class_loader,
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 063eb11718..da10568475 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1232,10 +1232,9 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
       }
       // Assign offsets for all runtime methods in the IMT since these may hold conflict tables
       // live.
-      if (as_klass->ShouldHaveImt()) {
-        ImTable* imt = as_klass->GetImt(target_ptr_size_);
-        for (size_t i = 0; i < ImTable::kSize; ++i) {
-          ArtMethod* imt_method = imt->Get(i, target_ptr_size_);
+      if (as_klass->ShouldHaveEmbeddedImtAndVTable()) {
+        for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+          ArtMethod* imt_method = as_klass->GetEmbeddedImTableEntry(i, target_ptr_size_);
           DCHECK(imt_method != nullptr);
           if (imt_method->IsRuntimeMethod() &&
               !IsInBootImage(imt_method) &&
@@ -1244,11 +1243,6 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
           }
         }
       }
-
-      if (as_klass->ShouldHaveImt()) {
-        ImTable* imt = as_klass->GetImt(target_ptr_size_);
-        TryAssignImTableOffset(imt, oat_index);
-      }
     } else if (h_obj->IsObjectArray()) {
       // Walk elements of an object array.
       int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
@@ -1275,23 +1269,6 @@ bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
   return native_object_relocations_.find(ptr) != native_object_relocations_.end();
 }
 
-void ImageWriter::TryAssignImTableOffset(ImTable* imt, size_t oat_index) {
-  // No offset, or already assigned.
-  if (imt == nullptr || IsInBootImage(imt) || NativeRelocationAssigned(imt)) {
-    return;
-  }
-  // If the method is a conflict method we also want to assign the conflict table offset.
-  ImageInfo& image_info = GetImageInfo(oat_index);
-  const size_t size = ImTable::SizeInBytes(target_ptr_size_);
-  native_object_relocations_.emplace(
-      imt,
-      NativeObjectRelocation {
-          oat_index,
-          image_info.bin_slot_sizes_[kBinImTable],
-          kNativeObjectRelocationTypeIMTable});
-  image_info.bin_slot_sizes_[kBinImTable] += size;
-}
-
 void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
   // No offset, or already assigned.
   if (table == nullptr || NativeRelocationAssigned(table)) {
@@ -1414,7 +1391,6 @@ void ImageWriter::CalculateNewObjectOffsets() {
         bin_offset = RoundUp(bin_offset, method_alignment);
         break;
       }
-      case kBinImTable:
       case kBinIMTConflictTable: {
         bin_offset = RoundUp(bin_offset, target_ptr_size_);
         break;
       }
@@ -1485,10 +1461,6 @@ size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) c
                                   bin_slot_offsets_[kBinArtMethodClean],
                                   bin_slot_sizes_[kBinArtMethodClean] +
                                   bin_slot_sizes_[kBinArtMethodDirty]);
-  // IMT section.
-  ImageSection* imt_section = &out_sections[ImageHeader::kSectionImTables];
-  *imt_section = ImageSection(bin_slot_offsets_[kBinImTable], bin_slot_sizes_[kBinImTable]);
-
   // Conflict tables section.
   ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
   *imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable],
@@ -1613,13 +1585,6 @@ class FixupRootVisitor : public RootVisitor {
   ImageWriter* const image_writer_;
 };
 
-void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) {
-  for (size_t i = 0; i < ImTable::kSize; ++i) {
-    ArtMethod* method = orig->Get(i, target_ptr_size_);
-    copy->Set(i, NativeLocationInImage(method), target_ptr_size_);
-  }
-}
-
 void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
   const size_t count = orig->NumEntries(target_ptr_size_);
   for (size_t i = 0; i < count; ++i) {
@@ -1677,12 +1642,6 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
       case kNativeObjectRelocationTypeDexCacheArray:
         // Nothing to copy here, everything is done in FixupDexCache().
         break;
-      case kNativeObjectRelocationTypeIMTable: {
-        ImTable* orig_imt = reinterpret_cast<ImTable*>(pair.first);
-        ImTable* dest_imt = reinterpret_cast<ImTable*>(dest);
-        CopyAndFixupImTable(orig_imt, dest_imt);
-        break;
-      }
       case kNativeObjectRelocationTypeIMTConflictTable: {
         auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
         CopyAndFixupImtConflictTable(
@@ -1891,25 +1850,13 @@ uintptr_t ImageWriter::NativeOffsetInImage(void* obj) {
 }
 
 template <typename T>
-std::string PrettyPrint(T* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
-  std::ostringstream oss;
-  oss << ptr;
-  return oss.str();
-}
-
-template <>
-std::string PrettyPrint(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
-  return PrettyMethod(method);
-}
-
-template <typename T>
 T* ImageWriter::NativeLocationInImage(T* obj) {
   if (obj == nullptr || IsInBootImage(obj)) {
     return obj;
   } else {
     auto it = native_object_relocations_.find(obj);
-    CHECK(it != native_object_relocations_.end()) << obj << " " << PrettyPrint(obj)
-        << " spaces " << Runtime::Current()->GetHeap()->DumpSpaces();
+    CHECK(it != native_object_relocations_.end()) << obj << " spaces "
+        << Runtime::Current()->GetHeap()->DumpSpaces();
     const NativeObjectRelocation& relocation = it->second;
     ImageInfo& image_info = GetImageInfo(relocation.oat_index);
     return reinterpret_cast<T*>(image_info.image_begin_ + relocation.offset);
@@ -2263,8 +2210,6 @@ ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocat
       return kBinDexCacheArray;
     case kNativeObjectRelocationTypeRuntimeMethod:
       return kBinRuntimeMethod;
-    case kNativeObjectRelocationTypeIMTable:
-      return kBinImTable;
    case kNativeObjectRelocationTypeIMTConflictTable:
      return kBinIMTConflictTable;
  }
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 1efdc22c0a..51976c511f 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -169,8 +169,6 @@ class ImageWriter FINAL {
     // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
     // initialized.
     kBinArtMethodDirty,
-    // IMT (clean)
-    kBinImTable,
     // Conflict tables (clean).
     kBinIMTConflictTable,
     // Runtime methods (always clean, do not have a length prefix array).
@@ -193,7 +191,6 @@ class ImageWriter FINAL {
     kNativeObjectRelocationTypeArtMethodDirty,
     kNativeObjectRelocationTypeArtMethodArrayDirty,
     kNativeObjectRelocationTypeRuntimeMethod,
-    kNativeObjectRelocationTypeIMTable,
     kNativeObjectRelocationTypeIMTConflictTable,
     kNativeObjectRelocationTypeDexCacheArray,
   };
@@ -404,7 +401,6 @@ class ImageWriter FINAL {
   void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
       SHARED_REQUIRES(Locks::mutator_lock_);
-  void CopyAndFixupImTable(ImTable* orig, ImTable* copy) SHARED_REQUIRES(Locks::mutator_lock_);
   void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
       SHARED_REQUIRES(Locks::mutator_lock_);
   void FixupClass(mirror::Class* orig, mirror::Class* copy)
@@ -437,8 +433,6 @@ class ImageWriter FINAL {
                           size_t oat_index)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void TryAssignImTableOffset(ImTable* imt, size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
-
   // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
   // relocation.
   void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 83b4705302..5316d59bff 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1889,6 +1889,8 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
   LocationSummary* locations = invoke->GetLocations();
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   Register hidden_reg = locations->GetTemp(1).AsRegister<Register>();
+  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
 
@@ -1914,14 +1916,10 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
-  __ LoadFromOffset(kLoadWord, temp, temp,
-      mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
-  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex(), kArmPointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
-  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   uint32_t entry_point =
      ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value();
+  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   // LR = temp->GetEntryPoint();
   __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
   // LR();
@@ -6961,11 +6959,8 @@ void InstructionCodeGeneratorARM::VisitClassTableGet(HClassTableGet* instruction
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kArmPointerSize).SizeValue();
   } else {
-    __ LoadFromOffset(kLoadWord, locations->Out().AsRegister<Register>(),
-                      locations->InAt(0).AsRegister<Register>(),
-                      mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
-    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex(), kArmPointerSize));
+    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+        instruction->GetIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
   }
   __ LoadFromOffset(kLoadWord, locations->Out().AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 07d5e50c6b..fc2c2c34aa 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3506,6 +3506,8 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   LocationSummary* locations = invoke->GetLocations();
   Register temp = XRegisterFrom(locations->GetTemp(0));
+  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+      invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   Offset class_offset = mirror::Object::ClassOffset();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
@@ -3535,10 +3537,6 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
-  __ Ldr(temp,
-      MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
-  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex(), kArm64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ Ldr(temp, MemOperand(temp, method_offset));
   // lr = temp->GetEntryPoint();
@@ -5355,10 +5353,8 @@ void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instructi
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kArm64PointerSize).SizeValue();
   } else {
-    __ Ldr(XRegisterFrom(locations->Out()), MemOperand(XRegisterFrom(locations->InAt(0)),
-        mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
-    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex(), kArm64PointerSize));
+    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+        instruction->GetIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
   }
   __ Ldr(XRegisterFrom(locations->Out()), MemOperand(XRegisterFrom(locations->InAt(0)),
                                                      method_offset));
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index b9b3463f4d..4d44c18dcf 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3698,6 +3698,8 @@ void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
 void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+      invoke->GetImtIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
   Location receiver = invoke->GetLocations()->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
@@ -3714,10 +3716,6 @@ void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke
     __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
-  __ LoadFromOffset(kLoadWord, temp, temp,
-      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
-  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex(), kMipsPointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   // T9 = temp->GetEntryPoint();
@@ -5167,12 +5165,8 @@ void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instructio
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kMipsPointerSize).SizeValue();
   } else {
-    __ LoadFromOffset(kLoadWord,
-                      locations->Out().AsRegister<Register>(),
-                      locations->InAt(0).AsRegister<Register>(),
-                      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
-    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex(), kMipsPointerSize));
+    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+        instruction->GetIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
   }
   __ LoadFromOffset(kLoadWord,
                     locations->Out().AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 2330c4803e..2e78884daf 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2932,6 +2932,8 @@ void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
 void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
+  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+      invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
   Location receiver = invoke->GetLocations()->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
@@ -2948,10 +2950,6 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invo
     __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
-  __ LoadFromOffset(kLoadDoubleword, temp, temp,
-      mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
-  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex(), kMips64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
   // T9 = temp->GetEntryPoint();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index af10bad6cc..1261619536 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2027,6 +2027,8 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
   LocationSummary* locations = invoke->GetLocations();
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   XmmRegister hidden_reg = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+      invoke->GetImtIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
 
@@ -2053,12 +2055,7 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
-  // temp = temp->GetAddressOfIMT()
-  __ movl(temp,
-      Address(temp, mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
   // temp = temp->GetImtEntryAt(method_offset);
-  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex(), kX86PointerSize));
   __ movl(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
   __ call(Address(temp,
@@ -4078,12 +4075,8 @@ void InstructionCodeGeneratorX86::VisitClassTableGet(HClassTableGet* instruction
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kX86PointerSize).SizeValue();
   } else {
-    __ movl(locations->InAt(0).AsRegister<Register>(),
-            Address(locations->InAt(0).AsRegister<Register>(),
-                    mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
-    // temp = temp->GetImtEntryAt(method_offset);
-    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex(), kX86PointerSize));
+    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+        instruction->GetIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
   }
   __ movl(locations->Out().AsRegister<Register>(),
           Address(locations->InAt(0).AsRegister<Register>(), method_offset));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 2b21454101..5e30203b38 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2257,6 +2257,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
   LocationSummary* locations = invoke->GetLocations();
   CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
   CpuRegister hidden_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
+  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+      invoke->GetImtIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
   Location receiver = locations->InAt(0);
   size_t class_offset = mirror::Object::ClassOffset().SizeValue();
 
@@ -2282,12 +2284,6 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
-  // temp = temp->GetAddressOfIMT()
-  __ movq(temp,
-      Address(temp, mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
-  // temp = temp->GetImtEntryAt(method_offset);
-  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex(), kX86_64PointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
   __ movq(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
@@ -4011,11 +4007,8 @@ void InstructionCodeGeneratorX86_64::VisitClassTableGet(HClassTableGet* instruct
     method_offset = mirror::Class::EmbeddedVTableEntryOffset(
         instruction->GetIndex(), kX86_64PointerSize).SizeValue();
   } else {
-    __ movq(locations->Out().AsRegister<CpuRegister>(),
-            Address(locations->InAt(0).AsRegister<CpuRegister>(),
-                    mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
-    method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex(), kX86_64PointerSize));
+    method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+        instruction->GetIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
   }
   __ movq(locations->Out().AsRegister<CpuRegister>(),
           Address(locations->InAt(0).AsRegister<CpuRegister>(), method_offset));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index be4ea200a9..c67b2d5fe9 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -656,8 +656,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
     }
     ArtMethod* new_method = nullptr;
     if (invoke_instruction->IsInvokeInterface()) {
-      new_method = ic.GetTypeAt(i)->GetImt(pointer_size)->Get(
-          method_index, pointer_size);
+      new_method = ic.GetTypeAt(i)->GetEmbeddedImTableEntry(
+          method_index % mirror::Class::kImtSize, pointer_size);
       if (new_method->IsRuntimeMethod()) {
         // Bail out as soon as we see a conflict trampoline in one of the target's
         // interface table.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index afac5f9cf1..b4125299ea 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -16,7 +16,6 @@
 
 #include "instruction_builder.h"
 
-#include "art_method-inl.h"
 #include "bytecode_utils.h"
 #include "class_linker.h"
 #include "driver/compiler_options.h"
@@ -891,7 +890,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
                                             return_type,
                                             dex_pc,
                                             method_idx,
-                                            resolved_method->GetImtIndex());
+                                            resolved_method->GetDexMethodIndex());
   }
 
   return HandleInvoke(invoke,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 29df7c8ab8..6b2c33e668 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5029,7 +5029,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
   }
 
   bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
-    return (obj == InputAt(0)) && GetFieldOffset().Uint32Value() < kPageSize;
+    return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
   }
 
   size_t ComputeHashCode() const OVERRIDE {
@@ -5076,7 +5076,7 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
   }
 
   bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
-    return (obj == InputAt(0)) && GetFieldOffset().Uint32Value() < kPageSize;
+    return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
   }
 
   const FieldInfo& GetFieldInfo() const { return field_info_; }
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index 764160adce..05eb06333e 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -32,21 +32,21 @@ static constexpr uint8_t expected_cfi_kThumb2[] = {
 // 0x00000012: .cfi_def_cfa_offset: 64
 
 static constexpr uint8_t expected_asm_kArm64[] = {
-    0xE0, 0x0F, 0x1C, 0xF8, 0xF4, 0xD7, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9,
-    0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF4, 0xD7, 0x42, 0xA9,
-    0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
+    0xE0, 0x0F, 0x1C, 0xF8, 0xF4, 0x17, 0x00, 0xF9, 0xF5, 0x7B, 0x03, 0xA9,
+    0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF4, 0x17, 0x40, 0xF9,
+    0xF5, 0x7B, 0x43, 0xA9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
 };
 static constexpr uint8_t expected_cfi_kArm64[] = {
-    0x44, 0x0E, 0x40, 0x44, 0x94, 0x06, 0x95, 0x04, 0x44, 0x9E, 0x02, 0x44,
+    0x44, 0x0E, 0x40, 0x44, 0x94, 0x06, 0x44, 0x95, 0x04, 0x9E, 0x02, 0x44,
     0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x0A, 0x44, 0x06, 0x48, 0x06, 0x49,
-    0x44, 0xD4, 0xD5, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
+    0x44, 0xD4, 0x44, 0xD5, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
 };
 // 0x00000000: str x0, [sp, #-64]!
 // 0x00000004: .cfi_def_cfa_offset: 64
-// 0x00000004: stp x20, x21, [sp, #40]
+// 0x00000004: str x20, [sp, #40]
 // 0x00000008: .cfi_offset: r20 at cfa-24
-// 0x00000008: .cfi_offset: r21 at cfa-16
-// 0x00000008: str lr, [sp, #56]
+// 0x00000008: stp x21, lr, [sp, #48]
+// 0x0000000c: .cfi_offset: r21 at cfa-16
 // 0x0000000c: .cfi_offset: r30 at cfa-8
 // 0x0000000c: stp d8, d9, [sp, #24]
 // 0x00000010: .cfi_offset_extended: r72 at cfa-40
@@ -55,10 +55,10 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
 // 0x00000010: ldp d8, d9, [sp, #24]
 // 0x00000014: .cfi_restore_extended: r72
 // 0x00000014: .cfi_restore_extended: r73
-// 0x00000014: ldp x20, x21, [sp, #40]
+// 0x00000014: ldr x20, [sp, #40]
 // 0x00000018: .cfi_restore: r20
-// 0x00000018: .cfi_restore: r21
-// 0x00000018: ldr lr, [sp, #56]
+// 0x00000018: ldp x21, lr, [sp, #48]
+// 0x0000001c: .cfi_restore: r21
 // 0x0000001c: .cfi_restore: r30
 // 0x0000001c: add sp, sp, #0x40 (64)
 // 0x00000020: .cfi_def_cfa_offset: 0
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 696b8c6859..8fb539661f 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -146,7 +146,11 @@ void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) {
     instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex()), 0);
     // The allocation entry point that deals with access checks does not work with inlined
     // methods, so we need to check whether this allocation comes from an inlined method.
-    if (has_only_one_use && !instruction->GetEnvironment()->IsFromInlinedInvoke()) {
+    // We also need to make the same check as for moving clinit check, whether the HLoadClass
+    // has the clinit check responsibility or not (HLoadClass can throw anyway).
+    if (has_only_one_use &&
+        !instruction->GetEnvironment()->IsFromInlinedInvoke() &&
+        CanMoveClinitCheck(load_class, instruction)) {
       // We can remove the load class from the graph. If it needed access checks, we delegate
       // the access check to the allocation.
       if (load_class->NeedsAccessCheck()) {
@@ -203,7 +207,8 @@ bool PrepareForRegisterAllocation::CanMoveClinitCheck(HInstruction* input,
                                                       HInstruction* user) const {
   // Determine if input and user come from the same dex instruction, so that we can move
   // the clinit check responsibility from one to the other, i.e. from HClinitCheck (user)
-  // to HLoadClass (input), or from HClinitCheck (input) to HInvokeStaticOrDirect (user).
+  // to HLoadClass (input), or from HClinitCheck (input) to HInvokeStaticOrDirect (user),
+  // or from HLoadClass (input) to HNewInstance (user).
 
   // Start with a quick dex pc check.
   if (user->GetDexPc() != input->GetDexPc()) {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 1842f00ff6..54ed62bef3 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -648,6 +648,15 @@ static inline dwarf::Reg DWARFReg(CPURegister reg) {
 void Arm64Assembler::SpillRegisters(vixl::CPURegList registers, int offset) {
   int size = registers.RegisterSizeInBytes();
   const Register sp = vixl_masm_->StackPointer();
+  // Since we are operating on register pairs, we would like to align on
+  // double the standard size; on the other hand, we don't want to insert
+  // an extra store, which will happen if the number of registers is even.
+  if (!IsAlignedParam(offset, 2 * size) && registers.Count() % 2 != 0) {
+    const CPURegister& dst0 = registers.PopLowestIndex();
+    ___ Str(dst0, MemOperand(sp, offset));
+    cfi_.RelOffset(DWARFReg(dst0), offset);
+    offset += size;
+  }
   while (registers.Count() >= 2) {
     const CPURegister& dst0 = registers.PopLowestIndex();
     const CPURegister& dst1 = registers.PopLowestIndex();
@@ -667,6 +676,13 @@ void Arm64Assembler::SpillRegisters(vixl::CPURegList registers, int offset) {
 void Arm64Assembler::UnspillRegisters(vixl::CPURegList registers, int offset) {
   int size = registers.RegisterSizeInBytes();
   const Register sp = vixl_masm_->StackPointer();
+  // Be consistent with the logic for spilling registers.
+  if (!IsAlignedParam(offset, 2 * size) && registers.Count() % 2 != 0) {
+    const CPURegister& dst0 = registers.PopLowestIndex();
+    ___ Ldr(dst0, MemOperand(sp, offset));
+    cfi_.Restore(DWARFReg(dst0));
+    offset += size;
+  }
   while (registers.Count() >= 2) {
     const CPURegister& dst0 = registers.PopLowestIndex();
     const CPURegister& dst1 = registers.PopLowestIndex();
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index afe0576906..92b4c8e041 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -344,6 +344,17 @@ class AssemblerTest : public testing::Test {
   }
 
   template <typename ImmType>
+  std::string RepeatFFIb(void (Ass::*f)(FPReg, FPReg, ImmType), int imm_bits, std::string fmt) {
+    return RepeatTemplatedRegistersImmBits<FPReg, FPReg, ImmType>(f,
+                                                                  imm_bits,
+                                                                  GetFPRegisters(),
+                                                                  GetFPRegisters(),
+                                                                  &AssemblerTest::GetFPRegName,
+                                                                  &AssemblerTest::GetFPRegName,
+                                                                  fmt);
+  }
+
+  template <typename ImmType>
   std::string RepeatIbFF(void (Ass::*f)(ImmType, FPReg, FPReg), int imm_bits, std::string fmt) {
     return RepeatTemplatedImmBitsRegisters<ImmType, FPReg, FPReg>(f,
                                                                   GetFPRegisters(),
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index c722d0c333..a1d6ad6a2f 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -647,6 +647,26 @@ TEST_F(AssemblerMIPSTest, Movt) {
   DriverStr(RepeatRRIb(&mips::MipsAssembler::Movt, 3, "movt ${reg1}, ${reg2}, $fcc{imm}"), "Movt");
 }
 
+TEST_F(AssemblerMIPSTest, MovfS) {
+  DriverStr(RepeatFFIb(&mips::MipsAssembler::MovfS, 3, "movf.s ${reg1}, ${reg2}, $fcc{imm}"),
+            "MovfS");
+}
+
+TEST_F(AssemblerMIPSTest, MovfD) {
+  DriverStr(RepeatFFIb(&mips::MipsAssembler::MovfD, 3, "movf.d ${reg1}, ${reg2}, $fcc{imm}"),
+            "MovfD");
+}
+
+TEST_F(AssemblerMIPSTest, MovtS) {
+  DriverStr(RepeatFFIb(&mips::MipsAssembler::MovtS, 3, "movt.s ${reg1}, ${reg2}, $fcc{imm}"),
+            "MovtS");
+}
+
+TEST_F(AssemblerMIPSTest, MovtD) {
+  DriverStr(RepeatFFIb(&mips::MipsAssembler::MovtD, 3, "movt.d ${reg1}, ${reg2}, $fcc{imm}"),
+            "MovtD");
+}
+
 TEST_F(AssemblerMIPSTest, CvtSW) {
   DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsw, "cvt.s.w ${reg1}, ${reg2}"), "CvtSW");
 }
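The Arm64Assembler::SpillRegisters/UnspillRegisters hunks keep stp/ldp pairs on offsets aligned to two register sizes: when the starting offset is not pair-aligned and the register count is odd, one single str/ldr is peeled off first, which is also why the expected CFI output above changes from "stp x20, x21" plus "str lr" to "str x20" followed by "stp x21, lr". A small sketch of that offset bookkeeping, with printf stand-ins instead of the real assembler API (EmitStr/EmitStp are hypothetical helpers):

#include <cstdio>
#include <vector>

// Hypothetical emit helpers standing in for the assembler's Str/Stp instructions.
static void EmitStr(int reg, int offset) { std::printf("str x%d, [sp, #%d]\n", reg, offset); }
static void EmitStp(int r0, int r1, int offset) {
  std::printf("stp x%d, x%d, [sp, #%d]\n", r0, r1, offset);
}

// Mirrors the structure of SpillRegisters: peel one register when the offset is
// unaligned for a pair and the count is odd, then store the rest in pairs.
static void SpillRegisters(std::vector<int> regs, int offset, int size) {
  size_t i = 0;
  if (offset % (2 * size) != 0 && regs.size() % 2 != 0) {
    EmitStr(regs[i++], offset);  // single store realigns the offset for stp
    offset += size;
  }
  for (; i + 1 < regs.size(); i += 2, offset += 2 * size) {
    EmitStp(regs[i], regs[i + 1], offset);
  }
  if (i < regs.size()) {
    EmitStr(regs[i], offset);  // leftover single register, if any
  }
}

int main() {
  // x20, x21, lr(x30) at sp+40 with 8-byte registers reproduces the pattern in
  // the expected CFI output: str x20, [sp, #40]; stp x21, x30, [sp, #48].
  SpillRegisters({20, 21, 30}, /*offset=*/40, /*size=*/8);
  return 0;
}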