-rw-r--r--  build/Android.common_build.mk  4
-rw-r--r--  build/Android.gtest.mk  1
-rw-r--r--  compiler/driver/compiler_driver.cc  21
-rw-r--r--  compiler/image_writer.cc  65
-rw-r--r--  compiler/image_writer.h  6
-rw-r--r--  compiler/optimizing/code_generator_arm.cc  15
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  140
-rw-r--r--  compiler/optimizing/code_generator_mips.cc  14
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  6
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  15
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  15
-rw-r--r--  compiler/optimizing/inliner.cc  4
-rw-r--r--  compiler/optimizing/instruction_builder.cc  3
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc  47
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc  51
-rw-r--r--  compiler/optimizing/nodes.cc  2
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc  4
-rw-r--r--  compiler/utils/assembler_test.h  7
-rw-r--r--  compiler/utils/mips/assembler_mips.cc  207
-rw-r--r--  compiler/utils/mips/assembler_mips.h  5
-rw-r--r--  compiler/utils/mips/assembler_mips32r6_test.cc  644
-rw-r--r--  compiler/utils/mips/assembler_mips_test.cc  1513
-rw-r--r--  patchoat/patchoat.cc  12
-rw-r--r--  patchoat/patchoat.h  1
-rw-r--r--  runtime/arch/arm/asm_support_arm.S  25
-rw-r--r--  runtime/arch/arm/instruction_set_features_arm.cc  1
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S  116
-rw-r--r--  runtime/art_field-inl.h  3
-rw-r--r--  runtime/art_field.h  1
-rw-r--r--  runtime/art_method-inl.h  23
-rw-r--r--  runtime/art_method.h  21
-rw-r--r--  runtime/asm_support.h  2
-rw-r--r--  runtime/class_linker-inl.h  28
-rw-r--r--  runtime/class_linker.cc  186
-rw-r--r--  runtime/class_linker.h  24
-rw-r--r--  runtime/class_linker_test.cc  3
-rw-r--r--  runtime/common_runtime_test.cc  20
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h  9
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  16
-rw-r--r--  runtime/entrypoints_order_test.cc  8
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  68
-rw-r--r--  runtime/gc/collector/concurrent_copying.h  30
-rw-r--r--  runtime/gc/collector/mark_compact.cc  92
-rw-r--r--  runtime/gc/collector/mark_compact.h  11
-rw-r--r--  runtime/gc/collector/mark_sweep.cc  63
-rw-r--r--  runtime/gc/collector/mark_sweep.h  26
-rw-r--r--  runtime/gc/collector/semi_space-inl.h  23
-rw-r--r--  runtime/gc/collector/semi_space.cc  58
-rw-r--r--  runtime/gc/collector/semi_space.h  4
-rw-r--r--  runtime/gc/heap.cc  2
-rw-r--r--  runtime/gc/space/image_space.cc  183
-rw-r--r--  runtime/image-inl.h  19
-rw-r--r--  runtime/image.cc  2
-rw-r--r--  runtime/image.h  6
-rw-r--r--  runtime/imtable.h  77
-rw-r--r--  runtime/interpreter/interpreter_common.h  2
-rw-r--r--  runtime/java_vm_ext.cc  31
-rw-r--r--  runtime/mem_map.cc  7
-rw-r--r--  runtime/mirror/class-inl.h  87
-rw-r--r--  runtime/mirror/class.cc  17
-rw-r--r--  runtime/mirror/class.h  54
-rw-r--r--  runtime/mirror/dex_cache-inl.h  1
-rw-r--r--  runtime/oat.h  2
-rw-r--r--  runtime/read_barrier.h  5
-rw-r--r--  runtime/thread.cc  101
-rw-r--r--  runtime/thread.h  12
-rw-r--r--  runtime/thread_list.cc  40
-rw-r--r--  runtime/thread_list.h  4
-rw-r--r--  test/033-class-init-deadlock/expected.txt  2
-rw-r--r--  test/033-class-init-deadlock/src/Main.java  16
-rw-r--r--  test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc  14
-rwxr-xr-x  test/149-suspend-all-stress/check  18
-rw-r--r--  test/149-suspend-all-stress/expected.txt  1
-rw-r--r--  test/149-suspend-all-stress/suspend_all.cc  41
-rw-r--r--  test/566-polymorphic-inlining/polymorphic_inline.cc  27
-rw-r--r--  test/566-polymorphic-inlining/src/Main.java  12
-rw-r--r--  test/570-checker-select/src/Main.java  147
-rw-r--r--  test/606-erroneous-class/expected.txt  0
-rw-r--r--  test/606-erroneous-class/info.txt  3
-rw-r--r--  test/606-erroneous-class/smali-multidex/ClassA.smali  27
-rw-r--r--  test/606-erroneous-class/smali/ClassB.smali  18
-rw-r--r--  test/606-erroneous-class/smali/ErrClass.smali  26
-rw-r--r--  test/606-erroneous-class/src/Main.java  21
-rw-r--r--  test/Android.libarttest.mk  6
-rw-r--r--  test/valgrind-target-suppressions.txt  21
-rwxr-xr-x  tools/buildbot-build.sh  2
86 files changed, 3419 insertions(+), 1298 deletions(-)
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index fc4dd55d67..123bcaa3bd 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -296,8 +296,8 @@ art_asflags :=
ifdef ART_IMT_SIZE
art_cflags += -DIMT_SIZE=$(ART_IMT_SIZE)
else
- # Default is 64
- art_cflags += -DIMT_SIZE=64
+ # Default is 43
+ art_cflags += -DIMT_SIZE=43
endif
ifeq ($(ART_HEAP_POISONING),true)
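
Note on the new default: 43 is prime, which matters if IMT slots are assigned by reducing an interface method's index modulo the table size; a prime modulus spreads strided method indices across slots better than a power of two. A minimal sketch of that mapping, assuming a simple modulo scheme (ImtSlot and kImtSize are illustrative names, not ART code):

#include <cstddef>
#include <cstdint>

constexpr size_t kImtSize = 43;  // mirrors the new -DIMT_SIZE default above

constexpr size_t ImtSlot(uint32_t dex_method_index) {
  return dex_method_index % kImtSize;
}

static_assert(ImtSlot(43) == 0, "wraps at the table size");
static_assert(ImtSlot(44) == 1, "consecutive indices land in distinct slots");
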
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index a14265e30d..1afbdfcb59 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -365,6 +365,7 @@ COMPILER_GTEST_HOST_SRC_FILES_arm64 := \
COMPILER_GTEST_HOST_SRC_FILES_mips := \
$(COMPILER_GTEST_COMMON_SRC_FILES_mips) \
compiler/utils/mips/assembler_mips_test.cc \
+ compiler/utils/mips/assembler_mips32r6_test.cc \
COMPILER_GTEST_HOST_SRC_FILES_mips64 := \
$(COMPILER_GTEST_COMMON_SRC_FILES_mips64) \
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a4b48892fb..131be37a33 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2522,11 +2522,28 @@ class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor
true);
}
// Create the conflict tables.
- if (!klass->IsTemp() && klass->ShouldHaveEmbeddedImtAndVTable()) {
+ FillIMTAndConflictTables(klass);
+ return true;
+ }
+
+ private:
+ void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!klass->ShouldHaveImt()) {
+ return;
+ }
+ if (visited_classes_.find(klass) != visited_classes_.end()) {
+ return;
+ }
+ if (klass->HasSuperClass()) {
+ FillIMTAndConflictTables(klass->GetSuperClass());
+ }
+ if (!klass->IsTemp()) {
Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass);
}
- return true;
+ visited_classes_.insert(klass);
}
+
+ std::set<mirror::Class*> visited_classes_;
};
void CompilerDriver::InitializeClasses(jobject class_loader,
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index da10568475..063eb11718 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1232,9 +1232,10 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
}
// Assign offsets for all runtime methods in the IMT since these may hold conflict tables
// live.
- if (as_klass->ShouldHaveEmbeddedImtAndVTable()) {
- for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- ArtMethod* imt_method = as_klass->GetEmbeddedImTableEntry(i, target_ptr_size_);
+ if (as_klass->ShouldHaveImt()) {
+ ImTable* imt = as_klass->GetImt(target_ptr_size_);
+ for (size_t i = 0; i < ImTable::kSize; ++i) {
+ ArtMethod* imt_method = imt->Get(i, target_ptr_size_);
DCHECK(imt_method != nullptr);
if (imt_method->IsRuntimeMethod() &&
!IsInBootImage(imt_method) &&
@@ -1243,6 +1244,11 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
}
}
}
+
+ if (as_klass->ShouldHaveImt()) {
+ ImTable* imt = as_klass->GetImt(target_ptr_size_);
+ TryAssignImTableOffset(imt, oat_index);
+ }
} else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
@@ -1269,6 +1275,23 @@ bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
return native_object_relocations_.find(ptr) != native_object_relocations_.end();
}
+void ImageWriter::TryAssignImTableOffset(ImTable* imt, size_t oat_index) {
+ // No offset, or already assigned.
+ if (imt == nullptr || IsInBootImage(imt) || NativeRelocationAssigned(imt)) {
+ return;
+ }
+ // If the method is a conflict method we also want to assign the conflict table offset.
+ ImageInfo& image_info = GetImageInfo(oat_index);
+ const size_t size = ImTable::SizeInBytes(target_ptr_size_);
+ native_object_relocations_.emplace(
+ imt,
+ NativeObjectRelocation {
+ oat_index,
+ image_info.bin_slot_sizes_[kBinImTable],
+ kNativeObjectRelocationTypeIMTable});
+ image_info.bin_slot_sizes_[kBinImTable] += size;
+}
+
void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
// No offset, or already assigned.
if (table == nullptr || NativeRelocationAssigned(table)) {
@@ -1391,6 +1414,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
bin_offset = RoundUp(bin_offset, method_alignment);
break;
}
+ case kBinImTable:
case kBinIMTConflictTable: {
bin_offset = RoundUp(bin_offset, target_ptr_size_);
break;
@@ -1461,6 +1485,10 @@ size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) c
bin_slot_offsets_[kBinArtMethodClean],
bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]);
+ // IMT section.
+ ImageSection* imt_section = &out_sections[ImageHeader::kSectionImTables];
+ *imt_section = ImageSection(bin_slot_offsets_[kBinImTable], bin_slot_sizes_[kBinImTable]);
+
// Conflict tables section.
ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
*imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable],
@@ -1585,6 +1613,13 @@ class FixupRootVisitor : public RootVisitor {
ImageWriter* const image_writer_;
};
+void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) {
+ for (size_t i = 0; i < ImTable::kSize; ++i) {
+ ArtMethod* method = orig->Get(i, target_ptr_size_);
+ copy->Set(i, NativeLocationInImage(method), target_ptr_size_);
+ }
+}
+
void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
const size_t count = orig->NumEntries(target_ptr_size_);
for (size_t i = 0; i < count; ++i) {
@@ -1642,6 +1677,12 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
case kNativeObjectRelocationTypeDexCacheArray:
// Nothing to copy here, everything is done in FixupDexCache().
break;
+ case kNativeObjectRelocationTypeIMTable: {
+ ImTable* orig_imt = reinterpret_cast<ImTable*>(pair.first);
+ ImTable* dest_imt = reinterpret_cast<ImTable*>(dest);
+ CopyAndFixupImTable(orig_imt, dest_imt);
+ break;
+ }
case kNativeObjectRelocationTypeIMTConflictTable: {
auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
CopyAndFixupImtConflictTable(
@@ -1850,13 +1891,25 @@ uintptr_t ImageWriter::NativeOffsetInImage(void* obj) {
}
template <typename T>
+std::string PrettyPrint(T* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::ostringstream oss;
+ oss << ptr;
+ return oss.str();
+}
+
+template <>
+std::string PrettyPrint(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+ return PrettyMethod(method);
+}
+
+template <typename T>
T* ImageWriter::NativeLocationInImage(T* obj) {
if (obj == nullptr || IsInBootImage(obj)) {
return obj;
} else {
auto it = native_object_relocations_.find(obj);
- CHECK(it != native_object_relocations_.end()) << obj << " spaces "
- << Runtime::Current()->GetHeap()->DumpSpaces();
+ CHECK(it != native_object_relocations_.end()) << obj << " " << PrettyPrint(obj)
+ << " spaces " << Runtime::Current()->GetHeap()->DumpSpaces();
const NativeObjectRelocation& relocation = it->second;
ImageInfo& image_info = GetImageInfo(relocation.oat_index);
return reinterpret_cast<T*>(image_info.image_begin_ + relocation.offset);
@@ -2210,6 +2263,8 @@ ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocat
return kBinDexCacheArray;
case kNativeObjectRelocationTypeRuntimeMethod:
return kBinRuntimeMethod;
+ case kNativeObjectRelocationTypeIMTable:
+ return kBinImTable;
case kNativeObjectRelocationTypeIMTConflictTable:
return kBinIMTConflictTable;
}
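
For orientation, the image writer above sizes the new kBinImTable bin with ImTable::SizeInBytes() and rewrites entries with Get()/Set() during relocation. A rough sketch of the shape such a table could have, assuming kSize pointer-sized entries stored contiguously so that an element's offset is just index * pointer_size (the Get/Set below also assume host and target pointer sizes match, which the real runtime/imtable.h does not have to):

#include <cstddef>
#include <cstdint>

class ArtMethod;  // opaque in this sketch

class ImTable {
 public:
  static constexpr size_t kSize = 43;  // IMT_SIZE from the build change above

  static constexpr size_t OffsetOfElement(size_t index, size_t pointer_size) {
    return index * pointer_size;
  }

  static constexpr size_t SizeInBytes(size_t pointer_size) {
    return kSize * pointer_size;
  }

  ArtMethod* Get(size_t index, size_t pointer_size) {
    uint8_t* slot = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
    return *reinterpret_cast<ArtMethod**>(slot);
  }

  void Set(size_t index, ArtMethod* method, size_t pointer_size) {
    uint8_t* slot = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
    *reinterpret_cast<ArtMethod**>(slot) = method;
  }
};

Because the table is a flat native array rather than data embedded in the Class object, the writer can place it in its own image section and relocate it independently of the class.
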
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 51976c511f..1efdc22c0a 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -169,6 +169,8 @@ class ImageWriter FINAL {
// ArtMethods may be dirty if the class has native methods or a declaring class that isn't
// initialized.
kBinArtMethodDirty,
+ // IMT (clean)
+ kBinImTable,
// Conflict tables (clean).
kBinIMTConflictTable,
// Runtime methods (always clean, do not have a length prefix array).
@@ -191,6 +193,7 @@ class ImageWriter FINAL {
kNativeObjectRelocationTypeArtMethodDirty,
kNativeObjectRelocationTypeArtMethodArrayDirty,
kNativeObjectRelocationTypeRuntimeMethod,
+ kNativeObjectRelocationTypeIMTable,
kNativeObjectRelocationTypeIMTConflictTable,
kNativeObjectRelocationTypeDexCacheArray,
};
@@ -401,6 +404,7 @@ class ImageWriter FINAL {
void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void CopyAndFixupImTable(ImTable* orig, ImTable* copy) SHARED_REQUIRES(Locks::mutator_lock_);
void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
SHARED_REQUIRES(Locks::mutator_lock_);
void FixupClass(mirror::Class* orig, mirror::Class* copy)
@@ -433,6 +437,8 @@ class ImageWriter FINAL {
size_t oat_index)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void TryAssignImTableOffset(ImTable* imt, size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
+
// Assign the offset for an IMT conflict table. Does nothing if the table already has a native
// relocation.
void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 6e74d082e0..eca9e2c299 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1873,8 +1873,6 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
LocationSummary* locations = invoke->GetLocations();
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register hidden_reg = locations->GetTemp(1).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
- invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1900,10 +1898,14 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
// intact/accessible until the end of the marking phase (the
// concurrent copying collector may not in the future).
__ MaybeUnpoisonHeapReference(temp);
+ __ LoadFromOffset(kLoadWord, temp, temp,
+ mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+ uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+ invoke->GetImtIndex(), kArmPointerSize));
// temp = temp->GetImtEntryAt(method_offset);
+ __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
uint32_t entry_point =
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value();
- __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
// LR();
@@ -6777,8 +6779,11 @@ void InstructionCodeGeneratorARM::VisitClassTableGet(HClassTableGet* instruction
method_offset = mirror::Class::EmbeddedVTableEntryOffset(
instruction->GetIndex(), kArmPointerSize).SizeValue();
} else {
- method_offset = mirror::Class::EmbeddedImTableEntryOffset(
- instruction->GetIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
+ __ LoadFromOffset(kLoadWord, locations->Out().AsRegister<Register>(),
+ locations->InAt(0).AsRegister<Register>(),
+ mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+ method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+ instruction->GetIndex(), kArmPointerSize));
}
__ LoadFromOffset(kLoadWord,
locations->Out().AsRegister<Register>(),
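
The codegen change above replaces a single load from a table embedded in the Class object with two dependent loads: first the class's ImTable pointer, then the entry at the IMT index. A self-contained sketch of what the emitted sequence computes (MirrorClass, imt_, and LookupImtEntry are illustrative names only, not the actual mirror::Class layout):

#include <cstddef>

struct ArtMethod;                            // opaque in this sketch
struct ImTable { ArtMethod* entries[43]; };  // flat table, one slot per IMT index

struct MirrorClass {
  // ... other mirror::Class fields ...
  ImTable* imt_;                             // read via mirror::Class::ImtPtrOffset()
};

// Two dependent loads instead of one load at a class-embedded offset.
inline ArtMethod* LookupImtEntry(const MirrorClass* klass, size_t imt_index) {
  ImTable* imt = klass->imt_;                // load 1: the class's ImTable pointer
  return imt->entries[imt_index];            // load 2: ImTable::OffsetOfElement(imt_index)
}
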
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 5560ae2c74..5d3c8c5590 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2951,75 +2951,48 @@ void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
/* false_target */ nullptr);
}
-enum SelectVariant {
- kCsel,
- kCselFalseConst,
- kCselTrueConst,
- kFcsel,
-};
-
static inline bool IsConditionOnFloatingPointValues(HInstruction* condition) {
return condition->IsCondition() &&
Primitive::IsFloatingPointType(condition->InputAt(0)->GetType());
}
-static inline bool IsRecognizedCselConstant(HInstruction* constant) {
- if (constant->IsConstant()) {
- int64_t value = Int64FromConstant(constant->AsConstant());
- if ((value == -1) || (value == 0) || (value == 1)) {
- return true;
- }
- }
- return false;
-}
-
-static inline SelectVariant GetSelectVariant(HSelect* select) {
- if (Primitive::IsFloatingPointType(select->GetType())) {
- return kFcsel;
- } else if (IsRecognizedCselConstant(select->GetFalseValue())) {
- return kCselFalseConst;
- } else if (IsRecognizedCselConstant(select->GetTrueValue())) {
- return kCselTrueConst;
- } else {
- return kCsel;
- }
-}
-
-static inline bool HasSwappedInputs(SelectVariant variant) {
- return variant == kCselTrueConst;
-}
-
-static inline Condition GetConditionForSelect(HCondition* condition, SelectVariant variant) {
- IfCondition cond = HasSwappedInputs(variant) ? condition->GetOppositeCondition()
- : condition->GetCondition();
+static inline Condition GetConditionForSelect(HCondition* condition) {
+ IfCondition cond = condition->AsCondition()->GetCondition();
return IsConditionOnFloatingPointValues(condition) ? ARM64FPCondition(cond, condition->IsGtBias())
: ARM64Condition(cond);
}
void LocationsBuilderARM64::VisitSelect(HSelect* select) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
- switch (GetSelectVariant(select)) {
- case kCsel:
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
- break;
- case kCselFalseConst:
- locations->SetInAt(0, Location::ConstantLocation(select->InputAt(0)->AsConstant()));
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
- break;
- case kCselTrueConst:
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::ConstantLocation(select->InputAt(1)->AsConstant()));
- locations->SetOut(Location::RequiresRegister());
- break;
- case kFcsel:
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister());
- break;
+ if (Primitive::IsFloatingPointType(select->GetType())) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ } else {
+ HConstant* cst_true_value = select->GetTrueValue()->AsConstant();
+ HConstant* cst_false_value = select->GetFalseValue()->AsConstant();
+ bool is_true_value_constant = cst_true_value != nullptr;
+ bool is_false_value_constant = cst_false_value != nullptr;
+ // Ask VIXL whether we should synthesize constants in registers.
+ // We give an arbitrary register to VIXL when dealing with non-constant inputs.
+ Operand true_op = is_true_value_constant ?
+ Operand(Int64FromConstant(cst_true_value)) : Operand(x1);
+ Operand false_op = is_false_value_constant ?
+ Operand(Int64FromConstant(cst_false_value)) : Operand(x2);
+ bool true_value_in_register = false;
+ bool false_value_in_register = false;
+ MacroAssembler::GetCselSynthesisInformation(
+ x0, true_op, false_op, &true_value_in_register, &false_value_in_register);
+ true_value_in_register |= !is_true_value_constant;
+ false_value_in_register |= !is_false_value_constant;
+
+ locations->SetInAt(1, true_value_in_register ? Location::RequiresRegister()
+ : Location::ConstantLocation(cst_true_value));
+ locations->SetInAt(0, false_value_in_register ? Location::RequiresRegister()
+ : Location::ConstantLocation(cst_false_value));
+ locations->SetOut(Location::RequiresRegister());
}
+
if (IsBooleanValueOrMaterializedCondition(select->GetCondition())) {
locations->SetInAt(2, Location::RequiresRegister());
}
@@ -3027,45 +3000,34 @@ void LocationsBuilderARM64::VisitSelect(HSelect* select) {
void InstructionCodeGeneratorARM64::VisitSelect(HSelect* select) {
HInstruction* cond = select->GetCondition();
- SelectVariant variant = GetSelectVariant(select);
Condition csel_cond;
if (IsBooleanValueOrMaterializedCondition(cond)) {
if (cond->IsCondition() && cond->GetNext() == select) {
- // Condition codes set from previous instruction.
- csel_cond = GetConditionForSelect(cond->AsCondition(), variant);
+ // Use the condition flags set by the previous instruction.
+ csel_cond = GetConditionForSelect(cond->AsCondition());
} else {
__ Cmp(InputRegisterAt(select, 2), 0);
- csel_cond = HasSwappedInputs(variant) ? eq : ne;
+ csel_cond = ne;
}
} else if (IsConditionOnFloatingPointValues(cond)) {
GenerateFcmp(cond);
- csel_cond = GetConditionForSelect(cond->AsCondition(), variant);
+ csel_cond = GetConditionForSelect(cond->AsCondition());
} else {
__ Cmp(InputRegisterAt(cond, 0), InputOperandAt(cond, 1));
- csel_cond = GetConditionForSelect(cond->AsCondition(), variant);
+ csel_cond = GetConditionForSelect(cond->AsCondition());
}
- switch (variant) {
- case kCsel:
- case kCselFalseConst:
- __ Csel(OutputRegister(select),
- InputRegisterAt(select, 1),
- InputOperandAt(select, 0),
- csel_cond);
- break;
- case kCselTrueConst:
- __ Csel(OutputRegister(select),
- InputRegisterAt(select, 0),
- InputOperandAt(select, 1),
- csel_cond);
- break;
- case kFcsel:
- __ Fcsel(OutputFPRegister(select),
- InputFPRegisterAt(select, 1),
- InputFPRegisterAt(select, 0),
- csel_cond);
- break;
+ if (Primitive::IsFloatingPointType(select->GetType())) {
+ __ Fcsel(OutputFPRegister(select),
+ InputFPRegisterAt(select, 1),
+ InputFPRegisterAt(select, 0),
+ csel_cond);
+ } else {
+ __ Csel(OutputRegister(select),
+ InputOperandAt(select, 1),
+ InputOperandAt(select, 0),
+ csel_cond);
}
}
@@ -3528,8 +3490,6 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
LocationSummary* locations = invoke->GetLocations();
Register temp = XRegisterFrom(locations->GetTemp(0));
- uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
- invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
Location receiver = locations->InAt(0);
Offset class_offset = mirror::Object::ClassOffset();
Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
@@ -3559,6 +3519,10 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
// intact/accessible until the end of the marking phase (the
// concurrent copying collector may not in the future).
GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
+ __ Ldr(temp,
+ MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
+ uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+ invoke->GetImtIndex(), kArm64PointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
@@ -5186,8 +5150,10 @@ void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instructi
method_offset = mirror::Class::EmbeddedVTableEntryOffset(
instruction->GetIndex(), kArm64PointerSize).SizeValue();
} else {
- method_offset = mirror::Class::EmbeddedImTableEntryOffset(
- instruction->GetIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
+ __ Ldr(XRegisterFrom(locations->Out()), MemOperand(XRegisterFrom(locations->InAt(0)),
+ mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
+ method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+ instruction->GetIndex(), kArm64PointerSize));
}
__ Ldr(XRegisterFrom(locations->Out()),
MemOperand(XRegisterFrom(locations->InAt(0)), method_offset));
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index ed0767ed52..d5bad28dab 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3701,8 +3701,6 @@ void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
- invoke->GetImtIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
Location receiver = invoke->GetLocations()->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
@@ -3719,6 +3717,10 @@ void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke
__ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
+ __ LoadFromOffset(kLoadWord, temp, temp,
+ mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
+ uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+ invoke->GetImtIndex(), kMipsPointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// T9 = temp->GetEntryPoint();
@@ -5162,8 +5164,12 @@ void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instructio
method_offset = mirror::Class::EmbeddedVTableEntryOffset(
instruction->GetIndex(), kMipsPointerSize).SizeValue();
} else {
- method_offset = mirror::Class::EmbeddedImTableEntryOffset(
- instruction->GetIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
+ __ LoadFromOffset(kLoadWord,
+ locations->Out().AsRegister<Register>(),
+ locations->InAt(0).AsRegister<Register>(),
+ mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
+ method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+ instruction->GetIndex(), kMipsPointerSize));
}
__ LoadFromOffset(kLoadWord,
locations->Out().AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 8c73e350f6..539abf1de8 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2935,8 +2935,6 @@ void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
- uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
- invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
Location receiver = invoke->GetLocations()->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
@@ -2953,6 +2951,10 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invo
__ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
+ __ LoadFromOffset(kLoadDoubleword, temp, temp,
+ mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
+ uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+ invoke->GetImtIndex(), kMips64PointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
// T9 = temp->GetEntryPoint();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8c643a05c8..a21c295274 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2012,8 +2012,6 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
LocationSummary* locations = invoke->GetLocations();
Register temp = locations->GetTemp(0).AsRegister<Register>();
XmmRegister hidden_reg = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
- uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
- invoke->GetImtIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -2040,7 +2038,12 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
// intact/accessible until the end of the marking phase (the
// concurrent copying collector may not in the future).
__ MaybeUnpoisonHeapReference(temp);
+ // temp = temp->GetAddressOfIMT()
+ __ movl(temp,
+ Address(temp, mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
// temp = temp->GetImtEntryAt(method_offset);
+ uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+ invoke->GetImtIndex(), kX86PointerSize));
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp,
@@ -4060,8 +4063,12 @@ void InstructionCodeGeneratorX86::VisitClassTableGet(HClassTableGet* instruction
method_offset = mirror::Class::EmbeddedVTableEntryOffset(
instruction->GetIndex(), kX86PointerSize).SizeValue();
} else {
- method_offset = mirror::Class::EmbeddedImTableEntryOffset(
- instruction->GetIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
+ __ movl(locations->InAt(0).AsRegister<Register>(),
+ Address(locations->InAt(0).AsRegister<Register>(),
+ mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
+ // temp = temp->GetImtEntryAt(method_offset);
+ method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+ instruction->GetIndex(), kX86PointerSize));
}
__ movl(locations->Out().AsRegister<Register>(),
Address(locations->InAt(0).AsRegister<Register>(), method_offset));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 72de3e6e35..135f0c40d0 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2228,8 +2228,6 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
LocationSummary* locations = invoke->GetLocations();
CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
CpuRegister hidden_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
- uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
- invoke->GetImtIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
Location receiver = locations->InAt(0);
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
@@ -2255,6 +2253,12 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
// intact/accessible until the end of the marking phase (the
// concurrent copying collector may not in the future).
__ MaybeUnpoisonHeapReference(temp);
+ // temp = temp->GetAddressOfIMT()
+ __ movq(temp,
+ Address(temp, mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
+ // temp = temp->GetImtEntryAt(method_offset);
+ uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+ invoke->GetImtIndex(), kX86_64PointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
@@ -3978,8 +3982,11 @@ void InstructionCodeGeneratorX86_64::VisitClassTableGet(HClassTableGet* instruct
method_offset = mirror::Class::EmbeddedVTableEntryOffset(
instruction->GetIndex(), kX86_64PointerSize).SizeValue();
} else {
- method_offset = mirror::Class::EmbeddedImTableEntryOffset(
- instruction->GetIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
+ __ movq(locations->Out().AsRegister<CpuRegister>(),
+ Address(locations->InAt(0).AsRegister<CpuRegister>(),
+ mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
+ method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+ instruction->GetIndex(), kX86_64PointerSize));
}
__ movq(locations->Out().AsRegister<CpuRegister>(),
Address(locations->InAt(0).AsRegister<CpuRegister>(), method_offset));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 59de895182..d5e80b4759 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -656,8 +656,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
}
ArtMethod* new_method = nullptr;
if (invoke_instruction->IsInvokeInterface()) {
- new_method = ic.GetTypeAt(i)->GetEmbeddedImTableEntry(
- method_index % mirror::Class::kImtSize, pointer_size);
+ new_method = ic.GetTypeAt(i)->GetImt(pointer_size)->Get(
+ method_index, pointer_size);
if (new_method->IsRuntimeMethod()) {
// Bail out as soon as we see a conflict trampoline in one of the target's
// interface table.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index f2286e46e6..1c67bcc878 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -16,6 +16,7 @@
#include "instruction_builder.h"
+#include "art_method-inl.h"
#include "bytecode_utils.h"
#include "class_linker.h"
#include "driver/compiler_options.h"
@@ -890,7 +891,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
return_type,
dex_pc,
method_idx,
- resolved_method->GetDexMethodIndex());
+ resolved_method->GetImtIndex());
}
return HandleInvoke(invoke,
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 29f7672b0a..7d1c2ebe0b 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -2031,7 +2031,7 @@ void IntrinsicLocationsBuilderARM::VisitStringGetCharsNoCheck(HInvoke* invoke) {
locations->SetInAt(3, Location::RequiresRegister());
locations->SetInAt(4, Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
+ // Temporary registers to store lengths of strings and for calculations.
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -2059,28 +2059,55 @@ void IntrinsicCodeGeneratorARM::VisitStringGetCharsNoCheck(HInvoke* invoke) {
Register dstObj = locations->InAt(3).AsRegister<Register>();
Register dstBegin = locations->InAt(4).AsRegister<Register>();
- Register src_ptr = locations->GetTemp(0).AsRegister<Register>();
- Register src_ptr_end = locations->GetTemp(1).AsRegister<Register>();
+ Register num_chr = locations->GetTemp(0).AsRegister<Register>();
+ Register src_ptr = locations->GetTemp(1).AsRegister<Register>();
Register dst_ptr = locations->GetTemp(2).AsRegister<Register>();
- Register tmp = locations->GetTemp(3).AsRegister<Register>();
// src range to copy.
__ add(src_ptr, srcObj, ShifterOperand(value_offset));
- __ add(src_ptr_end, src_ptr, ShifterOperand(srcEnd, LSL, 1));
__ add(src_ptr, src_ptr, ShifterOperand(srcBegin, LSL, 1));
// dst to be copied.
__ add(dst_ptr, dstObj, ShifterOperand(data_offset));
__ add(dst_ptr, dst_ptr, ShifterOperand(dstBegin, LSL, 1));
+ __ subs(num_chr, srcEnd, ShifterOperand(srcBegin));
+
// Do the copy.
- Label loop, done;
+ Label loop, remainder, done;
+
+ // Early out for valid zero-length retrievals.
+ __ b(&done, EQ);
+
+ // Save repairing the value of num_chr on the < 4 character path.
+ __ subs(IP, num_chr, ShifterOperand(4));
+ __ b(&remainder, LT);
+
+ // Keep the result of the earlier subs, we are going to fetch at least 4 characters.
+ __ mov(num_chr, ShifterOperand(IP));
+
+ // Main loop used for longer fetches loads and stores 4x16-bit characters at a time.
+ // (LDRD/STRD fault on unaligned addresses and it's not worth inlining extra code
+ // to rectify these everywhere this intrinsic applies.)
__ Bind(&loop);
- __ cmp(src_ptr, ShifterOperand(src_ptr_end));
+ __ ldr(IP, Address(src_ptr, char_size * 2));
+ __ subs(num_chr, num_chr, ShifterOperand(4));
+ __ str(IP, Address(dst_ptr, char_size * 2));
+ __ ldr(IP, Address(src_ptr, char_size * 4, Address::PostIndex));
+ __ str(IP, Address(dst_ptr, char_size * 4, Address::PostIndex));
+ __ b(&loop, GE);
+
+ __ adds(num_chr, num_chr, ShifterOperand(4));
__ b(&done, EQ);
- __ ldrh(tmp, Address(src_ptr, char_size, Address::PostIndex));
- __ strh(tmp, Address(dst_ptr, char_size, Address::PostIndex));
- __ b(&loop);
+
+ // Main loop for < 4 character case and remainder handling. Loads and stores one
+ // 16-bit Java character at a time.
+ __ Bind(&remainder);
+ __ ldrh(IP, Address(src_ptr, char_size, Address::PostIndex));
+ __ subs(num_chr, num_chr, ShifterOperand(1));
+ __ strh(IP, Address(dst_ptr, char_size, Address::PostIndex));
+ __ b(&remainder, GT);
+
__ Bind(&done);
}
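
The rewritten intrinsic copies four 16-bit characters per iteration using two 32-bit loads and stores, then finishes with a one-character-at-a-time remainder loop; copies shorter than four characters go straight to the remainder loop. A plain C++ sketch of the same strategy under those assumptions (std::memcpy stands in for the unaligned-tolerant LDR/STR pairs):

#include <cstddef>
#include <cstdint>
#include <cstring>

void CopyChars(const uint16_t* src, uint16_t* dst, size_t num_chars) {
  size_t i = 0;
  // Main loop: four chars per iteration via two 32-bit transfers.
  for (; i + 4 <= num_chars; i += 4) {
    uint32_t lo, hi;
    std::memcpy(&lo, src + i, sizeof(lo));      // chars i, i+1
    std::memcpy(&hi, src + i + 2, sizeof(hi));  // chars i+2, i+3
    std::memcpy(dst + i, &lo, sizeof(lo));
    std::memcpy(dst + i + 2, &hi, sizeof(hi));
  }
  // Remainder (and short-copy) loop: one char at a time.
  for (; i < num_chars; ++i) {
    dst[i] = src[i];
  }
}
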
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index d776fb4406..c8d6ddc8f1 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1745,6 +1745,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringGetCharsNoCheck(HInvoke* invoke)
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
void IntrinsicCodeGeneratorARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
@@ -1770,29 +1771,57 @@ void IntrinsicCodeGeneratorARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
Register dstBegin = XRegisterFrom(locations->InAt(4));
Register src_ptr = XRegisterFrom(locations->GetTemp(0));
- Register src_ptr_end = XRegisterFrom(locations->GetTemp(1));
+ Register num_chr = XRegisterFrom(locations->GetTemp(1));
+ Register tmp1 = XRegisterFrom(locations->GetTemp(2));
UseScratchRegisterScope temps(masm);
Register dst_ptr = temps.AcquireX();
- Register tmp = temps.AcquireW();
+ Register tmp2 = temps.AcquireX();
- // src range to copy.
+ // src address to copy from.
__ Add(src_ptr, srcObj, Operand(value_offset));
- __ Add(src_ptr_end, src_ptr, Operand(srcEnd, LSL, 1));
__ Add(src_ptr, src_ptr, Operand(srcBegin, LSL, 1));
- // dst to be copied.
+ // dst address start to copy to.
__ Add(dst_ptr, dstObj, Operand(data_offset));
__ Add(dst_ptr, dst_ptr, Operand(dstBegin, LSL, 1));
+ __ Sub(num_chr, srcEnd, srcBegin);
+
// Do the copy.
- vixl::Label loop, done;
+ vixl::Label loop;
+ vixl::Label done;
+ vixl::Label remainder;
+
+ // Early out for valid zero-length retrievals.
+ __ Cbz(num_chr, &done);
+
+ // Save repairing the value of num_chr on the < 8 character path.
+ __ Subs(tmp1, num_chr, 8);
+ __ B(lt, &remainder);
+
+ // Keep the result of the earlier subs, we are going to fetch at least 8 characters.
+ __ Mov(num_chr, tmp1);
+
+ // Main loop used for longer fetches loads and stores 8x16-bit characters at a time.
+ // (Unaligned addresses are acceptable here and not worth inlining extra code to rectify.)
__ Bind(&loop);
- __ Cmp(src_ptr, src_ptr_end);
- __ B(&done, eq);
- __ Ldrh(tmp, MemOperand(src_ptr, char_size, vixl::PostIndex));
- __ Strh(tmp, MemOperand(dst_ptr, char_size, vixl::PostIndex));
- __ B(&loop);
+ __ Ldp(tmp1, tmp2, MemOperand(src_ptr, char_size * 8, vixl::PostIndex));
+ __ Subs(num_chr, num_chr, 8);
+ __ Stp(tmp1, tmp2, MemOperand(dst_ptr, char_size * 8, vixl::PostIndex));
+ __ B(ge, &loop);
+
+ __ Adds(num_chr, num_chr, 8);
+ __ B(eq, &done);
+
+ // Main loop for < 8 character case and remainder handling. Loads and stores one
+ // 16-bit Java character at a time.
+ __ Bind(&remainder);
+ __ Ldrh(tmp1, MemOperand(src_ptr, char_size, vixl::PostIndex));
+ __ Subs(num_chr, num_chr, 1);
+ __ Strh(tmp1, MemOperand(dst_ptr, char_size, vixl::PostIndex));
+ __ B(gt, &remainder);
+
__ Bind(&done);
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index ae3c4b01e6..4b4e549e20 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2283,8 +2283,6 @@ ReferenceTypeInfo ReferenceTypeInfo::Create(TypeHandle type_handle, bool is_exac
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
DCHECK(IsValidHandle(type_handle));
- DCHECK(!type_handle->IsErroneous());
- DCHECK(!type_handle->IsArrayClass() || !type_handle->GetComponentType()->IsErroneous());
if (!is_exact) {
DCHECK(!type_handle->CannotBeAssignedFromOtherTypes())
<< "Callers of ReferenceTypeInfo::Create should ensure is_exact is properly computed";
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 2a281dd46d..3e6adcb172 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -46,10 +46,10 @@ static inline ReferenceTypeInfo::TypeHandle GetRootHandle(StackHandleScopeCollec
return *cache;
}
-// Returns true if klass is admissible to the propagation: non-null and non-erroneous.
+// Returns true if klass is admissible to the propagation: non-null and resolved.
// For an array type, we also check if the component type is admissible.
static bool IsAdmissible(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
- return klass != nullptr && !klass->IsErroneous() &&
+ return klass != nullptr && klass->IsResolved() &&
(!klass->IsArrayClass() || IsAdmissible(klass->GetComponentType()));
}
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 084e9011ba..afe0576906 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -461,7 +461,7 @@ class AssemblerTest : public testing::Test {
void SetUp() OVERRIDE {
arena_.reset(new ArenaAllocator(&pool_));
- assembler_.reset(new (arena_.get()) Ass(arena_.get()));
+ assembler_.reset(CreateAssembler(arena_.get()));
test_helper_.reset(
new AssemblerTestInfrastructure(GetArchitectureString(),
GetAssemblerCmdName(),
@@ -481,6 +481,11 @@ class AssemblerTest : public testing::Test {
arena_.reset();
}
+ // Override this to set up any architecture-specific things, e.g., CPU revision.
+ virtual Ass* CreateAssembler(ArenaAllocator* arena) {
+ return new (arena) Ass(arena);
+ }
+
// Override this to set up any architecture-specific things, e.g., register vectors.
virtual void SetUpHelpers() {}
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 9368301d07..ac930833f2 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -448,6 +448,11 @@ void MipsAssembler::Lui(Register rt, uint16_t imm16) {
EmitI(0xf, static_cast<Register>(0), rt, imm16);
}
+void MipsAssembler::Aui(Register rt, Register rs, uint16_t imm16) {
+ CHECK(IsR6());
+ EmitI(0xf, rs, rt, imm16);
+}
+
void MipsAssembler::Sync(uint32_t stype) {
EmitR(0, static_cast<Register>(0), static_cast<Register>(0), static_cast<Register>(0),
stype & 0x1f, 0xf);
@@ -1385,13 +1390,8 @@ void MipsAssembler::StoreConst32ToOffset(int32_t value,
Register base,
int32_t offset,
Register temp) {
- if (!IsInt<16>(offset)) {
- CHECK_NE(temp, AT); // Must not use AT as temp, as not to overwrite the loaded value.
- LoadConst32(AT, offset);
- Addu(AT, AT, base);
- base = AT;
- offset = 0;
- }
+ CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ false);
if (value == 0) {
temp = ZERO;
} else {
@@ -1404,14 +1404,8 @@ void MipsAssembler::StoreConst64ToOffset(int64_t value,
Register base,
int32_t offset,
Register temp) {
- // IsInt<16> must be passed a signed value.
- if (!IsInt<16>(offset) || !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize))) {
- CHECK_NE(temp, AT); // Must not use AT as temp, as not to overwrite the loaded value.
- LoadConst32(AT, offset);
- Addu(AT, AT, base);
- base = AT;
- offset = 0;
- }
+ CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ true);
uint32_t low = Low32Bits(value);
uint32_t high = High32Bits(value);
if (low == 0) {
@@ -1457,11 +1451,35 @@ void MipsAssembler::LoadDConst64(FRegister rd, int64_t value, Register temp) {
}
void MipsAssembler::Addiu32(Register rt, Register rs, int32_t value, Register temp) {
+ CHECK_NE(rs, temp); // Must not overwrite the register `rs` while loading `value`.
if (IsInt<16>(value)) {
Addiu(rt, rs, value);
+ } else if (IsR6()) {
+ int16_t high = High16Bits(value);
+ int16_t low = Low16Bits(value);
+ high += (low < 0) ? 1 : 0; // Account for sign extension in addiu.
+ if (low != 0) {
+ Aui(temp, rs, high);
+ Addiu(rt, temp, low);
+ } else {
+ Aui(rt, rs, high);
+ }
} else {
- LoadConst32(temp, value);
- Addu(rt, rs, temp);
+ // Do not load the whole 32-bit `value` if it can be represented as
+ // a sum of two 16-bit signed values. This can save an instruction.
+ constexpr int32_t kMinValueForSimpleAdjustment = std::numeric_limits<int16_t>::min() * 2;
+ constexpr int32_t kMaxValueForSimpleAdjustment = std::numeric_limits<int16_t>::max() * 2;
+ if (0 <= value && value <= kMaxValueForSimpleAdjustment) {
+ Addiu(temp, rs, kMaxValueForSimpleAdjustment / 2);
+ Addiu(rt, temp, value - kMaxValueForSimpleAdjustment / 2);
+ } else if (kMinValueForSimpleAdjustment <= value && value < 0) {
+ Addiu(temp, rs, kMinValueForSimpleAdjustment / 2);
+ Addiu(rt, temp, value - kMinValueForSimpleAdjustment / 2);
+ } else {
+ // Now that all shorter options have been exhausted, load the full 32-bit value.
+ LoadConst32(temp, value);
+ Addu(rt, rs, temp);
+ }
}
}
@@ -2262,17 +2280,103 @@ void MipsAssembler::Bc1nez(FRegister ft, MipsLabel* label) {
Bcond(label, kCondT, static_cast<Register>(ft), ZERO);
}
-void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register base,
- int32_t offset) {
- // IsInt<16> must be passed a signed value.
- if (!IsInt<16>(offset) ||
- (type == kLoadDoubleword && !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
- LoadConst32(AT, offset);
- Addu(AT, AT, base);
- base = AT;
- offset = 0;
+void MipsAssembler::AdjustBaseAndOffset(Register& base,
+ int32_t& offset,
+ bool is_doubleword,
+ bool is_float) {
+ // This method is used to adjust the base register and offset pair
+ // for a load/store when the offset doesn't fit into int16_t.
+ // It is assumed that `base + offset` is sufficiently aligned for memory
+ // operands that are machine word in size or smaller. For doubleword-sized
+ // operands it's assumed that `base` is a multiple of 8, while `offset`
+ // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
+ // and spilled variables on the stack accessed relative to the stack
+ // pointer register).
+ // We preserve the "alignment" of `offset` by adjusting it by a multiple of 8.
+ CHECK_NE(base, AT); // Must not overwrite the register `base` while loading `offset`.
+
+ bool doubleword_aligned = IsAligned<kMipsDoublewordSize>(offset);
+ bool two_accesses = is_doubleword && (!is_float || !doubleword_aligned);
+
+ // IsInt<16> must be passed a signed value, hence the static cast below.
+ if (IsInt<16>(offset) &&
+ (!two_accesses || IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
+ // Nothing to do: `offset` (and, if needed, `offset + 4`) fits into int16_t.
+ return;
+ }
+
+ // Remember the "(mis)alignment" of `offset`, it will be checked at the end.
+ uint32_t misalignment = offset & (kMipsDoublewordSize - 1);
+
+ // Do not load the whole 32-bit `offset` if it can be represented as
+ // a sum of two 16-bit signed offsets. This can save an instruction or two.
+ // To simplify matters, only do this for a symmetric range of offsets from
+ // about -64KB to about +64KB, allowing further addition of 4 when accessing
+ // 64-bit variables with two 32-bit accesses.
+ constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7ff8; // Max int16_t that's a multiple of 8.
+ constexpr int32_t kMaxOffsetForSimpleAdjustment = 2 * kMinOffsetForSimpleAdjustment;
+ if (0 <= offset && offset <= kMaxOffsetForSimpleAdjustment) {
+ Addiu(AT, base, kMinOffsetForSimpleAdjustment);
+ offset -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= offset && offset < 0) {
+ Addiu(AT, base, -kMinOffsetForSimpleAdjustment);
+ offset += kMinOffsetForSimpleAdjustment;
+ } else if (IsR6()) {
+ // On R6 take advantage of the aui instruction, e.g.:
+ // aui AT, base, offset_high
+ // lw reg_lo, offset_low(AT)
+ // lw reg_hi, (offset_low+4)(AT)
+ // or when offset_low+4 overflows int16_t:
+ // aui AT, base, offset_high
+ // addiu AT, AT, 8
+ // lw reg_lo, (offset_low-8)(AT)
+ // lw reg_hi, (offset_low-4)(AT)
+ int16_t offset_high = High16Bits(offset);
+ int16_t offset_low = Low16Bits(offset);
+ offset_high += (offset_low < 0) ? 1 : 0; // Account for offset sign extension in load/store.
+ Aui(AT, base, offset_high);
+ if (two_accesses && !IsInt<16>(static_cast<int32_t>(offset_low + kMipsWordSize))) {
+ // Avoid overflow in the 16-bit offset of the load/store instruction when adding 4.
+ Addiu(AT, AT, kMipsDoublewordSize);
+ offset_low -= kMipsDoublewordSize;
+ }
+ offset = offset_low;
+ } else {
+ // Do not load the whole 32-bit `offset` if it can be represented as
+ // a sum of three 16-bit signed offsets. This can save an instruction.
+ // To simplify matters, only do this for a symmetric range of offsets from
+ // about -96KB to about +96KB, allowing further addition of 4 when accessing
+ // 64-bit variables with two 32-bit accesses.
+ constexpr int32_t kMinOffsetForMediumAdjustment = 2 * kMinOffsetForSimpleAdjustment;
+ constexpr int32_t kMaxOffsetForMediumAdjustment = 3 * kMinOffsetForSimpleAdjustment;
+ if (0 <= offset && offset <= kMaxOffsetForMediumAdjustment) {
+ Addiu(AT, base, kMinOffsetForMediumAdjustment / 2);
+ Addiu(AT, AT, kMinOffsetForMediumAdjustment / 2);
+ offset -= kMinOffsetForMediumAdjustment;
+ } else if (-kMaxOffsetForMediumAdjustment <= offset && offset < 0) {
+ Addiu(AT, base, -kMinOffsetForMediumAdjustment / 2);
+ Addiu(AT, AT, -kMinOffsetForMediumAdjustment / 2);
+ offset += kMinOffsetForMediumAdjustment;
+ } else {
+ // Now that all shorter options have been exhausted, load the full 32-bit offset.
+ int32_t loaded_offset = RoundDown(offset, kMipsDoublewordSize);
+ LoadConst32(AT, loaded_offset);
+ Addu(AT, AT, base);
+ offset -= loaded_offset;
+ }
}
+ base = AT;
+
+ CHECK(IsInt<16>(offset));
+ if (two_accesses) {
+ CHECK(IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)));
+ }
+ CHECK_EQ(misalignment, offset & (kMipsDoublewordSize - 1));
+}
+void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register base,
+ int32_t offset) {
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
switch (type) {
case kLoadSignedByte:
Lb(reg, base, offset);
@@ -2306,27 +2410,12 @@ void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register
}
void MipsAssembler::LoadSFromOffset(FRegister reg, Register base, int32_t offset) {
- if (!IsInt<16>(offset)) {
- LoadConst32(AT, offset);
- Addu(AT, AT, base);
- base = AT;
- offset = 0;
- }
-
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
Lwc1(reg, base, offset);
}
void MipsAssembler::LoadDFromOffset(FRegister reg, Register base, int32_t offset) {
- // IsInt<16> must be passed a signed value.
- if (!IsInt<16>(offset) ||
- (!IsAligned<kMipsDoublewordSize>(offset) &&
- !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
- LoadConst32(AT, offset);
- Addu(AT, AT, base);
- base = AT;
- offset = 0;
- }
-
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
if (offset & 0x7) {
if (Is32BitFPU()) {
Lwc1(reg, base, offset);
@@ -2365,15 +2454,10 @@ void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32
void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register base,
int32_t offset) {
- // IsInt<16> must be passed a signed value.
- if (!IsInt<16>(offset) ||
- (type == kStoreDoubleword && !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
- LoadConst32(AT, offset);
- Addu(AT, AT, base);
- base = AT;
- offset = 0;
- }
-
+ // Must not use AT as `reg`, so as not to overwrite the value being stored
+ // with the adjusted `base`.
+ CHECK_NE(reg, AT);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
switch (type) {
case kStoreByte:
Sb(reg, base, offset);
@@ -2396,27 +2480,12 @@ void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register
}
void MipsAssembler::StoreSToOffset(FRegister reg, Register base, int32_t offset) {
- if (!IsInt<16>(offset)) {
- LoadConst32(AT, offset);
- Addu(AT, AT, base);
- base = AT;
- offset = 0;
- }
-
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
Swc1(reg, base, offset);
}
void MipsAssembler::StoreDToOffset(FRegister reg, Register base, int32_t offset) {
- // IsInt<16> must be passed a signed value.
- if (!IsInt<16>(offset) ||
- (!IsAligned<kMipsDoublewordSize>(offset) &&
- !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
- LoadConst32(AT, offset);
- Addu(AT, AT, base);
- base = AT;
- offset = 0;
- }
-
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
if (offset & 0x7) {
if (Is32BitFPU()) {
Swc1(reg, base, offset);
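
To see what AdjustBaseAndOffset buys, consider an offset just outside the int16_t range of a MIPS load/store immediate: a single addiu by 0x7ff8 (the largest int16_t that is a multiple of 8, so the offset's alignment is preserved) brings the remaining displacement back into range without materializing the full 32-bit offset. A host-side sketch of that simple-adjustment case (SimpleAdjust and Adjustment are illustrative names, not assembler code):

#include <cassert>
#include <cstdint>

struct Adjustment {
  int32_t add_to_base;  // emitted as: addiu AT, base, add_to_base
  int32_t new_offset;   // used as the load/store displacement off AT
};

inline Adjustment SimpleAdjust(int32_t offset) {
  constexpr int32_t kStep = 0x7ff8;  // max int16_t that is a multiple of 8
  assert(0 <= offset && offset <= 2 * kStep);
  return Adjustment{kStep, offset - kStep};
}

// Example: offset 0x9008 -> addiu AT, base, 0x7ff8; lw reg, 0x1010(AT)
// (0x1010 and 0x1014 both fit in int16_t, so a 64-bit access split into two
// word accesses still works without further adjustment).
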
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index d5e62853f4..31b3b311eb 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -183,6 +183,7 @@ class MipsAssembler FINAL : public Assembler {
void Lbu(Register rt, Register rs, uint16_t imm16);
void Lhu(Register rt, Register rs, uint16_t imm16);
void Lui(Register rt, uint16_t imm16);
+ void Aui(Register rt, Register rs, uint16_t imm16); // R6
void Sync(uint32_t stype);
void Mfhi(Register rd); // R2
void Mflo(Register rd); // R2
@@ -385,6 +386,10 @@ class MipsAssembler FINAL : public Assembler {
void Bc1nez(FRegister ft, MipsLabel* label); // R6
void EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset, size_t size);
+ void AdjustBaseAndOffset(Register& base,
+ int32_t& offset,
+ bool is_doubleword,
+ bool is_float = false);
void LoadFromOffset(LoadOperandType type, Register reg, Register base, int32_t offset);
void LoadSFromOffset(FRegister reg, Register base, int32_t offset);
void LoadDFromOffset(FRegister reg, Register base, int32_t offset);
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
new file mode 100644
index 0000000000..ce92d602d0
--- /dev/null
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -0,0 +1,644 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_mips.h"
+
+#include <map>
+
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
+
+#define __ GetAssembler()->
+
+namespace art {
+
+struct MIPSCpuRegisterCompare {
+ bool operator()(const mips::Register& a, const mips::Register& b) const {
+ return a < b;
+ }
+};
+
+class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
+ mips::Register,
+ mips::FRegister,
+ uint32_t> {
+ public:
+ typedef AssemblerTest<mips::MipsAssembler, mips::Register, mips::FRegister, uint32_t> Base;
+
+ AssemblerMIPS32r6Test() :
+ instruction_set_features_(MipsInstructionSetFeatures::FromVariant("mips32r6", nullptr)) {
+ }
+
+ protected:
+ // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
+ std::string GetArchitectureString() OVERRIDE {
+ return "mips";
+ }
+
+ std::string GetAssemblerParameters() OVERRIDE {
+ return " --no-warn -32 -march=mips32r6";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -mmips:isa32r6";
+ }
+
+ mips::MipsAssembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
+ return new (arena) mips::MipsAssembler(arena, instruction_set_features_.get());
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ registers_.push_back(new mips::Register(mips::ZERO));
+ registers_.push_back(new mips::Register(mips::AT));
+ registers_.push_back(new mips::Register(mips::V0));
+ registers_.push_back(new mips::Register(mips::V1));
+ registers_.push_back(new mips::Register(mips::A0));
+ registers_.push_back(new mips::Register(mips::A1));
+ registers_.push_back(new mips::Register(mips::A2));
+ registers_.push_back(new mips::Register(mips::A3));
+ registers_.push_back(new mips::Register(mips::T0));
+ registers_.push_back(new mips::Register(mips::T1));
+ registers_.push_back(new mips::Register(mips::T2));
+ registers_.push_back(new mips::Register(mips::T3));
+ registers_.push_back(new mips::Register(mips::T4));
+ registers_.push_back(new mips::Register(mips::T5));
+ registers_.push_back(new mips::Register(mips::T6));
+ registers_.push_back(new mips::Register(mips::T7));
+ registers_.push_back(new mips::Register(mips::S0));
+ registers_.push_back(new mips::Register(mips::S1));
+ registers_.push_back(new mips::Register(mips::S2));
+ registers_.push_back(new mips::Register(mips::S3));
+ registers_.push_back(new mips::Register(mips::S4));
+ registers_.push_back(new mips::Register(mips::S5));
+ registers_.push_back(new mips::Register(mips::S6));
+ registers_.push_back(new mips::Register(mips::S7));
+ registers_.push_back(new mips::Register(mips::T8));
+ registers_.push_back(new mips::Register(mips::T9));
+ registers_.push_back(new mips::Register(mips::K0));
+ registers_.push_back(new mips::Register(mips::K1));
+ registers_.push_back(new mips::Register(mips::GP));
+ registers_.push_back(new mips::Register(mips::SP));
+ registers_.push_back(new mips::Register(mips::FP));
+ registers_.push_back(new mips::Register(mips::RA));
+
+ secondary_register_names_.emplace(mips::Register(mips::ZERO), "zero");
+ secondary_register_names_.emplace(mips::Register(mips::AT), "at");
+ secondary_register_names_.emplace(mips::Register(mips::V0), "v0");
+ secondary_register_names_.emplace(mips::Register(mips::V1), "v1");
+ secondary_register_names_.emplace(mips::Register(mips::A0), "a0");
+ secondary_register_names_.emplace(mips::Register(mips::A1), "a1");
+ secondary_register_names_.emplace(mips::Register(mips::A2), "a2");
+ secondary_register_names_.emplace(mips::Register(mips::A3), "a3");
+ secondary_register_names_.emplace(mips::Register(mips::T0), "t0");
+ secondary_register_names_.emplace(mips::Register(mips::T1), "t1");
+ secondary_register_names_.emplace(mips::Register(mips::T2), "t2");
+ secondary_register_names_.emplace(mips::Register(mips::T3), "t3");
+ secondary_register_names_.emplace(mips::Register(mips::T4), "t4");
+ secondary_register_names_.emplace(mips::Register(mips::T5), "t5");
+ secondary_register_names_.emplace(mips::Register(mips::T6), "t6");
+ secondary_register_names_.emplace(mips::Register(mips::T7), "t7");
+ secondary_register_names_.emplace(mips::Register(mips::S0), "s0");
+ secondary_register_names_.emplace(mips::Register(mips::S1), "s1");
+ secondary_register_names_.emplace(mips::Register(mips::S2), "s2");
+ secondary_register_names_.emplace(mips::Register(mips::S3), "s3");
+ secondary_register_names_.emplace(mips::Register(mips::S4), "s4");
+ secondary_register_names_.emplace(mips::Register(mips::S5), "s5");
+ secondary_register_names_.emplace(mips::Register(mips::S6), "s6");
+ secondary_register_names_.emplace(mips::Register(mips::S7), "s7");
+ secondary_register_names_.emplace(mips::Register(mips::T8), "t8");
+ secondary_register_names_.emplace(mips::Register(mips::T9), "t9");
+ secondary_register_names_.emplace(mips::Register(mips::K0), "k0");
+ secondary_register_names_.emplace(mips::Register(mips::K1), "k1");
+ secondary_register_names_.emplace(mips::Register(mips::GP), "gp");
+ secondary_register_names_.emplace(mips::Register(mips::SP), "sp");
+ secondary_register_names_.emplace(mips::Register(mips::FP), "fp");
+ secondary_register_names_.emplace(mips::Register(mips::RA), "ra");
+
+ fp_registers_.push_back(new mips::FRegister(mips::F0));
+ fp_registers_.push_back(new mips::FRegister(mips::F1));
+ fp_registers_.push_back(new mips::FRegister(mips::F2));
+ fp_registers_.push_back(new mips::FRegister(mips::F3));
+ fp_registers_.push_back(new mips::FRegister(mips::F4));
+ fp_registers_.push_back(new mips::FRegister(mips::F5));
+ fp_registers_.push_back(new mips::FRegister(mips::F6));
+ fp_registers_.push_back(new mips::FRegister(mips::F7));
+ fp_registers_.push_back(new mips::FRegister(mips::F8));
+ fp_registers_.push_back(new mips::FRegister(mips::F9));
+ fp_registers_.push_back(new mips::FRegister(mips::F10));
+ fp_registers_.push_back(new mips::FRegister(mips::F11));
+ fp_registers_.push_back(new mips::FRegister(mips::F12));
+ fp_registers_.push_back(new mips::FRegister(mips::F13));
+ fp_registers_.push_back(new mips::FRegister(mips::F14));
+ fp_registers_.push_back(new mips::FRegister(mips::F15));
+ fp_registers_.push_back(new mips::FRegister(mips::F16));
+ fp_registers_.push_back(new mips::FRegister(mips::F17));
+ fp_registers_.push_back(new mips::FRegister(mips::F18));
+ fp_registers_.push_back(new mips::FRegister(mips::F19));
+ fp_registers_.push_back(new mips::FRegister(mips::F20));
+ fp_registers_.push_back(new mips::FRegister(mips::F21));
+ fp_registers_.push_back(new mips::FRegister(mips::F22));
+ fp_registers_.push_back(new mips::FRegister(mips::F23));
+ fp_registers_.push_back(new mips::FRegister(mips::F24));
+ fp_registers_.push_back(new mips::FRegister(mips::F25));
+ fp_registers_.push_back(new mips::FRegister(mips::F26));
+ fp_registers_.push_back(new mips::FRegister(mips::F27));
+ fp_registers_.push_back(new mips::FRegister(mips::F28));
+ fp_registers_.push_back(new mips::FRegister(mips::F29));
+ fp_registers_.push_back(new mips::FRegister(mips::F30));
+ fp_registers_.push_back(new mips::FRegister(mips::F31));
+ }
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(&registers_);
+ STLDeleteElements(&fp_registers_);
+ }
+
+ std::vector<mips::Register*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+ return fp_registers_;
+ }
+
+ uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ return imm_value;
+ }
+
+ std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+ CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
+ return secondary_register_names_[reg];
+ }
+
+ std::string RepeatInsn(size_t count, const std::string& insn) {
+ std::string result;
+ for (; count != 0u; --count) {
+ result += insn;
+ }
+ return result;
+ }
+
+ void BranchCondTwoRegsHelper(void (mips::MipsAssembler::*f)(mips::Register,
+ mips::Register,
+ mips::MipsLabel*),
+ std::string instr_name) {
+ mips::MipsLabel label;
+ (Base::GetAssembler()->*f)(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ (Base::GetAssembler()->*f)(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n" +
+ instr_name + " $a0, $a1, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ instr_name + " $a2, $a3, 1b\n"
+ "nop\n";
+ DriverStr(expected, instr_name);
+ }
+
+ private:
+ std::vector<mips::Register*> registers_;
+ std::map<mips::Register, std::string, MIPSCpuRegisterCompare> secondary_register_names_;
+
+ std::vector<mips::FRegister*> fp_registers_;
+ std::unique_ptr<const MipsInstructionSetFeatures> instruction_set_features_;
+};
+
+
+TEST_F(AssemblerMIPS32r6Test, Toolchain) {
+ EXPECT_TRUE(CheckTools());
+}
+
+TEST_F(AssemblerMIPS32r6Test, MulR6) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::MulR6, "mul ${reg1}, ${reg2}, ${reg3}"), "MulR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MuhR6) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::MuhR6, "muh ${reg1}, ${reg2}, ${reg3}"), "MuhR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MuhuR6) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::MuhuR6, "muhu ${reg1}, ${reg2}, ${reg3}"), "MuhuR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, DivR6) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::DivR6, "div ${reg1}, ${reg2}, ${reg3}"), "DivR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, ModR6) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::ModR6, "mod ${reg1}, ${reg2}, ${reg3}"), "ModR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, DivuR6) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::DivuR6, "divu ${reg1}, ${reg2}, ${reg3}"), "DivuR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, ModuR6) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::ModuR6, "modu ${reg1}, ${reg2}, ${reg3}"), "ModuR6");
+}
+
+//////////
+// MISC //
+//////////
+
+TEST_F(AssemblerMIPS32r6Test, Aui) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Aui, 16, "aui ${reg1}, ${reg2}, {imm}"), "Aui");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Bitswap) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Seleqz) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"),
+ "seleqz");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Selnez) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Selnez, "selnez ${reg1}, ${reg2}, ${reg3}"),
+ "selnez");
+}
+
+TEST_F(AssemblerMIPS32r6Test, ClzR6) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::ClzR6, "clz ${reg1}, ${reg2}"), "clzR6");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CloR6) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::CloR6, "clo ${reg1}, ${reg2}"), "cloR6");
+}
+
+////////////////////
+// FLOATING POINT //
+////////////////////
+
+TEST_F(AssemblerMIPS32r6Test, SelS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::SelS, "sel.s ${reg1}, ${reg2}, ${reg3}"), "sel.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SelD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::SelD, "sel.d ${reg1}, ${reg2}, ${reg3}"), "sel.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, ClassS) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::ClassS, "class.s ${reg1}, ${reg2}"), "class.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, ClassD) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::ClassD, "class.d ${reg1}, ${reg2}"), "class.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MinS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::MinS, "min.s ${reg1}, ${reg2}, ${reg3}"), "min.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MinD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::MinD, "min.d ${reg1}, ${reg2}, ${reg3}"), "min.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MaxS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::MaxS, "max.s ${reg1}, ${reg2}, ${reg3}"), "max.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MaxD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::MaxD, "max.d ${reg1}, ${reg2}, ${reg3}"), "max.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUnS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUnS, "cmp.un.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.un.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpEqS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpEqS, "cmp.eq.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.eq.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUeqS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUeqS, "cmp.ueq.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ueq.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpLtS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLtS, "cmp.lt.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.lt.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUltS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUltS, "cmp.ult.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ult.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpLeS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLeS, "cmp.le.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.le.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUleS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUleS, "cmp.ule.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ule.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpOrS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpOrS, "cmp.or.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.or.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUneS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUneS, "cmp.une.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.une.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpNeS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpNeS, "cmp.ne.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ne.s");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUnD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUnD, "cmp.un.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.un.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpEqD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpEqD, "cmp.eq.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.eq.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUeqD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUeqD, "cmp.ueq.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ueq.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpLtD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLtD, "cmp.lt.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.lt.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUltD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUltD, "cmp.ult.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ult.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpLeD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLeD, "cmp.le.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.le.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUleD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUleD, "cmp.ule.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ule.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpOrD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpOrD, "cmp.or.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.or.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpUneD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUneD, "cmp.une.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.une.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, CmpNeD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::CmpNeD, "cmp.ne.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ne.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, LoadDFromOffset) {
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x8000);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x7FF8);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFB);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFC);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFF);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0xFFF0);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x8008);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x8001);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x8000);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0xFFF0);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x17FE8);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x0FFF8);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x0FFF1);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x0FFF1);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x0FFF8);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x17FE8);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x17FF0);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x17FE9);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x17FE9);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x17FF0);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x12345678);
+
+ const char* expected =
+ "ldc1 $f0, -0x8000($a0)\n"
+ "ldc1 $f0, 0($a0)\n"
+ "ldc1 $f0, 0x7FF8($a0)\n"
+ "lwc1 $f0, 0x7FFB($a0)\n"
+ "lw $t8, 0x7FFF($a0)\n"
+ "mthc1 $t8, $f0\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "lwc1 $f0, 4($at)\n"
+ "lw $t8, 8($at)\n"
+ "mthc1 $t8, $f0\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "lwc1 $f0, 7($at)\n"
+ "lw $t8, 11($at)\n"
+ "mthc1 $t8, $f0\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "ldc1 $f0, -0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "ldc1 $f0, -0x10($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "lwc1 $f0, -9($at)\n"
+ "lw $t8, -5($at)\n"
+ "mthc1 $t8, $f0\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "ldc1 $f0, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "ldc1 $f0, 0x7FF8($at)\n"
+ "aui $at, $a0, 0xFFFF\n"
+ "ldc1 $f0, -0x7FE8($at)\n"
+ "aui $at, $a0, 0xFFFF\n"
+ "ldc1 $f0, 0x8($at)\n"
+ "aui $at, $a0, 0xFFFF\n"
+ "lwc1 $f0, 0xF($at)\n"
+ "lw $t8, 0x13($at)\n"
+ "mthc1 $t8, $f0\n"
+ "aui $at, $a0, 0x1\n"
+ "lwc1 $f0, -0xF($at)\n"
+ "lw $t8, -0xB($at)\n"
+ "mthc1 $t8, $f0\n"
+ "aui $at, $a0, 0x1\n"
+ "ldc1 $f0, -0x8($at)\n"
+ "aui $at, $a0, 0x1\n"
+ "ldc1 $f0, 0x7FE8($at)\n"
+ "aui $at, $a0, 0xFFFF\n"
+ "ldc1 $f0, -0x7FF0($at)\n"
+ "aui $at, $a0, 0xFFFF\n"
+ "lwc1 $f0, -0x7FE9($at)\n"
+ "lw $t8, -0x7FE5($at)\n"
+ "mthc1 $t8, $f0\n"
+ "aui $at, $a0, 0x1\n"
+ "lwc1 $f0, 0x7FE9($at)\n"
+ "lw $t8, 0x7FED($at)\n"
+ "mthc1 $t8, $f0\n"
+ "aui $at, $a0, 0x1\n"
+ "ldc1 $f0, 0x7FF0($at)\n"
+ "aui $at, $a0, 0x1234\n"
+ "ldc1 $f0, 0x5678($at)\n";
+ DriverStr(expected, "LoadDFromOffset");
+}
+
+TEST_F(AssemblerMIPS32r6Test, StoreDToOffset) {
+ __ StoreDToOffset(mips::F0, mips::A0, -0x8000);
+ __ StoreDToOffset(mips::F0, mips::A0, +0);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x7FF8);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x7FFB);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x7FFC);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x7FFF);
+ __ StoreDToOffset(mips::F0, mips::A0, -0xFFF0);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x8008);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x8001);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x8000);
+ __ StoreDToOffset(mips::F0, mips::A0, +0xFFF0);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x17FE8);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x0FFF8);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x0FFF1);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x0FFF1);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x0FFF8);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x17FE8);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x17FF0);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x17FE9);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x17FE9);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x17FF0);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x12345678);
+
+ const char* expected =
+ "sdc1 $f0, -0x8000($a0)\n"
+ "sdc1 $f0, 0($a0)\n"
+ "sdc1 $f0, 0x7FF8($a0)\n"
+ "mfhc1 $t8, $f0\n"
+ "swc1 $f0, 0x7FFB($a0)\n"
+ "sw $t8, 0x7FFF($a0)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "mfhc1 $t8, $f0\n"
+ "swc1 $f0, 4($at)\n"
+ "sw $t8, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "mfhc1 $t8, $f0\n"
+ "swc1 $f0, 7($at)\n"
+ "sw $t8, 11($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "sdc1 $f0, -0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "sdc1 $f0, -0x10($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "mfhc1 $t8, $f0\n"
+ "swc1 $f0, -9($at)\n"
+ "sw $t8, -5($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "sdc1 $f0, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "sdc1 $f0, 0x7FF8($at)\n"
+ "aui $at, $a0, 0xFFFF\n"
+ "sdc1 $f0, -0x7FE8($at)\n"
+ "aui $at, $a0, 0xFFFF\n"
+ "sdc1 $f0, 0x8($at)\n"
+ "aui $at, $a0, 0xFFFF\n"
+ "mfhc1 $t8, $f0\n"
+ "swc1 $f0, 0xF($at)\n"
+ "sw $t8, 0x13($at)\n"
+ "aui $at, $a0, 0x1\n"
+ "mfhc1 $t8, $f0\n"
+ "swc1 $f0, -0xF($at)\n"
+ "sw $t8, -0xB($at)\n"
+ "aui $at, $a0, 0x1\n"
+ "sdc1 $f0, -0x8($at)\n"
+ "aui $at, $a0, 0x1\n"
+ "sdc1 $f0, 0x7FE8($at)\n"
+ "aui $at, $a0, 0xFFFF\n"
+ "sdc1 $f0, -0x7FF0($at)\n"
+ "aui $at, $a0, 0xFFFF\n"
+ "mfhc1 $t8, $f0\n"
+ "swc1 $f0, -0x7FE9($at)\n"
+ "sw $t8, -0x7FE5($at)\n"
+ "aui $at, $a0, 0x1\n"
+ "mfhc1 $t8, $f0\n"
+ "swc1 $f0, 0x7FE9($at)\n"
+ "sw $t8, 0x7FED($at)\n"
+ "aui $at, $a0, 0x1\n"
+ "sdc1 $f0, 0x7FF0($at)\n"
+ "aui $at, $a0, 0x1234\n"
+ "sdc1 $f0, 0x5678($at)\n";
+ DriverStr(expected, "StoreDToOffset");
+}
+
+//////////////
+// BRANCHES //
+//////////////
+
+// TODO: MipsAssembler::Auipc
+// MipsAssembler::Addiupc
+// MipsAssembler::Bc
+// MipsAssembler::Jic
+// MipsAssembler::Jialc
+// MipsAssembler::Bltc
+// MipsAssembler::Bltzc
+// MipsAssembler::Bgtzc
+// MipsAssembler::Bgec
+// MipsAssembler::Bgezc
+// MipsAssembler::Blezc
+// MipsAssembler::Bltuc
+// MipsAssembler::Bgeuc
+// MipsAssembler::Beqc
+// MipsAssembler::Bnec
+// MipsAssembler::Beqzc
+// MipsAssembler::Bnezc
+// MipsAssembler::Bc1eqz
+// MipsAssembler::Bc1nez
+// MipsAssembler::Buncond
+// MipsAssembler::Bcond
+// MipsAssembler::Call
+
+// TODO: AssemblerMIPS32r6Test.B
+// AssemblerMIPS32r6Test.Beq
+// AssemblerMIPS32r6Test.Bne
+// AssemblerMIPS32r6Test.Beqz
+// AssemblerMIPS32r6Test.Bnez
+// AssemblerMIPS32r6Test.Bltz
+// AssemblerMIPS32r6Test.Bgez
+// AssemblerMIPS32r6Test.Blez
+// AssemblerMIPS32r6Test.Bgtz
+// AssemblerMIPS32r6Test.Blt
+// AssemblerMIPS32r6Test.Bge
+// AssemblerMIPS32r6Test.Bltu
+// AssemblerMIPS32r6Test.Bgeu
+
+#undef __
+
+} // namespace art
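The LoadDFromOffset and StoreDToOffset expectations above all use the same high/low split of the 32-bit offset: the low 16 bits become the sign-extended displacement of the memory instruction, and the upper half passed to aui is bumped by one when that low half is negative (so -0x17FF0 becomes aui 0xFFFF plus a -0x7FF0 displacement, while +0x12345678 becomes aui 0x1234 plus 0x5678). A compilable sketch of that arithmetic, with hypothetical names not taken from the patch:

    // Hypothetical helper; it only mirrors what the expected strings encode.
    #include <cstdint>
    #include <cstdio>

    static void SplitHiLo(int32_t offset, uint16_t* hi, int16_t* lo) {
      *lo = static_cast<int16_t>(offset);  // sign-extended low 16 bits
      // Compensate for the sign extension so that (hi << 16) + lo == offset
      // (modulo 2^32, which is how aui combines with the displacement).
      *hi = static_cast<uint16_t>((static_cast<int64_t>(offset) - *lo) >> 16);
    }

    int main() {
      uint16_t hi;
      int16_t lo;
      SplitHiLo(0x12345678, &hi, &lo);  // hi = 0x1234, lo = 0x5678
      std::printf("aui $at, $a0, 0x%X ; ldc1 $f0, %d($at)\n", hi, lo);
      SplitHiLo(-0x17FF0, &hi, &lo);    // hi = 0xFFFF, lo = -0x7FF0
      std::printf("aui $at, $a0, 0x%X ; ldc1 $f0, %d($at)\n", hi, lo);
      return 0;
    }

The pre-R6 tests that follow use the same split, but materialize the upper half with lui/ori/addu instead of aui, as the Addiu32 and LoadFromOffset expectations show.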
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 56e58849c9..c722d0c333 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -731,212 +731,538 @@ TEST_F(AssemblerMIPSTest, Not) {
DriverStr(RepeatRR(&mips::MipsAssembler::Not, "nor ${reg1}, ${reg2}, $zero"), "Not");
}
+TEST_F(AssemblerMIPSTest, Addiu32) {
+ __ Addiu32(mips::A1, mips::A2, -0x8000);
+ __ Addiu32(mips::A1, mips::A2, +0);
+ __ Addiu32(mips::A1, mips::A2, +0x7FFF);
+ __ Addiu32(mips::A1, mips::A2, -0x10000);
+ __ Addiu32(mips::A1, mips::A2, -0x8001);
+ __ Addiu32(mips::A1, mips::A2, +0x8000);
+ __ Addiu32(mips::A1, mips::A2, +0xFFFE);
+ __ Addiu32(mips::A1, mips::A2, -0x10001);
+ __ Addiu32(mips::A1, mips::A2, +0xFFFF);
+ __ Addiu32(mips::A1, mips::A2, +0x10000);
+ __ Addiu32(mips::A1, mips::A2, +0x10001);
+ __ Addiu32(mips::A1, mips::A2, +0x12345678);
+
+ const char* expected =
+ "addiu $a1, $a2, -0x8000\n"
+ "addiu $a1, $a2, 0\n"
+ "addiu $a1, $a2, 0x7FFF\n"
+ "addiu $at, $a2, -0x8000\n"
+ "addiu $a1, $at, -0x8000\n"
+ "addiu $at, $a2, -0x8000\n"
+ "addiu $a1, $at, -1\n"
+ "addiu $at, $a2, 0x7FFF\n"
+ "addiu $a1, $at, 1\n"
+ "addiu $at, $a2, 0x7FFF\n"
+ "addiu $a1, $at, 0x7FFF\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0xFFFF\n"
+ "addu $a1, $a2, $at\n"
+ "ori $at, $zero, 0xFFFF\n"
+ "addu $a1, $a2, $at\n"
+ "lui $at, 1\n"
+ "addu $a1, $a2, $at\n"
+ "lui $at, 1\n"
+ "ori $at, $at, 1\n"
+ "addu $a1, $a2, $at\n"
+ "lui $at, 0x1234\n"
+ "ori $at, $at, 0x5678\n"
+ "addu $a1, $a2, $at\n";
+ DriverStr(expected, "Addiu32");
+}
+
TEST_F(AssemblerMIPSTest, LoadFromOffset) {
- __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A0, 0);
- __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0);
- __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 256);
- __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 1000);
- __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0x8000);
- __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0x10000);
- __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0x12345678);
- __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, -256);
- __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0xFFFF8000);
- __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0xABCDEF00);
-
- __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A0, 0);
- __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0);
- __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 256);
- __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 1000);
- __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0x8000);
- __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0x10000);
- __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0x12345678);
- __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, -256);
- __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0xFFFF8000);
- __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0xABCDEF00);
-
- __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A0, 0);
- __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0);
- __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 256);
- __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 1000);
- __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0x8000);
- __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0x10000);
- __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0x12345678);
- __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, -256);
- __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0xFFFF8000);
- __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0xABCDEF00);
-
- __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A0, 0);
- __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0);
- __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 256);
- __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 1000);
- __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0x8000);
- __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0x10000);
- __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0x12345678);
- __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, -256);
- __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0xFFFF8000);
- __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0xABCDEF00);
-
- __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A0, 0);
- __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0);
- __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 256);
- __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 1000);
- __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0x8000);
- __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0x10000);
- __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0x12345678);
- __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, -256);
- __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0xFFFF8000);
- __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0xABCDEF00);
-
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A0, 0);
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A1, 0);
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A1, mips::A0, 0);
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0);
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 256);
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 1000);
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0x8000);
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0x10000);
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0x12345678);
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -256);
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0xFFFF8000);
- __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0xABCDEF00);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x8000);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x7FF8);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x7FFB);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x7FFC);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x7FFF);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0xFFF0);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x8008);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x8001);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x8000);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0xFFF0);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x17FE8);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x0FFF8);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x0FFF1);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x0FFF1);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x0FFF8);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x17FE8);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x17FF0);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x17FE9);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x17FE9);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x17FF0);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x12345678);
+
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x8000);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x7FF8);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x7FFB);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x7FFC);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x7FFF);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0xFFF0);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x8008);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x8001);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x8000);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0xFFF0);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x17FE8);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x0FFF8);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x0FFF1);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x0FFF1);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x0FFF8);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x17FE8);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x17FF0);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x17FE9);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x17FE9);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x17FF0);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x12345678);
+
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x8000);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x7FF8);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x7FFB);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x7FFC);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x7FFF);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0xFFF0);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x8008);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x8001);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x8000);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0xFFF0);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x17FE8);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x0FFF8);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x0FFF1);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x0FFF1);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x0FFF8);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x17FE8);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x17FF0);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x17FE9);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x17FE9);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x17FF0);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x12345678);
+
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x8000);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x7FF8);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x7FFB);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x7FFC);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x7FFF);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0xFFF0);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x8008);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x8001);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x8000);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0xFFF0);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x17FE8);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x0FFF8);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x0FFF1);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x0FFF1);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x0FFF8);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x17FE8);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x17FF0);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x17FE9);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x17FE9);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x17FF0);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x12345678);
+
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x8000);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x7FF8);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x7FFB);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x7FFC);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x7FFF);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0xFFF0);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x8008);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x8001);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x8000);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0xFFF0);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x17FE8);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x0FFF8);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x0FFF1);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x0FFF1);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x0FFF8);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x17FE8);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x17FF0);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x17FE9);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x17FE9);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x17FF0);
+ __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x12345678);
+
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x8000);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x7FF8);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x7FFB);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x7FFC);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x7FFF);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0xFFF0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x8008);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x8001);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x8000);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0xFFF0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x17FE8);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x0FFF8);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x0FFF1);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x0FFF1);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x0FFF8);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x17FE8);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x17FF0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x17FE9);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x17FE9);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x17FF0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x12345678);
const char* expected =
- "lb $a0, 0($a0)\n"
- "lb $a0, 0($a1)\n"
- "lb $a0, 256($a1)\n"
- "lb $a0, 1000($a1)\n"
- "ori $at, $zero, 0x8000\n"
+ "lb $a3, -0x8000($a1)\n"
+ "lb $a3, 0($a1)\n"
+ "lb $a3, 0x7FF8($a1)\n"
+ "lb $a3, 0x7FFB($a1)\n"
+ "lb $a3, 0x7FFC($a1)\n"
+ "lb $a3, 0x7FFF($a1)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lb $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lb $a3, -0x10($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lb $a3, -9($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lb $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lb $a3, 0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lb $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lb $a3, -8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lb $a3, -1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lb $a3, 1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lb $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lb $a3, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "lb $a0, 0($at)\n"
- "lui $at, 1\n"
+ "lb $a3, 0($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
+ "addu $at, $at, $a1\n"
+ "lb $a3, 7($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
+ "addu $at, $at, $a1\n"
+ "lb $a3, 1($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
"addu $at, $at, $a1\n"
- "lb $a0, 0($at)\n"
+ "lb $a3, 0($at)\n"
"lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a1\n"
- "lb $a0, 0($at)\n"
- "lb $a0, -256($a1)\n"
- "lb $a0, 0xFFFF8000($a1)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "lb $a3, 0($at)\n"
+
+ "lbu $a3, -0x8000($a1)\n"
+ "lbu $a3, 0($a1)\n"
+ "lbu $a3, 0x7FF8($a1)\n"
+ "lbu $a3, 0x7FFB($a1)\n"
+ "lbu $a3, 0x7FFC($a1)\n"
+ "lbu $a3, 0x7FFF($a1)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lbu $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lbu $a3, -0x10($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lbu $a3, -9($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lbu $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lbu $a3, 0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lbu $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lbu $a3, -8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lbu $a3, -1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lbu $a3, 1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lbu $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lbu $a3, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "lb $a0, 0($at)\n"
-
- "lbu $a0, 0($a0)\n"
- "lbu $a0, 0($a1)\n"
- "lbu $a0, 256($a1)\n"
- "lbu $a0, 1000($a1)\n"
- "ori $at, $zero, 0x8000\n"
+ "lbu $a3, 0($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "lbu $a0, 0($at)\n"
- "lui $at, 1\n"
+ "lbu $a3, 7($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
+ "addu $at, $at, $a1\n"
+ "lbu $a3, 1($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
"addu $at, $at, $a1\n"
- "lbu $a0, 0($at)\n"
+ "lbu $a3, 0($at)\n"
"lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a1\n"
- "lbu $a0, 0($at)\n"
- "lbu $a0, -256($a1)\n"
- "lbu $a0, 0xFFFF8000($a1)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "lbu $a3, 0($at)\n"
+
+ "lh $a3, -0x8000($a1)\n"
+ "lh $a3, 0($a1)\n"
+ "lh $a3, 0x7FF8($a1)\n"
+ "lh $a3, 0x7FFB($a1)\n"
+ "lh $a3, 0x7FFC($a1)\n"
+ "lh $a3, 0x7FFF($a1)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lh $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lh $a3, -0x10($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lh $a3, -9($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lh $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lh $a3, 0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lh $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lh $a3, -8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lh $a3, -1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lh $a3, 1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lh $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lh $a3, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "lbu $a0, 0($at)\n"
-
- "lh $a0, 0($a0)\n"
- "lh $a0, 0($a1)\n"
- "lh $a0, 256($a1)\n"
- "lh $a0, 1000($a1)\n"
- "ori $at, $zero, 0x8000\n"
+ "lh $a3, 0($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "lh $a0, 0($at)\n"
- "lui $at, 1\n"
+ "lh $a3, 7($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
"addu $at, $at, $a1\n"
- "lh $a0, 0($at)\n"
+ "lh $a3, 1($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
+ "addu $at, $at, $a1\n"
+ "lh $a3, 0($at)\n"
"lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a1\n"
- "lh $a0, 0($at)\n"
- "lh $a0, -256($a1)\n"
- "lh $a0, 0xFFFF8000($a1)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "lh $a3, 0($at)\n"
+
+ "lhu $a3, -0x8000($a1)\n"
+ "lhu $a3, 0($a1)\n"
+ "lhu $a3, 0x7FF8($a1)\n"
+ "lhu $a3, 0x7FFB($a1)\n"
+ "lhu $a3, 0x7FFC($a1)\n"
+ "lhu $a3, 0x7FFF($a1)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lhu $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lhu $a3, -0x10($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lhu $a3, -9($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lhu $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lhu $a3, 0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lhu $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lhu $a3, -8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lhu $a3, -1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lhu $a3, 1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lhu $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lhu $a3, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "lh $a0, 0($at)\n"
-
- "lhu $a0, 0($a0)\n"
- "lhu $a0, 0($a1)\n"
- "lhu $a0, 256($a1)\n"
- "lhu $a0, 1000($a1)\n"
- "ori $at, $zero, 0x8000\n"
+ "lhu $a3, 0($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "lhu $a0, 0($at)\n"
- "lui $at, 1\n"
+ "lhu $a3, 7($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
+ "addu $at, $at, $a1\n"
+ "lhu $a3, 1($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
"addu $at, $at, $a1\n"
- "lhu $a0, 0($at)\n"
+ "lhu $a3, 0($at)\n"
"lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a1\n"
- "lhu $a0, 0($at)\n"
- "lhu $a0, -256($a1)\n"
- "lhu $a0, 0xFFFF8000($a1)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "lhu $a3, 0($at)\n"
+
+ "lw $a3, -0x8000($a1)\n"
+ "lw $a3, 0($a1)\n"
+ "lw $a3, 0x7FF8($a1)\n"
+ "lw $a3, 0x7FFB($a1)\n"
+ "lw $a3, 0x7FFC($a1)\n"
+ "lw $a3, 0x7FFF($a1)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lw $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lw $a3, -0x10($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "lw $a3, -9($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lw $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lw $a3, 0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lw $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lw $a3, -8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lw $a3, -1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lw $a3, 1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lw $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lw $a3, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "lhu $a0, 0($at)\n"
-
- "lw $a0, 0($a0)\n"
- "lw $a0, 0($a1)\n"
- "lw $a0, 256($a1)\n"
- "lw $a0, 1000($a1)\n"
- "ori $at, $zero, 0x8000\n"
+ "lw $a3, 0($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "lw $a0, 0($at)\n"
- "lui $at, 1\n"
+ "lw $a3, 7($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
"addu $at, $at, $a1\n"
- "lw $a0, 0($at)\n"
- "lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "lw $a3, 1($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
"addu $at, $at, $a1\n"
- "lw $a0, 0($at)\n"
- "lw $a0, -256($a1)\n"
- "lw $a0, 0xFFFF8000($a1)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "lw $a3, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a1\n"
- "lw $a0, 0($at)\n"
+ "lw $a3, 0($at)\n"
- "lw $a1, 4($a0)\n"
- "lw $a0, 0($a0)\n"
- "lw $a0, 0($a1)\n"
- "lw $a1, 4($a1)\n"
- "lw $a1, 0($a0)\n"
- "lw $a2, 4($a0)\n"
+ "lw $a0, -0x8000($a2)\n"
+ "lw $a1, -0x7FFC($a2)\n"
"lw $a0, 0($a2)\n"
"lw $a1, 4($a2)\n"
- "lw $a0, 256($a2)\n"
- "lw $a1, 260($a2)\n"
- "lw $a0, 1000($a2)\n"
- "lw $a1, 1004($a2)\n"
- "ori $at, $zero, 0x8000\n"
+ "lw $a0, 0x7FF8($a2)\n"
+ "lw $a1, 0x7FFC($a2)\n"
+ "lw $a0, 0x7FFB($a2)\n"
+ "lw $a1, 0x7FFF($a2)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "lw $a0, 4($at)\n"
+ "lw $a1, 8($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "lw $a0, 7($at)\n"
+ "lw $a1, 11($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "lw $a0, -0x7FF8($at)\n"
+ "lw $a1, -0x7FF4($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "lw $a0, -0x10($at)\n"
+ "lw $a1, -0xC($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "lw $a0, -9($at)\n"
+ "lw $a1, -5($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "lw $a0, 8($at)\n"
+ "lw $a1, 12($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "lw $a0, 0x7FF8($at)\n"
+ "lw $a1, 0x7FFC($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lw $a0, -0x7FF8($at)\n"
+ "lw $a1, -0x7FF4($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lw $a0, -8($at)\n"
+ "lw $a1, -4($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lw $a0, -1($at)\n"
+ "lw $a1, 3($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lw $a0, 1($at)\n"
+ "lw $a1, 5($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lw $a0, 8($at)\n"
+ "lw $a1, 12($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lw $a0, 0x7FF8($at)\n"
+ "lw $a1, 0x7FFC($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a2\n"
"lw $a0, 0($at)\n"
"lw $a1, 4($at)\n"
- "lui $at, 1\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a2\n"
- "lw $a0, 0($at)\n"
- "lw $a1, 4($at)\n"
- "lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "lw $a0, 7($at)\n"
+ "lw $a1, 11($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
+ "addu $at, $at, $a2\n"
+ "lw $a0, 1($at)\n"
+ "lw $a1, 5($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
"addu $at, $at, $a2\n"
"lw $a0, 0($at)\n"
"lw $a1, 4($at)\n"
- "lw $a0, -256($a2)\n"
- "lw $a1, -252($a2)\n"
- "lw $a0, 0xFFFF8000($a2)\n"
- "lw $a1, 0xFFFF8004($a2)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "lui $at, 0x1234\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a2\n"
"lw $a0, 0($at)\n"
"lw $a1, 4($at)\n";
@@ -944,208 +1270,513 @@ TEST_F(AssemblerMIPSTest, LoadFromOffset) {
}
TEST_F(AssemblerMIPSTest, LoadSFromOffset) {
- __ LoadSFromOffset(mips::F0, mips::A0, 0);
- __ LoadSFromOffset(mips::F0, mips::A0, 4);
- __ LoadSFromOffset(mips::F0, mips::A0, 256);
- __ LoadSFromOffset(mips::F0, mips::A0, 0x8000);
- __ LoadSFromOffset(mips::F0, mips::A0, 0x10000);
- __ LoadSFromOffset(mips::F0, mips::A0, 0x12345678);
- __ LoadSFromOffset(mips::F0, mips::A0, -256);
- __ LoadSFromOffset(mips::F0, mips::A0, 0xFFFF8000);
- __ LoadSFromOffset(mips::F0, mips::A0, 0xABCDEF00);
+ __ LoadSFromOffset(mips::F2, mips::A0, -0x8000);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0x7FF8);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0x7FFB);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0x7FFC);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0x7FFF);
+ __ LoadSFromOffset(mips::F2, mips::A0, -0xFFF0);
+ __ LoadSFromOffset(mips::F2, mips::A0, -0x8008);
+ __ LoadSFromOffset(mips::F2, mips::A0, -0x8001);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0x8000);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0xFFF0);
+ __ LoadSFromOffset(mips::F2, mips::A0, -0x17FE8);
+ __ LoadSFromOffset(mips::F2, mips::A0, -0x0FFF8);
+ __ LoadSFromOffset(mips::F2, mips::A0, -0x0FFF1);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0x0FFF1);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0x0FFF8);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0x17FE8);
+ __ LoadSFromOffset(mips::F2, mips::A0, -0x17FF0);
+ __ LoadSFromOffset(mips::F2, mips::A0, -0x17FE9);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0x17FE9);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0x17FF0);
+ __ LoadSFromOffset(mips::F2, mips::A0, +0x12345678);
const char* expected =
- "lwc1 $f0, 0($a0)\n"
- "lwc1 $f0, 4($a0)\n"
- "lwc1 $f0, 256($a0)\n"
- "ori $at, $zero, 0x8000\n"
+ "lwc1 $f2, -0x8000($a0)\n"
+ "lwc1 $f2, 0($a0)\n"
+ "lwc1 $f2, 0x7FF8($a0)\n"
+ "lwc1 $f2, 0x7FFB($a0)\n"
+ "lwc1 $f2, 0x7FFC($a0)\n"
+ "lwc1 $f2, 0x7FFF($a0)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "lwc1 $f2, -0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "lwc1 $f2, -0x10($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "lwc1 $f2, -9($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "lwc1 $f2, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "lwc1 $f2, 0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lwc1 $f2, -0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lwc1 $f2, -8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lwc1 $f2, -1($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lwc1 $f2, 1($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lwc1 $f2, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lwc1 $f2, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a0\n"
- "lwc1 $f0, 0($at)\n"
- "lui $at, 1\n"
+ "lwc1 $f2, 0($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a0\n"
- "lwc1 $f0, 0($at)\n"
- "lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "lwc1 $f2, 7($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
+ "addu $at, $at, $a0\n"
+ "lwc1 $f2, 1($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
"addu $at, $at, $a0\n"
- "lwc1 $f0, 0($at)\n"
- "lwc1 $f0, -256($a0)\n"
- "lwc1 $f0, 0xFFFF8000($a0)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "lwc1 $f2, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a0\n"
- "lwc1 $f0, 0($at)\n";
+ "lwc1 $f2, 0($at)\n";
DriverStr(expected, "LoadSFromOffset");
}
-
TEST_F(AssemblerMIPSTest, LoadDFromOffset) {
- __ LoadDFromOffset(mips::F0, mips::A0, 0);
- __ LoadDFromOffset(mips::F0, mips::A0, 4);
- __ LoadDFromOffset(mips::F0, mips::A0, 256);
- __ LoadDFromOffset(mips::F0, mips::A0, 0x8000);
- __ LoadDFromOffset(mips::F0, mips::A0, 0x10000);
- __ LoadDFromOffset(mips::F0, mips::A0, 0x12345678);
- __ LoadDFromOffset(mips::F0, mips::A0, -256);
- __ LoadDFromOffset(mips::F0, mips::A0, 0xFFFF8000);
- __ LoadDFromOffset(mips::F0, mips::A0, 0xABCDEF00);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x8000);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x7FF8);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFB);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFC);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFF);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0xFFF0);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x8008);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x8001);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x8000);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0xFFF0);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x17FE8);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x0FFF8);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x0FFF1);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x0FFF1);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x0FFF8);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x17FE8);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x17FF0);
+ __ LoadDFromOffset(mips::F0, mips::A0, -0x17FE9);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x17FE9);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x17FF0);
+ __ LoadDFromOffset(mips::F0, mips::A0, +0x12345678);
const char* expected =
+ "ldc1 $f0, -0x8000($a0)\n"
"ldc1 $f0, 0($a0)\n"
- "lwc1 $f0, 4($a0)\n"
- "lwc1 $f1, 8($a0)\n"
- "ldc1 $f0, 256($a0)\n"
- "ori $at, $zero, 0x8000\n"
+ "ldc1 $f0, 0x7FF8($a0)\n"
+ "lwc1 $f0, 0x7FFB($a0)\n"
+ "lwc1 $f1, 0x7FFF($a0)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "lwc1 $f0, 4($at)\n"
+ "lwc1 $f1, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "lwc1 $f0, 7($at)\n"
+ "lwc1 $f1, 11($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "ldc1 $f0, -0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "ldc1 $f0, -0x10($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "lwc1 $f0, -9($at)\n"
+ "lwc1 $f1, -5($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "ldc1 $f0, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "ldc1 $f0, 0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "ldc1 $f0, -0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "ldc1 $f0, -8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "lwc1 $f0, -1($at)\n"
+ "lwc1 $f1, 3($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "lwc1 $f0, 1($at)\n"
+ "lwc1 $f1, 5($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "ldc1 $f0, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "ldc1 $f0, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a0\n"
"ldc1 $f0, 0($at)\n"
- "lui $at, 1\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a0\n"
- "ldc1 $f0, 0($at)\n"
- "lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "lwc1 $f0, 7($at)\n"
+ "lwc1 $f1, 11($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
+ "addu $at, $at, $a0\n"
+ "lwc1 $f0, 1($at)\n"
+ "lwc1 $f1, 5($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
"addu $at, $at, $a0\n"
"ldc1 $f0, 0($at)\n"
- "ldc1 $f0, -256($a0)\n"
- "ldc1 $f0, 0xFFFF8000($a0)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "lui $at, 0x1234\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a0\n"
"ldc1 $f0, 0($at)\n";
DriverStr(expected, "LoadDFromOffset");
}
TEST_F(AssemblerMIPSTest, StoreToOffset) {
- __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A0, 0);
- __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0);
- __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 256);
- __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 1000);
- __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0x8000);
- __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0x10000);
- __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0x12345678);
- __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, -256);
- __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0xFFFF8000);
- __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0xABCDEF00);
-
- __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A0, 0);
- __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0);
- __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 256);
- __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 1000);
- __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0x8000);
- __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0x10000);
- __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0x12345678);
- __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, -256);
- __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0xFFFF8000);
- __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0xABCDEF00);
-
- __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A0, 0);
- __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0);
- __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 256);
- __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 1000);
- __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0x8000);
- __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0x10000);
- __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0x12345678);
- __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, -256);
- __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0xFFFF8000);
- __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0xABCDEF00);
-
- __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0);
- __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 256);
- __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 1000);
- __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0x8000);
- __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0x10000);
- __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0x12345678);
- __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -256);
- __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0xFFFF8000);
- __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0xABCDEF00);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x8000);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x7FF8);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x7FFB);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x7FFC);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x7FFF);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0xFFF0);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x8008);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x8001);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x8000);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0xFFF0);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x17FE8);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x0FFF8);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x0FFF1);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x0FFF1);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x0FFF8);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x17FE8);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x17FF0);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x17FE9);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x17FE9);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x17FF0);
+ __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x12345678);
+
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x8000);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x7FF8);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x7FFB);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x7FFC);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x7FFF);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0xFFF0);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x8008);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x8001);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x8000);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0xFFF0);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x17FE8);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x0FFF8);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x0FFF1);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x0FFF1);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x0FFF8);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x17FE8);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x17FF0);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x17FE9);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x17FE9);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x17FF0);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x12345678);
+
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x8000);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x7FF8);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x7FFB);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x7FFC);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x7FFF);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0xFFF0);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x8008);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x8001);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x8000);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0xFFF0);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x17FE8);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x0FFF8);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x0FFF1);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x0FFF1);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x0FFF8);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x17FE8);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x17FF0);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x17FE9);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x17FE9);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x17FF0);
+ __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x12345678);
+
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x8000);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x7FF8);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x7FFB);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x7FFC);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x7FFF);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0xFFF0);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x8008);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x8001);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x8000);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0xFFF0);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x17FE8);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x0FFF8);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x0FFF1);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x0FFF1);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x0FFF8);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x17FE8);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x17FF0);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x17FE9);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x17FE9);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x17FF0);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x12345678);
const char* expected =
- "sb $a0, 0($a0)\n"
- "sb $a0, 0($a1)\n"
- "sb $a0, 256($a1)\n"
- "sb $a0, 1000($a1)\n"
- "ori $at, $zero, 0x8000\n"
+ "sb $a3, -0x8000($a1)\n"
+ "sb $a3, 0($a1)\n"
+ "sb $a3, 0x7FF8($a1)\n"
+ "sb $a3, 0x7FFB($a1)\n"
+ "sb $a3, 0x7FFC($a1)\n"
+ "sb $a3, 0x7FFF($a1)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "sb $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "sb $a3, -0x10($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "sb $a3, -9($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "sb $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "sb $a3, 0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sb $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sb $a3, -8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sb $a3, -1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sb $a3, 1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sb $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sb $a3, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "sb $a0, 0($at)\n"
- "lui $at, 1\n"
+ "sb $a3, 0($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
+ "addu $at, $at, $a1\n"
+ "sb $a3, 7($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
+ "addu $at, $at, $a1\n"
+ "sb $a3, 1($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
"addu $at, $at, $a1\n"
- "sb $a0, 0($at)\n"
+ "sb $a3, 0($at)\n"
"lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a1\n"
- "sb $a0, 0($at)\n"
- "sb $a0, -256($a1)\n"
- "sb $a0, 0xFFFF8000($a1)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "sb $a3, 0($at)\n"
+
+ "sh $a3, -0x8000($a1)\n"
+ "sh $a3, 0($a1)\n"
+ "sh $a3, 0x7FF8($a1)\n"
+ "sh $a3, 0x7FFB($a1)\n"
+ "sh $a3, 0x7FFC($a1)\n"
+ "sh $a3, 0x7FFF($a1)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "sh $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "sh $a3, -0x10($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "sh $a3, -9($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "sh $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "sh $a3, 0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sh $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sh $a3, -8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sh $a3, -1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sh $a3, 1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sh $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sh $a3, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "sb $a0, 0($at)\n"
-
- "sh $a0, 0($a0)\n"
- "sh $a0, 0($a1)\n"
- "sh $a0, 256($a1)\n"
- "sh $a0, 1000($a1)\n"
- "ori $at, $zero, 0x8000\n"
+ "sh $a3, 0($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "sh $a0, 0($at)\n"
- "lui $at, 1\n"
+ "sh $a3, 7($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
"addu $at, $at, $a1\n"
- "sh $a0, 0($at)\n"
+ "sh $a3, 1($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
+ "addu $at, $at, $a1\n"
+ "sh $a3, 0($at)\n"
"lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a1\n"
- "sh $a0, 0($at)\n"
- "sh $a0, -256($a1)\n"
- "sh $a0, 0xFFFF8000($a1)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "sh $a3, 0($at)\n"
+
+ "sw $a3, -0x8000($a1)\n"
+ "sw $a3, 0($a1)\n"
+ "sw $a3, 0x7FF8($a1)\n"
+ "sw $a3, 0x7FFB($a1)\n"
+ "sw $a3, 0x7FFC($a1)\n"
+ "sw $a3, 0x7FFF($a1)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "sw $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "sw $a3, -0x10($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "sw $a3, -9($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "sw $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "sw $a3, 0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sw $a3, -0x7FF8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sw $a3, -8($at)\n"
+ "addiu $at, $a1, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sw $a3, -1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sw $a3, 1($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sw $a3, 8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sw $a3, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "sh $a0, 0($at)\n"
-
- "sw $a0, 0($a0)\n"
- "sw $a0, 0($a1)\n"
- "sw $a0, 256($a1)\n"
- "sw $a0, 1000($a1)\n"
- "ori $at, $zero, 0x8000\n"
+ "sw $a3, 0($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a1\n"
- "sw $a0, 0($at)\n"
- "lui $at, 1\n"
+ "sw $a3, 7($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
"addu $at, $at, $a1\n"
- "sw $a0, 0($at)\n"
- "lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "sw $a3, 1($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
"addu $at, $at, $a1\n"
- "sw $a0, 0($at)\n"
- "sw $a0, -256($a1)\n"
- "sw $a0, 0xFFFF8000($a1)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "sw $a3, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a1\n"
- "sw $a0, 0($at)\n"
+ "sw $a3, 0($at)\n"
+ "sw $a0, -0x8000($a2)\n"
+ "sw $a1, -0x7FFC($a2)\n"
"sw $a0, 0($a2)\n"
"sw $a1, 4($a2)\n"
- "sw $a0, 256($a2)\n"
- "sw $a1, 260($a2)\n"
- "sw $a0, 1000($a2)\n"
- "sw $a1, 1004($a2)\n"
- "ori $at, $zero, 0x8000\n"
+ "sw $a0, 0x7FF8($a2)\n"
+ "sw $a1, 0x7FFC($a2)\n"
+ "sw $a0, 0x7FFB($a2)\n"
+ "sw $a1, 0x7FFF($a2)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "sw $a0, 4($at)\n"
+ "sw $a1, 8($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "sw $a0, 7($at)\n"
+ "sw $a1, 11($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "sw $a0, -0x7FF8($at)\n"
+ "sw $a1, -0x7FF4($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "sw $a0, -0x10($at)\n"
+ "sw $a1, -0xC($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "sw $a0, -9($at)\n"
+ "sw $a1, -5($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "sw $a0, 8($at)\n"
+ "sw $a1, 12($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "sw $a0, 0x7FF8($at)\n"
+ "sw $a1, 0x7FFC($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sw $a0, -0x7FF8($at)\n"
+ "sw $a1, -0x7FF4($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sw $a0, -8($at)\n"
+ "sw $a1, -4($at)\n"
+ "addiu $at, $a2, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sw $a0, -1($at)\n"
+ "sw $a1, 3($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sw $a0, 1($at)\n"
+ "sw $a1, 5($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sw $a0, 8($at)\n"
+ "sw $a1, 12($at)\n"
+ "addiu $at, $a2, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sw $a0, 0x7FF8($at)\n"
+ "sw $a1, 0x7FFC($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a2\n"
"sw $a0, 0($at)\n"
"sw $a1, 4($at)\n"
- "lui $at, 1\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a2\n"
- "sw $a0, 0($at)\n"
- "sw $a1, 4($at)\n"
- "lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "sw $a0, 7($at)\n"
+ "sw $a1, 11($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
+ "addu $at, $at, $a2\n"
+ "sw $a0, 1($at)\n"
+ "sw $a1, 5($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
"addu $at, $at, $a2\n"
"sw $a0, 0($at)\n"
"sw $a1, 4($at)\n"
- "sw $a0, -256($a2)\n"
- "sw $a1, -252($a2)\n"
- "sw $a0, 0xFFFF8000($a2)\n"
- "sw $a1, 0xFFFF8004($a2)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "lui $at, 0x1234\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a2\n"
"sw $a0, 0($at)\n"
"sw $a1, 4($at)\n";
@@ -1153,69 +1784,174 @@ TEST_F(AssemblerMIPSTest, StoreToOffset) {
DriverStr(expected, "StoreToOffset");
}
TEST_F(AssemblerMIPSTest, StoreSToOffset) {
- __ StoreSToOffset(mips::F0, mips::A0, 0);
- __ StoreSToOffset(mips::F0, mips::A0, 4);
- __ StoreSToOffset(mips::F0, mips::A0, 256);
- __ StoreSToOffset(mips::F0, mips::A0, 0x8000);
- __ StoreSToOffset(mips::F0, mips::A0, 0x10000);
- __ StoreSToOffset(mips::F0, mips::A0, 0x12345678);
- __ StoreSToOffset(mips::F0, mips::A0, -256);
- __ StoreSToOffset(mips::F0, mips::A0, 0xFFFF8000);
- __ StoreSToOffset(mips::F0, mips::A0, 0xABCDEF00);
+ __ StoreSToOffset(mips::F2, mips::A0, -0x8000);
+ __ StoreSToOffset(mips::F2, mips::A0, +0);
+ __ StoreSToOffset(mips::F2, mips::A0, +0x7FF8);
+ __ StoreSToOffset(mips::F2, mips::A0, +0x7FFB);
+ __ StoreSToOffset(mips::F2, mips::A0, +0x7FFC);
+ __ StoreSToOffset(mips::F2, mips::A0, +0x7FFF);
+ __ StoreSToOffset(mips::F2, mips::A0, -0xFFF0);
+ __ StoreSToOffset(mips::F2, mips::A0, -0x8008);
+ __ StoreSToOffset(mips::F2, mips::A0, -0x8001);
+ __ StoreSToOffset(mips::F2, mips::A0, +0x8000);
+ __ StoreSToOffset(mips::F2, mips::A0, +0xFFF0);
+ __ StoreSToOffset(mips::F2, mips::A0, -0x17FE8);
+ __ StoreSToOffset(mips::F2, mips::A0, -0x0FFF8);
+ __ StoreSToOffset(mips::F2, mips::A0, -0x0FFF1);
+ __ StoreSToOffset(mips::F2, mips::A0, +0x0FFF1);
+ __ StoreSToOffset(mips::F2, mips::A0, +0x0FFF8);
+ __ StoreSToOffset(mips::F2, mips::A0, +0x17FE8);
+ __ StoreSToOffset(mips::F2, mips::A0, -0x17FF0);
+ __ StoreSToOffset(mips::F2, mips::A0, -0x17FE9);
+ __ StoreSToOffset(mips::F2, mips::A0, +0x17FE9);
+ __ StoreSToOffset(mips::F2, mips::A0, +0x17FF0);
+ __ StoreSToOffset(mips::F2, mips::A0, +0x12345678);
const char* expected =
- "swc1 $f0, 0($a0)\n"
- "swc1 $f0, 4($a0)\n"
- "swc1 $f0, 256($a0)\n"
- "ori $at, $zero, 0x8000\n"
+ "swc1 $f2, -0x8000($a0)\n"
+ "swc1 $f2, 0($a0)\n"
+ "swc1 $f2, 0x7FF8($a0)\n"
+ "swc1 $f2, 0x7FFB($a0)\n"
+ "swc1 $f2, 0x7FFC($a0)\n"
+ "swc1 $f2, 0x7FFF($a0)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "swc1 $f2, -0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "swc1 $f2, -0x10($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "swc1 $f2, -9($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "swc1 $f2, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "swc1 $f2, 0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "swc1 $f2, -0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "swc1 $f2, -8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "swc1 $f2, -1($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "swc1 $f2, 1($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "swc1 $f2, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "swc1 $f2, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a0\n"
- "swc1 $f0, 0($at)\n"
- "lui $at, 1\n"
+ "swc1 $f2, 0($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a0\n"
- "swc1 $f0, 0($at)\n"
- "lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "swc1 $f2, 7($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
"addu $at, $at, $a0\n"
- "swc1 $f0, 0($at)\n"
- "swc1 $f0, -256($a0)\n"
- "swc1 $f0, 0xFFFF8000($a0)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "swc1 $f2, 1($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
+ "addu $at, $at, $a0\n"
+ "swc1 $f2, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a0\n"
- "swc1 $f0, 0($at)\n";
+ "swc1 $f2, 0($at)\n";
DriverStr(expected, "StoreSToOffset");
}
TEST_F(AssemblerMIPSTest, StoreDToOffset) {
- __ StoreDToOffset(mips::F0, mips::A0, 0);
- __ StoreDToOffset(mips::F0, mips::A0, 4);
- __ StoreDToOffset(mips::F0, mips::A0, 256);
- __ StoreDToOffset(mips::F0, mips::A0, 0x8000);
- __ StoreDToOffset(mips::F0, mips::A0, 0x10000);
- __ StoreDToOffset(mips::F0, mips::A0, 0x12345678);
- __ StoreDToOffset(mips::F0, mips::A0, -256);
- __ StoreDToOffset(mips::F0, mips::A0, 0xFFFF8000);
- __ StoreDToOffset(mips::F0, mips::A0, 0xABCDEF00);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x8000);
+ __ StoreDToOffset(mips::F0, mips::A0, +0);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x7FF8);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x7FFB);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x7FFC);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x7FFF);
+ __ StoreDToOffset(mips::F0, mips::A0, -0xFFF0);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x8008);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x8001);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x8000);
+ __ StoreDToOffset(mips::F0, mips::A0, +0xFFF0);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x17FE8);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x0FFF8);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x0FFF1);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x0FFF1);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x0FFF8);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x17FE8);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x17FF0);
+ __ StoreDToOffset(mips::F0, mips::A0, -0x17FE9);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x17FE9);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x17FF0);
+ __ StoreDToOffset(mips::F0, mips::A0, +0x12345678);
const char* expected =
+ "sdc1 $f0, -0x8000($a0)\n"
"sdc1 $f0, 0($a0)\n"
- "swc1 $f0, 4($a0)\n"
- "swc1 $f1, 8($a0)\n"
- "sdc1 $f0, 256($a0)\n"
- "ori $at, $zero, 0x8000\n"
+ "sdc1 $f0, 0x7FF8($a0)\n"
+ "swc1 $f0, 0x7FFB($a0)\n"
+ "swc1 $f1, 0x7FFF($a0)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "swc1 $f0, 4($at)\n"
+ "swc1 $f1, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "swc1 $f0, 7($at)\n"
+ "swc1 $f1, 11($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "sdc1 $f0, -0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "sdc1 $f0, -0x10($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "swc1 $f0, -9($at)\n"
+ "swc1 $f1, -5($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "sdc1 $f0, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "sdc1 $f0, 0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sdc1 $f0, -0x7FF8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "sdc1 $f0, -8($at)\n"
+ "addiu $at, $a0, -0x7FF8\n"
+ "addiu $at, $at, -0x7FF8\n"
+ "swc1 $f0, -1($at)\n"
+ "swc1 $f1, 3($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "swc1 $f0, 1($at)\n"
+ "swc1 $f1, 5($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sdc1 $f0, 8($at)\n"
+ "addiu $at, $a0, 0x7FF8\n"
+ "addiu $at, $at, 0x7FF8\n"
+ "sdc1 $f0, 0x7FF8($at)\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a0\n"
"sdc1 $f0, 0($at)\n"
- "lui $at, 1\n"
+ "lui $at, 0xFFFE\n"
+ "ori $at, $at, 0x8010\n"
"addu $at, $at, $a0\n"
- "sdc1 $f0, 0($at)\n"
- "lui $at, 0x1234\n"
- "ori $at, 0x5678\n"
+ "swc1 $f0, 7($at)\n"
+ "swc1 $f1, 11($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FE8\n"
+ "addu $at, $at, $a0\n"
+ "swc1 $f0, 1($at)\n"
+ "swc1 $f1, 5($at)\n"
+ "lui $at, 0x1\n"
+ "ori $at, $at, 0x7FF0\n"
"addu $at, $at, $a0\n"
"sdc1 $f0, 0($at)\n"
- "sdc1 $f0, -256($a0)\n"
- "sdc1 $f0, 0xFFFF8000($a0)\n"
- "lui $at, 0xABCD\n"
- "ori $at, 0xEF00\n"
+ "lui $at, 0x1234\n"
+ "ori $at, $at, 0x5678\n"
"addu $at, $at, $a0\n"
"sdc1 $f0, 0($at)\n";
DriverStr(expected, "StoreDToOffset");
@@ -1492,6 +2228,51 @@ TEST_F(AssemblerMIPSTest, Bc1t) {
DriverStr(expected, "Bc1t");
}
+///////////////////////
+// Loading Constants //
+///////////////////////
+
+TEST_F(AssemblerMIPSTest, LoadConst32) {
+ // IsUint<16>(value)
+ __ LoadConst32(mips::V0, 0);
+ __ LoadConst32(mips::V0, 65535);
+ // IsInt<16>(value)
+ __ LoadConst32(mips::V0, -1);
+ __ LoadConst32(mips::V0, -32768);
+ // Everything else
+ __ LoadConst32(mips::V0, 65536);
+ __ LoadConst32(mips::V0, 65537);
+ __ LoadConst32(mips::V0, 2147483647);
+ __ LoadConst32(mips::V0, -32769);
+ __ LoadConst32(mips::V0, -65536);
+ __ LoadConst32(mips::V0, -65537);
+ __ LoadConst32(mips::V0, -2147483647);
+ __ LoadConst32(mips::V0, -2147483648);
+
+ const char* expected =
+ // IsUint<16>(value)
+ "ori $v0, $zero, 0\n" // __ LoadConst32(mips::V0, 0);
+ "ori $v0, $zero, 65535\n" // __ LoadConst32(mips::V0, 65535);
+ // IsInt<16>(value)
+ "addiu $v0, $zero, -1\n" // __ LoadConst32(mips::V0, -1);
+ "addiu $v0, $zero, -32768\n" // __ LoadConst32(mips::V0, -32768);
+ // Everything else
+ "lui $v0, 1\n" // __ LoadConst32(mips::V0, 65536);
+ "lui $v0, 1\n" // __ LoadConst32(mips::V0, 65537);
+ "ori $v0, 1\n" // "
+ "lui $v0, 32767\n" // __ LoadConst32(mips::V0, 2147483647);
+ "ori $v0, 65535\n" // "
+ "lui $v0, 65535\n" // __ LoadConst32(mips::V0, -32769);
+ "ori $v0, 32767\n" // "
+ "lui $v0, 65535\n" // __ LoadConst32(mips::V0, -65536);
+ "lui $v0, 65534\n" // __ LoadConst32(mips::V0, -65537);
+ "ori $v0, 65535\n" // "
+ "lui $v0, 32768\n" // __ LoadConst32(mips::V0, -2147483647);
+ "ori $v0, 1\n" // "
+ "lui $v0, 32768\n"; // __ LoadConst32(mips::V0, -2147483648);
+ DriverStr(expected, "LoadConst32");
+}
+
#undef __
} // namespace art
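The LoadConst32 expectations above encode a three-way choice between ori, addiu, and lui (plus ori when the low half is non-zero). A self-contained sketch of that selection follows; the helper is hypothetical and simply returns the mnemonics the test expects rather than encoding anything.

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical helper mirroring the LoadConst32 expectations above.
std::vector<std::string> LoadConst32Sequence(int32_t value) {
  std::vector<std::string> insns;
  const uint32_t bits = static_cast<uint32_t>(value);
  if (bits <= 0xFFFF) {                           // IsUint<16>(value)
    insns.push_back("ori $v0, $zero, " + std::to_string(bits));
  } else if (value >= -32768 && value < 0) {      // IsInt<16>(value)
    insns.push_back("addiu $v0, $zero, " + std::to_string(value));
  } else {                                        // everything else: lui, plus ori if needed
    insns.push_back("lui $v0, " + std::to_string(bits >> 16));
    if ((bits & 0xFFFF) != 0) {
      insns.push_back("ori $v0, " + std::to_string(bits & 0xFFFF));
    }
  }
  return insns;
}

For example, LoadConst32Sequence(-65537) yields "lui $v0, 65534" followed by "ori $v0, 65535", matching the expected strings in the test.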
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 0a7ffda3b4..5bb61bb829 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -494,6 +494,17 @@ void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
}
+void PatchOat::PatchImTables(const ImageHeader* image_header) {
+ const size_t pointer_size = InstructionSetPointerSize(isa_);
+ // We can safely walk target image since the conflict tables are independent.
+ image_header->VisitPackedImTables(
+ [this](ArtMethod* method) {
+ return RelocatedAddressOfPointer(method);
+ },
+ image_->Begin(),
+ pointer_size);
+}
+
void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) {
const size_t pointer_size = InstructionSetPointerSize(isa_);
// We can safely walk target image since the conflict tables are independent.
@@ -636,6 +647,7 @@ bool PatchOat::PatchImage(bool primary_image) {
PatchArtFields(image_header);
PatchArtMethods(image_header);
+ PatchImTables(image_header);
PatchImtConflictTables(image_header);
PatchInternedStrings(image_header);
PatchClassTable(image_header);
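PatchImTables mirrors the existing conflict-table pass: every ArtMethod* packed into the image's IM tables is rewritten to its relocated address via the lambda handed to VisitPackedImTables. A rough sketch of that shape, with made-up types (only the visit-and-shift pattern is real):

#include <cstddef>
#include <cstdint>
#include <vector>

// Made-up container type; the point is only the pointer-rewriting visit.
struct FakeImTable {
  std::vector<std::uintptr_t> slots;  // ArtMethod* values stored in the image
};

void RelocateImTables(std::vector<FakeImTable>& tables, std::ptrdiff_t delta) {
  for (FakeImTable& table : tables) {
    for (std::uintptr_t& slot : table.slots) {
      if (slot != 0) {
        slot += static_cast<std::uintptr_t>(delta);  // same shift the rest of the image gets
      }
    }
  }
}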
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 3ef837fde9..61ec695d83 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -117,6 +117,7 @@ class PatchOat {
bool PatchImage(bool primary_image) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
+ void PatchImTables(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchImtConflictTables(const ImageHeader* image_header)
SHARED_REQUIRES(Locks::mutator_lock_);
void PatchInternedStrings(const ImageHeader* image_header)
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index 44c7649dea..38ca76a6a9 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -30,18 +30,17 @@
.arch armv7-a
.thumb
-// Macro to generate the value of Runtime::Current into rDest clobbering rTemp. As it uses labels
+// Macro to generate the value of Runtime::Current into rDest. As it uses labels
// then the labels need to be unique. We bind these to the function name in the ENTRY macros.
-.macro RUNTIME_CURRENT name, num, rDest, rTemp
+.macro RUNTIME_CURRENT name, num, rDest
.if .Lruntime_current\num\()_used
.error
.endif
.set .Lruntime_current\num\()_used, 1
- ldr \rDest, .Lgot_\name\()_\num @ Load offset of the GOT.
- ldr \rTemp, .Lruntime_instance_\name\()_\num @ Load GOT offset of Runtime::instance_.
+ ldr \rDest, .Lruntime_instance_\name\()_\num @ Load GOT_PREL offset of Runtime::instance_.
.Lload_got_\name\()_\num\():
- add \rDest, pc @ Fixup GOT address.
- ldr \rDest, [\rDest, \rTemp] @ Load address of Runtime::instance_.
+ add \rDest, pc @ Fixup GOT_PREL address.
+ ldr \rDest, [\rDest] @ Load address of Runtime::instance_.
ldr \rDest, [\rDest] @ Load Runtime::instance_.
.endm
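For reference, the arithmetic behind the single GOT_PREL literal (addresses below are hypothetical): the word stores sym(GOT_PREL) minus (.Lload_got + 4), and since a Thumb read of pc in the add sees the address of that add plus 4, adding pc lands exactly on the GOT slot; the two loads that follow then fetch the address of Runtime::instance_ and its value.

#include <cstdint>

// Hedged worked example with hypothetical addresses; only the arithmetic matters.
constexpr std::uint32_t kGotSlot  = 0x00052A40;                 // &GOT[Runtime::instance_]
constexpr std::uint32_t kLoadGot  = 0x00011F30;                 // address of .Lload_got_...
constexpr std::uint32_t kLiteral  = kGotSlot - (kLoadGot + 4);  // sym(GOT_PREL)-(.Lload+4)
constexpr std::uint32_t kPcAtAdd  = kLoadGot + 4;               // Thumb pc value at the add
static_assert(kLiteral + kPcAtAdd == kGotSlot, "add rDest, pc lands on the GOT slot");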
@@ -90,26 +89,20 @@
DEF_ENTRY .arm, \name
.endm
-// Terminate an ENTRY and generate GOT references.
+// Terminate an ENTRY and generate GOT_PREL references.
.macro END name
// Generate offsets of GOT and Runtime::instance_ used in RUNTIME_CURRENT.
.if .Lruntime_current1_used
- .Lgot_\name\()_1:
- .word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_1+4)
.Lruntime_instance_\name\()_1:
- .word _ZN3art7Runtime9instance_E(GOT)
+ .word _ZN3art7Runtime9instance_E(GOT_PREL)-(.Lload_got_\name\()_1+4)
.endif
.if .Lruntime_current2_used
- .Lgot_\name\()_2:
- .word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_2+4)
.Lruntime_instance_\name\()_2:
- .word _ZN3art7Runtime9instance_E(GOT)
+ .word _ZN3art7Runtime9instance_E(GOT_PREL)-(.Lload_got_\name\()_2+4)
.endif
.if .Lruntime_current3_used
- .Lgot_\name\()_3:
- .word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_3+4)
.Lruntime_instance_\name\()_3:
- .word _ZN3art7Runtime9instance_E(GOT)
+ .word _ZN3art7Runtime9instance_E(GOT_PREL)-(.Lload_got_\name\()_3+4)
.endif
// Remove the RUNTIME_CURRENTx macros so they get rebound in the next function entry.
.purgem RUNTIME_CURRENT1
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index ffac0307b7..c3a5829979 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -206,6 +206,7 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
struct sigaction sa, osa;
sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
sa.sa_sigaction = bad_divide_inst_handle;
+ sigemptyset(&sa.sa_mask);
sigaction(SIGILL, &sa, &osa);
bool has_div = false;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 5209bb6ab6..dd7063f248 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -44,15 +44,15 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
*/
-.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME rTemp1, rTemp2
+.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME rTemp
SPILL_ALL_CALLEE_SAVE_GPRS @ 9 words (36 bytes) of callee saves.
vpush {s16-s31} @ 16 words (64 bytes) of floats.
.cfi_adjust_cfa_offset 64
sub sp, #12 @ 3 words of space, bottom word will hold Method*
.cfi_adjust_cfa_offset 12
- RUNTIME_CURRENT1 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- ldr \rTemp1, [\rTemp1, #RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kSaveAll Method*.
- str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
+ RUNTIME_CURRENT1 \rTemp @ Load Runtime::Current into rTemp.
+ ldr \rTemp, [\rTemp, #RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET] @ rTemp is kSaveAll Method*.
+ str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
@@ -65,7 +65,7 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly).
*/
-.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME rTemp1, rTemp2
+.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME rTemp
push {r5-r8, r10-r11, lr} @ 7 words of callee saves
.cfi_adjust_cfa_offset 28
.cfi_rel_offset r5, 0
@@ -77,9 +77,9 @@
.cfi_rel_offset lr, 24
sub sp, #4 @ bottom word will hold Method*
.cfi_adjust_cfa_offset 4
- RUNTIME_CURRENT2 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kRefsOnly Method*.
- str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
+ RUNTIME_CURRENT2 \rTemp @ Load Runtime::Current into rTemp.
+ ldr \rTemp, [\rTemp, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp is kRefsOnly Method*.
+ str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
@@ -88,30 +88,6 @@
#endif
.endm
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsOnly)
- * and preserves the value of rTemp2 at entry.
- */
-.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_RTEMP2 rTemp1, rTemp2
- push {r5-r8, r10-r11, lr} @ 7 words of callee saves
- .cfi_adjust_cfa_offset 28
- .cfi_rel_offset r5, 0
- .cfi_rel_offset r6, 4
- .cfi_rel_offset r7, 8
- .cfi_rel_offset r8, 12
- .cfi_rel_offset r10, 16
- .cfi_rel_offset r11, 20
- .cfi_rel_offset lr, 24
- sub sp, #4 @ bottom word will hold Method*
- .cfi_adjust_cfa_offset 4
- str \rTemp2, [sp, #0] @ save rTemp2
- RUNTIME_CURRENT2 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kRefsOnly Method*.
- ldr \rTemp2, [sp, #0] @ restore rTemp2
- str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
-
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 28 + 4)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM) size not as expected."
@@ -164,12 +140,12 @@
#endif
.endm
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME rTemp1, rTemp2
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME rTemp
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
- RUNTIME_CURRENT3 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- @ rTemp1 is kRefsAndArgs Method*.
- ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]
- str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
+ RUNTIME_CURRENT3 \rTemp @ Load Runtime::Current into rTemp.
+ @ rTemp is kRefsAndArgs Method*.
+ ldr \rTemp, [\rTemp, #RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]
+ str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
.endm
@@ -217,7 +193,7 @@
.macro DELIVER_PENDING_EXCEPTION
.fnend
.fnstart
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1 @ save callee saves for throw
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0 @ save callee saves for throw
mov r0, r9 @ pass Thread::Current
b artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*)
.endm
@@ -225,7 +201,7 @@
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1 // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0 @ save all registers as basis for long jump context
mov r0, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
@@ -234,7 +210,7 @@ END \c_name
.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r1, r2 // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r1 @ save all registers as basis for long jump context
mov r1, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
@@ -243,7 +219,7 @@ END \c_name
.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2, r3 // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2 @ save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
@@ -275,7 +251,7 @@ END \c_name
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case of GC
ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r2, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t field_idx, const Method* referrer, Thread*)
@@ -287,7 +263,7 @@ END \name
.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
ldr r2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r3, r9 @ pass Thread::Current
bl \entrypoint @ (field_idx, Object*, referrer, Thread*)
@@ -299,7 +275,7 @@ END \name
.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3 @ save callee saves in case of GC
ldr r3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
@@ -360,7 +336,7 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr
*/
.macro INVOKE_TRAMPOLINE_BODY cxx_name
.extern \cxx_name
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case allocation triggers GC
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2 @ save callee saves in case allocation triggers GC
mov r2, r9 @ pass Thread::Current
mov r3, sp
bl \cxx_name @ (method_idx, this, Thread*, SP)
@@ -566,7 +542,7 @@ ENTRY art_quick_lock_object
.Llock_strex_fail:
b .Lretry_lock @ retry
.Lslow_lock:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case we block
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case we block
mov r1, r9 @ pass Thread::Current
bl artLockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -575,7 +551,7 @@ ENTRY art_quick_lock_object
END art_quick_lock_object
ENTRY art_quick_lock_object_no_inline
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case we block
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case we block
mov r1, r9 @ pass Thread::Current
bl artLockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -632,7 +608,7 @@ ENTRY art_quick_unlock_object
b .Lretry_unlock @ retry
.Lslow_unlock:
@ save callee saves in case exception allocation triggers GC
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1
mov r1, r9 @ pass Thread::Current
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -642,7 +618,7 @@ END art_quick_unlock_object
ENTRY art_quick_unlock_object_no_inline
@ save callee saves in case exception allocation triggers GC
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1
mov r1, r9 @ pass Thread::Current
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -677,7 +653,7 @@ ENTRY art_quick_check_cast
.cfi_restore r0
.cfi_restore r1
.cfi_restore lr
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2, r3 // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2 @ save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
b artThrowClassCastException @ (Class*, Class*, Thread*)
bkpt
@@ -813,7 +789,7 @@ ENTRY art_quick_aput_obj
.Lthrow_array_store_exception:
pop {r0-r2, lr}
/* No need to repeat restore cfi directives, the ones above apply here. */
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r3, ip
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r3
mov r1, r2
mov r2, r9 @ pass Thread::Current
b artThrowArrayStoreException @ (Class*, Class*, Thread*)
@@ -824,7 +800,7 @@ END art_quick_aput_obj
.macro ONE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case of GC
mov r1, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -836,7 +812,7 @@ END \name
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -848,7 +824,7 @@ END \name
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3 @ save callee saves in case of GC
mov r3, r9 @ pass Thread::Current
@ (uint32_t type_idx, Method* method, int32_t component_count, Thread*)
bl \entrypoint
@@ -861,7 +837,7 @@ END \name
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_RTEMP2 r12, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12 @ save callee saves in case of GC
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl \entrypoint
@@ -890,7 +866,7 @@ ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_O
*/
.extern artGet64StaticFromCode
ENTRY art_quick_get64_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r2, r9 @ pass Thread::Current
bl artGet64StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*)
@@ -916,7 +892,7 @@ TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETU
*/
.extern artGet64InstanceFromCode
ENTRY art_quick_get64_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
ldr r2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r3, r9 @ pass Thread::Current
bl artGet64InstanceFromCode @ (field_idx, Object*, referrer, Thread*)
@@ -941,7 +917,7 @@ TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_I
*/
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r12 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
@@ -966,7 +942,7 @@ THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RE
*/
.extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12, lr @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
ldr r12, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-12]! @ expand the frame and pass Thread::Current
@@ -1087,7 +1063,7 @@ ENTRY art_quick_alloc_object_rosalloc
bx lr
.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
bl artAllocObjectFromCodeRosAlloc @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -1172,7 +1148,7 @@ ENTRY art_quick_alloc_object_tlab
ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
.Lart_quick_alloc_object_tlab_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 // Save callee saves in case of GC.
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 // Save callee saves in case of GC.
mov r2, r9 // Pass Thread::Current.
bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -1207,7 +1183,7 @@ ENTRY art_quick_alloc_object_region_tlab
pop {r0, r1, r3, lr}
b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
.Lart_quick_alloc_object_region_tlab_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 // Save callee saves in case of GC.
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 // Save callee saves in case of GC.
mov r2, r9 // Pass Thread::Current.
bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -1227,7 +1203,7 @@ ENTRY art_quick_test_suspend
1:
#endif
mov r0, rSELF
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves for GC stack crawl
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves for GC stack crawl
@ TODO: save FPRs to enable access in the debugger?
bl artTestSuspendFromCode @ (Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
@@ -1235,7 +1211,7 @@ END art_quick_test_suspend
ENTRY art_quick_implicit_suspend
mov r0, rSELF
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves for stack crawl
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves for stack crawl
bl artTestSuspendFromCode @ (Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_implicit_suspend
@@ -1298,7 +1274,7 @@ END art_quick_imt_conflict_trampoline
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2
mov r2, r9 @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickResolutionTrampoline @ (Method* called, receiver, Thread*, SP)
@@ -1403,7 +1379,7 @@ END art_quick_generic_jni_trampoline
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r1, r2
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r1
mov r1, r9 @ pass Thread::Current
mov r2, sp @ pass SP
blx artQuickToInterpreterBridge @ (Method* method, Thread*, SP)
@@ -1426,7 +1402,7 @@ END art_quick_to_interpreter_bridge
.extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
@ Make stack crawlable and clobber r2 and r3 (post saving)
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2
@ preserve r0 (not normally an arg) knowing there is a spare slot in kRefsAndArgs.
str r0, [sp, #4]
mov r2, r9 @ pass Thread::Current
@@ -1441,7 +1417,7 @@ ENTRY art_quick_instrumentation_entry
.global art_quick_instrumentation_exit
art_quick_instrumentation_exit:
mov lr, #0 @ link register is to here, so clobber with 0 for later checks
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ set up frame knowing r2 and r3 must be dead on exit
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ set up frame knowing r2 and r3 must be dead on exit
mov r12, sp @ remember bottom of caller's frame
push {r0-r1} @ save return value
.cfi_adjust_cfa_offset 8
@@ -1480,7 +1456,7 @@ END art_quick_instrumentation_entry
*/
.extern artDeoptimize
ENTRY art_quick_deoptimize
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0
mov r0, r9 @ Set up args.
blx artDeoptimize @ artDeoptimize(Thread*)
END art_quick_deoptimize
@@ -1491,7 +1467,7 @@ END art_quick_deoptimize
*/
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0
mov r0, r9 @ Set up args.
blx artDeoptimizeFromCompiledCode @ artDeoptimizeFromCompiledCode(Thread*)
END art_quick_deoptimize_from_compiled_code
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 98d33453e2..a102858acc 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -33,9 +33,10 @@
namespace art {
+template<ReadBarrierOption kReadBarrierOption>
inline mirror::Class* ArtField::GetDeclaringClass() {
GcRootSource gc_root_source(this);
- mirror::Class* result = declaring_class_.Read(&gc_root_source);
+ mirror::Class* result = declaring_class_.Read<kReadBarrierOption>(&gc_root_source);
DCHECK(result != nullptr);
DCHECK(result->IsLoaded() || result->IsErroneous()) << result->GetStatus();
return result;
diff --git a/runtime/art_field.h b/runtime/art_field.h
index b64b70fa8d..aaccbf3699 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -41,6 +41,7 @@ class ArtField FINAL {
public:
ArtField();
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
void SetDeclaringClass(mirror::Class *new_declaring_class)
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 7647ad6e57..32ae6ffad5 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -120,6 +120,10 @@ inline uint32_t ArtMethod::GetDexMethodIndex() {
return dex_method_index_;
}
+inline uint32_t ArtMethod::GetImtIndex() {
+ return GetDexMethodIndex() % ImTable::kSize;
+}
+
inline ArtMethod** ArtMethod::GetDexCacheResolvedMethods(size_t pointer_size) {
return GetNativePointer<ArtMethod**>(DexCacheResolvedMethodsOffset(pointer_size),
pointer_size);
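The GetImtIndex() helper added above just reduces the dex method index modulo the IMT size. To make the consequence concrete (the size below is an assumed build-time default, not quoted from imtable.h): two interface methods whose indices collide modulo the table size share a slot, which is exactly when an ImtConflictTable gets installed there.

#include <cstddef>
#include <cstdint>

// Assumed table size; ImTable::kSize is a build-time constant in the runtime.
constexpr std::size_t kAssumedImTableSize = 64;

constexpr std::size_t ImtSlot(std::uint32_t dex_method_index) {
  return dex_method_index % kAssumedImTableSize;  // same formula as GetImtIndex()
}

static_assert(ImtSlot(3) == ImtSlot(67), "indices 3 and 67 collide in slot 3");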
@@ -395,8 +399,9 @@ inline mirror::DexCache* ArtMethod::GetDexCache() {
return GetDeclaringClass()->GetDexCache();
}
+template<ReadBarrierOption kReadBarrierOption>
inline bool ArtMethod::IsProxyMethod() {
- return GetDeclaringClass()->IsProxyClass();
+ return GetDeclaringClass<kReadBarrierOption>()->IsProxyClass();
}
inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(size_t pointer_size) {
@@ -438,24 +443,24 @@ inline mirror::Class* ArtMethod::GetReturnType(bool resolve, size_t ptr_size) {
return type;
}
-template<typename RootVisitorType>
+template<ReadBarrierOption kReadBarrierOption, typename RootVisitorType>
void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
- ArtMethod* interface_method = nullptr;
- mirror::Class* klass = declaring_class_.Read();
- if (LIKELY(klass != nullptr)) {
+ if (LIKELY(!declaring_class_.IsNull())) {
+ visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
+ mirror::Class* klass = declaring_class_.Read<kReadBarrierOption>();
if (UNLIKELY(klass->IsProxyClass())) {
// For normal methods, dex cache shortcuts will be visited through the declaring class.
// However, for proxies we need to keep the interface method alive, so we visit its roots.
- interface_method = mirror::DexCache::GetElementPtrSize(
+ ArtMethod* interface_method = mirror::DexCache::GetElementPtrSize(
GetDexCacheResolvedMethods(pointer_size),
GetDexMethodIndex(),
pointer_size);
DCHECK(interface_method != nullptr);
DCHECK_EQ(interface_method,
- Runtime::Current()->GetClassLinker()->FindMethodForProxy(klass, this));
+ Runtime::Current()->GetClassLinker()->FindMethodForProxy<kReadBarrierOption>(
+ klass, this));
interface_method->VisitRoots(visitor, pointer_size);
}
- visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
// We know we don't have profiling information if the class hasn't been verified. Note
// that this check also ensures the IsNative call can be made, as IsNative expects a fully
// created class (and not a retired one).
@@ -463,7 +468,7 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
// Runtime methods and native methods use the same field as the profiling info for
// storing their own data (jni entrypoint for native methods, and ImtConflictTable for
// some runtime methods).
- if (!IsNative() && !IsRuntimeMethod()) {
+ if (!IsNative<kReadBarrierOption>() && !IsRuntimeMethod()) {
ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size);
if (profiling_info != nullptr) {
profiling_info->VisitRoots(visitor);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index a012a5a9ca..849af977e1 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -99,6 +99,22 @@ class ImtConflictTable {
return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
}
+ // Return true if two conflict tables are the same.
+ bool Equals(ImtConflictTable* other, size_t pointer_size) const {
+ size_t num = NumEntries(pointer_size);
+ if (num != other->NumEntries(pointer_size)) {
+ return false;
+ }
+ for (size_t i = 0; i < num; ++i) {
+ if (GetInterfaceMethod(i, pointer_size) != other->GetInterfaceMethod(i, pointer_size) ||
+ GetImplementationMethod(i, pointer_size) !=
+ other->GetImplementationMethod(i, pointer_size)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
// Visit all of the entries.
// NO_THREAD_SAFETY_ANALYSIS for calling with held locks. Visitor is passed a pair of ArtMethod*
// and also returns one. The order is <interface, implementation>.
@@ -340,6 +356,7 @@ class ArtMethod FINAL {
return (GetAccessFlags() & kAccSynthetic) != 0;
}
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
bool SkipAccessChecks() {
@@ -402,6 +419,8 @@ class ArtMethod FINAL {
ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetImtIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+
void SetDexMethodIndex(uint32_t new_idx) {
// Not called within a transaction.
dex_method_index_ = new_idx;
@@ -564,7 +583,7 @@ class ArtMethod FINAL {
SHARED_REQUIRES(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
- template<typename RootVisitorType>
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename RootVisitorType>
void VisitRoots(RootVisitorType& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS;
const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 8eb3742b61..480644a5b2 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -128,7 +128,7 @@ ADD_TEST_EQ(THREAD_SELF_OFFSET,
art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_objects.
-#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_CARD_TABLE_OFFSET + 168 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_CARD_TABLE_OFFSET + 166 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_pos.
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index f3e260be56..f2575f702f 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -225,6 +225,34 @@ inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
return klass;
}
+template<ReadBarrierOption kReadBarrierOption>
+ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) {
+ DCHECK(proxy_class->IsProxyClass());
+ DCHECK(proxy_method->IsProxyMethod<kReadBarrierOption>());
+ {
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, dex_lock_);
+ // Locate the dex cache of the original interface/Object
+ for (const DexCacheData& data : dex_caches_) {
+ if (!self->IsJWeakCleared(data.weak_root) &&
+ proxy_method->HasSameDexCacheResolvedTypes(data.resolved_types,
+ image_pointer_size_)) {
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
+ self->DecodeJObject(data.weak_root));
+ if (dex_cache != nullptr) {
+ ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
+ proxy_method->GetDexMethodIndex(), image_pointer_size_);
+ CHECK(resolved_method != nullptr);
+ return resolved_method;
+ }
+ }
+ }
+ }
+ LOG(FATAL) << "Didn't find dex cache for " << PrettyClass(proxy_class) << " "
+ << PrettyMethod(proxy_method);
+ UNREACHABLE();
+}
+
} // namespace art
#endif // ART_RUNTIME_CLASS_LINKER_INL_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index db0e9ac9c4..7c003151ea 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -857,11 +857,13 @@ static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_
if (vtable != nullptr) {
SanityCheckArtMethodPointerArray(vtable, nullptr, pointer_size, image_spaces);
}
- if (klass->ShouldHaveEmbeddedImtAndVTable()) {
- for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- SanityCheckArtMethod(
- klass->GetEmbeddedImTableEntry(i, pointer_size), nullptr, image_spaces);
+ if (klass->ShouldHaveImt()) {
+ ImTable* imt = klass->GetImt(pointer_size);
+ for (size_t i = 0; i < ImTable::kSize; ++i) {
+ SanityCheckArtMethod(imt->Get(i, pointer_size), nullptr, image_spaces);
}
+ }
+ if (klass->ShouldHaveEmbeddedVTable()) {
for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) {
SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr, image_spaces);
}
@@ -3456,16 +3458,11 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
new_class->SetClassFlags(mirror::kClassFlagObjectArray);
}
mirror::Class::SetStatus(new_class, mirror::Class::kStatusLoaded, self);
- {
- ArtMethod* imt[mirror::Class::kImtSize];
- std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod());
- new_class->PopulateEmbeddedImtAndVTable(imt, image_pointer_size_);
- }
+ new_class->PopulateEmbeddedVTable(image_pointer_size_);
mirror::Class::SetStatus(new_class, mirror::Class::kStatusInitialized, self);
// don't need to set new_class->SetObjectSize(..)
// because Object::SizeOf delegates to Array::SizeOf
-
// All arrays have java/lang/Cloneable and java/io/Serializable as
// interfaces. We need to set that up here, so that stuff like
// "instanceof" works right.
@@ -4280,33 +4277,6 @@ std::string ClassLinker::GetDescriptorForProxy(mirror::Class* proxy_class) {
return DotToDescriptor(name->ToModifiedUtf8().c_str());
}
-ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) {
- DCHECK(proxy_class->IsProxyClass());
- DCHECK(proxy_method->IsProxyMethod());
- {
- Thread* const self = Thread::Current();
- ReaderMutexLock mu(self, dex_lock_);
- // Locate the dex cache of the original interface/Object
- for (const DexCacheData& data : dex_caches_) {
- if (!self->IsJWeakCleared(data.weak_root) &&
- proxy_method->HasSameDexCacheResolvedTypes(data.resolved_types,
- image_pointer_size_)) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
- self->DecodeJObject(data.weak_root));
- if (dex_cache != nullptr) {
- ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
- proxy_method->GetDexMethodIndex(), image_pointer_size_);
- CHECK(resolved_method != nullptr);
- return resolved_method;
- }
- }
- }
- }
- LOG(FATAL) << "Didn't find dex cache for " << PrettyClass(proxy_class) << " "
- << PrettyMethod(proxy_method);
- UNREACHABLE();
-}
-
void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) {
// Create constructor for Proxy that must initialize the method.
CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 18u);
@@ -5063,9 +5033,11 @@ bool ClassLinker::LinkClass(Thread* self,
if (!LinkSuperClass(klass)) {
return false;
}
- ArtMethod* imt[mirror::Class::kImtSize];
- std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod());
- if (!LinkMethods(self, klass, interfaces, imt)) {
+ ArtMethod* imt_data[ImTable::kSize];
+ // If there are any new conflicts compared to super class.
+ bool new_conflict = false;
+ std::fill_n(imt_data, arraysize(imt_data), Runtime::Current()->GetImtUnimplementedMethod());
+ if (!LinkMethods(self, klass, interfaces, &new_conflict, imt_data)) {
return false;
}
if (!LinkInstanceFields(self, klass)) {
@@ -5078,15 +5050,45 @@ bool ClassLinker::LinkClass(Thread* self,
CreateReferenceInstanceOffsets(klass);
CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
+ ImTable* imt = nullptr;
+ if (klass->ShouldHaveImt()) {
+ // If there are any new conflicts compared to the super class, we cannot make a copy. There
+ // can be cases where both have a conflict method at the same slot without having the same
+ // set of conflicts. In that case we cannot share the IMT, since the conflict table slow path
+ // could build a table that is incorrect for one of the classes.
+ // An identical IMT with new_conflict set does not happen very often.
+ if (!new_conflict && klass->HasSuperClass() && klass->GetSuperClass()->ShouldHaveImt()) {
+ ImTable* super_imt = klass->GetSuperClass()->GetImt(image_pointer_size_);
+ bool imt_equals = true;
+ for (size_t i = 0; i < ImTable::kSize && imt_equals; ++i) {
+ imt_equals = imt_equals && (super_imt->Get(i, image_pointer_size_) == imt_data[i]);
+ }
+ if (imt_equals) {
+ imt = super_imt;
+ }
+ }
+ if (imt == nullptr) {
+ LinearAlloc* allocator = GetAllocatorForClassLoader(klass->GetClassLoader());
+ imt = reinterpret_cast<ImTable*>(
+ allocator->Alloc(self, ImTable::SizeInBytes(image_pointer_size_)));
+ if (imt == nullptr) {
+ return false;
+ }
+ imt->Populate(imt_data, image_pointer_size_);
+ }
+ }
+
if (!klass->IsTemp() || (!init_done_ && klass->GetClassSize() == class_size)) {
// We don't need to retire this class as it has no embedded tables or it was created the
// correct size during class linker initialization.
CHECK_EQ(klass->GetClassSize(), class_size) << PrettyDescriptor(klass.Get());
- if (klass->ShouldHaveEmbeddedImtAndVTable()) {
- klass->PopulateEmbeddedImtAndVTable(imt, image_pointer_size_);
+ if (klass->ShouldHaveEmbeddedVTable()) {
+ klass->PopulateEmbeddedVTable(image_pointer_size_);
+ }
+ if (klass->ShouldHaveImt()) {
+ klass->SetImt(imt, image_pointer_size_);
}
-
// This will notify waiters on klass that saw the not yet resolved
// class in the class_table_ during EnsureResolved.
mirror::Class::SetStatus(klass, mirror::Class::kStatusResolved, self);
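The core of this change is visible in the hunk above: the IMT stops being embedded in every mirror::Class and becomes a separately allocated table that a subclass can share with its superclass when nothing differs. As a rough mental model only (the real definition lives in runtime/imtable.h, the slot count here is a guess, and pointer-size handling is dropped), an IMT reduces to a flat array of method pointers:

#include <cstddef>
#include <cstring>

// Illustrative sketch, not runtime/imtable.h: the runtime stores ArtMethod*
// and respects the image pointer size, this sketch uses void* and the host size.
class ImTableSketch {
 public:
  static constexpr size_t kSize = 43;  // hypothetical slot count; the real value is ImTable::kSize
  static size_t SizeInBytes() { return kSize * sizeof(void*); }
  void* Get(size_t i) const { return slots_[i]; }
  void Set(size_t i, void* m) { slots_[i] = m; }
  void Populate(void* const* data) { std::memcpy(slots_, data, SizeInBytes()); }
  // Mirrors the sharing decision in LinkClass above: reuse the super class
  // table only when no new conflict was recorded and every slot already matches.
  static bool CanShare(const ImTableSketch& super, void* const* data, bool new_conflict) {
    if (new_conflict) {
      return false;
    }
    for (size_t i = 0; i < kSize; ++i) {
      if (super.Get(i) != data[i]) {
        return false;
      }
    }
    return true;
  }
 private:
  void* slots_[kSize];
};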
@@ -5478,6 +5480,7 @@ bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) {
bool ClassLinker::LinkMethods(Thread* self,
Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+ bool* out_new_conflict,
ArtMethod** out_imt) {
self->AllowThreadSuspension();
// A map from vtable indexes to the method they need to be updated to point to. Used because we
@@ -5489,7 +5492,7 @@ bool ClassLinker::LinkMethods(Thread* self,
// any vtable entries with new default method implementations.
return SetupInterfaceLookupTable(self, klass, interfaces)
&& LinkVirtualMethods(self, klass, /*out*/ &default_translations)
- && LinkInterfaceMethods(self, klass, default_translations, out_imt);
+ && LinkInterfaceMethods(self, klass, default_translations, out_new_conflict, out_imt);
}
// Comparator for name and signature of a method, used in finding overriding methods. Implementation
@@ -5647,7 +5650,7 @@ bool ClassLinker::LinkVirtualMethods(
StackHandleScope<2> hs(self);
Handle<mirror::Class> super_class(hs.NewHandle(klass->GetSuperClass()));
MutableHandle<mirror::PointerArray> vtable;
- if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
+ if (super_class->ShouldHaveEmbeddedVTable()) {
vtable = hs.NewHandle(AllocPointerArray(self, max_count));
if (UNLIKELY(vtable.Get() == nullptr)) {
self->AssertPendingOOMException();
@@ -6047,6 +6050,7 @@ ArtMethod* ClassLinker::AddMethodToConflictTable(mirror::Class* klass,
void ClassLinker::SetIMTRef(ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
ArtMethod* current_method,
+ /*out*/bool* new_conflict,
/*out*/ArtMethod** imt_ref) {
// Place method in imt if entry is empty, place conflict otherwise.
if (*imt_ref == unimplemented_method) {
@@ -6063,40 +6067,77 @@ void ClassLinker::SetIMTRef(ArtMethod* unimplemented_method,
*imt_ref = current_method;
} else {
*imt_ref = imt_conflict_method;
+ *new_conflict = true;
}
} else {
// Place the default conflict method. Note that there may be an existing conflict
// method in the IMT, but it could be one tailored to the super class, with a
// specific ImtConflictTable.
*imt_ref = imt_conflict_method;
+ *new_conflict = true;
}
}
void ClassLinker::FillIMTAndConflictTables(mirror::Class* klass) {
- DCHECK(klass->ShouldHaveEmbeddedImtAndVTable()) << PrettyClass(klass);
+ DCHECK(klass->ShouldHaveImt()) << PrettyClass(klass);
DCHECK(!klass->IsTemp()) << PrettyClass(klass);
- ArtMethod* imt[mirror::Class::kImtSize];
+ ArtMethod* imt_data[ImTable::kSize];
Runtime* const runtime = Runtime::Current();
ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod();
ArtMethod* const conflict_method = runtime->GetImtConflictMethod();
- std::fill_n(imt, arraysize(imt), unimplemented_method);
+ std::fill_n(imt_data, arraysize(imt_data), unimplemented_method);
if (klass->GetIfTable() != nullptr) {
+ bool new_conflict = false;
FillIMTFromIfTable(klass->GetIfTable(),
unimplemented_method,
conflict_method,
klass,
- true,
- false,
- &imt[0]);
+ /*create_conflict_tables*/true,
+ /*ignore_copied_methods*/false,
+ &new_conflict,
+ &imt_data[0]);
}
- for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- klass->SetEmbeddedImTableEntry(i, imt[i], image_pointer_size_);
+ if (!klass->ShouldHaveImt()) {
+ return;
+ }
+ // Compare the IMT with the super class's IMT, including the conflict methods. If they are
+ // equivalent, we can just reuse the same pointer.
+ ImTable* imt = nullptr;
+ mirror::Class* super_class = klass->GetSuperClass();
+ if (super_class != nullptr && super_class->ShouldHaveImt()) {
+ ImTable* super_imt = super_class->GetImt(image_pointer_size_);
+ bool same = true;
+ for (size_t i = 0; same && i < ImTable::kSize; ++i) {
+ ArtMethod* method = imt_data[i];
+ ArtMethod* super_method = super_imt->Get(i, image_pointer_size_);
+ if (method != super_method) {
+ bool is_conflict_table = method->IsRuntimeMethod() &&
+ method != unimplemented_method &&
+ method != conflict_method;
+ // Verify conflict contents.
+ bool super_conflict_table = super_method->IsRuntimeMethod() &&
+ super_method != unimplemented_method &&
+ super_method != conflict_method;
+ if (!is_conflict_table || !super_conflict_table) {
+ same = false;
+ } else {
+ ImtConflictTable* table1 = method->GetImtConflictTable(image_pointer_size_);
+ ImtConflictTable* table2 = super_method->GetImtConflictTable(image_pointer_size_);
+ same = same && table1->Equals(table2, image_pointer_size_);
+ }
+ }
+ }
+ if (same) {
+ imt = super_imt;
+ }
+ }
+ if (imt == nullptr) {
+ imt = klass->GetImt(image_pointer_size_);
+ DCHECK(imt != nullptr);
+ imt->Populate(imt_data, image_pointer_size_);
+ } else {
+ klass->SetImt(imt, image_pointer_size_);
}
-}
-
-static inline uint32_t GetIMTIndex(ArtMethod* interface_method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- return interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
}
ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count,
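The static GetIMTIndex helper deleted just above computed the IMT slot inline; the call sites below switch to ArtMethod::GetImtIndex(). A hedged sketch of what that accessor presumably amounts to, with the mapping itself unchanged and only the constant moving:

#include <cstdint>

// Assumption: the slot is still the dex method index taken modulo the table
// size, now ImTable::kSize rather than mirror::Class::kImtSize.
inline uint32_t GetImtIndexSketch(uint32_t dex_method_index, uint32_t imt_size) {
  return dex_method_index % imt_size;
}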
@@ -6118,8 +6159,9 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
mirror::Class* klass,
bool create_conflict_tables,
bool ignore_copied_methods,
- ArtMethod** imt) {
- uint32_t conflict_counts[mirror::Class::kImtSize] = {};
+ /*out*/bool* new_conflict,
+ /*out*/ArtMethod** imt) {
+ uint32_t conflict_counts[ImTable::kSize] = {};
for (size_t i = 0, length = if_table->Count(); i < length; ++i) {
mirror::Class* interface = if_table->GetInterface(i);
const size_t num_virtuals = interface->NumVirtualMethods();
@@ -6149,7 +6191,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
// or interface methods in the IMT here they will not create extra conflicts since we compare
// names and signatures in SetIMTRef.
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = GetIMTIndex(interface_method);
+ const uint32_t imt_index = interface_method->GetImtIndex();
// There is only any conflicts if all of the interface methods for an IMT slot don't have
// the same implementation method, keep track of this to avoid creating a conflict table in
@@ -6161,6 +6203,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
SetIMTRef(unimplemented_method,
imt_conflict_method,
implementation_method,
+ /*out*/new_conflict,
/*out*/&imt[imt_index]);
}
}
@@ -6168,7 +6211,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
if (create_conflict_tables) {
// Create the conflict tables.
LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader());
- for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ for (size_t i = 0; i < ImTable::kSize; ++i) {
size_t conflicts = conflict_counts[i];
if (imt[i] == imt_conflict_method) {
ImtConflictTable* new_table = CreateImtConflictTable(conflicts, linear_alloc);
@@ -6202,7 +6245,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
}
DCHECK(implementation_method != nullptr);
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = GetIMTIndex(interface_method);
+ const uint32_t imt_index = interface_method->GetImtIndex();
if (!imt[imt_index]->IsRuntimeMethod() ||
imt[imt_index] == unimplemented_method ||
imt[imt_index] == imt_conflict_method) {
@@ -6455,12 +6498,14 @@ static void SanityCheckVTable(Handle<mirror::Class> klass, uint32_t pointer_size
void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass,
ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
+ bool* new_conflict,
ArtMethod** imt) {
DCHECK(klass->HasSuperClass());
mirror::Class* super_class = klass->GetSuperClass();
- if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
- for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_);
+ if (super_class->ShouldHaveImt()) {
+ ImTable* super_imt = super_class->GetImt(image_pointer_size_);
+ for (size_t i = 0; i < ImTable::kSize; ++i) {
+ imt[i] = super_imt->Get(i, image_pointer_size_);
}
} else {
// No imt in the super class, need to reconstruct from the iftable.
@@ -6473,6 +6518,7 @@ void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass,
klass.Get(),
/*create_conflict_table*/false,
/*ignore_copied_methods*/true,
+ /*out*/new_conflict,
/*out*/imt);
}
}
@@ -6483,6 +6529,7 @@ bool ClassLinker::LinkInterfaceMethods(
Thread* self,
Handle<mirror::Class> klass,
const std::unordered_map<size_t, ClassLinker::MethodTranslation>& default_translations,
+ bool* out_new_conflict,
ArtMethod** out_imt) {
StackHandleScope<3> hs(self);
Runtime* const runtime = Runtime::Current();
@@ -6518,6 +6565,7 @@ bool ClassLinker::LinkInterfaceMethods(
FillImtFromSuperClass(klass,
unimplemented_method,
imt_conflict_method,
+ out_new_conflict,
out_imt);
}
// Allocate method arrays before since we don't want miss visiting miranda method roots due to
@@ -6603,7 +6651,7 @@ bool ClassLinker::LinkInterfaceMethods(
auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
MethodNameAndSignatureComparator interface_name_comparator(
interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
- uint32_t imt_index = GetIMTIndex(interface_method);
+ uint32_t imt_index = interface_method->GetImtIndex();
ArtMethod** imt_ptr = &out_imt[imt_index];
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
@@ -6649,6 +6697,7 @@ bool ClassLinker::LinkInterfaceMethods(
SetIMTRef(unimplemented_method,
imt_conflict_method,
vtable_method,
+ /*out*/out_new_conflict,
/*out*/imt_ptr);
}
break;
@@ -6791,6 +6840,7 @@ bool ClassLinker::LinkInterfaceMethods(
SetIMTRef(unimplemented_method,
imt_conflict_method,
current_method,
+ /*out*/out_new_conflict,
/*out*/imt_ptr);
}
}
@@ -6990,7 +7040,7 @@ bool ClassLinker::LinkInterfaceMethods(
}
// Fix up IMT next
- for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ for (size_t i = 0; i < ImTable::kSize; ++i) {
auto it = move_table.find(out_imt[i]);
if (it != move_table.end()) {
out_imt[i] = it->second;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index cd1ca7f15a..d6822c5225 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -483,6 +483,7 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_);
std::string GetDescriptorForProxy(mirror::Class* proxy_class)
SHARED_REQUIRES(Locks::mutator_lock_);
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method)
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -832,6 +833,7 @@ class ClassLinker {
bool LinkMethods(Thread* self,
Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+ bool* out_new_conflict,
ArtMethod** out_imt)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -967,19 +969,20 @@ class ClassLinker {
// * kDefaultConflict - Conflicting method implementations were found when searching for
// target_method. The value of *out_default_method is null.
DefaultMethodSearchResult FindDefaultMethodImplementation(
- Thread* self,
- ArtMethod* target_method,
- Handle<mirror::Class> klass,
- /*out*/ArtMethod** out_default_method) const
+ Thread* self,
+ ArtMethod* target_method,
+ Handle<mirror::Class> klass,
+ /*out*/ArtMethod** out_default_method) const
SHARED_REQUIRES(Locks::mutator_lock_);
// Sets the imt entries and fixes up the vtable for the given class by linking all the interface
// methods. See LinkVirtualMethods for an explanation of what default_translations is.
bool LinkInterfaceMethods(
- Thread* self,
- Handle<mirror::Class> klass,
- const std::unordered_map<size_t, MethodTranslation>& default_translations,
- ArtMethod** out_imt)
+ Thread* self,
+ Handle<mirror::Class> klass,
+ const std::unordered_map<size_t, MethodTranslation>& default_translations,
+ bool* out_new_conflict,
+ ArtMethod** out_imt)
SHARED_REQUIRES(Locks::mutator_lock_);
bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size)
@@ -1095,6 +1098,7 @@ class ClassLinker {
void SetIMTRef(ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
ArtMethod* current_method,
+ /*out*/bool* new_conflict,
/*out*/ArtMethod** imt_ref) SHARED_REQUIRES(Locks::mutator_lock_);
void FillIMTFromIfTable(mirror::IfTable* if_table,
@@ -1103,11 +1107,13 @@ class ClassLinker {
mirror::Class* klass,
bool create_conflict_tables,
bool ignore_copied_methods,
- ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
+ /*out*/bool* new_conflict,
+ /*out*/ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
void FillImtFromSuperClass(Handle<mirror::Class> klass,
ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
+ bool* new_conflict,
ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
std::vector<const DexFile*> boot_class_path_;
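FindMethodForProxy gains a ReadBarrierOption template parameter in the header above while its out-of-line definition was dropped from the .cc, so the definition presumably moves to an -inl header. The idiom is a compile-time switch that lets GC-internal callers skip the read barrier; a generic, self-contained sketch with all names hypothetical:

enum ReadBarrierOptionSketch { kWithReadBarrierSketch, kWithoutReadBarrierSketch };

template <ReadBarrierOptionSketch kOption = kWithReadBarrierSketch>
int* LoadRefSketch(int** slot) {
  if (kOption == kWithReadBarrierSketch) {
    // A real implementation would route the load through the read barrier here.
  }
  return *slot;
}

// GC code that already works on from-space pointers would instantiate
// LoadRefSketch<kWithoutReadBarrierSketch>(slot) instead of the default.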
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 488826b6c4..9b59f2bba6 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -148,7 +148,8 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_EQ(0U, array->NumInstanceFields());
EXPECT_EQ(0U, array->NumStaticFields());
EXPECT_EQ(2U, array->NumDirectInterfaces());
- EXPECT_TRUE(array->ShouldHaveEmbeddedImtAndVTable());
+ EXPECT_FALSE(array->ShouldHaveImt());
+ EXPECT_TRUE(array->ShouldHaveEmbeddedVTable());
EXPECT_EQ(2, array->GetIfTableCount());
ASSERT_TRUE(array->GetIfTable() != nullptr);
mirror::Class* direct_interface0 = mirror::Class::GetDirectInterface(self, array, 0);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index f58af5a8da..5bdb36cafc 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -418,26 +418,6 @@ void CommonRuntimeTestImpl::TearDown() {
(*icu_cleanup_fn)();
Runtime::Current()->GetHeap()->VerifyHeap(); // Check for heap corruption after the test
-
- // Manually closing the JNI libraries.
- // Runtime does not support repeatedly doing JNI->CreateVM, thus we need to manually clean up the
- // dynamic linking loader so that gtests would not fail.
- // Bug: 25785594
- if (runtime_->IsStarted()) {
- {
- // We retrieve the handle by calling dlopen on the library. To close it, we need to call
- // dlclose twice, the first time to undo our dlopen and the second time to actually unload it.
- // See man dlopen.
- void* handle = dlopen("libjavacore.so", RTLD_LAZY);
- dlclose(handle);
- CHECK_EQ(0, dlclose(handle));
- }
- {
- void* handle = dlopen("libopenjdkd.so", RTLD_LAZY);
- dlclose(handle);
- CHECK_EQ(0, dlclose(handle));
- }
- }
}
static std::string GetDexFileName(const std::string& jar_prefix, bool host) {
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index fc6257302a..db3f88ff6e 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -19,7 +19,7 @@
#include "entrypoint_utils.h"
-#include "art_method.h"
+#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "common_throws.h"
#include "dex_file.h"
@@ -559,9 +559,10 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_
}
}
case kInterface: {
- uint32_t imt_index = resolved_method->GetDexMethodIndex() % mirror::Class::kImtSize;
- ArtMethod* imt_method = (*this_object)->GetClass()->GetEmbeddedImTableEntry(
- imt_index, class_linker->GetImagePointerSize());
+ uint32_t imt_index = resolved_method->GetImtIndex();
+ size_t pointer_size = class_linker->GetImagePointerSize();
+ ArtMethod* imt_method = (*this_object)->GetClass()->GetImt(pointer_size)->
+ Get(imt_index, pointer_size);
if (!imt_method->IsRuntimeMethod()) {
if (kIsDebugBuild) {
mirror::Class* klass = (*this_object)->GetClass();
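For context on the kInterface fast path rewritten above: dispatch now indexes the receiver class's ImTable with the interface method's IMT slot and only trusts the entry if it is not a runtime method. A compact sketch using only the accessors visible in the hunk (it assumes the ART headers; the slow-path fallback is elided):

ArtMethod* LookupViaImtSketch(mirror::Object* receiver,
                              ArtMethod* interface_method,
                              size_t pointer_size)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const uint32_t slot = interface_method->GetImtIndex();
  ImTable* imt = receiver->GetClass()->GetImt(pointer_size);
  ArtMethod* entry = imt->Get(slot, pointer_size);
  // A runtime method in the slot is either the unimplemented marker or a
  // conflict table holder; callers then fall back to the slow path.
  return entry->IsRuntimeMethod() ? nullptr : entry;
}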
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 03771aa80e..0a70be1c95 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2169,13 +2169,12 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
dex_method_idx, sizeof(void*));
DCHECK(interface_method != nullptr) << dex_method_idx << " " << PrettyMethod(caller_method);
ArtMethod* method = nullptr;
+ ImTable* imt = cls->GetImt(sizeof(void*));
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
// If the dex cache already resolved the interface method, look whether we have
// a match in the ImtConflictTable.
- uint32_t imt_index = interface_method->GetDexMethodIndex();
- ArtMethod* conflict_method = cls->GetEmbeddedImTableEntry(
- imt_index % mirror::Class::kImtSize, sizeof(void*));
+ ArtMethod* conflict_method = imt->Get(interface_method->GetImtIndex(), sizeof(void*));
if (LIKELY(conflict_method->IsRuntimeMethod())) {
ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
DCHECK(current_table != nullptr);
@@ -2226,9 +2225,8 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
// We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
// We create a new table with the new pair { interface_method, method }.
- uint32_t imt_index = interface_method->GetDexMethodIndex();
- ArtMethod* conflict_method = cls->GetEmbeddedImTableEntry(
- imt_index % mirror::Class::kImtSize, sizeof(void*));
+ uint32_t imt_index = interface_method->GetImtIndex();
+ ArtMethod* conflict_method = imt->Get(imt_index, sizeof(void*));
if (conflict_method->IsRuntimeMethod()) {
ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
cls.Get(),
@@ -2239,9 +2237,9 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
if (new_conflict_method != conflict_method) {
// Update the IMT if we create a new conflict method. No fence needed here, as the
// data is consistent.
- cls->SetEmbeddedImTableEntry(imt_index % mirror::Class::kImtSize,
- new_conflict_method,
- sizeof(void*));
+ imt->Set(imt_index,
+ new_conflict_method,
+ sizeof(void*));
}
}
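The trampoline above consults an ImtConflictTable stored behind a runtime method in the conflicting slot and, on a miss, grows it via AddMethodToConflictTable. The table holds { interface method, implementation } pairs; the accessor names in this sketch are assumptions based on how the hunks use the table:

ArtMethod* ResolveConflictSketch(ImtConflictTable* table,
                                 ArtMethod* interface_method,
                                 size_t pointer_size) {
  for (size_t i = 0; i < table->NumEntries(pointer_size); ++i) {
    if (table->GetInterfaceMethod(i, pointer_size) == interface_method) {
      return table->GetImplementationMethod(i, pointer_size);
    }
  }
  return nullptr;  // miss: the caller resolves normally and appends a new pair
}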
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index c621672ae7..91deea0161 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -112,10 +112,12 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, name, pthread_self, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, pthread_self, last_no_thread_suspension_cause,
sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, last_no_thread_suspension_cause, checkpoint_functions,
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, last_no_thread_suspension_cause, checkpoint_function,
sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, checkpoint_functions, jni_entrypoints,
- sizeof(void*) * 6);
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, checkpoint_function, active_suspend_barriers,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, active_suspend_barriers, jni_entrypoints,
+ sizeof(Thread::tls_ptr_sized_values::active_suspend_barriers));
// Skip across the entrypoints structures.
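The test edit above tracks a Thread::tls_ptr_sized_values layout change: the six checkpoint slots collapse to a single checkpoint_function followed by active_suspend_barriers. The macro asserts member adjacency; conceptually it reduces to an offsetof comparison like the following, where the struct is a stand-in and the barrier count is hypothetical, not the real Thread layout:

#include <cstddef>

struct TlsPtrSketch {
  const char* last_no_thread_suspension_cause;
  void* checkpoint_function;
  void* active_suspend_barriers[3];  // hypothetical barrier count
  void* jni_entrypoints;
};

static_assert(offsetof(TlsPtrSketch, active_suspend_barriers) ==
                  offsetof(TlsPtrSketch, checkpoint_function) + sizeof(void*),
              "checkpoint_function and active_suspend_barriers must be adjacent");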
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 3f8f6284c0..dd750060b8 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -192,7 +192,7 @@ void ConcurrentCopying::InitializePhase() {
}
// Used to switch the thread roots of a thread from from-space refs to to-space refs.
-class ThreadFlipVisitor : public Closure {
+class ConcurrentCopying::ThreadFlipVisitor : public Closure {
public:
ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
: concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
@@ -229,7 +229,7 @@ class ThreadFlipVisitor : public Closure {
};
// Called back from Runtime::FlipThreadRoots() during a pause.
-class FlipCallback : public Closure {
+class ConcurrentCopying::FlipCallback : public Closure {
public:
explicit FlipCallback(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {
@@ -304,10 +304,9 @@ void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
}
// Used to visit objects in the immune spaces.
-class ConcurrentCopyingImmuneSpaceObjVisitor {
+class ConcurrentCopying::ImmuneSpaceObjVisitor {
public:
- explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
- : collector_(cc) {}
+ explicit ImmuneSpaceObjVisitor(ConcurrentCopying* cc) : collector_(cc) {}
void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
@@ -388,7 +387,7 @@ void ConcurrentCopying::MarkingPhase() {
for (auto& space : immune_spaces_.GetSpaces()) {
DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
- ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
+ ImmuneSpaceObjVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->Limit()),
visitor);
@@ -487,7 +486,7 @@ void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
Runtime::Current()->BroadcastForNewSystemWeaks();
}
-class DisableMarkingCheckpoint : public Closure {
+class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
public:
explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {
@@ -683,9 +682,9 @@ accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
// The following visitors are used to verify that there's no references to the from-space left after
// marking.
-class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
+class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
public:
- explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
+ explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* ref) const
@@ -712,16 +711,16 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
ConcurrentCopying* const collector_;
};
-class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
+class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
public:
- explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
+ explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
- ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
+ VerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(ref);
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
@@ -739,7 +738,7 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
+ VerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(root->AsMirrorPtr());
}
@@ -747,9 +746,9 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
ConcurrentCopying* const collector_;
};
-class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
+class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
public:
- explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
+ explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj) const
SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -761,7 +760,7 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
space::RegionSpace* region_space = collector->RegionSpace();
CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
- ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
+ VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
obj->VisitReferences(visitor, visitor);
if (kUseBakerReadBarrier) {
CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
@@ -785,16 +784,15 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
CHECK(!thread->GetIsGcMarking());
}
}
- ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
+ VerifyNoFromSpaceRefsObjectVisitor visitor(this);
// Roots.
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
+ VerifyNoFromSpaceRefsVisitor ref_visitor(this);
Runtime::Current()->VisitRoots(&ref_visitor);
}
// The to-space.
- region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
- this);
+ region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
// Non-moving spaces.
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -802,7 +800,7 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
}
// The alloc stack.
{
- ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
+ VerifyNoFromSpaceRefsVisitor ref_visitor(this);
for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
it < end; ++it) {
mirror::Object* const obj = it->AsMirrorPtr();
@@ -817,9 +815,9 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
}
// The following visitors are used to assert the to-space invariant.
-class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
+class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
public:
- explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
+ explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* ref) const
@@ -835,16 +833,16 @@ class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
ConcurrentCopying* const collector_;
};
-class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
+class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
public:
- explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
+ explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
- ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
+ AssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(ref);
}
void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
@@ -861,7 +859,7 @@ class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
+ AssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(root->AsMirrorPtr());
}
@@ -869,9 +867,9 @@ class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
ConcurrentCopying* const collector_;
};
-class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
+class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
public:
- explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
+ explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj) const
SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -884,7 +882,7 @@ class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
space::RegionSpace* region_space = collector->RegionSpace();
CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
- ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
+ AssertToSpaceInvariantFieldVisitor visitor(collector);
obj->VisitReferences(visitor, visitor);
}
@@ -892,7 +890,7 @@ class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
ConcurrentCopying* const collector_;
};
-class RevokeThreadLocalMarkStackCheckpoint : public Closure {
+class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
public:
RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
bool disable_weak_ref_access)
@@ -1112,7 +1110,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
region_space_->AddLiveBytes(to_ref, alloc_size);
}
if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
- ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
+ AssertToSpaceInvariantObjectVisitor visitor(this);
visitor(to_ref);
}
}
@@ -1484,9 +1482,9 @@ void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* o
}
// Used to scan ref fields of an object.
-class ConcurrentCopyingRefFieldsVisitor {
+class ConcurrentCopying::RefFieldsVisitor {
public:
- explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
+ explicit RefFieldsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
@@ -1522,7 +1520,7 @@ class ConcurrentCopyingRefFieldsVisitor {
// Scan ref fields of an object.
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
- ConcurrentCopyingRefFieldsVisitor visitor(this);
+ RefFieldsVisitor visitor(this);
// Disable the read barrier for a performance reason.
to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index e9ff618ff3..a986a7a1db 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -51,11 +51,10 @@ namespace collector {
class ConcurrentCopying : public GarbageCollector {
public:
- // TODO: disable thse flags for production use.
// Enable the no-from-space-refs verification at the pause.
- static constexpr bool kEnableNoFromSpaceRefsVerification = true;
+ static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
// Enable the from-space bytes/objects check.
- static constexpr bool kEnableFromSpaceAccountingCheck = true;
+ static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
// Enable verbose mode.
static constexpr bool kVerboseMode = false;
@@ -244,16 +243,21 @@ class ConcurrentCopying : public GarbageCollector {
accounting::ReadBarrierTable* rb_table_;
bool force_evacuate_all_; // True if all regions are evacuated.
- friend class ConcurrentCopyingRefFieldsVisitor;
- friend class ConcurrentCopyingImmuneSpaceObjVisitor;
- friend class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor;
- friend class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor;
- friend class ConcurrentCopyingClearBlackPtrsVisitor;
- friend class ConcurrentCopyingLostCopyVisitor;
- friend class ThreadFlipVisitor;
- friend class FlipCallback;
- friend class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor;
- friend class RevokeThreadLocalMarkStackCheckpoint;
+ class AssertToSpaceInvariantFieldVisitor;
+ class AssertToSpaceInvariantObjectVisitor;
+ class AssertToSpaceInvariantRefsVisitor;
+ class ClearBlackPtrsVisitor;
+ class ComputeUnevacFromSpaceLiveRatioVisitor;
+ class DisableMarkingCheckpoint;
+ class FlipCallback;
+ class ImmuneSpaceObjVisitor;
+ class LostCopyVisitor;
+ class RefFieldsVisitor;
+ class RevokeThreadLocalMarkStackCheckpoint;
+ class VerifyNoFromSpaceRefsFieldVisitor;
+ class VerifyNoFromSpaceRefsObjectVisitor;
+ class VerifyNoFromSpaceRefsVisitor;
+ class ThreadFlipVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};
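The concurrent copying changes above, and the same treatment of the other collectors below, replace a friend list with private nested classes: a member class can reach the enclosing class's private state, so per-visitor friend declarations become unnecessary. A minimal, generic sketch of the pattern:

// header
class CollectorSketch {
 public:
  void Scan(long obj);
 private:
  long visited_ = 0;
  class RefVisitor;  // nested: defined in the .cc, still sees visited_
};

// .cc
class CollectorSketch::RefVisitor {
 public:
  explicit RefVisitor(CollectorSketch* collector) : collector_(collector) {}
  void operator()(long /*obj*/) const { ++collector_->visited_; }
 private:
  CollectorSketch* const collector_;
};

void CollectorSketch::Scan(long obj) {
  RefVisitor visitor(this);
  visitor(obj);
}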
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 6beb60608c..43482eb7cc 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -52,8 +52,9 @@ void MarkCompact::BindBitmaps() {
MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
: GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
- space_(nullptr), collector_name_(name_), updating_references_(false) {
-}
+ space_(nullptr),
+ collector_name_(name_),
+ updating_references_(false) {}
void MarkCompact::RunPhases() {
Thread* self = Thread::Current();
@@ -85,30 +86,20 @@ void MarkCompact::ForwardObject(mirror::Object* obj) {
++live_objects_in_space_;
}
-class CalculateObjectForwardingAddressVisitor {
- public:
- explicit CalculateObjectForwardingAddressVisitor(MarkCompact* collector)
- : collector_(collector) {}
- void operator()(mirror::Object* obj) const REQUIRES(Locks::mutator_lock_,
- Locks::heap_bitmap_lock_) {
- DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
- DCHECK(collector_->IsMarked(obj) != nullptr);
- collector_->ForwardObject(obj);
- }
-
- private:
- MarkCompact* const collector_;
-};
void MarkCompact::CalculateObjectForwardingAddresses() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// The bump pointer in the space where the next forwarding address will be.
bump_pointer_ = reinterpret_cast<uint8_t*>(space_->Begin());
// Visit all the marked objects in the bitmap.
- CalculateObjectForwardingAddressVisitor visitor(this);
objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
reinterpret_cast<uintptr_t>(space_->End()),
- visitor);
+ [this](mirror::Object* obj)
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
+ DCHECK(IsMarked(obj) != nullptr);
+ ForwardObject(obj);
+ });
}
void MarkCompact::InitializePhase() {
@@ -129,17 +120,6 @@ void MarkCompact::ProcessReferences(Thread* self) {
false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
-class BitmapSetSlowPathVisitor {
- public:
- void operator()(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
- // Marking a large object, make sure its aligned as a sanity check.
- if (!IsAligned<kPageSize>(obj)) {
- Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
- LOG(FATAL) << obj;
- }
- }
-};
-
inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
if (obj == nullptr) {
return nullptr;
@@ -155,8 +135,15 @@ inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
}
} else {
DCHECK(!space_->HasAddress(obj));
- BitmapSetSlowPathVisitor visitor;
- if (!mark_bitmap_->Set(obj, visitor)) {
+ auto slow_path = [this](const mirror::Object* ref)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ // Marking a large object, make sure it's aligned as a sanity check.
+ if (!IsAligned<kPageSize>(ref)) {
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ LOG(FATAL) << ref;
+ }
+ };
+ if (!mark_bitmap_->Set(obj, slow_path)) {
// This object was not previously marked.
MarkStackPush(obj);
}
@@ -296,10 +283,9 @@ void MarkCompact::VisitRoots(
}
}
-class UpdateRootVisitor : public RootVisitor {
+class MarkCompact::UpdateRootVisitor : public RootVisitor {
public:
- explicit UpdateRootVisitor(MarkCompact* collector) : collector_(collector) {
- }
+ explicit UpdateRootVisitor(MarkCompact* collector) : collector_(collector) {}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES(Locks::mutator_lock_)
@@ -332,10 +318,10 @@ class UpdateRootVisitor : public RootVisitor {
MarkCompact* const collector_;
};
-class UpdateObjectReferencesVisitor {
+class MarkCompact::UpdateObjectReferencesVisitor {
public:
- explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {
- }
+ explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {}
+
void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
collector_->UpdateObjectReferences(obj);
@@ -423,10 +409,9 @@ inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Objec
}
}
-class UpdateReferenceVisitor {
+class MarkCompact::UpdateReferenceVisitor {
public:
- explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {
- }
+ explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
ALWAYS_INLINE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -501,19 +486,6 @@ bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
return space != space_ && !immune_spaces_.ContainsSpace(space);
}
-class MoveObjectVisitor {
- public:
- explicit MoveObjectVisitor(MarkCompact* collector) : collector_(collector) {
- }
- void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_)
- REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
- collector_->MoveObject(obj, obj->SizeOf());
- }
-
- private:
- MarkCompact* const collector_;
-};
-
void MarkCompact::MoveObject(mirror::Object* obj, size_t len) {
// Look at the forwarding address stored in the lock word to know where to copy.
DCHECK(space_->HasAddress(obj)) << obj;
@@ -534,10 +506,13 @@ void MarkCompact::MoveObject(mirror::Object* obj, size_t len) {
void MarkCompact::MoveObjects() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Move the objects in the before forwarding bitmap.
- MoveObjectVisitor visitor(this);
objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
reinterpret_cast<uintptr_t>(space_->End()),
- visitor);
+ [this](mirror::Object* obj)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ MoveObject(obj, obj->SizeOf());
+ });
CHECK(lock_words_to_restore_.empty());
}
@@ -572,10 +547,9 @@ void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
-class MarkCompactMarkObjectVisitor {
+class MarkCompact::MarkObjectVisitor {
public:
- explicit MarkCompactMarkObjectVisitor(MarkCompact* collector) : collector_(collector) {
- }
+ explicit MarkObjectVisitor(MarkCompact* collector) : collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -608,7 +582,7 @@ class MarkCompactMarkObjectVisitor {
// Visit all of the references of an object and update.
void MarkCompact::ScanObject(mirror::Object* obj) {
- MarkCompactMarkObjectVisitor visitor(this);
+ MarkObjectVisitor visitor(this);
obj->VisitReferences(visitor, visitor);
}
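MarkCompact's one-shot visitor classes become lambdas at their single call sites, with the lock annotations moved onto the lambdas. This works because the visit entry points are templates over any callable; a self-contained sketch of that shape, where the stand-in walk visits every word rather than only marked bits as the real bitmap does:

#include <cstdint>

template <typename Visitor>
void VisitMarkedRangeSketch(uintptr_t begin, uintptr_t end, Visitor&& visit) {
  // Stand-in walk: the real ContinuousSpaceBitmap only visits addresses whose
  // mark bit is set.
  for (uintptr_t addr = begin; addr < end; addr += sizeof(void*)) {
    visit(reinterpret_cast<void*>(addr));
  }
}

// Usage mirrors the hunks above: capture the collector and forward each object, e.g.
// VisitMarkedRangeSketch(begin, end, [collector](void* obj) { collector->Forward(obj); });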
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 48311570b5..16abfb73b8 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -222,13 +222,10 @@ class MarkCompact : public GarbageCollector {
bool updating_references_;
private:
- friend class BitmapSetSlowPathVisitor;
- friend class CalculateObjectForwardingAddressVisitor;
- friend class MarkCompactMarkObjectVisitor;
- friend class MoveObjectVisitor;
- friend class UpdateObjectReferencesVisitor;
- friend class UpdateReferenceVisitor;
- friend class UpdateRootVisitor;
+ class MarkObjectVisitor;
+ class UpdateObjectReferencesVisitor;
+ class UpdateReferenceVisitor;
+ class UpdateRootVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(MarkCompact);
};
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 894ceba216..9f54f1cdd4 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -266,7 +266,7 @@ void MarkSweep::MarkingPhase() {
PreCleanCards();
}
-class ScanObjectVisitor {
+class MarkSweep::ScanObjectVisitor {
public:
explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
: mark_sweep_(mark_sweep) {}
@@ -393,12 +393,14 @@ bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref
return IsMarked(ref->AsMirrorPtr());
}
-class MarkSweepMarkObjectSlowPath {
+class MarkSweep::MarkObjectSlowPath {
public:
- explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep,
- mirror::Object* holder = nullptr,
- MemberOffset offset = MemberOffset(0))
- : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {}
+ explicit MarkObjectSlowPath(MarkSweep* mark_sweep,
+ mirror::Object* holder = nullptr,
+ MemberOffset offset = MemberOffset(0))
+ : mark_sweep_(mark_sweep),
+ holder_(holder),
+ offset_(offset) {}
void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
if (kProfileLargeObjects) {
@@ -444,27 +446,8 @@ class MarkSweepMarkObjectSlowPath {
}
PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
- {
- LOG(INTERNAL_FATAL) << "Attempting see if it's a bad root";
- Thread* self = Thread::Current();
- if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
- mark_sweep_->VerifyRoots();
- } else {
- const bool heap_bitmap_exclusive_locked =
- Locks::heap_bitmap_lock_->IsExclusiveHeld(self);
- if (heap_bitmap_exclusive_locked) {
- Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
- }
- {
- ScopedThreadSuspension(self, kSuspended);
- ScopedSuspendAll ssa(__FUNCTION__);
- mark_sweep_->VerifyRoots();
- }
- if (heap_bitmap_exclusive_locked) {
- Locks::heap_bitmap_lock_->ExclusiveLock(self);
- }
- }
- }
+ LOG(INTERNAL_FATAL) << "Attempting see if it's a bad thread root\n";
+ mark_sweep_->VerifySuspendedThreadRoots();
LOG(FATAL) << "Can't mark invalid object";
}
}
@@ -499,7 +482,7 @@ inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj,
if (kCountMarkedObjects) {
++mark_slowpath_count_;
}
- MarkSweepMarkObjectSlowPath visitor(this, holder, offset);
+ MarkObjectSlowPath visitor(this, holder, offset);
// TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
// will check again.
if (!mark_bitmap_->Set(obj, visitor)) {
@@ -534,7 +517,7 @@ inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
if (LIKELY(object_bitmap->HasAddress(obj))) {
return !object_bitmap->AtomicTestAndSet(obj);
}
- MarkSweepMarkObjectSlowPath visitor(this);
+ MarkObjectSlowPath visitor(this);
return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}
@@ -553,7 +536,7 @@ inline void MarkSweep::MarkObject(mirror::Object* obj,
}
}
-class VerifyRootMarkedVisitor : public SingleRootVisitor {
+class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor {
public:
explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
@@ -582,7 +565,7 @@ void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
}
}
-class VerifyRootVisitor : public SingleRootVisitor {
+class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
public:
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -591,15 +574,15 @@ class VerifyRootVisitor : public SingleRootVisitor {
if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
if (large_object_space != nullptr && !large_object_space->Contains(root)) {
- LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info;
+ LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info << "\n";
}
}
}
};
-void MarkSweep::VerifyRoots() {
+void MarkSweep::VerifySuspendedThreadRoots() {
VerifyRootVisitor visitor;
- Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
+ Runtime::Current()->GetThreadList()->VisitRootsForSuspendedThreads(&visitor);
}
void MarkSweep::MarkRoots(Thread* self) {
@@ -629,7 +612,7 @@ void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving));
}
-class DelayReferenceReferentVisitor {
+class MarkSweep::DelayReferenceReferentVisitor {
public:
explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}
@@ -644,7 +627,7 @@ class DelayReferenceReferentVisitor {
};
template <bool kUseFinger = false>
-class MarkStackTask : public Task {
+class MarkSweep::MarkStackTask : public Task {
public:
MarkStackTask(ThreadPool* thread_pool,
MarkSweep* mark_sweep,
@@ -802,7 +785,7 @@ class MarkStackTask : public Task {
}
};
-class CardScanTask : public MarkStackTask<false> {
+class MarkSweep::CardScanTask : public MarkStackTask<false> {
public:
CardScanTask(ThreadPool* thread_pool,
MarkSweep* mark_sweep,
@@ -967,7 +950,7 @@ void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
}
}
-class RecursiveMarkTask : public MarkStackTask<false> {
+class MarkSweep::RecursiveMarkTask : public MarkStackTask<false> {
public:
RecursiveMarkTask(ThreadPool* thread_pool,
MarkSweep* mark_sweep,
@@ -1080,7 +1063,7 @@ void MarkSweep::SweepSystemWeaks(Thread* self) {
Runtime::Current()->SweepSystemWeaks(this);
}
-class VerifySystemWeakVisitor : public IsMarkedVisitor {
+class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor {
public:
explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
@@ -1109,7 +1092,7 @@ void MarkSweep::VerifySystemWeaks() {
Runtime::Current()->SweepSystemWeaks(&visitor);
}
-class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
+class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor {
public:
CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index c19107a626..9747031152 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -250,8 +250,8 @@ class MarkSweep : public GarbageCollector {
// Verify the roots of the heap and print out information related to any invalid roots.
// Called in MarkObject, so may we may not hold the mutator lock.
- void VerifyRoots()
- NO_THREAD_SAFETY_ANALYSIS;
+ void VerifySuspendedThreadRoots()
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
void ExpandMarkStack()
@@ -353,17 +353,17 @@ class MarkSweep : public GarbageCollector {
std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;
private:
- friend class CardScanTask;
- friend class CheckBitmapVisitor;
- friend class CheckReferenceVisitor;
- friend class CheckpointMarkThreadRoots;
- friend class Heap;
- friend class FifoMarkStackChunk;
- friend class MarkObjectVisitor;
- template<bool kUseFinger> friend class MarkStackTask;
- friend class MarkSweepMarkObjectSlowPath;
- friend class VerifyRootMarkedVisitor;
- friend class VerifyRootVisitor;
+ class CardScanTask;
+ class CheckpointMarkThreadRoots;
+ class DelayReferenceReferentVisitor;
+ template<bool kUseFinger> class MarkStackTask;
+ class MarkObjectSlowPath;
+ class RecursiveMarkTask;
+ class ScanObjectParallelVisitor;
+ class ScanObjectVisitor;
+ class VerifyRootMarkedVisitor;
+ class VerifyRootVisitor;
+ class VerifySystemWeakVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(MarkSweep);
};
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index e87b5ff332..78fb2d24ae 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -26,21 +26,6 @@ namespace art {
namespace gc {
namespace collector {
-class BitmapSetSlowPathVisitor {
- public:
- explicit BitmapSetSlowPathVisitor(SemiSpace* semi_space) : semi_space_(semi_space) {
- }
-
- void operator()(const mirror::Object* obj) const {
- CHECK(!semi_space_->to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
- // Marking a large object, make sure its aligned as a sanity check.
- CHECK_ALIGNED(obj, kPageSize);
- }
-
- private:
- SemiSpace* const semi_space_;
-};
-
inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object* obj) const {
DCHECK(from_space_->HasAddress(obj));
LockWord lock_word = obj->GetLockWord(false);
@@ -76,8 +61,12 @@ inline void SemiSpace::MarkObject(
obj_ptr->Assign(forward_address);
} else if (!collect_from_space_only_ && !immune_spaces_.IsInImmuneRegion(obj)) {
DCHECK(!to_space_->HasAddress(obj)) << "Tried to mark " << obj << " in to-space";
- BitmapSetSlowPathVisitor visitor(this);
- if (!mark_bitmap_->Set(obj, visitor)) {
+ auto slow_path = [this](const mirror::Object* ref) {
+ CHECK(!to_space_->HasAddress(ref)) << "Marking " << ref << " in to_space_";
+ // Marking a large object, make sure it's aligned as a sanity check.
+ CHECK_ALIGNED(ref, kPageSize);
+ };
+ if (!mark_bitmap_->Set(obj, slow_path)) {
// This object was not previously marked.
MarkStackPush(obj);
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index f37daa54e9..7a4c025c30 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -282,22 +282,11 @@ void SemiSpace::MarkingPhase() {
}
}
-class SemiSpaceScanObjectVisitor {
- public:
- explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
- void operator()(Object* obj) const REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- DCHECK(obj != nullptr);
- semi_space_->ScanObject(obj);
- }
- private:
- SemiSpace* const semi_space_;
-};
-
// Used to verify that there's no references to the from-space.
-class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
+class SemiSpace::VerifyNoFromSpaceReferencesVisitor {
public:
- explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
- from_space_(from_space) {}
+ explicit VerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
+ : from_space_(from_space) {}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
@@ -331,23 +320,10 @@ class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
- SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
+ VerifyNoFromSpaceReferencesVisitor visitor(from_space_);
obj->VisitReferences(visitor, VoidFunctor());
}
-class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
- public:
- explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
- void operator()(Object* obj) const
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- DCHECK(obj != nullptr);
- semi_space_->VerifyNoFromSpaceReferences(obj);
- }
-
- private:
- SemiSpace* const semi_space_;
-};
-
void SemiSpace::MarkReachableObjects() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
{
@@ -390,10 +366,12 @@ void SemiSpace::MarkReachableObjects() {
} else {
TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
- SemiSpaceScanObjectVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->End()),
- visitor);
+ [this](mirror::Object* obj)
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ ScanObject(obj);
+ });
}
if (kIsDebugBuild) {
// Verify that there are no from-space references that
@@ -401,10 +379,13 @@ void SemiSpace::MarkReachableObjects() {
// card table) didn't miss any from-space references in the
// space.
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
- SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->End()),
- visitor);
+ [this](Object* obj)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ VerifyNoFromSpaceReferences(obj);
+ });
}
}
}
@@ -424,10 +405,12 @@ void SemiSpace::MarkReachableObjects() {
// classes (primitive array classes) that could move though they
// don't contain any other references.
accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
- SemiSpaceScanObjectVisitor visitor(this);
large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
reinterpret_cast<uintptr_t>(los->End()),
- visitor);
+ [this](mirror::Object* obj)
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ ScanObject(obj);
+ });
}
// Recursively process the mark stack.
ProcessMarkStack();
@@ -697,10 +680,9 @@ void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference*
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
-class SemiSpaceMarkObjectVisitor {
+class SemiSpace::MarkObjectVisitor {
public:
- explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
- }
+ explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -739,7 +721,7 @@ class SemiSpaceMarkObjectVisitor {
// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
- SemiSpaceMarkObjectVisitor visitor(this);
+ MarkObjectVisitor visitor(this);
obj->VisitReferences(visitor, visitor);
}
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 0199e1ae56..694e536b7d 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -272,7 +272,9 @@ class SemiSpace : public GarbageCollector {
bool swap_semi_spaces_;
private:
- friend class BitmapSetSlowPathVisitor;
+ class BitmapSetSlowPathVisitor;
+ class MarkObjectVisitor;
+ class VerifyNoFromSpaceReferencesVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(SemiSpace);
};
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index cdd5f2e120..8da1493db6 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -442,7 +442,7 @@ Heap::Heap(size_t initial_size,
}
// Create other spaces based on whether or not we have a moving GC.
if (foreground_collector_type_ == kCollectorTypeCC) {
- region_space_ = space::RegionSpace::Create("Region space", capacity_ * 2, request_begin);
+ region_space_ = space::RegionSpace::Create("main space (region space)", capacity_ * 2, request_begin);
AddSpace(region_space_);
} else if (IsMovingGc(foreground_collector_type_) &&
foreground_collector_type_ != kCollectorTypeGSS) {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 78c570fa99..8cadc2e0fc 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1130,6 +1130,10 @@ static bool RelocateInPlace(ImageHeader& image_header,
image_header.VisitPackedArtFields(&field_visitor, target_base);
}
{
+ TimingLogger::ScopedTiming timing("Fixup imt", &logger);
+ image_header.VisitPackedImTables(fixup_adapter, target_base, pointer_size);
+ }
+ {
TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size);
}
@@ -1154,6 +1158,80 @@ static bool RelocateInPlace(ImageHeader& image_header,
return true;
}
+static MemMap* LoadImageFile(const char* image_filename,
+ const char* image_location,
+ const ImageHeader& image_header,
+ uint8_t* address,
+ int fd,
+ TimingLogger& logger,
+ std::string* error_msg) {
+ TimingLogger::ScopedTiming timing("MapImageFile", &logger);
+ const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
+ if (storage_mode == ImageHeader::kStorageModeUncompressed) {
+ return MemMap::MapFileAtAddress(address,
+ image_header.GetImageSize(),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ fd,
+ 0,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ image_filename,
+ error_msg);
+ }
+
+ if (storage_mode != ImageHeader::kStorageModeLZ4 &&
+ storage_mode != ImageHeader::kStorageModeLZ4HC) {
+ *error_msg = StringPrintf("Invalid storage mode in image header %d",
+ static_cast<int>(storage_mode));
+ return nullptr;
+ }
+
+ // Reserve output and decompress into it.
+ std::unique_ptr<MemMap> map(MemMap::MapAnonymous(image_location,
+ address,
+ image_header.GetImageSize(),
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ error_msg));
+ if (map != nullptr) {
+ const size_t stored_size = image_header.GetDataSize();
+ const size_t decompress_offset = sizeof(ImageHeader); // Skip the header.
+ std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size,
+ PROT_READ,
+ MAP_PRIVATE,
+ fd,
+ /*offset*/0,
+ /*low_4gb*/false,
+ image_filename,
+ error_msg));
+ if (temp_map == nullptr) {
+ DCHECK(!error_msg->empty());
+ return nullptr;
+ }
+ memcpy(map->Begin(), &image_header, sizeof(ImageHeader));
+ const uint64_t start = NanoTime();
+ // LZ4HC and LZ4 have the same internal format; both are decoded with LZ4_decompress_safe.
+ TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
+ const size_t decompressed_size = LZ4_decompress_safe(
+ reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
+ reinterpret_cast<char*>(map->Begin()) + decompress_offset,
+ stored_size,
+ map->Size() - decompress_offset);
+ VLOG(image) << "Decompressing image took " << PrettyDuration(NanoTime() - start);
+ if (decompressed_size + sizeof(ImageHeader) != image_header.GetImageSize()) {
+ *error_msg = StringPrintf(
+ "Decompressed size does not match expected image size %zu vs %zu",
+ decompressed_size + sizeof(ImageHeader),
+ image_header.GetImageSize());
+ return nullptr;
+ }
+ }
+
+ return map.release();
+}
+
ImageSpace* ImageSpace::Init(const char* image_filename,
const char* image_location,
bool validate_oat_file,
@@ -1231,91 +1309,30 @@ ImageSpace* ImageSpace::Init(const char* image_filename,
return nullptr;
}
- // The preferred address to map the image, null specifies any address. If we manage to map the
- // image at the image begin, the amount of fixup work required is minimized.
- std::vector<uint8_t*> addresses(1, image_header->GetImageBegin());
- if (image_header->IsPic()) {
- // Can also map at a random low_4gb address since we can relocate in-place.
- addresses.push_back(nullptr);
- }
-
- // Note: The image header is part of the image due to mmap page alignment required of offset.
std::unique_ptr<MemMap> map;
- std::string temp_error_msg;
- for (uint8_t* address : addresses) {
- TimingLogger::ScopedTiming timing("MapImageFile", &logger);
- // Only care about the error message for the last address in addresses. We want to avoid the
- // overhead of printing the process maps if we can relocate.
- std::string* out_error_msg = (address == addresses.back()) ? &temp_error_msg : nullptr;
- const ImageHeader::StorageMode storage_mode = image_header->GetStorageMode();
- if (storage_mode == ImageHeader::kStorageModeUncompressed) {
- map.reset(MemMap::MapFileAtAddress(address,
- image_header->GetImageSize(),
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- file->Fd(),
- 0,
- /*low_4gb*/true,
- /*reuse*/false,
- image_filename,
- /*out*/out_error_msg));
- } else {
- if (storage_mode != ImageHeader::kStorageModeLZ4 &&
- storage_mode != ImageHeader::kStorageModeLZ4HC) {
- *error_msg = StringPrintf("Invalid storage mode in image header %d",
- static_cast<int>(storage_mode));
- return nullptr;
- }
- // Reserve output and decompress into it.
- map.reset(MemMap::MapAnonymous(image_location,
- address,
- image_header->GetImageSize(),
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- /*reuse*/false,
- /*out*/out_error_msg));
- if (map != nullptr) {
- const size_t stored_size = image_header->GetDataSize();
- const size_t decompress_offset = sizeof(ImageHeader); // Skip the header.
- std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size,
- PROT_READ,
- MAP_PRIVATE,
- file->Fd(),
- /*offset*/0,
- /*low_4gb*/false,
- image_filename,
- out_error_msg));
- if (temp_map == nullptr) {
- DCHECK(!out_error_msg->empty());
- return nullptr;
- }
- memcpy(map->Begin(), image_header, sizeof(ImageHeader));
- const uint64_t start = NanoTime();
- // LZ4HC and LZ4 have same internal format, both use LZ4_decompress.
- TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
- const size_t decompressed_size = LZ4_decompress_safe(
- reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
- reinterpret_cast<char*>(map->Begin()) + decompress_offset,
- stored_size,
- map->Size() - decompress_offset);
- VLOG(image) << "Decompressing image took " << PrettyDuration(NanoTime() - start);
- if (decompressed_size + sizeof(ImageHeader) != image_header->GetImageSize()) {
- *error_msg = StringPrintf(
- "Decompressed size does not match expected image size %zu vs %zu",
- decompressed_size + sizeof(ImageHeader),
- image_header->GetImageSize());
- return nullptr;
- }
- }
- }
- if (map != nullptr) {
- break;
- }
- }
-
+ // GetImageBegin is the preferred address to map the image. If we manage to map the
+ // image at the image begin, the amount of fixup work required is minimized.
+ map.reset(LoadImageFile(image_filename,
+ image_location,
+ *image_header,
+ image_header->GetImageBegin(),
+ file->Fd(),
+ logger,
+ error_msg));
+ // If the header specifies PIC mode, we can also map at a random low_4gb address since we can
+ // relocate in-place.
+ if (map == nullptr && image_header->IsPic()) {
+ map.reset(LoadImageFile(image_filename,
+ image_location,
+ *image_header,
+ /* address */ nullptr,
+ file->Fd(),
+ logger,
+ error_msg));
+ }
+ // Were we able to load something and continue?
if (map == nullptr) {
- DCHECK(!temp_error_msg.empty());
- *error_msg = temp_error_msg;
+ DCHECK(!error_msg->empty());
return nullptr;
}
DCHECK_EQ(0, memcmp(image_header, map->Begin(), sizeof(ImageHeader)));
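
LoadImageFile above maps the file, copies the ImageHeader into the reserved region, inflates the body with LZ4_decompress_safe, and cross-checks the decompressed size against the header. A stand-alone sketch of that contract, assuming the stock lz4.h API; the buffer names and the expected-size check below are illustrative, not ART code:

#include <lz4.h>
#include <cstddef>

// Returns true when the compressed body inflates to exactly the size the header promised.
// LZ4_decompress_safe returns the number of bytes written, or a negative value on
// malformed input or insufficient output capacity.
bool DecompressBody(const char* compressed, size_t stored_size,
                    char* out, size_t out_capacity, size_t expected_size) {
  const int written = LZ4_decompress_safe(compressed, out,
                                          static_cast<int>(stored_size),
                                          static_cast<int>(out_capacity));
  return written >= 0 && static_cast<size_t>(written) == expected_size;
}
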
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index ea75a622c7..cd0557a235 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -20,6 +20,7 @@
#include "image.h"
#include "art_method.h"
+#include "imtable.h"
namespace art {
@@ -45,6 +46,24 @@ inline mirror::ObjectArray<mirror::Object>* ImageHeader::GetImageRoots() const {
}
template <typename Visitor>
+inline void ImageHeader::VisitPackedImTables(const Visitor& visitor,
+ uint8_t* base,
+ size_t pointer_size) const {
+ const ImageSection& section = GetImageSection(kSectionImTables);
+ for (size_t pos = 0; pos < section.Size();) {
+ ImTable* imt = reinterpret_cast<ImTable*>(base + section.Offset() + pos);
+ for (size_t i = 0; i < ImTable::kSize; ++i) {
+ ArtMethod* orig = imt->Get(i, pointer_size);
+ ArtMethod* updated = visitor(orig);
+ if (updated != orig) {
+ imt->Set(i, updated, pointer_size);
+ }
+ }
+ pos += ImTable::SizeInBytes(pointer_size);
+ }
+}
+
+template <typename Visitor>
inline void ImageHeader::VisitPackedImtConflictTables(const Visitor& visitor,
uint8_t* base,
size_t pointer_size) const {
diff --git a/runtime/image.cc b/runtime/image.cc
index a9552c27d3..2362a92c24 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '9', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '0', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index 2ea9af7728..06f06eed0e 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -195,6 +195,7 @@ class PACKED(4) ImageHeader {
kSectionArtFields,
kSectionArtMethods,
kSectionRuntimeMethods,
+ kSectionImTables,
kSectionIMTConflictTables,
kSectionDexCacheArrays,
kSectionInternedStrings,
@@ -279,6 +280,11 @@ class PACKED(4) ImageHeader {
void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;
template <typename Visitor>
+ void VisitPackedImTables(const Visitor& visitor,
+ uint8_t* base,
+ size_t pointer_size) const;
+
+ template <typename Visitor>
void VisitPackedImtConflictTables(const Visitor& visitor,
uint8_t* base,
size_t pointer_size) const;
diff --git a/runtime/imtable.h b/runtime/imtable.h
new file mode 100644
index 0000000000..51faf70d14
--- /dev/null
+++ b/runtime/imtable.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_IMTABLE_H_
+#define ART_RUNTIME_IMTABLE_H_
+
+#ifndef IMT_SIZE
+#error IMT_SIZE not defined
+#endif
+
+namespace art {
+
+class ArtMethod;
+
+class ImTable {
+ public:
+ // Interface method table size. Increasing this value reduces the chance of two interface methods
+ // colliding in the interface method table but increases the size of classes that implement
+ // (non-marker) interfaces.
+ static constexpr size_t kSize = IMT_SIZE;
+
+ ArtMethod* Get(size_t index, size_t pointer_size) {
+ DCHECK_LT(index, kSize);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
+ if (pointer_size == 4) {
+ uint32_t value = *reinterpret_cast<uint32_t*>(ptr);
+ return reinterpret_cast<ArtMethod*>(value);
+ } else {
+ uint64_t value = *reinterpret_cast<uint64_t*>(ptr);
+ return reinterpret_cast<ArtMethod*>(value);
+ }
+ }
+
+ void Set(size_t index, ArtMethod* method, size_t pointer_size) {
+ DCHECK_LT(index, kSize);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
+ if (pointer_size == 4) {
+ uintptr_t value = reinterpret_cast<uintptr_t>(method);
+ DCHECK_EQ(static_cast<uint32_t>(value), value); // Check that we don't lose any non-zero bits.
+ *reinterpret_cast<uint32_t*>(ptr) = static_cast<uint32_t>(value);
+ } else {
+ *reinterpret_cast<uint64_t*>(ptr) = reinterpret_cast<uint64_t>(method);
+ }
+ }
+
+ static size_t OffsetOfElement(size_t index, size_t pointer_size) {
+ return index * pointer_size;
+ }
+
+ void Populate(ArtMethod** data, size_t pointer_size) {
+ for (size_t i = 0; i < kSize; ++i) {
+ Set(i, data[i], pointer_size);
+ }
+ }
+
+ constexpr static size_t SizeInBytes(size_t pointer_size) {
+ return kSize * pointer_size;
+ }
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_IMTABLE_H_
+
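
The new ImTable is just kSize pointer-sized slots laid out back to back with no header, so the whole table occupies kSize * pointer_size bytes and slot i lives at offset i * pointer_size. A tiny stand-alone model of that arithmetic; the 64-entry size is an assumption here, since IMT_SIZE is injected by the build:

#include <cstddef>

constexpr size_t kAssumedImtSize = 64;  // Hypothetical; the real value comes from -DIMT_SIZE.

constexpr size_t SlotOffset(size_t index, size_t pointer_size) {
  return index * pointer_size;            // Mirrors ImTable::OffsetOfElement.
}

constexpr size_t TableBytes(size_t pointer_size) {
  return kAssumedImtSize * pointer_size;  // Mirrors ImTable::SizeInBytes.
}

static_assert(TableBytes(4) == 256, "32-bit image: 64 slots * 4 bytes");
static_assert(TableBytes(8) == 512, "64-bit image: 64 slots * 8 bytes");
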
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index cc470f372b..3750b7ad18 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -679,7 +679,7 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
return false;
}
const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- CHECK(receiver->GetClass()->ShouldHaveEmbeddedImtAndVTable());
+ CHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
vtable_idx, sizeof(void*));
if (UNLIKELY(called_method == nullptr)) {
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index d983a9fa19..c2164126e1 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -74,6 +74,10 @@ class SharedLibrary {
if (self != nullptr) {
self->GetJniEnv()->DeleteWeakGlobalRef(class_loader_);
}
+
+ if (!needs_native_bridge_) {
+ android::CloseNativeLibrary(handle_);
+ }
}
jweak GetClassLoader() const {
@@ -271,8 +275,7 @@ class Libraries {
REQUIRES(!Locks::jni_libraries_lock_)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- typedef void (*JNI_OnUnloadFn)(JavaVM*, void*);
- std::vector<JNI_OnUnloadFn> unload_functions;
+ std::vector<SharedLibrary*> unload_libraries;
{
MutexLock mu(soa.Self(), *Locks::jni_libraries_lock_);
for (auto it = libraries_.begin(); it != libraries_.end(); ) {
@@ -283,15 +286,7 @@ class Libraries {
// the native libraries of the boot class loader.
if (class_loader != nullptr &&
soa.Self()->IsJWeakCleared(class_loader)) {
- void* const sym = library->FindSymbol("JNI_OnUnload", nullptr);
- if (sym == nullptr) {
- VLOG(jni) << "[No JNI_OnUnload found in \"" << library->GetPath() << "\"]";
- } else {
- VLOG(jni) << "[JNI_OnUnload found for \"" << library->GetPath() << "\"]";
- JNI_OnUnloadFn jni_on_unload = reinterpret_cast<JNI_OnUnloadFn>(sym);
- unload_functions.push_back(jni_on_unload);
- }
- delete library;
+ unload_libraries.push_back(library);
it = libraries_.erase(it);
} else {
++it;
@@ -299,9 +294,17 @@ class Libraries {
}
}
// Do this without holding the jni libraries lock to prevent possible deadlocks.
- for (JNI_OnUnloadFn fn : unload_functions) {
- VLOG(jni) << "Calling JNI_OnUnload";
- (*fn)(soa.Vm(), nullptr);
+ typedef void (*JNI_OnUnloadFn)(JavaVM*, void*);
+ for (auto library : unload_libraries) {
+ void* const sym = library->FindSymbol("JNI_OnUnload", nullptr);
+ if (sym == nullptr) {
+ VLOG(jni) << "[No JNI_OnUnload found in \"" << library->GetPath() << "\"]";
+ } else {
+ VLOG(jni) << "[JNI_OnUnload found for \"" << library->GetPath() << "\"]: Calling...";
+ JNI_OnUnloadFn jni_on_unload = reinterpret_cast<JNI_OnUnloadFn>(sym);
+ jni_on_unload(soa.Vm(), nullptr);
+ }
+ delete library;
}
}
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 771f8ed290..c047ba20f5 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -157,6 +157,8 @@ bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string*
}
return false;
}
+
+ ScopedBacktraceMapIteratorLock lock(map.get());
for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
if ((begin >= it->start && begin < it->end) // start of new within old
&& (end > it->start && end <= it->end)) { // end of new within old
@@ -180,6 +182,7 @@ static bool CheckNonOverlapping(uintptr_t begin,
*error_msg = StringPrintf("Failed to build process map");
return false;
}
+ ScopedBacktraceMapIteratorLock lock(map.get());
for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
if ((begin >= it->start && begin < it->end) // start of new within old
|| (end > it->start && end < it->end) // end of new within old
@@ -339,7 +342,9 @@ MemMap* MemMap::MapAnonymous(const char* name,
if (actual == MAP_FAILED) {
if (error_msg != nullptr) {
- PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
+ if (kIsDebugBuild || VLOG_IS_ON(oat)) {
+ PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
+ }
*error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
"See process maps in the log.",
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index fcdfc88495..b783a019e7 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -247,38 +247,19 @@ inline void Class::SetVTable(PointerArray* new_vtable) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable);
}
-inline MemberOffset Class::EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size) {
- DCHECK_LT(i, kImtSize);
- return MemberOffset(
- EmbeddedImTableOffset(pointer_size).Uint32Value() + i * ImTableEntrySize(pointer_size));
-}
-
-template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline ArtMethod* Class::GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size) {
- DCHECK((ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()));
- return GetFieldPtrWithSize<ArtMethod*>(
- EmbeddedImTableEntryOffset(i, pointer_size), pointer_size);
-}
-
-template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline void Class::SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) {
- DCHECK((ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()));
- SetFieldPtrWithSize<false>(EmbeddedImTableEntryOffset(i, pointer_size), method, pointer_size);
-}
-
inline bool Class::HasVTable() {
- return GetVTable() != nullptr || ShouldHaveEmbeddedImtAndVTable();
+ return GetVTable() != nullptr || ShouldHaveEmbeddedVTable();
}
inline int32_t Class::GetVTableLength() {
- if (ShouldHaveEmbeddedImtAndVTable()) {
+ if (ShouldHaveEmbeddedVTable()) {
return GetEmbeddedVTableLength();
}
return GetVTable() != nullptr ? GetVTable()->GetLength() : 0;
}
inline ArtMethod* Class::GetVTableEntry(uint32_t i, size_t pointer_size) {
- if (ShouldHaveEmbeddedImtAndVTable()) {
+ if (ShouldHaveEmbeddedVTable()) {
return GetEmbeddedVTableEntry(i, pointer_size);
}
auto* vtable = GetVTable();
@@ -294,6 +275,14 @@ inline void Class::SetEmbeddedVTableLength(int32_t len) {
SetField32<false>(MemberOffset(EmbeddedVTableLengthOffset()), len);
}
+inline ImTable* Class::GetImt(size_t pointer_size) {
+ return GetFieldPtrWithSize<ImTable*>(MemberOffset(ImtPtrOffset(pointer_size)), pointer_size);
+}
+
+inline void Class::SetImt(ImTable* imt, size_t pointer_size) {
+ return SetFieldPtrWithSize<false>(MemberOffset(ImtPtrOffset(pointer_size)), imt, pointer_size);
+}
+
inline MemberOffset Class::EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size) {
return MemberOffset(
EmbeddedVTableOffset(pointer_size).Uint32Value() + i * VTableEntrySize(pointer_size));
@@ -532,7 +521,7 @@ template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() {
Class* super_class = GetSuperClass<kVerifyFlags, kReadBarrierOption>();
return (super_class != nullptr)
- ? MemberOffset(RoundUp(super_class->GetObjectSize(),
+ ? MemberOffset(RoundUp(super_class->GetObjectSize<kVerifyFlags, kReadBarrierOption>(),
sizeof(mirror::HeapReference<mirror::Object>)))
: ClassOffset();
}
@@ -541,7 +530,7 @@ template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(size_t pointer_size) {
DCHECK(IsResolved());
uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
- if (ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()) {
+ if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
// Static fields come after the embedded tables.
base = mirror::Class::ComputeClassSize(
true, GetEmbeddedVTableLength(), 0, 0, 0, 0, 0, pointer_size);
@@ -552,7 +541,7 @@ inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(size_t pointer_siz
inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size) {
DCHECK(IsLoaded());
uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
- if (ShouldHaveEmbeddedImtAndVTable()) {
+ if (ShouldHaveEmbeddedVTable()) {
// Static fields come after the embedded tables.
base = mirror::Class::ComputeClassSize(true, GetVTableDuringLinking()->GetLength(),
0, 0, 0, 0, 0, pointer_size);
@@ -711,7 +700,7 @@ inline Object* Class::AllocNonMovableObject(Thread* self) {
return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
}
-inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
+inline uint32_t Class::ComputeClassSize(bool has_embedded_vtable,
uint32_t num_vtable_entries,
uint32_t num_8bit_static_fields,
uint32_t num_16bit_static_fields,
@@ -722,11 +711,10 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
// Space used by java.lang.Class and its instance fields.
uint32_t size = sizeof(Class);
// Space used by embedded tables.
- if (has_embedded_tables) {
- const uint32_t embedded_imt_size = kImtSize * ImTableEntrySize(pointer_size);
- const uint32_t embedded_vtable_size = num_vtable_entries * VTableEntrySize(pointer_size);
- size = RoundUp(size + sizeof(uint32_t) /* embedded vtable len */, pointer_size) +
- embedded_imt_size + embedded_vtable_size;
+ if (has_embedded_vtable) {
+ size = RoundUp(size + sizeof(uint32_t), pointer_size);
+ size += pointer_size; // size of pointer to IMT
+ size += num_vtable_entries * VTableEntrySize(pointer_size);
}
// Space used by reference statics.
@@ -778,7 +766,8 @@ inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor)
}
if (kVisitNativeRoots) {
// Since this class is reachable, we must also visit the associated roots when we scan it.
- VisitNativeRoots(visitor, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+ VisitNativeRoots<kReadBarrierOption>(
+ visitor, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
}
}
@@ -917,24 +906,24 @@ inline GcRoot<String>* Class::GetDexCacheStrings() {
return GetFieldPtr<GcRoot<String>*>(DexCacheStringsOffset());
}
-template<class Visitor>
+template<ReadBarrierOption kReadBarrierOption, class Visitor>
void mirror::Class::VisitNativeRoots(Visitor& visitor, size_t pointer_size) {
for (ArtField& field : GetSFieldsUnchecked()) {
// Visit roots first in case the declaring class gets moved.
field.VisitRoots(visitor);
if (kIsDebugBuild && IsResolved()) {
- CHECK_EQ(field.GetDeclaringClass(), this) << GetStatus();
+ CHECK_EQ(field.GetDeclaringClass<kReadBarrierOption>(), this) << GetStatus();
}
}
for (ArtField& field : GetIFieldsUnchecked()) {
// Visit roots first in case the declaring class gets moved.
field.VisitRoots(visitor);
if (kIsDebugBuild && IsResolved()) {
- CHECK_EQ(field.GetDeclaringClass(), this) << GetStatus();
+ CHECK_EQ(field.GetDeclaringClass<kReadBarrierOption>(), this) << GetStatus();
}
}
for (ArtMethod& method : GetMethods(pointer_size)) {
- method.VisitRoots(visitor, pointer_size);
+ method.VisitRoots<kReadBarrierOption>(visitor, pointer_size);
}
}
@@ -989,18 +978,9 @@ inline IterationRange<StrideIterator<ArtField>> Class::GetSFieldsUnchecked() {
return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked());
}
-inline MemberOffset Class::EmbeddedImTableOffset(size_t pointer_size) {
- CheckPointerSize(pointer_size);
- // Round up since we want the embedded imt and vtable to be pointer size aligned in case 64 bits.
- // Add 32 bits for embedded vtable length.
- return MemberOffset(
- RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t), pointer_size));
-}
-
inline MemberOffset Class::EmbeddedVTableOffset(size_t pointer_size) {
CheckPointerSize(pointer_size);
- return MemberOffset(EmbeddedImTableOffset(pointer_size).Uint32Value() +
- kImtSize * ImTableEntrySize(pointer_size));
+ return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + pointer_size);
}
inline void Class::CheckPointerSize(size_t pointer_size) {
@@ -1085,7 +1065,7 @@ inline void Class::FixupNativePointers(mirror::Class* dest,
dest->SetDexCacheStrings(new_strings);
}
// Fix up embedded tables.
- if (!IsTemp() && ShouldHaveEmbeddedImtAndVTable<kVerifyNone, kReadBarrierOption>()) {
+ if (!IsTemp() && ShouldHaveEmbeddedVTable<kVerifyNone, kReadBarrierOption>()) {
for (int32_t i = 0, count = GetEmbeddedVTableLength(); i < count; ++i) {
ArtMethod* method = GetEmbeddedVTableEntry(i, pointer_size);
ArtMethod* new_method = visitor(method);
@@ -1093,16 +1073,9 @@ inline void Class::FixupNativePointers(mirror::Class* dest,
dest->SetEmbeddedVTableEntryUnchecked(i, new_method, pointer_size);
}
}
- for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- ArtMethod* method = GetEmbeddedImTableEntry<kVerifyFlags, kReadBarrierOption>(i,
- pointer_size);
- ArtMethod* new_method = visitor(method);
- if (method != new_method) {
- dest->SetEmbeddedImTableEntry<kVerifyFlags, kReadBarrierOption>(i,
- new_method,
- pointer_size);
- }
- }
+ }
+ if (!IsTemp() && ShouldHaveImt<kVerifyNone, kReadBarrierOption>()) {
+ dest->SetImt(visitor(GetImt(pointer_size)), pointer_size);
}
}
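
With the IMT moved out of the class object, the embedded part of the new ComputeClassSize layout becomes: the 32-bit embedded vtable length, padded up to pointer alignment, then a single pointer to the shared ImTable, then the vtable entries themselves. A stand-alone sketch of that arithmetic; class_size and the entry counts are hypothetical inputs, not ART values:

#include <cstdint>

constexpr uint32_t RoundUpTo(uint32_t value, uint32_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

constexpr uint32_t EmbeddedPartSize(uint32_t class_size,
                                    uint32_t num_vtable_entries,
                                    uint32_t pointer_size) {
  // vtable length word, padded, + IMT pointer + embedded vtable entries.
  return RoundUpTo(class_size + sizeof(uint32_t), pointer_size)
         + pointer_size
         + num_vtable_entries * pointer_size;
}

// e.g. a hypothetical 100-byte Class with 3 vtable entries on a 64-bit image:
// RoundUpTo(104, 8) + 8 + 3 * 8 = 104 + 8 + 24 = 136 bytes.
static_assert(EmbeddedPartSize(100, 3, 8) == 136, "worked example");
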
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index b4a23badba..9c77d3814c 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -914,13 +914,7 @@ const DexFile::TypeList* Class::GetInterfaceTypeList() {
return GetDexFile().GetInterfacesList(*class_def);
}
-void Class::PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize],
- size_t pointer_size) {
- for (size_t i = 0; i < kImtSize; i++) {
- auto method = methods[i];
- DCHECK(method != nullptr);
- SetEmbeddedImTableEntry(i, method, pointer_size);
- }
+void Class::PopulateEmbeddedVTable(size_t pointer_size) {
PointerArray* table = GetVTableDuringLinking();
CHECK(table != nullptr) << PrettyClass(this);
const size_t table_length = table->GetLength();
@@ -967,7 +961,7 @@ class ReadBarrierOnNativeRootsVisitor {
class CopyClassVisitor {
public:
CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig, size_t new_length,
- size_t copy_bytes, ArtMethod* const (&imt)[mirror::Class::kImtSize],
+ size_t copy_bytes, ImTable* imt,
size_t pointer_size)
: self_(self), orig_(orig), new_length_(new_length),
copy_bytes_(copy_bytes), imt_(imt), pointer_size_(pointer_size) {
@@ -979,7 +973,8 @@ class CopyClassVisitor {
Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
mirror::Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_);
- h_new_class_obj->PopulateEmbeddedImtAndVTable(imt_, pointer_size_);
+ h_new_class_obj->PopulateEmbeddedVTable(pointer_size_);
+ h_new_class_obj->SetImt(imt_, pointer_size_);
h_new_class_obj->SetClassSize(new_length_);
// Visit all of the references to make sure there is no from space references in the native
// roots.
@@ -992,13 +987,13 @@ class CopyClassVisitor {
Handle<mirror::Class>* const orig_;
const size_t new_length_;
const size_t copy_bytes_;
- ArtMethod* const (&imt_)[mirror::Class::kImtSize];
+ ImTable* imt_;
const size_t pointer_size_;
DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor);
};
Class* Class::CopyOf(Thread* self, int32_t new_length,
- ArtMethod* const (&imt)[mirror::Class::kImtSize], size_t pointer_size) {
+ ImTable* imt, size_t pointer_size) {
DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
// We may get copied by a compacting GC.
StackHandleScope<1> hs(self);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 8c20fa680f..2adf54ab86 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -22,6 +22,7 @@
#include "class_flags.h"
#include "gc_root.h"
#include "gc/allocator_type.h"
+#include "imtable.h"
#include "invoke_type.h"
#include "modifiers.h"
#include "object.h"
@@ -33,10 +34,6 @@
#include "thread.h"
#include "utils.h"
-#ifndef IMT_SIZE
-#error IMT_SIZE not defined
-#endif
-
namespace art {
class ArtField;
@@ -66,11 +63,6 @@ class MANAGED Class FINAL : public Object {
// 2 ref instance fields.]
static constexpr uint32_t kClassWalkSuper = 0xC0000000;
- // Interface method table size. Increasing this value reduces the chance of two interface methods
- // colliding in the interface method table but increases the size of classes that implement
- // (non-marker) interfaces.
- static constexpr size_t kImtSize = IMT_SIZE;
-
// Class Status
//
// kStatusRetired: Class that's temporarily used till class linking time
@@ -351,7 +343,7 @@ class MANAGED Class FINAL : public Object {
// be replaced with a class with the right size for embedded imt/vtable.
bool IsTemp() SHARED_REQUIRES(Locks::mutator_lock_) {
Status s = GetStatus();
- return s < Status::kStatusResolving && ShouldHaveEmbeddedImtAndVTable();
+ return s < Status::kStatusResolving && ShouldHaveEmbeddedVTable();
}
String* GetName() SHARED_REQUIRES(Locks::mutator_lock_); // Returns the cached name.
@@ -557,7 +549,7 @@ class MANAGED Class FINAL : public Object {
SHARED_REQUIRES(Locks::mutator_lock_);
// Compute how many bytes would be used a class with the given elements.
- static uint32_t ComputeClassSize(bool has_embedded_tables,
+ static uint32_t ComputeClassSize(bool has_embedded_vtable,
uint32_t num_vtable_entries,
uint32_t num_8bit_static_fields,
uint32_t num_16bit_static_fields,
@@ -830,28 +822,29 @@ class MANAGED Class FINAL : public Object {
return MemberOffset(sizeof(Class));
}
+ static MemberOffset ImtPtrOffset(size_t pointer_size) {
+ return MemberOffset(
+ RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t), pointer_size));
+ }
+
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool ShouldHaveEmbeddedImtAndVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool ShouldHaveImt() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>() &&
+ GetIfTable<kVerifyFlags, kReadBarrierOption>() != nullptr &&
+ !IsArrayClass<kVerifyFlags, kReadBarrierOption>();
+ }
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ bool ShouldHaveEmbeddedVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsInstantiable<kVerifyFlags, kReadBarrierOption>();
}
bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
- static MemberOffset EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size);
-
static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size);
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ArtMethod* GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size)
@@ -861,6 +854,10 @@ class MANAGED Class FINAL : public Object {
void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_);
+ ImTable* GetImt(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void SetImt(ImTable* imt, size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -870,7 +867,7 @@ class MANAGED Class FINAL : public Object {
inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- void PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], size_t pointer_size)
+ void PopulateEmbeddedVTable(size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class but potentially from a super class, return the
@@ -1156,7 +1153,7 @@ class MANAGED Class FINAL : public Object {
// Visit native roots visits roots which are keyed off the native pointers such as ArtFields and
// ArtMethods.
- template<class Visitor>
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
void VisitNativeRoots(Visitor& visitor, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1195,7 +1192,7 @@ class MANAGED Class FINAL : public Object {
void AssertInitializedOrInitializingInThread(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_);
- Class* CopyOf(Thread* self, int32_t new_length, ArtMethod* const (&imt)[mirror::Class::kImtSize],
+ Class* CopyOf(Thread* self, int32_t new_length, ImTable* imt,
size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
@@ -1322,10 +1319,7 @@ class MANAGED Class FINAL : public Object {
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size);
-
- static MemberOffset EmbeddedImTableOffset(size_t pointer_size);
static MemberOffset EmbeddedVTableOffset(size_t pointer_size);
-
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 2894b68f03..0b3461f8c2 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -55,7 +55,6 @@ inline Class* DexCache::GetResolvedType(uint32_t type_idx) {
inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) {
DCHECK_LT(type_idx, NumResolvedTypes()); // NOTE: Unchecked, i.e. not throwing AIOOB.
// TODO default transaction support.
- DCHECK(resolved == nullptr || !resolved->IsErroneous());
GetResolvedTypes()[type_idx] = GcRoot<Class>(resolved);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
diff --git a/runtime/oat.h b/runtime/oat.h
index 57675dc738..52d4c4209e 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '7', '9', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '8', '1', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index b7bd99b6f5..42e959c2bd 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -37,11 +37,10 @@ class ArtMethod;
class ReadBarrier {
public:
- // TODO: disable thse flags for production use.
// Enable the to-space invariant checks.
- static constexpr bool kEnableToSpaceInvariantChecks = true;
+ static constexpr bool kEnableToSpaceInvariantChecks = kIsDebugBuild;
// Enable the read barrier checks.
- static constexpr bool kEnableReadBarrierInvariantChecks = true;
+ static constexpr bool kEnableReadBarrierInvariantChecks = kIsDebugBuild;
// It's up to the implementation whether the given field gets updated whereas the return value
// must be an updated reference unless kAlwaysUpdateField is true.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index f1f4a122b4..b9ee4421b9 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -924,10 +924,22 @@ bool Thread::InitStackHwm() {
Runtime* runtime = Runtime::Current();
bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
+
+ // Valgrind on arm doesn't give the right values here. Do not install the guard page, and
+ // effectively disable stack overflow checks (we'll get segfaults, potentially) by setting
+ // stack_begin to 0.
+ const bool valgrind_on_arm =
+ (kRuntimeISA == kArm || kRuntimeISA == kArm64) &&
+ kMemoryToolIsValgrind &&
+ RUNNING_ON_MEMORY_TOOL != 0;
+ if (valgrind_on_arm) {
+ tlsPtr_.stack_begin = nullptr;
+ }
+
ResetDefaultStackEnd();
// Install the protected region if we are doing implicit overflow checks.
- if (implicit_stack_check) {
+ if (implicit_stack_check && !valgrind_on_arm) {
// The thread might have protected region at the bottom. We need
// to install our own region so we need to move the limits
// of the stack to make room for it.
@@ -1122,32 +1134,36 @@ void Thread::ClearSuspendBarrier(AtomicInteger* target) {
}
void Thread::RunCheckpointFunction() {
- Closure *checkpoints[kMaxCheckpoints];
-
- // Grab the suspend_count lock and copy the current set of
- // checkpoints. Then clear the list and the flag. The RequestCheckpoint
- // function will also grab this lock so we prevent a race between setting
- // the kCheckpointRequest flag and clearing it.
- {
- MutexLock mu(this, *Locks::thread_suspend_count_lock_);
- for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
- checkpoints[i] = tlsPtr_.checkpoint_functions[i];
- tlsPtr_.checkpoint_functions[i] = nullptr;
+ bool done = false;
+ do {
+ // Grab the suspend_count lock and copy the checkpoints one by one. When the last checkpoint is
+ // copied, clear the list and the flag. The RequestCheckpoint function will also grab this lock
+ // to prevent a race between setting the kCheckpointRequest flag and clearing it.
+ Closure* checkpoint = nullptr;
+ {
+ MutexLock mu(this, *Locks::thread_suspend_count_lock_);
+ if (tlsPtr_.checkpoint_function != nullptr) {
+ checkpoint = tlsPtr_.checkpoint_function;
+ if (!checkpoint_overflow_.empty()) {
+ // Overflow list not empty, copy the first one out and continue.
+ tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
+ checkpoint_overflow_.pop_front();
+ } else {
+ // No overflow checkpoints, this means that we are on the last pending checkpoint.
+ tlsPtr_.checkpoint_function = nullptr;
+ AtomicClearFlag(kCheckpointRequest);
+ done = true;
+ }
+ } else {
+ LOG(FATAL) << "Checkpoint flag set without pending checkpoint";
+ }
}
- AtomicClearFlag(kCheckpointRequest);
- }
- // Outside the lock, run all the checkpoint functions that
- // we collected.
- bool found_checkpoint = false;
- for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
- if (checkpoints[i] != nullptr) {
- ScopedTrace trace("Run checkpoint function");
- checkpoints[i]->Run(this);
- found_checkpoint = true;
- }
- }
- CHECK(found_checkpoint);
+ // Outside the lock, run the checkpoint functions that we collected.
+ ScopedTrace trace("Run checkpoint function");
+ DCHECK(checkpoint != nullptr);
+ checkpoint->Run(this);
+ } while (!done);
}
bool Thread::RequestCheckpoint(Closure* function) {
@@ -1157,20 +1173,6 @@ bool Thread::RequestCheckpoint(Closure* function) {
return false; // Fail, thread is suspended and so can't run a checkpoint.
}
- uint32_t available_checkpoint = kMaxCheckpoints;
- for (uint32_t i = 0 ; i < kMaxCheckpoints; ++i) {
- if (tlsPtr_.checkpoint_functions[i] == nullptr) {
- available_checkpoint = i;
- break;
- }
- }
- if (available_checkpoint == kMaxCheckpoints) {
- // No checkpoint functions available, we can't run a checkpoint
- return false;
- }
- tlsPtr_.checkpoint_functions[available_checkpoint] = function;
-
- // Checkpoint function installed now install flag bit.
// We must be runnable to request a checkpoint.
DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
union StateAndFlags new_state_and_flags;
@@ -1178,11 +1180,13 @@ bool Thread::RequestCheckpoint(Closure* function) {
new_state_and_flags.as_struct.flags |= kCheckpointRequest;
bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
old_state_and_flags.as_int, new_state_and_flags.as_int);
- if (UNLIKELY(!success)) {
- // The thread changed state before the checkpoint was installed.
- CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
- tlsPtr_.checkpoint_functions[available_checkpoint] = nullptr;
- } else {
+ if (success) {
+ // Succeeded setting checkpoint flag, now insert the actual checkpoint.
+ if (tlsPtr_.checkpoint_function == nullptr) {
+ tlsPtr_.checkpoint_function = function;
+ } else {
+ checkpoint_overflow_.push_back(function);
+ }
CHECK_EQ(ReadFlag(kCheckpointRequest), true);
TriggerSuspend();
}
@@ -1624,9 +1628,7 @@ Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupte
std::fill(tlsPtr_.rosalloc_runs,
tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
gc::allocator::RosAlloc::GetDedicatedFullRun());
- for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
- tlsPtr_.checkpoint_functions[i] = nullptr;
- }
+ tlsPtr_.checkpoint_function = nullptr;
for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
tlsPtr_.active_suspend_barriers[i] = nullptr;
}
@@ -1767,9 +1769,8 @@ Thread::~Thread() {
}
CHECK_NE(GetState(), kRunnable);
CHECK_NE(ReadFlag(kCheckpointRequest), true);
- CHECK(tlsPtr_.checkpoint_functions[0] == nullptr);
- CHECK(tlsPtr_.checkpoint_functions[1] == nullptr);
- CHECK(tlsPtr_.checkpoint_functions[2] == nullptr);
+ CHECK(tlsPtr_.checkpoint_function == nullptr);
+ CHECK_EQ(checkpoint_overflow_.size(), 0u);
CHECK(tlsPtr_.flip_function == nullptr);
CHECK_EQ(tls32_.suspended_at_suspend_check, false);
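
The thread.cc change replaces the fixed array of three checkpoint slots with one slot plus an overflow list: requesters take the slot if it is free and otherwise queue behind it, and the owning thread drains one closure per pass, running it outside the lock, until the queue is empty. A stand-alone sketch of that pattern with stand-in Mutex/Closure types, not the ART ones:

#include <list>
#include <mutex>

struct Closure {
  virtual void Run() = 0;
  virtual ~Closure() {}
};

class CheckpointSlot {
 public:
  // Requesting thread: the first request takes the slot, later requests queue behind it.
  void Request(Closure* checkpoint) {
    std::lock_guard<std::mutex> lock(mu_);
    if (slot_ == nullptr) {
      slot_ = checkpoint;
    } else {
      overflow_.push_back(checkpoint);
    }
  }

  // Owning thread: pop one checkpoint per pass under the lock, run it outside the lock,
  // and stop once the overflow list has been drained.
  void RunAll() {
    bool done = false;
    do {
      Closure* checkpoint = nullptr;
      {
        std::lock_guard<std::mutex> lock(mu_);
        checkpoint = slot_;
        if (!overflow_.empty()) {
          slot_ = overflow_.front();
          overflow_.pop_front();
        } else {
          slot_ = nullptr;
          done = true;
        }
      }
      if (checkpoint != nullptr) {
        checkpoint->Run();
      }
    } while (!done);
  }

 private:
  std::mutex mu_;
  Closure* slot_ = nullptr;
  std::list<Closure*> overflow_;
};
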
diff --git a/runtime/thread.h b/runtime/thread.h
index 3c367ee5b6..7ae9be5d1a 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1220,9 +1220,6 @@ class Thread {
static void ThreadExitCallback(void* arg);
- // Maximum number of checkpoint functions.
- static constexpr uint32_t kMaxCheckpoints = 3;
-
// Maximum number of suspend barriers.
static constexpr uint32_t kMaxSuspendBarriers = 3;
@@ -1452,9 +1449,9 @@ class Thread {
// If no_thread_suspension_ is > 0, what is causing that assertion.
const char* last_no_thread_suspension_cause;
- // Pending checkpoint function or null if non-pending. Installation guarding by
- // Locks::thread_suspend_count_lock_.
- Closure* checkpoint_functions[kMaxCheckpoints];
+ // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
+ // requests another checkpoint, it goes to the checkpoint overflow list.
+ Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
// Pending barriers that require passing or NULL if non-pending. Installation guarding by
// Locks::thread_suspend_count_lock_.
@@ -1517,6 +1514,9 @@ class Thread {
// Debug disable read barrier count, only is checked for debug builds and only in the runtime.
uint8_t debug_disallow_read_barrier_ = 0;
+ // Pending extra checkpoints if checkpoint_function is already used.
+ std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
+
friend class Dbg; // For SetStateUnsafe.
friend class gc::collector::SemiSpace; // For getting stack traces.
friend class Runtime; // For CreatePeer.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index da214796b9..d90bd8d26b 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -234,7 +234,12 @@ void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
os << "DALVIK THREADS (" << list_.size() << "):\n";
}
DumpCheckpoint checkpoint(&os, dump_native_stack);
- size_t threads_running_checkpoint = RunCheckpoint(&checkpoint);
+ size_t threads_running_checkpoint;
+ {
+ // Use SOA to prevent deadlocks if multiple threads are calling Dump() at the same time.
+ ScopedObjectAccess soa(Thread::Current());
+ threads_running_checkpoint = RunCheckpoint(&checkpoint);
+ }
if (threads_running_checkpoint != 0) {
checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
}
@@ -1295,6 +1300,39 @@ void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
}
}
+void ThreadList::VisitRootsForSuspendedThreads(RootVisitor* visitor) {
+ Thread* const self = Thread::Current();
+ std::vector<Thread*> threads_to_visit;
+
+ // Tell threads to suspend and copy them into list.
+ {
+ MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+ for (Thread* thread : list_) {
+ thread->ModifySuspendCount(self, +1, nullptr, false);
+ if (thread == self || thread->IsSuspended()) {
+ threads_to_visit.push_back(thread);
+ } else {
+ thread->ModifySuspendCount(self, -1, nullptr, false);
+ }
+ }
+ }
+
+ // Visit roots without holding thread_list_lock_ and thread_suspend_count_lock_ to prevent lock
+ // order violations.
+ for (Thread* thread : threads_to_visit) {
+ thread->VisitRoots(visitor);
+ }
+
+ // Restore suspend counts.
+ {
+ MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+ for (Thread* thread : threads_to_visit) {
+ thread->ModifySuspendCount(self, -1, nullptr, false);
+ }
+ }
+}
+
void ThreadList::VisitRoots(RootVisitor* visitor) const {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
for (const auto& thread : list_) {
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index df81ad1a7b..49f65e16a7 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -144,6 +144,10 @@ class ThreadList {
void VisitRoots(RootVisitor* visitor) const
SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitRootsForSuspendedThreads(RootVisitor* visitor)
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Return a copy of the thread list.
std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) {
return list_;
diff --git a/test/033-class-init-deadlock/expected.txt b/test/033-class-init-deadlock/expected.txt
index 182d0da00d..9e843a06f6 100644
--- a/test/033-class-init-deadlock/expected.txt
+++ b/test/033-class-init-deadlock/expected.txt
@@ -1,6 +1,4 @@
Deadlock test starting.
-A initializing...
-B initializing...
Deadlock test interrupting threads.
Deadlock test main thread bailing.
A initialized: false
diff --git a/test/033-class-init-deadlock/src/Main.java b/test/033-class-init-deadlock/src/Main.java
index 32332307f5..bd4d4ab7b5 100644
--- a/test/033-class-init-deadlock/src/Main.java
+++ b/test/033-class-init-deadlock/src/Main.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+import java.util.concurrent.CyclicBarrier;
+
/**
* This causes most VMs to lock up.
*
@@ -23,6 +25,8 @@ public class Main {
public static boolean aInitialized = false;
public static boolean bInitialized = false;
+ public static CyclicBarrier barrier = new CyclicBarrier(3);
+
static public void main(String[] args) {
Thread thread1, thread2;
@@ -30,10 +34,10 @@ public class Main {
thread1 = new Thread() { public void run() { new A(); } };
thread2 = new Thread() { public void run() { new B(); } };
thread1.start();
- // Give thread1 a chance to start before starting thread2.
- try { Thread.sleep(1000); } catch (InterruptedException ie) { }
thread2.start();
+ // Not expecting any exceptions, so print them out if we get them.
+ try { barrier.await(); } catch (Exception e) { System.out.println(e); }
try { Thread.sleep(6000); } catch (InterruptedException ie) { }
System.out.println("Deadlock test interrupting threads.");
@@ -48,8 +52,8 @@ public class Main {
class A {
static {
- System.out.println("A initializing...");
- try { Thread.sleep(3000); } catch (InterruptedException ie) { }
+ // Not expecting any exceptions, so print them out if we get them.
+ try { Main.barrier.await(); } catch (Exception e) { System.out.println(e); }
new B();
System.out.println("A initialized");
Main.aInitialized = true;
@@ -58,8 +62,8 @@ class A {
class B {
static {
- System.out.println("B initializing...");
- try { Thread.sleep(3000); } catch (InterruptedException ie) { }
+ // Not expecting any exceptions, so print them out if we get them.
+ try { Main.barrier.await(); } catch (Exception e) { System.out.println(e); }
new A();
System.out.println("B initialized");
Main.bInitialized = true;
diff --git a/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
index c9110a905d..b7293015cf 100644
--- a/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
+++ b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
@@ -27,8 +27,20 @@ namespace art {
namespace {
static volatile std::atomic<bool> vm_was_shutdown(false);
+static const int kThreadCount = 4;
+
+static std::atomic<int> barrier_count(kThreadCount + 1);
+
+static void JniThreadBarrierWait() {
+ barrier_count--;
+ while (barrier_count.load() != 0) {
+ usleep(1000);
+ }
+}
extern "C" JNIEXPORT void JNICALL Java_Main_waitAndCallIntoJniEnv(JNIEnv* env, jclass) {
+ // Wait for all threads to enter JNI together.
+ JniThreadBarrierWait();
// Wait until the runtime is shutdown.
while (!vm_was_shutdown.load()) {
usleep(1000);
@@ -40,6 +52,8 @@ extern "C" JNIEXPORT void JNICALL Java_Main_waitAndCallIntoJniEnv(JNIEnv* env, j
// NO_RETURN does not work with extern "C" for target builds.
extern "C" JNIEXPORT void JNICALL Java_Main_destroyJavaVMAndExit(JNIEnv* env, jclass) {
+ // Wait for all threads to enter JNI together.
+ JniThreadBarrierWait();
// Fake up the managed stack so we can detach.
Thread* const self = Thread::Current();
self->SetTopOfStack(nullptr);
diff --git a/test/149-suspend-all-stress/check b/test/149-suspend-all-stress/check
new file mode 100755
index 0000000000..d30b8888ca
--- /dev/null
+++ b/test/149-suspend-all-stress/check
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Only compare the last line.
+tail -n 1 "$2" | diff --strip-trailing-cr -q "$1" - >/dev/null
\ No newline at end of file
diff --git a/test/149-suspend-all-stress/expected.txt b/test/149-suspend-all-stress/expected.txt
index f993efcdad..134d8d0b47 100644
--- a/test/149-suspend-all-stress/expected.txt
+++ b/test/149-suspend-all-stress/expected.txt
@@ -1,2 +1 @@
-JNI_OnLoad called
Finishing
diff --git a/test/149-suspend-all-stress/suspend_all.cc b/test/149-suspend-all-stress/suspend_all.cc
index c22ddadc22..dfd944a267 100644
--- a/test/149-suspend-all-stress/suspend_all.cc
+++ b/test/149-suspend-all-stress/suspend_all.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/time_utils.h"
#include "jni.h"
#include "runtime.h"
#include "thread_list.h"
@@ -21,12 +22,42 @@
namespace art {
extern "C" JNIEXPORT void JNICALL Java_Main_suspendAndResume(JNIEnv*, jclass) {
- usleep(100 * 1000); // Leave some time for threads to get in here before we start suspending.
- for (size_t i = 0; i < 500; ++i) {
- Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
- usleep(500);
- Runtime::Current()->GetThreadList()->ResumeAll();
+ static constexpr size_t kInitialSleepUS = 100 * 1000; // 100ms.
+ usleep(kInitialSleepUS); // Leave some time for threads to get in here before we start suspending.
+ enum Operation {
+ kOPSuspendAll,
+ kOPDumpStack,
+ kOPSuspendAllDumpStack,
+ // Total number of operations.
+ kOPNumber,
+ };
+ const uint64_t start_time = NanoTime();
+ size_t iterations = 0;
+ // Run for a fixed period of 10 seconds.
+ while (NanoTime() - start_time < MsToNs(10 * 1000)) {
+ switch (static_cast<Operation>(iterations % kOPNumber)) {
+ case kOPSuspendAll: {
+ ScopedSuspendAll ssa(__FUNCTION__);
+ usleep(500);
+ break;
+ }
+ case kOPDumpStack: {
+ Runtime::Current()->GetThreadList()->Dump(LOG(INFO));
+ usleep(500);
+ break;
+ }
+ case kOPSuspendAllDumpStack: {
+ // Not yet supported.
+ // ScopedSuspendAll ssa(__FUNCTION__);
+ // Runtime::Current()->GetThreadList()->Dump(LOG(INFO));
+ break;
+ }
+ case kOPNumber:
+ break;
+ }
+ ++iterations;
}
+ LOG(INFO) << "Did " << iterations << " iterations";
}
} // namespace art
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 7b2c6cbcd5..c0d93dd8a1 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -17,6 +17,7 @@
#include "art_method.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
+#include "jit/profiling_info.h"
#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "stack_map.h"
@@ -37,8 +38,10 @@ static void do_checks(jclass cls, const char* method_name) {
if (code_cache->ContainsPc(header->GetCode())) {
break;
} else {
- // sleep one second to give time to the JIT compiler.
- sleep(1);
+ // Sleep to yield to the compiler thread.
+ usleep(1000);
+ // Will either ensure it's compiled or do the compilation itself.
+ jit->CompileMethod(method, soa.Self(), /* osr */ false);
}
}
@@ -47,7 +50,25 @@ static void do_checks(jclass cls, const char* method_name) {
CHECK(info.HasInlineInfo(encoding));
}
-extern "C" JNIEXPORT void JNICALL Java_Main_ensureJittedAndPolymorphicInline(JNIEnv*, jclass cls) {
+static void allocate_profiling_info(jclass cls, const char* method_name) {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
+ ArtMethod* method = klass->FindDeclaredDirectMethodByName(method_name, sizeof(void*));
+ ProfilingInfo::Create(soa.Self(), method, /* retry_allocation */ true);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_ensureProfilingInfo566(JNIEnv*, jclass cls) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit == nullptr) {
+ return;
+ }
+
+ allocate_profiling_info(cls, "testInvokeVirtual");
+ allocate_profiling_info(cls, "testInvokeInterface");
+ allocate_profiling_info(cls, "$noinline$testInlineToSameTarget");
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_ensureJittedAndPolymorphicInline566(JNIEnv*, jclass cls) {
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit == nullptr) {
return;
diff --git a/test/566-polymorphic-inlining/src/Main.java b/test/566-polymorphic-inlining/src/Main.java
index a59ce5b344..d39e6ed57b 100644
--- a/test/566-polymorphic-inlining/src/Main.java
+++ b/test/566-polymorphic-inlining/src/Main.java
@@ -39,6 +39,9 @@ public class Main implements Itf {
itfs[1] = mains[1] = new Subclass();
itfs[2] = mains[2] = new OtherSubclass();
+ // Create the profiling info eagerly to make sure they are filled.
+ ensureProfilingInfo566();
+
// Make testInvokeVirtual and testInvokeInterface hot to get them jitted.
// We pass Main and Subclass to get polymorphic inlining based on calling
// the same method.
@@ -51,7 +54,7 @@ public class Main implements Itf {
$noinline$testInlineToSameTarget(mains[1]);
}
- ensureJittedAndPolymorphicInline();
+ ensureJittedAndPolymorphicInline566();
// At this point, the JIT should have compiled both methods, and inline
// sameInvokeVirtual and sameInvokeInterface.
@@ -71,12 +74,12 @@ public class Main implements Itf {
}
public Class sameInvokeVirtual() {
- field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo
+ field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo.
return Main.class;
}
public Class sameInvokeInterface() {
- field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo
+ field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo.
return Itf.class;
}
@@ -95,7 +98,8 @@ public class Main implements Itf {
public Object field = new Object();
- public static native void ensureJittedAndPolymorphicInline();
+ public static native void ensureJittedAndPolymorphicInline566();
+ public static native void ensureProfilingInfo566();
public void increment() {
field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo
diff --git a/test/570-checker-select/src/Main.java b/test/570-checker-select/src/Main.java
index 59741d6d05..e0a76ca022 100644
--- a/test/570-checker-select/src/Main.java
+++ b/test/570-checker-select/src/Main.java
@@ -16,6 +16,8 @@
public class Main {
+ static boolean doThrow = false;
+
/// CHECK-START: int Main.BoolCond_IntVarVar(boolean, int, int) register (after)
/// CHECK: Select [{{i\d+}},{{i\d+}},{{z\d+}}]
@@ -35,6 +37,10 @@ public class Main {
/// CHECK: cmovnz/ne
public static int BoolCond_IntVarVar(boolean cond, int x, int y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return cond ? x : y;
}
@@ -57,6 +63,10 @@ public class Main {
/// CHECK: cmovnz/ne
public static int BoolCond_IntVarCst(boolean cond, int x) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return cond ? x : 1;
}
@@ -79,6 +89,10 @@ public class Main {
/// CHECK: cmovnz/ne
public static int BoolCond_IntCstVar(boolean cond, int y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return cond ? 1 : y;
}
@@ -102,6 +116,10 @@ public class Main {
/// CHECK-NEXT: cmovnz/ne
public static long BoolCond_LongVarVar(boolean cond, long x, long y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return cond ? x : y;
}
@@ -125,6 +143,10 @@ public class Main {
/// CHECK-NEXT: cmovnz/ne
public static long BoolCond_LongVarCst(boolean cond, long x) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return cond ? x : 1L;
}
@@ -148,6 +170,10 @@ public class Main {
/// CHECK-NEXT: cmovnz/ne
public static long BoolCond_LongCstVar(boolean cond, long y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return cond ? 1L : y;
}
@@ -160,6 +186,10 @@ public class Main {
/// CHECK-NEXT: fcsel ne
public static float BoolCond_FloatVarVar(boolean cond, float x, float y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return cond ? x : y;
}
@@ -172,6 +202,10 @@ public class Main {
/// CHECK-NEXT: fcsel ne
public static float BoolCond_FloatVarCst(boolean cond, float x) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return cond ? x : 1.0f;
}
@@ -184,6 +218,10 @@ public class Main {
/// CHECK-NEXT: fcsel ne
public static float BoolCond_FloatCstVar(boolean cond, float y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return cond ? 1.0f : y;
}
@@ -207,6 +245,10 @@ public class Main {
/// CHECK: cmovle/ng
public static int IntNonmatCond_IntVarVar(int a, int b, int x, int y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return a > b ? x : y;
}
@@ -233,6 +275,10 @@ public class Main {
/// CHECK: cmovle/ng
public static int IntMatCond_IntVarVar(int a, int b, int x, int y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
int result = (a > b ? x : y);
return result + (a > b ? 0 : 1);
}
@@ -258,6 +304,10 @@ public class Main {
/// CHECK-NEXT: cmovle/ng
public static long IntNonmatCond_LongVarVar(int a, int b, long x, long y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return a > b ? x : y;
}
@@ -291,6 +341,10 @@ public class Main {
/// CHECK-NEXT: cmovnz/ne
public static long IntMatCond_LongVarVar(int a, int b, long x, long y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
long result = (a > b ? x : y);
return result + (a > b ? 0L : 1L);
}
@@ -310,6 +364,10 @@ public class Main {
/// CHECK: cmovle/ngq
public static long LongNonmatCond_LongVarVar(long a, long b, long x, long y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return a > b ? x : y;
}
@@ -334,6 +392,10 @@ public class Main {
/// CHECK: cmovnz/neq
public static long LongMatCond_LongVarVar(long a, long b, long x, long y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
long result = (a > b ? x : y);
return result + (a > b ? 0L : 1L);
}
@@ -349,6 +411,10 @@ public class Main {
/// CHECK-NEXT: csel le
public static int FloatLtNonmatCond_IntVarVar(float a, float b, int x, int y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return a > b ? x : y;
}
@@ -363,6 +429,10 @@ public class Main {
/// CHECK-NEXT: csel hs
public static int FloatGtNonmatCond_IntVarVar(float a, float b, int x, int y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return a < b ? x : y;
}
@@ -377,6 +447,10 @@ public class Main {
/// CHECK-NEXT: fcsel hs
public static float FloatGtNonmatCond_FloatVarVar(float a, float b, float x, float y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
return a < b ? x : y;
}
@@ -393,6 +467,10 @@ public class Main {
/// CHECK-NEXT: csel le
public static int FloatLtMatCond_IntVarVar(float a, float b, int x, int y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
int result = (a > b ? x : y);
return result + (a > b ? 0 : 1);
}
@@ -410,6 +488,10 @@ public class Main {
/// CHECK-NEXT: csel hs
public static int FloatGtMatCond_IntVarVar(float a, float b, int x, int y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
int result = (a < b ? x : y);
return result + (a < b ? 0 : 1);
}
@@ -427,10 +509,70 @@ public class Main {
/// CHECK-NEXT: fcsel hs
public static float FloatGtMatCond_FloatVarVar(float a, float b, float x, float y) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
float result = (a < b ? x : y);
return result + (a < b ? 0 : 1);
}
+ /// CHECK-START: int Main.BoolCond_0_m1(boolean) register (after)
+ /// CHECK: <<Cond:z\d+>> ParameterValue
+ /// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
+
+ /// CHECK-START-ARM64: int Main.BoolCond_0_m1(boolean) disassembly (after)
+ /// CHECK: <<Cond:z\d+>> ParameterValue
+ /// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
+ /// CHECK-NEXT: cmp {{w\d+}}, #0x0 (0)
+ /// CHECK-NEXT: csetm {{w\d+}}, eq
+
+ /// CHECK-START-X86_64: int Main.BoolCond_0_m1(boolean) disassembly (after)
+ /// CHECK: <<Cond:z\d+>> ParameterValue
+ /// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
+ /// CHECK: cmovnz/ne
+
+ /// CHECK-START-X86: int Main.BoolCond_0_m1(boolean) disassembly (after)
+ /// CHECK: <<Cond:z\d+>> ParameterValue
+ /// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
+ /// CHECK: cmovnz/ne
+
+ public static int BoolCond_0_m1(boolean cond) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
+ return cond ? 0 : -1;
+ }
+
+ /// CHECK-START: int Main.BoolCond_m1_0(boolean) register (after)
+ /// CHECK: <<Cond:z\d+>> ParameterValue
+ /// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
+
+ /// CHECK-START-ARM64: int Main.BoolCond_m1_0(boolean) disassembly (after)
+ /// CHECK: <<Cond:z\d+>> ParameterValue
+ /// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
+ /// CHECK-NEXT: cmp {{w\d+}}, #0x0 (0)
+ /// CHECK-NEXT: csetm {{w\d+}}, ne
+
+ /// CHECK-START-X86_64: int Main.BoolCond_m1_0(boolean) disassembly (after)
+ /// CHECK: <<Cond:z\d+>> ParameterValue
+ /// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
+ /// CHECK: cmovnz/ne
+
+ /// CHECK-START-X86: int Main.BoolCond_m1_0(boolean) disassembly (after)
+ /// CHECK: <<Cond:z\d+>> ParameterValue
+ /// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
+ /// CHECK: cmovnz/ne
+
+ public static int BoolCond_m1_0(boolean cond) {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
+ return cond ? -1 : 0;
+ }
+
public static void assertEqual(int expected, int actual) {
if (expected != actual) {
throw new Error("Assertion failed: " + expected + " != " + actual);
@@ -499,5 +641,10 @@ public class Main {
assertEqual(8, FloatGtMatCond_FloatVarVar(3, 2, 5, 7));
assertEqual(8, FloatGtMatCond_FloatVarVar(Float.NaN, 2, 5, 7));
assertEqual(8, FloatGtMatCond_FloatVarVar(2, Float.NaN, 5, 7));
+
+ assertEqual(0, BoolCond_0_m1(true));
+ assertEqual(-1, BoolCond_0_m1(false));
+ assertEqual(-1, BoolCond_m1_0(true));
+ assertEqual(0, BoolCond_m1_0(false));
}
}
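The doThrow guards added throughout Main.java are the usual trick for discouraging the inliner ("Try defeating inlining."), so each method keeps its own Checker-verified code. The two new cases, BoolCond_0_m1 and BoolCond_m1_0, are worth a note: one select input is -1, the all-ones pattern, which lets the ARM64 backend materialize the result with a single conditional set-mask (csetm) instead of loading both constants and selecting between them. An illustrative stand-alone snippet of the semantics being checked (not part of the test):

    public class BoolCondMaskDemo {
        // Same shape as BoolCond_m1_0: all ones when the condition holds, zero otherwise.
        static int boolToMask(boolean cond) {
            return cond ? -1 : 0;
        }

        public static void main(String[] args) {
            // -1 in two's complement is ~0, i.e. 0xffffffff, which is why a single
            // conditional "set mask" instruction can produce it directly.
            System.out.println(Integer.toHexString(boolToMask(true)));   // ffffffff
            System.out.println(Integer.toHexString(boolToMask(false)));  // 0
        }
    }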
diff --git a/test/606-erroneous-class/expected.txt b/test/606-erroneous-class/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/606-erroneous-class/expected.txt
diff --git a/test/606-erroneous-class/info.txt b/test/606-erroneous-class/info.txt
new file mode 100644
index 0000000000..42cbb7a153
--- /dev/null
+++ b/test/606-erroneous-class/info.txt
@@ -0,0 +1,3 @@
+Regression test for a DCHECK in the DexCache which prevented erroneous classes
+from being stored into it. The check was bogus because the status of a class
+can be changed to erroneous by another thread.
\ No newline at end of file
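The smali files that follow encode a scenario that cannot be written in plain Java, because ErrClass has to fail verification on purpose. A rough Java-flavoured sketch of the setup, illustrative only, with class and member names mirroring the smali:

    // Dex file 1: ErrClass fails hard verification, ClassB merely references it.
    class ErrClass {
        Object g;
        void foo() { /* in smali: uses a new-instance before <init>, a hard verifier error */ }
    }
    class ClassB {
        static ErrClass g;   // static field typed with the soon-to-be-erroneous class
    }

    // Dex file 2 (multidex): resolving ErrClass through ClassB.g stores it into this
    // dex file's DexCache even though its status is already erroneous; the DCHECK
    // described in info.txt rejected exactly that situation.
    class ClassA {
        static void foo() {
            ClassB.g.foo();
        }
    }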
diff --git a/test/606-erroneous-class/smali-multidex/ClassA.smali b/test/606-erroneous-class/smali-multidex/ClassA.smali
new file mode 100644
index 0000000000..f87fcb2469
--- /dev/null
+++ b/test/606-erroneous-class/smali-multidex/ClassA.smali
@@ -0,0 +1,28 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public final LClassA;
+.super Ljava/lang/Object;
+
+.method public static foo()V
+ .registers 1
+ # Obtain the ErrClass type from the Dex cache of the first Dex file. Note that
+ # because the first Dex file has already been verified, we know the class
+ # is erroneous at this point.
+ sget-object v0, LClassB;->g:LErrClass;
+ # Use the object in a way that will try to store the ErrClass type in
+ # the Dex cache of the second Dex file.
+ invoke-virtual {v0}, LErrClass;->foo()V
+ return-void
+.end method
diff --git a/test/606-erroneous-class/smali/ClassB.smali b/test/606-erroneous-class/smali/ClassB.smali
new file mode 100644
index 0000000000..80754c8315
--- /dev/null
+++ b/test/606-erroneous-class/smali/ClassB.smali
@@ -0,0 +1,18 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LClassB;
+.super Ljava/lang/Object;
+
+.field public static g:LErrClass;
diff --git a/test/606-erroneous-class/smali/ErrClass.smali b/test/606-erroneous-class/smali/ErrClass.smali
new file mode 100644
index 0000000000..740f1e1c82
--- /dev/null
+++ b/test/606-erroneous-class/smali/ErrClass.smali
@@ -0,0 +1,26 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public final LErrClass;
+.super Ljava/lang/Object;
+
+.field public g:Ljava/lang/Object;
+
+.method public foo()V
+ .registers 6
+ # Use a new instance before initializing it => hard verifier error.
+ new-instance v0, LSomeClass;
+ iput-object v0, p0, LErrClass;->g:Ljava/lang/Object;
+ return-void
+.end method
diff --git a/test/606-erroneous-class/src/Main.java b/test/606-erroneous-class/src/Main.java
new file mode 100644
index 0000000000..7dbe5676a7
--- /dev/null
+++ b/test/606-erroneous-class/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ // Nothing to run.
+ }
+}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index feee7c2c3d..859847449b 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -87,7 +87,11 @@ define build-libarttest
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.libarttest.mk
ifeq ($$(art_target_or_host),target)
$(call set-target-local-clang-vars)
- $(call set-target-local-cflags-vars,debug)
+ ifeq ($$(suffix),d)
+ $(call set-target-local-cflags-vars,debug)
+ else
+ $(call set-target-local-cflags-vars,ndebug)
+ endif
LOCAL_SHARED_LIBRARIES += libdl
LOCAL_MULTILIB := both
LOCAL_MODULE_PATH_32 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_32)
diff --git a/test/valgrind-target-suppressions.txt b/test/valgrind-target-suppressions.txt
index 16bb8fd887..7ae6d539f3 100644
--- a/test/valgrind-target-suppressions.txt
+++ b/test/valgrind-target-suppressions.txt
@@ -29,3 +29,24 @@
fun:_ZN7android12SharedBuffer5allocE?
fun:_ZN7android10VectorImpl5_growE??
}
+
+# Clang/LLVM uses memcpy for *x = *y, even though x == y (which is undefined behavior). Ignore.
+# b/29279679, https://llvm.org/bugs/show_bug.cgi?id=11763
+{
+ MemCpySelfAssign
+ Memcheck:Overlap
+ fun:memcpy
+ fun:je_tsd_set
+ fun:je_tsd_fetch
+ fun:je_malloc_tsd_boot0
+}
+
+# setenv is known to leak when overwriting existing mappings. This is triggered by
+# re-initializing ANDROID_DATA. Ignore all setenv leaks.
+{
+ SetenvAndroidDataReinit
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:setenv
+}
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 304c2a9398..d88a4a027e 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -51,7 +51,7 @@ if [[ $mode == "host" ]]; then
make_command+=" ${out_dir}/host/linux-x86/lib64/libjavacoretests.so"
elif [[ $mode == "target" ]]; then
make_command="make $j_arg $showcommands build-art-target-tests $common_targets"
- make_command+=" libjavacrypto libjavacoretests linker toybox toolbox sh"
+ make_command+=" libjavacrypto libjavacoretests libnetd_client linker toybox toolbox sh"
make_command+=" ${out_dir}/host/linux-x86/bin/adb libstdc++ "
make_command+=" ${out_dir}/target/product/${TARGET_PRODUCT}/system/etc/public.libraries.txt"
fi