summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--build/Android.common_build.mk22
-rw-r--r--compiler/dex/quick/arm/int_arm.cc28
-rw-r--r--compiler/image_writer.cc56
-rw-r--r--compiler/image_writer.h4
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc97
-rw-r--r--compiler/optimizing/intrinsics_x86.cc92
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc92
-rw-r--r--compiler/optimizing/register_allocator.cc51
-rw-r--r--compiler/utils/x86/assembler_x86.cc22
-rw-r--r--compiler/utils/x86/assembler_x86.h4
-rw-r--r--compiler/utils/x86/assembler_x86_test.cc25
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc39
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.h7
-rw-r--r--compiler/utils/x86_64/assembler_x86_64_test.cc50
-rw-r--r--dex2oat/dex2oat.cc770
-rw-r--r--dexdump/dexdump.cc51
-rw-r--r--dexdump/dexdump.h1
-rw-r--r--dexdump/dexdump_main.cc6
-rw-r--r--disassembler/disassembler_x86.cc8
-rw-r--r--oatdump/oatdump.cc20
-rw-r--r--patchoat/patchoat.cc95
-rw-r--r--patchoat/patchoat.h7
-rw-r--r--runtime/art_method-inl.h11
-rw-r--r--runtime/art_method.h14
-rw-r--r--runtime/base/bit_utils.h26
-rw-r--r--runtime/class_linker.cc50
-rw-r--r--runtime/elf_file.cc3
-rw-r--r--runtime/elf_file.h2
-rw-r--r--runtime/entrypoints/entrypoint_utils.cc2
-rw-r--r--runtime/fault_handler.cc2
-rw-r--r--runtime/gc/collector/semi_space.cc6
-rw-r--r--runtime/image.cc12
-rw-r--r--runtime/image.h2
-rw-r--r--runtime/leb128.h4
-rw-r--r--runtime/length_prefixed_array.h51
-rw-r--r--runtime/mirror/class-inl.h26
-rw-r--r--runtime/native/java_lang_Class.cc2
-rw-r--r--runtime/thread.cc20
-rw-r--r--runtime/utils.cc372
-rw-r--r--runtime/utils.h3
-rw-r--r--runtime/verifier/method_verifier.cc58
-rw-r--r--test/004-ThreadStress/src/Main.java3
-rw-r--r--test/107-int-math2/src/Main.java4
-rw-r--r--test/526-long-regalloc/expected.txt0
-rw-r--r--test/526-long-regalloc/info.txt2
-rw-r--r--test/526-long-regalloc/src/Main.java72
-rw-r--r--test/800-smali/expected.txt2
-rw-r--r--test/800-smali/smali/b_23201502.smali23
-rw-r--r--test/800-smali/src/Main.java4
49 files changed, 1778 insertions, 545 deletions
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 3a1bd0995b..7550f50a6d 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -133,10 +133,6 @@ ART_TARGET_CLANG_CFLAGS_mips64 :=
ART_TARGET_CLANG_CFLAGS_x86 :=
ART_TARGET_CLANG_CFLAGS_x86_64 :=
-# These are necessary for Clang ARM64 ART builds. TODO: remove.
-ART_TARGET_CLANG_CFLAGS_arm64 += \
- -DNVALGRIND
-
# Warn about thread safety violations with clang.
art_clang_cflags := -Wthread-safety -Wthread-safety-negative
@@ -299,6 +295,22 @@ ART_HOST_CFLAGS += $(art_cflags) -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRE
ART_HOST_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=default
ART_HOST_ASFLAGS += $(art_asflags)
+# Disable -Wpessimizing-move: triggered for art/runtime/base/variant_map.h:261
+# Adding this flag to art_clang_cflags doesn't work because -Wall gets added to
+# ART_HOST_CFLAGS (as a part of art_cflags) after
+# -Wno-pessimizing-move. Instead, add the flag here to both
+# ART_TARGET_CLANG_CFLAGS and ART_HOST_CFLAGS
+ifeq ($(ART_HOST_CLANG),true)
+ART_HOST_CFLAGS += -Wno-pessimizing-move
+endif
+ART_TARGET_CLANG_CFLAGS += -Wno-pessimizing-move
+
+# The latest clang update trips over many of the files in art and never finishes
+# compiling for aarch64 with -O3 (or -O2). Drop back to -O1 while we investigate
+# to stop punishing the build server.
+# Bug: http://b/23256622
+ART_TARGET_CLANG_CFLAGS_arm64 += -O1
+
ifndef LIBART_IMG_TARGET_BASE_ADDRESS
$(error LIBART_IMG_TARGET_BASE_ADDRESS unset)
endif
@@ -363,7 +375,7 @@ define set-target-local-cflags-vars
endif
LOCAL_CLANG_CFLAGS := $(ART_TARGET_CLANG_CFLAGS)
- $(foreach arch,$(ART_SUPPORTED_ARCH),
+ $(foreach arch,$(ART_TARGET_SUPPORTED_ARCH),
LOCAL_CLANG_CFLAGS_$(arch) += $$(ART_TARGET_CLANG_CFLAGS_$(arch)))
# Clear locally used variables.
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index cf0188456d..db76cc6f53 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -593,13 +593,20 @@ bool ArmMir2Lir::GetEasyMultiplyOp(int lit, ArmMir2Lir::EasyMultiplyOp* op) {
return true;
}
+ // At this point lit != 1 (which is a power of two).
+ DCHECK_NE(lit, 1);
if (IsPowerOfTwo(lit - 1)) {
op->op = kOpAdd;
op->shift = CTZ(lit - 1);
return true;
}
- if (IsPowerOfTwo(lit + 1)) {
+ if (lit == -1) {
+ // Can be created as neg.
+ op->op = kOpNeg;
+ op->shift = 0;
+ return true;
+ } else if (IsPowerOfTwo(lit + 1)) {
op->op = kOpRsub;
op->shift = CTZ(lit + 1);
return true;
@@ -612,21 +619,26 @@ bool ArmMir2Lir::GetEasyMultiplyOp(int lit, ArmMir2Lir::EasyMultiplyOp* op) {
// Try to convert *lit to 1~2 RegRegRegShift/RegRegShift forms.
bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) {
+ DCHECK_NE(lit, 1); // A case of "1" should have been folded.
+ DCHECK_NE(lit, -1); // A case of "-1" should have been folded.
if (GetEasyMultiplyOp(lit, &ops[0])) {
ops[1].op = kOpInvalid;
ops[1].shift = 0;
return true;
}
- int lit1 = lit;
- uint32_t shift = CTZ(lit1);
+ DCHECK_NE(lit, 0); // Should be handled above.
+ DCHECK(!IsPowerOfTwo(lit)); // Same.
+
+ int lit1 = lit; // With the DCHECKs, it's clear we don't get "0", "1" or "-1" for
+ uint32_t shift = CTZ(lit1); // lit1.
if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
ops[1].op = kOpLsl;
ops[1].shift = shift;
return true;
}
- lit1 = lit - 1;
+ lit1 = lit - 1; // With the DCHECKs, it's clear we don't get "0" or "1" for lit1.
shift = CTZ(lit1);
if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
ops[1].op = kOpAdd;
@@ -634,7 +646,7 @@ bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) {
return true;
}
- lit1 = lit + 1;
+ lit1 = lit + 1; // With the DCHECKs, it's clear we don't get "0" here.
shift = CTZ(lit1);
if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
ops[1].op = kOpRsub;
@@ -652,7 +664,7 @@ bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) {
// Additional temporary register is required,
// if it need to generate 2 instructions and src/dest overlap.
void ArmMir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops) {
- // tmp1 = ( src << shift1) + [ src | -src | 0 ]
+ // tmp1 = (( src << shift1) + [ src | -src | 0 ] ) | -src
// dest = (tmp1 << shift2) + [ src | -src | 0 ]
RegStorage r_tmp1;
@@ -674,6 +686,9 @@ void ArmMir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, Easy
case kOpRsub:
OpRegRegRegShift(kOpRsub, r_tmp1, r_src, r_src, EncodeShift(kArmLsl, ops[0].shift));
break;
+ case kOpNeg:
+ OpRegReg(kOpNeg, r_tmp1, r_src);
+ break;
default:
DCHECK_EQ(ops[0].op, kOpInvalid);
break;
@@ -691,6 +706,7 @@ void ArmMir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, Easy
case kOpRsub:
OpRegRegRegShift(kOpRsub, r_dest, r_src, r_tmp1, EncodeShift(kArmLsl, ops[1].shift));
break;
+ // No negation allowed in second op.
default:
LOG(FATAL) << "Unexpected opcode passed to GenEasyMultiplyTwoOps";
break;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a03ff755ab..3a3410cf3a 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -244,8 +244,8 @@ void ImageWriter::AssignImageOffset(mirror::Object* object, ImageWriter::BinSlot
DCHECK(object != nullptr);
DCHECK_NE(image_objects_offset_begin_, 0u);
- size_t previous_bin_sizes = bin_slot_previous_sizes_[bin_slot.GetBin()];
- size_t new_offset = image_objects_offset_begin_ + previous_bin_sizes + bin_slot.GetIndex();
+ size_t bin_slot_offset = bin_slot_offsets_[bin_slot.GetBin()];
+ size_t new_offset = bin_slot_offset + bin_slot.GetIndex();
DCHECK_ALIGNED(new_offset, kObjectAlignment);
SetImageOffset(object, new_offset);
@@ -866,8 +866,10 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
}
bool any_dirty = false;
size_t count = 0;
- const size_t method_size = ArtMethod::ObjectSize(target_ptr_size_);
- auto iteration_range = MakeIterationRangeFromLengthPrefixedArray(array, method_size);
+ const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
+ const size_t method_size = ArtMethod::Size(target_ptr_size_);
+ auto iteration_range =
+ MakeIterationRangeFromLengthPrefixedArray(array, method_size, method_alignment);
for (auto& m : iteration_range) {
any_dirty = any_dirty || WillMethodBeDirty(&m);
++count;
@@ -876,7 +878,9 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
kNativeObjectRelocationTypeArtMethodClean;
Bin bin_type = BinTypeForNativeRelocationType(type);
// Forward the entire array at once, but header first.
- const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0, method_size);
+ const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0,
+ method_size,
+ method_alignment);
auto it = native_object_relocations_.find(array);
CHECK(it == native_object_relocations_.end()) << "Method array " << array
<< " already forwarded";
@@ -910,7 +914,7 @@ void ImageWriter::AssignMethodOffset(ArtMethod* method, NativeObjectRelocationTy
<< PrettyMethod(method);
size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(type)];
native_object_relocations_.emplace(method, NativeObjectRelocation { offset, type });
- offset += ArtMethod::ObjectSize(target_ptr_size_);
+ offset += ArtMethod::Size(target_ptr_size_);
}
void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) {
@@ -972,9 +976,10 @@ void ImageWriter::CalculateNewObjectOffsets() {
size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
native_object_relocations_.emplace(&image_method_array_,
NativeObjectRelocation { offset, image_method_type });
+ size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize(
- 0, ArtMethod::ObjectSize(target_ptr_size_));
- CHECK_ALIGNED(array_size, 8u);
+ 0, ArtMethod::Size(target_ptr_size_), method_alignment);
+ CHECK_ALIGNED_PARAM(array_size, method_alignment);
offset += array_size;
for (auto* m : image_methods_) {
CHECK(m != nullptr);
@@ -982,13 +987,21 @@ void ImageWriter::CalculateNewObjectOffsets() {
AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean);
}
- // Calculate cumulative bin slot sizes.
- size_t previous_sizes = 0u;
+ // Calculate bin slot offsets.
+ size_t bin_offset = image_objects_offset_begin_;
for (size_t i = 0; i != kBinSize; ++i) {
- bin_slot_previous_sizes_[i] = previous_sizes;
- previous_sizes += bin_slot_sizes_[i];
+ bin_slot_offsets_[i] = bin_offset;
+ bin_offset += bin_slot_sizes_[i];
+ if (i == kBinArtField) {
+ static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields.");
+ static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4.");
+ DCHECK_ALIGNED(bin_offset, 4u);
+ DCHECK(method_alignment == 4u || method_alignment == 8u);
+ bin_offset = RoundUp(bin_offset, method_alignment);
+ }
}
- DCHECK_EQ(previous_sizes, GetBinSizeSum());
+ // NOTE: There may be additional padding between the bin slots and the intern table.
+
DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
// Transform each object's bin slot into an offset which will be used to do the final copy.
@@ -1002,7 +1015,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
for (auto& pair : native_object_relocations_) {
NativeObjectRelocation& relocation = pair.second;
Bin bin_type = BinTypeForNativeRelocationType(relocation.type);
- relocation.offset += image_objects_offset_begin_ + bin_slot_previous_sizes_[bin_type];
+ relocation.offset += bin_slot_offsets_[bin_type];
}
// Calculate how big the intern table will be after being serialized.
@@ -1029,15 +1042,15 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
// Add field section.
auto* field_section = &sections[ImageHeader::kSectionArtFields];
*field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]);
- CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtField],
- field_section->Offset());
+ CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset());
cur_pos = field_section->End();
+ // Round up to the alignment required by the method section.
+ cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size_));
// Add method section.
auto* methods_section = &sections[ImageHeader::kSectionArtMethods];
*methods_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtMethodClean] +
bin_slot_sizes_[kBinArtMethodDirty]);
- CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtMethodClean],
- methods_section->Offset());
+ CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
cur_pos = methods_section->End();
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
@@ -1135,7 +1148,10 @@ void ImageWriter::CopyAndFixupNativeData() {
}
case kNativeObjectRelocationTypeArtMethodArrayClean:
case kNativeObjectRelocationTypeArtMethodArrayDirty: {
- memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(0));
+ memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(
+ 0,
+ ArtMethod::Size(target_ptr_size_),
+ ArtMethod::Alignment(target_ptr_size_)));
break;
}
}
@@ -1444,7 +1460,7 @@ const uint8_t* ImageWriter::GetQuickEntryPoint(ArtMethod* method) {
}
void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) {
- memcpy(copy, orig, ArtMethod::ObjectSize(target_ptr_size_));
+ memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));
copy->SetDeclaringClass(GetImageAddress(orig->GetDeclaringClassUnchecked()));
copy->SetDexCacheResolvedMethods(GetImageAddress(orig->GetDexCacheResolvedMethods()));
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index f4e10cc6ea..c8aa82dc32 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -54,7 +54,7 @@ class ImageWriter FINAL {
quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0),
quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
- bin_slot_sizes_(), bin_slot_previous_sizes_(), bin_slot_count_(),
+ bin_slot_sizes_(), bin_slot_offsets_(), bin_slot_count_(),
intern_table_bytes_(0u), image_method_array_(ImageHeader::kImageMethodsCount),
dirty_methods_(0u), clean_methods_(0u) {
CHECK_NE(image_begin, 0U);
@@ -359,7 +359,7 @@ class ImageWriter FINAL {
// Bin slot tracking for dirty object packing
size_t bin_slot_sizes_[kBinSize]; // Number of bytes in a bin
- size_t bin_slot_previous_sizes_[kBinSize]; // Number of bytes in previous bins.
+ size_t bin_slot_offsets_[kBinSize];  // Offset in bytes from image begin to the start of each bin.
size_t bin_slot_count_[kBinSize]; // Number of objects in a bin
// Cached size of the intern table for when we allocate memory.
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 56313730dc..a5332ea794 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1055,6 +1055,102 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM64::VisitStringEquals(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Temporary registers to store lengths of strings and for calculations.
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringEquals(HInvoke* invoke) {
+ vixl::MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register str = WRegisterFrom(locations->InAt(0));
+ Register arg = WRegisterFrom(locations->InAt(1));
+ Register out = XRegisterFrom(locations->Out());
+
+ UseScratchRegisterScope scratch_scope(masm);
+ Register temp = scratch_scope.AcquireW();
+ Register temp1 = WRegisterFrom(locations->GetTemp(0));
+ Register temp2 = WRegisterFrom(locations->GetTemp(1));
+
+ vixl::Label loop;
+ vixl::Label end;
+ vixl::Label return_true;
+ vixl::Label return_false;
+
+ // Get offsets of count, value, and class fields within a string object.
+ const int32_t count_offset = mirror::String::CountOffset().Int32Value();
+ const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+ const int32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ // Check if input is null, return false if it is.
+ __ Cbz(arg, &return_false);
+
+ // Reference equality check, return true if same reference.
+ __ Cmp(str, arg);
+ __ B(&return_true, eq);
+
+ // Instanceof check for the argument by comparing class fields.
+ // All string objects must have the same type since String cannot be subclassed.
+ // Receiver must be a string object, so its class field is equal to all strings' class fields.
+ // If the argument is a string object, its class field must be equal to receiver's class field.
+ __ Ldr(temp, MemOperand(str.X(), class_offset));
+ __ Ldr(temp1, MemOperand(arg.X(), class_offset));
+ __ Cmp(temp, temp1);
+ __ B(&return_false, ne);
+
+ // Load lengths of this and argument strings.
+ __ Ldr(temp, MemOperand(str.X(), count_offset));
+ __ Ldr(temp1, MemOperand(arg.X(), count_offset));
+ // Check if lengths are equal, return false if they're not.
+ __ Cmp(temp, temp1);
+ __ B(&return_false, ne);
+ // Store offset of string value in preparation for comparison loop
+ __ Mov(temp1, value_offset);
+ // Return true if both strings are empty.
+ __ Cbz(temp, &return_true);
+
+ // Assertions that must hold in order to compare strings 4 characters at a time.
+ DCHECK_ALIGNED(value_offset, 8);
+ static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded");
+
+ temp1 = temp1.X();
+ temp2 = temp2.X();
+
+ // Loop to compare strings 4 characters at a time starting at the beginning of the string.
+ // Ok to do this because strings are zero-padded to be 8-byte aligned.
+ __ Bind(&loop);
+ __ Ldr(out, MemOperand(str.X(), temp1));
+ __ Ldr(temp2, MemOperand(arg.X(), temp1));
+ __ Add(temp1, temp1, Operand(sizeof(uint64_t)));
+ __ Cmp(out, temp2);
+ __ B(&return_false, ne);
+ __ Sub(temp, temp, Operand(4), SetFlags);
+ __ B(&loop, gt);
+
+ // Return true and exit the function.
+ // If loop does not result in returning false, we return true.
+ __ Bind(&return_true);
+ __ Mov(out, 1);
+ __ B(&end);
+
+ // Return false and exit the function.
+ __ Bind(&return_false);
+ __ Mov(out, 0);
+ __ Bind(&end);
+}
+
static void GenerateVisitStringIndexOf(HInvoke* invoke,
vixl::MacroAssembler* masm,
CodeGeneratorARM64* codegen,
@@ -1229,7 +1325,6 @@ void IntrinsicCodeGeneratorARM64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
-UNIMPLEMENTED_INTRINSIC(StringEquals)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 3c8be27fb9..4471d713e2 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -945,6 +945,97 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86::VisitStringEquals(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+
+ // Request temporary registers, ECX and EDI needed for repe_cmpsl instruction.
+ locations->AddTemp(Location::RegisterLocation(ECX));
+ locations->AddTemp(Location::RegisterLocation(EDI));
+
+ // Set output, ESI needed for repe_cmpsl instruction anyways.
+ locations->SetOut(Location::RegisterLocation(ESI), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringEquals(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register str = locations->InAt(0).AsRegister<Register>();
+ Register arg = locations->InAt(1).AsRegister<Register>();
+ Register ecx = locations->GetTemp(0).AsRegister<Register>();
+ Register edi = locations->GetTemp(1).AsRegister<Register>();
+ Register esi = locations->Out().AsRegister<Register>();
+
+ Label end;
+ Label return_true;
+ Label return_false;
+
+ // Get offsets of count, value, and class fields within a string object.
+ const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
+ const uint32_t class_offset = mirror::Object::ClassOffset().Uint32Value();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ // Check if input is null, return false if it is.
+ __ testl(arg, arg);
+ __ j(kEqual, &return_false);
+
+ // Instanceof check for the argument by comparing class fields.
+ // All string objects must have the same type since String cannot be subclassed.
+ // Receiver must be a string object, so its class field is equal to all strings' class fields.
+ // If the argument is a string object, its class field must be equal to receiver's class field.
+ __ movl(ecx, Address(str, class_offset));
+ __ cmpl(ecx, Address(arg, class_offset));
+ __ j(kNotEqual, &return_false);
+
+ // Reference equality check, return true if same reference.
+ __ cmpl(str, arg);
+ __ j(kEqual, &return_true);
+
+ // Load length of receiver string.
+ __ movl(ecx, Address(str, count_offset));
+ // Check if lengths are equal, return false if they're not.
+ __ cmpl(ecx, Address(arg, count_offset));
+ __ j(kNotEqual, &return_false);
+ // Return true if both strings are empty.
+ __ testl(ecx, ecx);
+ __ j(kEqual, &return_true);
+
+ // Load starting addresses of string values into ESI/EDI as required for repe_cmpsl instruction.
+ __ leal(esi, Address(str, value_offset));
+ __ leal(edi, Address(arg, value_offset));
+
+ // Divide string length by 2 to compare characters 2 at a time and adjust for odd lengths.
+ __ addl(ecx, Immediate(1));
+ __ shrl(ecx, Immediate(1));
+
+ // Assertions that must hold in order to compare strings 2 characters at a time.
+ DCHECK_ALIGNED(value_offset, 4);
+ static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded");
+
+ // Loop to compare strings two characters at a time starting at the beginning of the string.
+ __ repe_cmpsl();
+ // If strings are not equal, zero flag will be cleared.
+ __ j(kNotEqual, &return_false);
+
+ // Return true and exit the function.
+ // If loop does not result in returning false, we return true.
+ __ Bind(&return_true);
+ __ movl(esi, Immediate(1));
+ __ jmp(&end);
+
+ // Return false and exit the function.
+ __ Bind(&return_false);
+ __ xorl(esi, esi);
+ __ Bind(&end);
+}
+
static void CreateStringIndexOfLocations(HInvoke* invoke,
ArenaAllocator* allocator,
bool start_at_zero) {
@@ -1758,7 +1849,6 @@ UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(IntegerNumberOfLeadingZeros)
UNIMPLEMENTED_INTRINSIC(LongNumberOfLeadingZeros)
-UNIMPLEMENTED_INTRINSIC(StringEquals)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index b4926c2afa..9ea68ec07d 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -854,6 +854,97 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86_64::VisitStringEquals(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+
+ // Request temporary registers, RCX and RDI needed for repe_cmpsq instruction.
+ locations->AddTemp(Location::RegisterLocation(RCX));
+ locations->AddTemp(Location::RegisterLocation(RDI));
+
+ // Set output, RSI needed for repe_cmpsq instruction anyways.
+ locations->SetOut(Location::RegisterLocation(RSI), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringEquals(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ CpuRegister str = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister arg = locations->InAt(1).AsRegister<CpuRegister>();
+ CpuRegister rcx = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister rdi = locations->GetTemp(1).AsRegister<CpuRegister>();
+ CpuRegister rsi = locations->Out().AsRegister<CpuRegister>();
+
+ Label end;
+ Label return_true;
+ Label return_false;
+
+ // Get offsets of count, value, and class fields within a string object.
+ const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
+ const uint32_t class_offset = mirror::Object::ClassOffset().Uint32Value();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ // Check if input is null, return false if it is.
+ __ testl(arg, arg);
+ __ j(kEqual, &return_false);
+
+ // Instanceof check for the argument by comparing class fields.
+ // All string objects must have the same type since String cannot be subclassed.
+ // Receiver must be a string object, so its class field is equal to all strings' class fields.
+ // If the argument is a string object, its class field must be equal to receiver's class field.
+ __ movl(rcx, Address(str, class_offset));
+ __ cmpl(rcx, Address(arg, class_offset));
+ __ j(kNotEqual, &return_false);
+
+ // Reference equality check, return true if same reference.
+ __ cmpl(str, arg);
+ __ j(kEqual, &return_true);
+
+ // Load length of receiver string.
+ __ movl(rcx, Address(str, count_offset));
+ // Check if lengths are equal, return false if they're not.
+ __ cmpl(rcx, Address(arg, count_offset));
+ __ j(kNotEqual, &return_false);
+ // Return true if both strings are empty.
+ __ testl(rcx, rcx);
+ __ j(kEqual, &return_true);
+
+ // Load starting addresses of string values into RSI/RDI as required for repe_cmpsq instruction.
+ __ leal(rsi, Address(str, value_offset));
+ __ leal(rdi, Address(arg, value_offset));
+
+ // Divide string length by 4 and adjust for lengths not divisible by 4.
+ __ addl(rcx, Immediate(3));
+ __ shrl(rcx, Immediate(2));
+
+ // Assertions that must hold in order to compare strings 4 characters at a time.
+ DCHECK_ALIGNED(value_offset, 8);
+ static_assert(IsAligned<8>(kObjectAlignment), "String is not zero padded");
+
+ // Loop to compare strings four characters at a time starting at the beginning of the string.
+ __ repe_cmpsq();
+ // If strings are not equal, zero flag will be cleared.
+ __ j(kNotEqual, &return_false);
+
+ // Return true and exit the function.
+ // If loop does not result in returning false, we return true.
+ __ Bind(&return_true);
+ __ movl(rsi, Immediate(1));
+ __ jmp(&end);
+
+ // Return false and exit the function.
+ __ Bind(&return_false);
+ __ xorl(rsi, rsi);
+ __ Bind(&end);
+}
+
static void CreateStringIndexOfLocations(HInvoke* invoke,
ArenaAllocator* allocator,
bool start_at_zero) {
@@ -1607,7 +1698,6 @@ UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(IntegerNumberOfLeadingZeros)
UNIMPLEMENTED_INTRINSIC(LongNumberOfLeadingZeros)
-UNIMPLEMENTED_INTRINSIC(StringEquals)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 60f5ab24da..9f32a9eaf8 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -952,7 +952,16 @@ bool RegisterAllocator::PotentiallyRemoveOtherHalf(LiveInterval* interval,
// we spill `current` instead.
bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
size_t first_register_use = current->FirstRegisterUse();
- if (first_register_use == kNoLifetime) {
+ if (current->HasRegister()) {
+ DCHECK(current->IsHighInterval());
+ // The low interval has allocated the register for the high interval. In
+ // case the low interval had to split both intervals, we may end up in a
+ // situation where the high interval does not have a register use anymore.
+ // We must still proceed in order to split currently active and inactive
+ // uses of the high interval's register, and put the high interval in the
+ // active set.
+ DCHECK(first_register_use != kNoLifetime || (current->GetNextSibling() != nullptr));
+ } else if (first_register_use == kNoLifetime) {
AllocateSpillSlotFor(current);
return false;
}
@@ -1019,7 +1028,7 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
// When allocating the low part, we made sure the high register was available.
DCHECK_LT(first_use, next_use[reg]);
} else if (current->IsLowInterval()) {
- reg = FindAvailableRegisterPair(next_use, first_register_use);
+ reg = FindAvailableRegisterPair(next_use, first_use);
// We should spill if both registers are not available.
should_spill = (first_use >= next_use[reg])
|| (first_use >= next_use[GetHighForLowRegister(reg)]);
@@ -1033,16 +1042,28 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
if (should_spill) {
DCHECK(!current->IsHighInterval());
bool is_allocation_at_use_site = (current->GetStart() >= (first_register_use - 1));
- if (current->IsLowInterval()
- && is_allocation_at_use_site
- && TrySplitNonPairOrUnalignedPairIntervalAt(current->GetStart(),
- first_register_use,
- next_use)) {
+ if (is_allocation_at_use_site) {
+ if (!current->IsLowInterval()) {
+ DumpInterval(std::cerr, current);
+ DumpAllIntervals(std::cerr);
+ // This situation has the potential to infinite loop, so we make it a non-debug CHECK.
+ HInstruction* at = liveness_.GetInstructionFromPosition(first_register_use / 2);
+ CHECK(false) << "There is not enough registers available for "
+ << current->GetParent()->GetDefinedBy()->DebugName() << " "
+ << current->GetParent()->GetDefinedBy()->GetId()
+ << " at " << first_register_use - 1 << " "
+ << (at == nullptr ? "" : at->DebugName());
+ }
+
// If we're allocating a register for `current` because the instruction at
// that position requires it, but we think we should spill, then there are
// non-pair intervals or unaligned pair intervals blocking the allocation.
// We split the first interval found, and put ourselves first in the
// `unhandled_` list.
+ bool success = TrySplitNonPairOrUnalignedPairIntervalAt(current->GetStart(),
+ first_register_use,
+ next_use);
+ DCHECK(success);
LiveInterval* existing = unhandled_->Peek();
DCHECK(existing->IsHighInterval());
DCHECK_EQ(existing->GetLowInterval(), current);
@@ -1052,17 +1073,7 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
// register, we split this interval just before its first register use.
AllocateSpillSlotFor(current);
LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
- if (current == split) {
- DumpInterval(std::cerr, current);
- DumpAllIntervals(std::cerr);
- // This situation has the potential to infinite loop, so we make it a non-debug CHECK.
- HInstruction* at = liveness_.GetInstructionFromPosition(first_register_use / 2);
- CHECK(false) << "There is not enough registers available for "
- << split->GetParent()->GetDefinedBy()->DebugName() << " "
- << split->GetParent()->GetDefinedBy()->GetId()
- << " at " << first_register_use - 1 << " "
- << (at == nullptr ? "" : at->DebugName());
- }
+ DCHECK(current != split);
AddSorted(unhandled_, split);
}
return false;
@@ -1243,7 +1254,9 @@ LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position)
void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) {
if (interval->IsHighInterval()) {
- // The low interval will contain the spill slot.
+ // The low interval already took care of allocating the spill slot.
+ DCHECK(!interval->GetLowInterval()->HasRegister());
+ DCHECK(interval->GetLowInterval()->GetParent()->HasSpillSlot());
return;
}
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 8c2a3ed637..9b3d792903 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -158,6 +158,20 @@ void X86Assembler::bswapl(Register dst) {
EmitUint8(0xC8 + dst);
}
+void X86Assembler::bsrl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xBD);
+ EmitRegisterOperand(dst, src);
+}
+
+void X86Assembler::bsrl(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xBD);
+ EmitOperand(dst, src);
+}
+
void X86Assembler::movzxb(Register dst, ByteRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
@@ -1552,6 +1566,14 @@ void X86Assembler::repe_cmpsl() {
}
+void X86Assembler::rep_movsw() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0xF3);
+ EmitUint8(0xA5);
+}
+
+
X86Assembler* X86Assembler::lock() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF0);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 37c69fee9f..a9227f38b0 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -234,6 +234,8 @@ class X86Assembler FINAL : public Assembler {
void movntl(const Address& dst, Register src);
void bswapl(Register dst);
+ void bsrl(Register dst, Register src);
+ void bsrl(Register dst, const Address& src);
void movzxb(Register dst, ByteRegister src);
void movzxb(Register dst, const Address& src);
@@ -470,6 +472,7 @@ class X86Assembler FINAL : public Assembler {
void repne_scasw();
void repe_cmpsw();
void repe_cmpsl();
+ void rep_movsw();
X86Assembler* lock();
void cmpxchgl(const Address& address, Register reg);
@@ -649,7 +652,6 @@ class X86Assembler FINAL : public Assembler {
void EmitComplex(int rm, const Operand& operand, const Immediate& immediate);
void EmitLabel(Label* label, int instruction_size);
void EmitLabelLink(Label* label);
- void EmitNearLabelLink(Label* label);
void EmitGenericShift(int rm, const Operand& operand, const Immediate& imm);
void EmitGenericShift(int rm, const Operand& operand, Register shifter);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index b664d2342f..731b5f4ac5 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -218,4 +218,29 @@ TEST_F(AssemblerX86Test, Repecmpsl) {
DriverStr(expected, "Repecmpsl");
}
+TEST_F(AssemblerX86Test, RepneScasw) {
+ GetAssembler()->repne_scasw();
+ const char* expected = "repne scasw\n";
+ DriverStr(expected, "repne_scasw");
+}
+
+TEST_F(AssemblerX86Test, RepMovsw) {
+ GetAssembler()->rep_movsw();
+ const char* expected = "rep movsw\n";
+ DriverStr(expected, "rep_movsw");
+}
+
+TEST_F(AssemblerX86Test, Bsrl) {
+ DriverStr(RepeatRR(&x86::X86Assembler::bsrl, "bsrl %{reg2}, %{reg1}"), "bsrl");
+}
+
+TEST_F(AssemblerX86Test, BsrlAddress) {
+ GetAssembler()->bsrl(x86::Register(x86::EDI), x86::Address(
+ x86::Register(x86::EDI), x86::Register(x86::EBX), x86::TIMES_4, 12));
+ const char* expected =
+ "bsrl 0xc(%EDI,%EBX,4), %EDI\n";
+
+ DriverStr(expected, "bsrl_address");
+}
+
} // namespace art
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 22e7b9b120..dc61c992e0 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2006,6 +2006,14 @@ void X86_64Assembler::jmp(Label* label) {
}
+void X86_64Assembler::rep_movsw() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0xF3);
+ EmitUint8(0xA5);
+}
+
+
X86_64Assembler* X86_64Assembler::lock() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF0);
@@ -2084,6 +2092,37 @@ void X86_64Assembler::bswapq(CpuRegister dst) {
EmitUint8(0xC8 + dst.LowBits());
}
+void X86_64Assembler::bsrl(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xBD);
+ EmitRegisterOperand(dst.LowBits(), src.LowBits());
+}
+
+void X86_64Assembler::bsrl(CpuRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xBD);
+ EmitOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::bsrq(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xBD);
+ EmitRegisterOperand(dst.LowBits(), src.LowBits());
+}
+
+void X86_64Assembler::bsrq(CpuRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xBD);
+ EmitOperand(dst.LowBits(), src);
+}
void X86_64Assembler::repne_scasw() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index beca0372ae..da42213048 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -606,10 +606,16 @@ class X86_64Assembler FINAL : public Assembler {
void bswapl(CpuRegister dst);
void bswapq(CpuRegister dst);
+ void bsrl(CpuRegister dst, CpuRegister src);
+ void bsrl(CpuRegister dst, const Address& src);
+ void bsrq(CpuRegister dst, CpuRegister src);
+ void bsrq(CpuRegister dst, const Address& src);
+
void repne_scasw();
void repe_cmpsw();
void repe_cmpsl();
void repe_cmpsq();
+ void rep_movsw();
//
// Macros for High-level operations.
@@ -803,7 +809,6 @@ class X86_64Assembler FINAL : public Assembler {
void EmitComplex(uint8_t rm, const Operand& operand, const Immediate& immediate);
void EmitLabel(Label* label, int instruction_size);
void EmitLabelLink(Label* label);
- void EmitNearLabelLink(Label* label);
void EmitGenericShift(bool wide, int rm, CpuRegister reg, const Immediate& imm);
void EmitGenericShift(bool wide, int rm, CpuRegister operand, CpuRegister shifter);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 296487e798..8673f039ed 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -836,6 +836,18 @@ TEST_F(AssemblerX86_64Test, Xorq) {
DriverStr(expected, "xorq");
}
+TEST_F(AssemblerX86_64Test, RepneScasw) {
+ GetAssembler()->repne_scasw();
+ const char* expected = "repne scasw\n";
+ DriverStr(expected, "repne_scasw");
+}
+
+TEST_F(AssemblerX86_64Test, RepMovsw) {
+ GetAssembler()->rep_movsw();
+ const char* expected = "rep movsw\n";
+ DriverStr(expected, "rep_movsw");
+}
+
TEST_F(AssemblerX86_64Test, Movsxd) {
DriverStr(RepeatRr(&x86_64::X86_64Assembler::movsxd, "movsxd %{reg2}, %{reg1}"), "movsxd");
}
@@ -1129,6 +1141,44 @@ TEST_F(AssemblerX86_64Test, Bswapq) {
DriverStr(RepeatR(&x86_64::X86_64Assembler::bswapq, "bswap %{reg}"), "bswapq");
}
+TEST_F(AssemblerX86_64Test, Bsrl) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::bsrl, "bsrl %{reg2}, %{reg1}"), "bsrl");
+}
+
+TEST_F(AssemblerX86_64Test, BsrlAddress) {
+ GetAssembler()->bsrl(x86_64::CpuRegister(x86_64::R10), x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12));
+ GetAssembler()->bsrl(x86_64::CpuRegister(x86_64::RDI), x86_64::Address(
+ x86_64::CpuRegister(x86_64::R10), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12));
+ GetAssembler()->bsrl(x86_64::CpuRegister(x86_64::RDI), x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12));
+ const char* expected =
+ "bsrl 0xc(%RDI,%RBX,4), %R10d\n"
+ "bsrl 0xc(%R10,%RBX,4), %edi\n"
+ "bsrl 0xc(%RDI,%R9,4), %edi\n";
+
+ DriverStr(expected, "bsrl_address");
+}
+
+TEST_F(AssemblerX86_64Test, Bsrq) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::bsrq, "bsrq %{reg2}, %{reg1}"), "bsrq");
+}
+
+TEST_F(AssemblerX86_64Test, BsrqAddress) {
+ GetAssembler()->bsrq(x86_64::CpuRegister(x86_64::R10), x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12));
+ GetAssembler()->bsrq(x86_64::CpuRegister(x86_64::RDI), x86_64::Address(
+ x86_64::CpuRegister(x86_64::R10), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12));
+ GetAssembler()->bsrq(x86_64::CpuRegister(x86_64::RDI), x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12));
+ const char* expected =
+ "bsrq 0xc(%RDI,%RBX,4), %R10\n"
+ "bsrq 0xc(%R10,%RBX,4), %RDI\n"
+ "bsrq 0xc(%RDI,%R9,4), %RDI\n";
+
+ DriverStr(expected, "bsrq_address");
+}
+
std::string setcc_test_fn(AssemblerX86_64Test::Base* assembler_test,
x86_64::X86_64Assembler* assembler) {
// From Condition
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 976c002812..56536129f2 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -539,26 +539,11 @@ class Dex2Oat FINAL {
}
}
- // Parse the arguments from the command line. In case of an unrecognized option or impossible
- // values/combinations, a usage error will be displayed and exit() is called. Thus, if the method
- // returns, arguments have been successfully parsed.
- void ParseArgs(int argc, char** argv) {
- original_argc = argc;
- original_argv = argv;
-
- InitLogging(argv);
-
- // Skip over argv[0].
- argv++;
- argc--;
-
- if (argc == 0) {
- Usage("No arguments specified");
- }
-
+ struct ParserOptions {
std::string oat_symbols;
std::string boot_image_filename;
const char* compiler_filter_string = nullptr;
+ CompilerOptions::CompilerFilter compiler_filter = CompilerOptions::kDefaultCompilerFilter;
bool compile_pic = false;
int huge_method_threshold = CompilerOptions::kDefaultHugeMethodThreshold;
int large_method_threshold = CompilerOptions::kDefaultLargeMethodThreshold;
@@ -580,277 +565,192 @@ class Dex2Oat FINAL {
bool abort_on_hard_verifier_error = false;
bool requested_specific_compiler = false;
+ bool implicit_null_checks = false;
+ bool implicit_so_checks = false;
+ bool implicit_suspend_checks = false;
+
PassManagerOptions pass_manager_options;
std::string error_msg;
+ };
+
+ template <typename T>
+ static void ParseUintOption(const StringPiece& option,
+ const std::string& option_name,
+ T* out,
+ bool is_long_option = true) {
+ std::string option_prefix = option_name + (is_long_option ? "=" : "");
+ DCHECK(option.starts_with(option_prefix));
+ const char* value_string = option.substr(option_prefix.size()).data();
+ int64_t parsed_integer_value;
+ if (!ParseInt(value_string, &parsed_integer_value)) {
+ Usage("Failed to parse %s '%s' as an integer", option_name.c_str(), value_string);
+ }
+ if (parsed_integer_value < 0) {
+ Usage("%s passed a negative value %d", option_name.c_str(), static_cast<int>(parsed_integer_value));
+ }
+ *out = dchecked_integral_cast<T>(parsed_integer_value);
+ }
- for (int i = 0; i < argc; i++) {
- const StringPiece option(argv[i]);
- const bool log_options = false;
- if (log_options) {
- LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
- }
- if (option.starts_with("--dex-file=")) {
- dex_filenames_.push_back(option.substr(strlen("--dex-file=")).data());
- } else if (option.starts_with("--dex-location=")) {
- dex_locations_.push_back(option.substr(strlen("--dex-location=")).data());
- } else if (option.starts_with("--zip-fd=")) {
- const char* zip_fd_str = option.substr(strlen("--zip-fd=")).data();
- if (!ParseInt(zip_fd_str, &zip_fd_)) {
- Usage("Failed to parse --zip-fd argument '%s' as an integer", zip_fd_str);
- }
- if (zip_fd_ < 0) {
- Usage("--zip-fd passed a negative value %d", zip_fd_);
- }
- } else if (option.starts_with("--zip-location=")) {
- zip_location_ = option.substr(strlen("--zip-location=")).data();
- } else if (option.starts_with("--oat-file=")) {
- oat_filename_ = option.substr(strlen("--oat-file=")).data();
- } else if (option.starts_with("--oat-symbols=")) {
- oat_symbols = option.substr(strlen("--oat-symbols=")).data();
- } else if (option.starts_with("--oat-fd=")) {
- const char* oat_fd_str = option.substr(strlen("--oat-fd=")).data();
- if (!ParseInt(oat_fd_str, &oat_fd_)) {
- Usage("Failed to parse --oat-fd argument '%s' as an integer", oat_fd_str);
- }
- if (oat_fd_ < 0) {
- Usage("--oat-fd passed a negative value %d", oat_fd_);
- }
- } else if (option == "--watch-dog") {
- watch_dog_enabled = true;
- } else if (option == "--no-watch-dog") {
- watch_dog_enabled = false;
- } else if (option.starts_with("-j")) {
- const char* thread_count_str = option.substr(strlen("-j")).data();
- if (!ParseUint(thread_count_str, &thread_count_)) {
- Usage("Failed to parse -j argument '%s' as an integer", thread_count_str);
- }
- } else if (option.starts_with("--oat-location=")) {
- oat_location_ = option.substr(strlen("--oat-location=")).data();
- } else if (option.starts_with("--image=")) {
- image_filename_ = option.substr(strlen("--image=")).data();
- } else if (option.starts_with("--image-classes=")) {
- image_classes_filename_ = option.substr(strlen("--image-classes=")).data();
- } else if (option.starts_with("--image-classes-zip=")) {
- image_classes_zip_filename_ = option.substr(strlen("--image-classes-zip=")).data();
- } else if (option.starts_with("--compiled-classes=")) {
- compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data();
- } else if (option.starts_with("--compiled-classes-zip=")) {
- compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data();
- } else if (option.starts_with("--compiled-methods=")) {
- compiled_methods_filename_ = option.substr(strlen("--compiled-methods=")).data();
- } else if (option.starts_with("--compiled-methods-zip=")) {
- compiled_methods_zip_filename_ = option.substr(strlen("--compiled-methods-zip=")).data();
- } else if (option.starts_with("--base=")) {
- const char* image_base_str = option.substr(strlen("--base=")).data();
- char* end;
- image_base_ = strtoul(image_base_str, &end, 16);
- if (end == image_base_str || *end != '\0') {
- Usage("Failed to parse hexadecimal value for option %s", option.data());
- }
- } else if (option.starts_with("--boot-image=")) {
- boot_image_filename = option.substr(strlen("--boot-image=")).data();
- } else if (option.starts_with("--android-root=")) {
- android_root_ = option.substr(strlen("--android-root=")).data();
- } else if (option.starts_with("--instruction-set=")) {
- StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data();
- // StringPiece is not necessarily zero-terminated, so need to make a copy and ensure it.
- std::unique_ptr<char[]> buf(new char[instruction_set_str.length() + 1]);
- strncpy(buf.get(), instruction_set_str.data(), instruction_set_str.length());
- buf.get()[instruction_set_str.length()] = 0;
- instruction_set_ = GetInstructionSetFromString(buf.get());
- // arm actually means thumb2.
- if (instruction_set_ == InstructionSet::kArm) {
- instruction_set_ = InstructionSet::kThumb2;
- }
- } else if (option.starts_with("--instruction-set-variant=")) {
- StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
- instruction_set_features_.reset(
- InstructionSetFeatures::FromVariant(instruction_set_, str.as_string(), &error_msg));
- if (instruction_set_features_.get() == nullptr) {
- Usage("%s", error_msg.c_str());
- }
- } else if (option.starts_with("--instruction-set-features=")) {
- StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
- if (instruction_set_features_.get() == nullptr) {
- instruction_set_features_.reset(
- InstructionSetFeatures::FromVariant(instruction_set_, "default", &error_msg));
- if (instruction_set_features_.get() == nullptr) {
- Usage("Problem initializing default instruction set features variant: %s",
- error_msg.c_str());
- }
- }
- instruction_set_features_.reset(
- instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg));
- if (instruction_set_features_.get() == nullptr) {
- Usage("Error parsing '%s': %s", option.data(), error_msg.c_str());
- }
- } else if (option.starts_with("--compiler-backend=")) {
- requested_specific_compiler = true;
- StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
- if (backend_str == "Quick") {
- compiler_kind_ = Compiler::kQuick;
- } else if (backend_str == "Optimizing") {
- compiler_kind_ = Compiler::kOptimizing;
- } else {
- Usage("Unknown compiler backend: %s", backend_str.data());
- }
- } else if (option.starts_with("--compiler-filter=")) {
- compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
- } else if (option == "--compile-pic") {
- compile_pic = true;
- } else if (option.starts_with("--huge-method-max=")) {
- const char* threshold = option.substr(strlen("--huge-method-max=")).data();
- if (!ParseInt(threshold, &huge_method_threshold)) {
- Usage("Failed to parse --huge-method-max '%s' as an integer", threshold);
- }
- if (huge_method_threshold < 0) {
- Usage("--huge-method-max passed a negative value %s", huge_method_threshold);
- }
- } else if (option.starts_with("--large-method-max=")) {
- const char* threshold = option.substr(strlen("--large-method-max=")).data();
- if (!ParseInt(threshold, &large_method_threshold)) {
- Usage("Failed to parse --large-method-max '%s' as an integer", threshold);
- }
- if (large_method_threshold < 0) {
- Usage("--large-method-max passed a negative value %s", large_method_threshold);
- }
- } else if (option.starts_with("--small-method-max=")) {
- const char* threshold = option.substr(strlen("--small-method-max=")).data();
- if (!ParseInt(threshold, &small_method_threshold)) {
- Usage("Failed to parse --small-method-max '%s' as an integer", threshold);
- }
- if (small_method_threshold < 0) {
- Usage("--small-method-max passed a negative value %s", small_method_threshold);
- }
- } else if (option.starts_with("--tiny-method-max=")) {
- const char* threshold = option.substr(strlen("--tiny-method-max=")).data();
- if (!ParseInt(threshold, &tiny_method_threshold)) {
- Usage("Failed to parse --tiny-method-max '%s' as an integer", threshold);
- }
- if (tiny_method_threshold < 0) {
- Usage("--tiny-method-max passed a negative value %s", tiny_method_threshold);
- }
- } else if (option.starts_with("--num-dex-methods=")) {
- const char* threshold = option.substr(strlen("--num-dex-methods=")).data();
- if (!ParseInt(threshold, &num_dex_methods_threshold)) {
- Usage("Failed to parse --num-dex-methods '%s' as an integer", threshold);
- }
- if (num_dex_methods_threshold < 0) {
- Usage("--num-dex-methods passed a negative value %s", num_dex_methods_threshold);
- }
- } else if (option.starts_with("--inline-depth-limit=")) {
- const char* limit = option.substr(strlen("--inline-depth-limit=")).data();
- if (!ParseInt(limit, &inline_depth_limit)) {
- Usage("Failed to parse --inline-depth-limit '%s' as an integer", limit);
- }
- if (inline_depth_limit < 0) {
- Usage("--inline-depth-limit passed a negative value %s", inline_depth_limit);
- }
- } else if (option.starts_with("--inline-max-code-units=")) {
- const char* code_units = option.substr(strlen("--inline-max-code-units=")).data();
- if (!ParseInt(code_units, &inline_max_code_units)) {
- Usage("Failed to parse --inline-max-code-units '%s' as an integer", code_units);
- }
- if (inline_max_code_units < 0) {
- Usage("--inline-max-code-units passed a negative value %s", inline_max_code_units);
- }
- } else if (option == "--host") {
- is_host_ = true;
- } else if (option == "--runtime-arg") {
- if (++i >= argc) {
- Usage("Missing required argument for --runtime-arg");
- }
- if (log_options) {
- LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
- }
- runtime_args_.push_back(argv[i]);
- } else if (option == "--dump-timing") {
- dump_timing_ = true;
- } else if (option == "--dump-passes") {
- dump_passes_ = true;
- } else if (option.starts_with("--dump-cfg=")) {
- dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data();
- } else if (option == "--dump-stats") {
- dump_stats_ = true;
- } else if (option == "--generate-debug-info" || option == "-g") {
- generate_debug_info = true;
- } else if (option == "--no-generate-debug-info") {
- generate_debug_info = false;
- } else if (option == "--debuggable") {
- debuggable = true;
- generate_debug_info = true;
- } else if (option.starts_with("--profile-file=")) {
- profile_file_ = option.substr(strlen("--profile-file=")).data();
- VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
- } else if (option == "--no-profile-file") {
- // No profile
- } else if (option.starts_with("--top-k-profile-threshold=")) {
- ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold);
- } else if (option == "--print-pass-names") {
- pass_manager_options.SetPrintPassNames(true);
- } else if (option.starts_with("--disable-passes=")) {
- const std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
- pass_manager_options.SetDisablePassList(disable_passes);
- } else if (option.starts_with("--print-passes=")) {
- const std::string print_passes = option.substr(strlen("--print-passes=")).data();
- pass_manager_options.SetPrintPassList(print_passes);
- } else if (option == "--print-all-passes") {
- pass_manager_options.SetPrintAllPasses();
- } else if (option.starts_with("--dump-cfg-passes=")) {
- const std::string dump_passes_string = option.substr(strlen("--dump-cfg-passes=")).data();
- pass_manager_options.SetDumpPassList(dump_passes_string);
- } else if (option == "--print-pass-options") {
- pass_manager_options.SetPrintPassOptions(true);
- } else if (option.starts_with("--pass-options=")) {
- const std::string options = option.substr(strlen("--pass-options=")).data();
- pass_manager_options.SetOverriddenPassOptions(options);
- } else if (option == "--include-patch-information") {
- include_patch_information = true;
- } else if (option == "--no-include-patch-information") {
- include_patch_information = false;
- } else if (option.starts_with("--verbose-methods=")) {
- // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages
- // conditional on having verbost methods.
- gLogVerbosity.compiler = false;
- Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_);
- } else if (option.starts_with("--dump-init-failures=")) {
- std::string file_name = option.substr(strlen("--dump-init-failures=")).data();
- init_failure_output_.reset(new std::ofstream(file_name));
- if (init_failure_output_.get() == nullptr) {
- LOG(ERROR) << "Failed to allocate ofstream";
- } else if (init_failure_output_->fail()) {
- LOG(ERROR) << "Failed to open " << file_name << " for writing the initialization "
- << "failures.";
- init_failure_output_.reset();
- }
- } else if (option.starts_with("--swap-file=")) {
- swap_file_name_ = option.substr(strlen("--swap-file=")).data();
- } else if (option.starts_with("--swap-fd=")) {
- const char* swap_fd_str = option.substr(strlen("--swap-fd=")).data();
- if (!ParseInt(swap_fd_str, &swap_fd_)) {
- Usage("Failed to parse --swap-fd argument '%s' as an integer", swap_fd_str);
- }
- if (swap_fd_ < 0) {
- Usage("--swap-fd passed a negative value %d", swap_fd_);
- }
- } else if (option == "--abort-on-hard-verifier-error") {
- abort_on_hard_verifier_error = true;
- } else {
- Usage("Unknown argument %s", option.data());
+ void ParseZipFd(const StringPiece& option) {
+ ParseUintOption(option, "--zip-fd", &zip_fd_);
+ }
+
+ void ParseOatFd(const StringPiece& option) {
+ ParseUintOption(option, "--oat-fd", &oat_fd_);
+ }
+
+ void ParseJ(const StringPiece& option) {
+ ParseUintOption(option, "-j", &thread_count_, /* is_long_option */ false);
+ }
+
+ void ParseBase(const StringPiece& option) {
+ DCHECK(option.starts_with("--base="));
+ const char* image_base_str = option.substr(strlen("--base=")).data();
+ char* end;
+ image_base_ = strtoul(image_base_str, &end, 16);
+ if (end == image_base_str || *end != '\0') {
+ Usage("Failed to parse hexadecimal value for option %s", option.data());
+ }
+ }
+
+ void ParseInstructionSet(const StringPiece& option) {
+ DCHECK(option.starts_with("--instruction-set="));
+ StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data();
+ // StringPiece is not necessarily zero-terminated, so need to make a copy and ensure it.
+ std::unique_ptr<char[]> buf(new char[instruction_set_str.length() + 1]);
+ strncpy(buf.get(), instruction_set_str.data(), instruction_set_str.length());
+ buf.get()[instruction_set_str.length()] = 0;
+ instruction_set_ = GetInstructionSetFromString(buf.get());
+ // arm actually means thumb2.
+ if (instruction_set_ == InstructionSet::kArm) {
+ instruction_set_ = InstructionSet::kThumb2;
+ }
+ }
+
+ void ParseInstructionSetVariant(const StringPiece& option, ParserOptions* parser_options) {
+ DCHECK(option.starts_with("--instruction-set-variant="));
+ StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
+ instruction_set_features_.reset(
+ InstructionSetFeatures::FromVariant(
+ instruction_set_, str.as_string(), &parser_options->error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("%s", parser_options->error_msg.c_str());
+ }
+ }
+
+ void ParseInstructionSetFeatures(const StringPiece& option, ParserOptions* parser_options) {
+ DCHECK(option.starts_with("--instruction-set-features="));
+ StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
+ if (instruction_set_features_.get() == nullptr) {
+ instruction_set_features_.reset(
+ InstructionSetFeatures::FromVariant(
+ instruction_set_, "default", &parser_options->error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("Problem initializing default instruction set features variant: %s",
+ parser_options->error_msg.c_str());
}
}
+ instruction_set_features_.reset(
+ instruction_set_features_->AddFeaturesFromString(str.as_string(),
+ &parser_options->error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("Error parsing '%s': %s", option.data(), parser_options->error_msg.c_str());
+ }
+ }
+
+ void ParseCompilerBackend(const StringPiece& option, ParserOptions* parser_options) {
+ DCHECK(option.starts_with("--compiler-backend="));
+ parser_options->requested_specific_compiler = true;
+ StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
+ if (backend_str == "Quick") {
+ compiler_kind_ = Compiler::kQuick;
+ } else if (backend_str == "Optimizing") {
+ compiler_kind_ = Compiler::kOptimizing;
+ } else {
+ Usage("Unknown compiler backend: %s", backend_str.data());
+ }
+ }
+
+ void ParseHugeMethodMax(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--huge-method-max", &parser_options->huge_method_threshold);
+ }
+ void ParseLargeMethodMax(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--large-method-max", &parser_options->large_method_threshold);
+ }
+
+ void ParseSmallMethodMax(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--small-method-max", &parser_options->small_method_threshold);
+ }
+
+ void ParseTinyMethodMax(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--tiny-method-max", &parser_options->tiny_method_threshold);
+ }
+
+ void ParseNumDexMethods(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--num-dex-methods", &parser_options->num_dex_methods_threshold);
+ }
+
+ void ParseInlineDepthLimit(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--inline-depth-limit", &parser_options->inline_depth_limit);
+ }
+
+ void ParseInlineMaxCodeUnits(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--inline-max-code-units", &parser_options->inline_max_code_units);
+ }
+
+ void ParseDisablePasses(const StringPiece& option, ParserOptions* parser_options) {
+ DCHECK(option.starts_with("--disable-passes="));
+ const std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
+ parser_options->pass_manager_options.SetDisablePassList(disable_passes);
+ }
+
+ void ParsePrintPasses(const StringPiece& option, ParserOptions* parser_options) {
+ DCHECK(option.starts_with("--print-passes="));
+ const std::string print_passes = option.substr(strlen("--print-passes=")).data();
+ parser_options->pass_manager_options.SetPrintPassList(print_passes);
+ }
+
+ void ParseDumpCfgPasses(const StringPiece& option, ParserOptions* parser_options) {
+ DCHECK(option.starts_with("--dump-cfg-passes="));
+ const std::string dump_passes_string = option.substr(strlen("--dump-cfg-passes=")).data();
+ parser_options->pass_manager_options.SetDumpPassList(dump_passes_string);
+ }
+
+ void ParsePassOptions(const StringPiece& option, ParserOptions* parser_options) {
+ DCHECK(option.starts_with("--pass-options="));
+ const std::string pass_options = option.substr(strlen("--pass-options=")).data();
+ parser_options->pass_manager_options.SetOverriddenPassOptions(pass_options);
+ }
+
+ void ParseDumpInitFailures(const StringPiece& option) {
+ DCHECK(option.starts_with("--dump-init-failures="));
+ std::string file_name = option.substr(strlen("--dump-init-failures=")).data();
+ init_failure_output_.reset(new std::ofstream(file_name));
+ if (init_failure_output_.get() == nullptr) {
+ LOG(ERROR) << "Failed to allocate ofstream";
+ } else if (init_failure_output_->fail()) {
+ LOG(ERROR) << "Failed to open " << file_name << " for writing the initialization "
+ << "failures.";
+ init_failure_output_.reset();
+ }
+ }
+
+ void ParseSwapFd(const StringPiece& option) {
+ ParseUintOption(option, "--swap-fd", &swap_fd_);
+ }
+
+ void ProcessOptions(ParserOptions* parser_options) {
image_ = (!image_filename_.empty());
- if (!requested_specific_compiler && !kUseOptimizingCompiler) {
+ if (!parser_options->requested_specific_compiler && !kUseOptimizingCompiler) {
// If no specific compiler is requested, the current behavior is
// to compile the boot image with Quick, and the rest with Optimizing.
compiler_kind_ = image_ ? Compiler::kQuick : Compiler::kOptimizing;
}
-
if (compiler_kind_ == Compiler::kOptimizing) {
// Optimizing only supports PIC mode.
- compile_pic = true;
+ parser_options->compile_pic = true;
}
if (oat_filename_.empty() && oat_fd_ == -1) {
@@ -861,11 +761,11 @@ class Dex2Oat FINAL {
Usage("--oat-file should not be used with --oat-fd");
}
- if (!oat_symbols.empty() && oat_fd_ != -1) {
+ if (!parser_options->oat_symbols.empty() && oat_fd_ != -1) {
Usage("--oat-symbols should not be used with --oat-fd");
}
- if (!oat_symbols.empty() && is_host_) {
+ if (!parser_options->oat_symbols.empty() && is_host_) {
Usage("--oat-symbols should not be used with --host");
}
@@ -881,13 +781,13 @@ class Dex2Oat FINAL {
android_root_ += android_root_env_var;
}
- if (!image_ && boot_image_filename.empty()) {
- boot_image_filename += android_root_;
- boot_image_filename += "/framework/boot.art";
+ if (!image_ && parser_options->boot_image_filename.empty()) {
+ parser_options->boot_image_filename += android_root_;
+ parser_options->boot_image_filename += "/framework/boot.art";
}
- if (!boot_image_filename.empty()) {
+ if (!parser_options->boot_image_filename.empty()) {
boot_image_option_ += "-Ximage:";
- boot_image_option_ += boot_image_filename;
+ boot_image_option_ += parser_options->boot_image_filename;
}
if (image_classes_filename_ != nullptr && !image_) {
@@ -945,8 +845,8 @@ class Dex2Oat FINAL {
}
oat_stripped_ = oat_filename_;
- if (!oat_symbols.empty()) {
- oat_unstripped_ = oat_symbols;
+ if (!parser_options->oat_symbols.empty()) {
+ oat_unstripped_ = parser_options->oat_symbols;
} else {
oat_unstripped_ = oat_filename_;
}
@@ -955,10 +855,11 @@ class Dex2Oat FINAL {
// instruction set.
if (instruction_set_features_.get() == nullptr) {
instruction_set_features_.reset(
- InstructionSetFeatures::FromVariant(instruction_set_, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(
+ instruction_set_, "default", &parser_options->error_msg));
if (instruction_set_features_.get() == nullptr) {
Usage("Problem initializing default instruction set features variant: %s",
- error_msg.c_str());
+ parser_options->error_msg.c_str());
}
}
@@ -973,52 +874,50 @@ class Dex2Oat FINAL {
}
}
- if (compiler_filter_string == nullptr) {
- compiler_filter_string = "speed";
- }
-
- CHECK(compiler_filter_string != nullptr);
- CompilerOptions::CompilerFilter compiler_filter = CompilerOptions::kDefaultCompilerFilter;
- if (strcmp(compiler_filter_string, "verify-none") == 0) {
- compiler_filter = CompilerOptions::kVerifyNone;
- } else if (strcmp(compiler_filter_string, "interpret-only") == 0) {
- compiler_filter = CompilerOptions::kInterpretOnly;
- } else if (strcmp(compiler_filter_string, "verify-at-runtime") == 0) {
- compiler_filter = CompilerOptions::kVerifyAtRuntime;
- } else if (strcmp(compiler_filter_string, "space") == 0) {
- compiler_filter = CompilerOptions::kSpace;
- } else if (strcmp(compiler_filter_string, "balanced") == 0) {
- compiler_filter = CompilerOptions::kBalanced;
- } else if (strcmp(compiler_filter_string, "speed") == 0) {
- compiler_filter = CompilerOptions::kSpeed;
- } else if (strcmp(compiler_filter_string, "everything") == 0) {
- compiler_filter = CompilerOptions::kEverything;
- } else if (strcmp(compiler_filter_string, "time") == 0) {
- compiler_filter = CompilerOptions::kTime;
+ if (parser_options->compiler_filter_string == nullptr) {
+ parser_options->compiler_filter_string = "speed";
+ }
+
+ CHECK(parser_options->compiler_filter_string != nullptr);
+ if (strcmp(parser_options->compiler_filter_string, "verify-none") == 0) {
+ parser_options->compiler_filter = CompilerOptions::kVerifyNone;
+ } else if (strcmp(parser_options->compiler_filter_string, "interpret-only") == 0) {
+ parser_options->compiler_filter = CompilerOptions::kInterpretOnly;
+ } else if (strcmp(parser_options->compiler_filter_string, "verify-at-runtime") == 0) {
+ parser_options->compiler_filter = CompilerOptions::kVerifyAtRuntime;
+ } else if (strcmp(parser_options->compiler_filter_string, "space") == 0) {
+ parser_options->compiler_filter = CompilerOptions::kSpace;
+ } else if (strcmp(parser_options->compiler_filter_string, "balanced") == 0) {
+ parser_options->compiler_filter = CompilerOptions::kBalanced;
+ } else if (strcmp(parser_options->compiler_filter_string, "speed") == 0) {
+ parser_options->compiler_filter = CompilerOptions::kSpeed;
+ } else if (strcmp(parser_options->compiler_filter_string, "everything") == 0) {
+ parser_options->compiler_filter = CompilerOptions::kEverything;
+ } else if (strcmp(parser_options->compiler_filter_string, "time") == 0) {
+ parser_options->compiler_filter = CompilerOptions::kTime;
} else {
- Usage("Unknown --compiler-filter value %s", compiler_filter_string);
+ Usage("Unknown --compiler-filter value %s", parser_options->compiler_filter_string);
}
    // If they are not set, use default values for inlining settings.
// TODO: We should rethink the compiler filter. We mostly save
// time here, which is orthogonal to space.
- if (inline_depth_limit == kUnsetInlineDepthLimit) {
- inline_depth_limit = (compiler_filter == CompilerOptions::kSpace)
+ if (parser_options->inline_depth_limit == ParserOptions::kUnsetInlineDepthLimit) {
+ parser_options->inline_depth_limit =
+ (parser_options->compiler_filter == CompilerOptions::kSpace)
// Implementation of the space filter: limit inlining depth.
? CompilerOptions::kSpaceFilterInlineDepthLimit
: CompilerOptions::kDefaultInlineDepthLimit;
}
- if (inline_max_code_units == kUnsetInlineMaxCodeUnits) {
- inline_max_code_units = (compiler_filter == CompilerOptions::kSpace)
+ if (parser_options->inline_max_code_units == ParserOptions::kUnsetInlineMaxCodeUnits) {
+ parser_options->inline_max_code_units =
+ (parser_options->compiler_filter == CompilerOptions::kSpace)
// Implementation of the space filter: limit inlining max code units.
? CompilerOptions::kSpaceFilterInlineMaxCodeUnits
: CompilerOptions::kDefaultInlineMaxCodeUnits;
}
// Checks are all explicit until we know the architecture.
- bool implicit_null_checks = false;
- bool implicit_so_checks = false;
- bool implicit_suspend_checks = false;
// Set the compilation target's implicit checks options.
switch (instruction_set_) {
case kArm:
@@ -1028,8 +927,8 @@ class Dex2Oat FINAL {
case kX86_64:
case kMips:
case kMips64:
- implicit_null_checks = true;
- implicit_so_checks = true;
+ parser_options->implicit_null_checks = true;
+ parser_options->implicit_so_checks = true;
break;
default:
@@ -1037,55 +936,224 @@ class Dex2Oat FINAL {
break;
}
- compiler_options_.reset(new CompilerOptions(compiler_filter,
- huge_method_threshold,
- large_method_threshold,
- small_method_threshold,
- tiny_method_threshold,
- num_dex_methods_threshold,
- inline_depth_limit,
- inline_max_code_units,
- include_patch_information,
- top_k_profile_threshold,
- debuggable,
- generate_debug_info,
- implicit_null_checks,
- implicit_so_checks,
- implicit_suspend_checks,
- compile_pic,
+ compiler_options_.reset(new CompilerOptions(parser_options->compiler_filter,
+ parser_options->huge_method_threshold,
+ parser_options->large_method_threshold,
+ parser_options->small_method_threshold,
+ parser_options->tiny_method_threshold,
+ parser_options->num_dex_methods_threshold,
+ parser_options->inline_depth_limit,
+ parser_options->inline_max_code_units,
+ parser_options->include_patch_information,
+ parser_options->top_k_profile_threshold,
+ parser_options->debuggable,
+ parser_options->generate_debug_info,
+ parser_options->implicit_null_checks,
+ parser_options->implicit_so_checks,
+ parser_options->implicit_suspend_checks,
+ parser_options->compile_pic,
verbose_methods_.empty() ?
nullptr :
&verbose_methods_,
- new PassManagerOptions(pass_manager_options),
+ new PassManagerOptions(
+ parser_options->pass_manager_options),
init_failure_output_.get(),
- abort_on_hard_verifier_error));
+ parser_options->abort_on_hard_verifier_error));
// Done with usage checks, enable watchdog if requested
- if (watch_dog_enabled) {
+ if (parser_options->watch_dog_enabled) {
watchdog_.reset(new WatchDog(true));
}
// Fill some values into the key-value store for the oat header.
key_value_store_.reset(new SafeMap<std::string, std::string>());
+ }
- // Insert some compiler things.
- {
- std::ostringstream oss;
- for (int i = 0; i < argc; ++i) {
- if (i > 0) {
- oss << ' ';
+ void InsertCompileOptions(int argc, char** argv, ParserOptions* parser_options) {
+ std::ostringstream oss;
+ for (int i = 0; i < argc; ++i) {
+ if (i > 0) {
+ oss << ' ';
+ }
+ oss << argv[i];
+ }
+ key_value_store_->Put(OatHeader::kDex2OatCmdLineKey, oss.str());
+ oss.str(""); // Reset.
+ oss << kRuntimeISA;
+ key_value_store_->Put(OatHeader::kDex2OatHostKey, oss.str());
+ key_value_store_->Put(
+ OatHeader::kPicKey,
+ parser_options->compile_pic ? OatHeader::kTrueValue : OatHeader::kFalseValue);
+ key_value_store_->Put(
+ OatHeader::kDebuggableKey,
+ parser_options->debuggable ? OatHeader::kTrueValue : OatHeader::kFalseValue);
+ }
+
+ // Parse the arguments from the command line. In case of an unrecognized option or impossible
+ // values/combinations, a usage error will be displayed and exit() is called. Thus, if the method
+ // returns, arguments have been successfully parsed.
+ void ParseArgs(int argc, char** argv) {
+ original_argc = argc;
+ original_argv = argv;
+
+ InitLogging(argv);
+
+ // Skip over argv[0].
+ argv++;
+ argc--;
+
+ if (argc == 0) {
+ Usage("No arguments specified");
+ }
+
+ std::unique_ptr<ParserOptions> parser_options(new ParserOptions());
+
+ for (int i = 0; i < argc; i++) {
+ const StringPiece option(argv[i]);
+ const bool log_options = false;
+ if (log_options) {
+ LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
+ }
+ if (option.starts_with("--dex-file=")) {
+ dex_filenames_.push_back(option.substr(strlen("--dex-file=")).data());
+ } else if (option.starts_with("--dex-location=")) {
+ dex_locations_.push_back(option.substr(strlen("--dex-location=")).data());
+ } else if (option.starts_with("--zip-fd=")) {
+ ParseZipFd(option);
+ } else if (option.starts_with("--zip-location=")) {
+ zip_location_ = option.substr(strlen("--zip-location=")).data();
+ } else if (option.starts_with("--oat-file=")) {
+ oat_filename_ = option.substr(strlen("--oat-file=")).data();
+ } else if (option.starts_with("--oat-symbols=")) {
+ parser_options->oat_symbols = option.substr(strlen("--oat-symbols=")).data();
+ } else if (option.starts_with("--oat-fd=")) {
+ ParseOatFd(option);
+ } else if (option == "--watch-dog") {
+ parser_options->watch_dog_enabled = true;
+ } else if (option == "--no-watch-dog") {
+ parser_options->watch_dog_enabled = false;
+ } else if (option.starts_with("-j")) {
+ ParseJ(option);
+ } else if (option.starts_with("--oat-location=")) {
+ oat_location_ = option.substr(strlen("--oat-location=")).data();
+ } else if (option.starts_with("--image=")) {
+ image_filename_ = option.substr(strlen("--image=")).data();
+ } else if (option.starts_with("--image-classes=")) {
+ image_classes_filename_ = option.substr(strlen("--image-classes=")).data();
+ } else if (option.starts_with("--image-classes-zip=")) {
+ image_classes_zip_filename_ = option.substr(strlen("--image-classes-zip=")).data();
+ } else if (option.starts_with("--compiled-classes=")) {
+ compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data();
+ } else if (option.starts_with("--compiled-classes-zip=")) {
+ compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data();
+ } else if (option.starts_with("--compiled-methods=")) {
+ compiled_methods_filename_ = option.substr(strlen("--compiled-methods=")).data();
+ } else if (option.starts_with("--compiled-methods-zip=")) {
+ compiled_methods_zip_filename_ = option.substr(strlen("--compiled-methods-zip=")).data();
+ } else if (option.starts_with("--base=")) {
+ ParseBase(option);
+ } else if (option.starts_with("--boot-image=")) {
+ parser_options->boot_image_filename = option.substr(strlen("--boot-image=")).data();
+ } else if (option.starts_with("--android-root=")) {
+ android_root_ = option.substr(strlen("--android-root=")).data();
+ } else if (option.starts_with("--instruction-set=")) {
+ ParseInstructionSet(option);
+ } else if (option.starts_with("--instruction-set-variant=")) {
+ ParseInstructionSetVariant(option, parser_options.get());
+ } else if (option.starts_with("--instruction-set-features=")) {
+ ParseInstructionSetFeatures(option, parser_options.get());
+ } else if (option.starts_with("--compiler-backend=")) {
+ ParseCompilerBackend(option, parser_options.get());
+ } else if (option.starts_with("--compiler-filter=")) {
+ parser_options->compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
+ } else if (option == "--compile-pic") {
+ parser_options->compile_pic = true;
+ } else if (option.starts_with("--huge-method-max=")) {
+ ParseHugeMethodMax(option, parser_options.get());
+ } else if (option.starts_with("--large-method-max=")) {
+ ParseLargeMethodMax(option, parser_options.get());
+ } else if (option.starts_with("--small-method-max=")) {
+ ParseSmallMethodMax(option, parser_options.get());
+ } else if (option.starts_with("--tiny-method-max=")) {
+ ParseTinyMethodMax(option, parser_options.get());
+ } else if (option.starts_with("--num-dex-methods=")) {
+ ParseNumDexMethods(option, parser_options.get());
+ } else if (option.starts_with("--inline-depth-limit=")) {
+ ParseInlineDepthLimit(option, parser_options.get());
+ } else if (option.starts_with("--inline-max-code-units=")) {
+ ParseInlineMaxCodeUnits(option, parser_options.get());
+ } else if (option == "--host") {
+ is_host_ = true;
+ } else if (option == "--runtime-arg") {
+ if (++i >= argc) {
+ Usage("Missing required argument for --runtime-arg");
+ }
+ if (log_options) {
+ LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
}
- oss << argv[i];
+ runtime_args_.push_back(argv[i]);
+ } else if (option == "--dump-timing") {
+ dump_timing_ = true;
+ } else if (option == "--dump-passes") {
+ dump_passes_ = true;
+ } else if (option.starts_with("--dump-cfg=")) {
+ dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data();
+ } else if (option == "--dump-stats") {
+ dump_stats_ = true;
+ } else if (option == "--generate-debug-info" || option == "-g") {
+ parser_options->generate_debug_info = true;
+ } else if (option == "--no-generate-debug-info") {
+ parser_options->generate_debug_info = false;
+ } else if (option == "--debuggable") {
+ parser_options->debuggable = true;
+ parser_options->generate_debug_info = true;
+ } else if (option.starts_with("--profile-file=")) {
+ profile_file_ = option.substr(strlen("--profile-file=")).data();
+ VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
+ } else if (option == "--no-profile-file") {
+ // No profile
+ } else if (option.starts_with("--top-k-profile-threshold=")) {
+ ParseDouble(option.data(), '=', 0.0, 100.0, &parser_options->top_k_profile_threshold);
+ } else if (option == "--print-pass-names") {
+ parser_options->pass_manager_options.SetPrintPassNames(true);
+ } else if (option.starts_with("--disable-passes=")) {
+ ParseDisablePasses(option, parser_options.get());
+ } else if (option.starts_with("--print-passes=")) {
+ ParsePrintPasses(option, parser_options.get());
+ } else if (option == "--print-all-passes") {
+ parser_options->pass_manager_options.SetPrintAllPasses();
+ } else if (option.starts_with("--dump-cfg-passes=")) {
+ ParseDumpCfgPasses(option, parser_options.get());
+ } else if (option == "--print-pass-options") {
+ parser_options->pass_manager_options.SetPrintPassOptions(true);
+ } else if (option.starts_with("--pass-options=")) {
+ ParsePassOptions(option, parser_options.get());
+ } else if (option == "--include-patch-information") {
+ parser_options->include_patch_information = true;
+ } else if (option == "--no-include-patch-information") {
+ parser_options->include_patch_information = false;
+ } else if (option.starts_with("--verbose-methods=")) {
+ // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages
+ // conditional on having verbose methods.
+ gLogVerbosity.compiler = false;
+ Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_);
+ } else if (option.starts_with("--dump-init-failures=")) {
+ ParseDumpInitFailures(option);
+ } else if (option.starts_with("--swap-file=")) {
+ swap_file_name_ = option.substr(strlen("--swap-file=")).data();
+ } else if (option.starts_with("--swap-fd=")) {
+ ParseSwapFd(option);
+ } else if (option == "--abort-on-hard-verifier-error") {
+ parser_options->abort_on_hard_verifier_error = true;
+ } else {
+ Usage("Unknown argument %s", option.data());
}
- key_value_store_->Put(OatHeader::kDex2OatCmdLineKey, oss.str());
- oss.str(""); // Reset.
- oss << kRuntimeISA;
- key_value_store_->Put(OatHeader::kDex2OatHostKey, oss.str());
- key_value_store_->Put(OatHeader::kPicKey,
- compile_pic ? OatHeader::kTrueValue : OatHeader::kFalseValue);
- key_value_store_->Put(OatHeader::kDebuggableKey,
- debuggable ? OatHeader::kTrueValue : OatHeader::kFalseValue);
}
+
+ ProcessOptions(parser_options.get());
+
+ // Insert some compiler things.
+ InsertCompileOptions(argc, argv, parser_options.get());
}
// Check whether the oat output file is writable, and open it for later. Also open a swap file,
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 84c465fb8a..282db5de83 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -38,11 +38,14 @@
#include <inttypes.h>
#include <stdio.h>
+#include <iostream>
#include <memory>
+#include <sstream>
#include <vector>
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
+#include "utils.h"
namespace art {
@@ -1046,6 +1049,49 @@ static void dumpIField(const DexFile* pDexFile, u4 idx, u4 flags, int i) {
}
/*
+ * Dumping a CFG. Note that this will do duplicate work. utils.h doesn't expose the code-item
+ * version, so the DumpMethodCFG code will have to iterate again to find it. But dexdump is a
+ * tool, so this is not performance-critical.
+ */
+
+static void dumpCfg(const DexFile* dex_file,
+ uint32_t dex_method_idx,
+ const DexFile::CodeItem* code_item) {
+ if (code_item != nullptr) {
+ std::ostringstream oss;
+ DumpMethodCFG(dex_file, dex_method_idx, oss);
+ fprintf(gOutFile, "%s", oss.str().c_str());
+ }
+}
+
+static void dumpCfg(const DexFile* dex_file, int idx) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(idx);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
+ if (class_data == nullptr) { // empty class such as a marker interface?
+ return;
+ }
+ ClassDataItemIterator it(*dex_file, class_data);
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ it.Next();
+ }
+ while (it.HasNextDirectMethod()) {
+ dumpCfg(dex_file,
+ it.GetMemberIndex(),
+ it.GetMethodCodeItem());
+ it.Next();
+ }
+ while (it.HasNextVirtualMethod()) {
+ dumpCfg(dex_file,
+ it.GetMemberIndex(),
+ it.GetMethodCodeItem());
+ it.Next();
+ }
+}
+
+/*
* Dumps the class.
*
* Note "idx" is a DexClassDef index, not a DexTypeId index.
@@ -1061,6 +1107,11 @@ static void dumpClass(const DexFile* pDexFile, int idx, char** pLastPackage) {
return;
}
+ if (gOptions.cfg) {
+ dumpCfg(pDexFile, idx);
+ return;
+ }
+
// For the XML output, show the package name. Ideally we'd gather
// up the classes, sort them, and dump them alphabetically so the
// package name wouldn't jump around, but that's not a great plan
diff --git a/dexdump/dexdump.h b/dexdump/dexdump.h
index f2cd16a6d7..50280a9f28 100644
--- a/dexdump/dexdump.h
+++ b/dexdump/dexdump.h
@@ -45,6 +45,7 @@ struct Options {
bool showFileHeaders;
bool showSectionHeaders;
bool verbose;
+ bool cfg;
OutputFormat outputFormat;
const char* outputFileName;
const char* tempFileName;
diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc
index 9be0922877..2466f33d1e 100644
--- a/dexdump/dexdump_main.cc
+++ b/dexdump/dexdump_main.cc
@@ -46,6 +46,7 @@ static void usage(void) {
fprintf(stderr, " -c : verify checksum and exit\n");
fprintf(stderr, " -d : disassemble code sections\n");
fprintf(stderr, " -f : display summary information from file header\n");
+ fprintf(stderr, " -g : dump CFG for dex\n");
fprintf(stderr, " -h : display file header details\n");
fprintf(stderr, " -i : ignore checksum failures\n");
fprintf(stderr, " -l : output layout, either 'plain' or 'xml'\n");
@@ -68,7 +69,7 @@ int dexdumpDriver(int argc, char** argv) {
// Parse all arguments.
while (1) {
- const int ic = getopt(argc, argv, "cdfhil:t:o:");
+ const int ic = getopt(argc, argv, "cdfghil:t:o:");
if (ic < 0) {
break; // done
}
@@ -82,6 +83,9 @@ int dexdumpDriver(int argc, char** argv) {
case 'f': // dump outer file header
gOptions.showFileHeaders = true;
break;
+ case 'g': // dump cfg
+ gOptions.cfg = true;
+ break;
case 'h': // dump section headers, i.e. all meta-data
gOptions.showSectionHeaders = true;
break;
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 44787a7ac8..d4574f4f0a 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -928,6 +928,11 @@ DISASSEMBLER_ENTRY(cmp,
has_modrm = true;
load = true;
break;
+ case 0xBD:
+ opcode1 = "bsr";
+ has_modrm = true;
+ load = true;
+ break;
case 0xBE:
opcode1 = "movsxb";
has_modrm = true;
@@ -1117,6 +1122,9 @@ DISASSEMBLER_ENTRY(cmp,
opcode1 = opcode_tmp.c_str();
}
break;
+ case 0xA5:
+ opcode1 = (prefix[2] == 0x66 ? "movsw" : "movsl");
+ break;
case 0xA7:
opcode1 = (prefix[2] == 0x66 ? "cmpsw" : "cmpsl");
break;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 5e29ca75f9..44b78ff0a3 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1618,9 +1618,7 @@ class ImageDumper {
const size_t pointer_size =
InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet());
DumpArtMethodVisitor visitor(this);
- methods_section.VisitPackedArtMethods(&visitor,
- image_space->Begin(),
- ArtMethod::ObjectSize(pointer_size));
+ methods_section.VisitPackedArtMethods(&visitor, image_space->Begin(), pointer_size);
}
}
// Dump the large objects separately.
@@ -1642,13 +1640,19 @@ class ImageDumper {
const auto& intern_section = image_header_.GetImageSection(
ImageHeader::kSectionInternedStrings);
stats_.header_bytes = header_bytes;
- size_t alignment_bytes = RoundUp(header_bytes, kObjectAlignment) - header_bytes;
- stats_.alignment_bytes += alignment_bytes;
+ stats_.alignment_bytes += RoundUp(header_bytes, kObjectAlignment) - header_bytes;
+ // Add padding between the field and method section.
+ // (Field section is 4-byte aligned, method section is 8-byte aligned on 64-bit targets.)
+ stats_.alignment_bytes +=
+ method_section.Offset() - (field_section.Offset() + field_section.Size());
+ // Add padding between the method section and the intern table.
+ // (Method section is 4-byte aligned on 32-bit targets, intern table is 8-byte aligned.)
+ stats_.alignment_bytes +=
+ intern_section.Offset() - (method_section.Offset() + method_section.Size());
stats_.alignment_bytes += bitmap_section.Offset() - image_header_.GetImageSize();
stats_.bitmap_bytes += bitmap_section.Size();
stats_.art_field_bytes += field_section.Size();
- // RoundUp to 8 bytes to match the intern table alignment expectation.
- stats_.art_method_bytes += RoundUp(method_section.Size(), sizeof(uint64_t));
+ stats_.art_method_bytes += method_section.Size();
stats_.interned_strings_bytes += intern_section.Size();
stats_.Dump(os, indent_os);
os << "\n";
@@ -1966,7 +1970,7 @@ class ImageDumper {
method_access_flags);
size_t total_size = dex_instruction_bytes + gc_map_bytes + pc_mapping_table_bytes +
- vmap_table_bytes + quick_oat_code_size + ArtMethod::ObjectSize(image_pointer_size);
+ vmap_table_bytes + quick_oat_code_size + ArtMethod::Size(image_pointer_size);
double expansion =
static_cast<double>(quick_oat_code_size) / static_cast<double>(dex_instruction_bytes);
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 283eea9e2a..d60103597a 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -92,6 +92,32 @@ static bool LocationToFilename(const std::string& location, InstructionSet isa,
}
}
+static const OatHeader* GetOatHeader(const ElfFile* elf_file) {
+ uint64_t off = 0;
+ if (!elf_file->GetSectionOffsetAndSize(".rodata", &off, nullptr)) {
+ return nullptr;
+ }
+
+ OatHeader* oat_header = reinterpret_cast<OatHeader*>(elf_file->Begin() + off);
+ return oat_header;
+}
+
+// This function takes an elf file and reads the current patch delta value
+// encoded in its oat header value
+static bool ReadOatPatchDelta(const ElfFile* elf_file, off_t* delta, std::string* error_msg) {
+ const OatHeader* oat_header = GetOatHeader(elf_file);
+ if (oat_header == nullptr) {
+ *error_msg = "Unable to get oat header from elf file.";
+ return false;
+ }
+ if (!oat_header->IsValid()) {
+ *error_msg = "Elf file has an invalid oat header";
+ return false;
+ }
+ *delta = oat_header->GetImagePatchDelta();
+ return true;
+}
+
bool PatchOat::Patch(const std::string& image_location, off_t delta,
File* output_image, InstructionSet isa,
TimingLogger* timings) {
@@ -454,9 +480,8 @@ class PatchOatArtMethodVisitor : public ArtMethodVisitor {
void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
const auto& section = image_header->GetMethodsSection();
const size_t pointer_size = InstructionSetPointerSize(isa_);
- const size_t method_size = ArtMethod::ObjectSize(pointer_size);
PatchOatArtMethodVisitor visitor(this);
- section.VisitPackedArtMethods(&visitor, heap_->Begin(), method_size);
+ section.VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
}
class FixupRootVisitor : public RootVisitor {
@@ -585,25 +610,6 @@ void PatchOat::PatchVisitor::operator() (mirror::Class* cls ATTRIBUTE_UNUSED,
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
-const OatHeader* PatchOat::GetOatHeader(const ElfFile* elf_file) {
- if (elf_file->Is64Bit()) {
- return GetOatHeader<ElfFileImpl64>(elf_file->GetImpl64());
- } else {
- return GetOatHeader<ElfFileImpl32>(elf_file->GetImpl32());
- }
-}
-
-template <typename ElfFileImpl>
-const OatHeader* PatchOat::GetOatHeader(const ElfFileImpl* elf_file) {
- auto rodata_sec = elf_file->FindSectionByName(".rodata");
- if (rodata_sec == nullptr) {
- return nullptr;
- }
-
- OatHeader* oat_header = reinterpret_cast<OatHeader*>(elf_file->Begin() + rodata_sec->sh_offset);
- return oat_header;
-}
-
// Called by BitmapCallback
void PatchOat::VisitObject(mirror::Object* object) {
mirror::Object* copy = RelocatedCopyOf(object);
@@ -871,11 +877,11 @@ NO_RETURN static void Usage(const char *fmt, ...) {
UsageError(" --base-offset-delta=<delta>: Specify the amount to change the old base-offset by.");
UsageError(" This value may be negative.");
UsageError("");
- UsageError(" --patched-image-file=<file.art>: Use the same patch delta as was used to patch");
- UsageError(" the given image file.");
+ UsageError(" --patched-image-file=<file.art>: Relocate the oat file to be the same as the");
+ UsageError(" given image file.");
UsageError("");
- UsageError(" --patched-image-location=<file.art>: Use the same patch delta as was used to");
- UsageError(" patch the given image location. If used one must also specify the");
+ UsageError(" --patched-image-location=<file.art>: Relocate the oat file to be the same as the");
+ UsageError(" image at the given location. If used one must also specify the");
UsageError(" --instruction-set flag. It will search for this image in the same way that");
UsageError(" is done when loading one.");
UsageError("");
@@ -991,6 +997,7 @@ static int patchoat(int argc, char **argv) {
bool orig_base_offset_set = false;
off_t base_delta = 0;
bool base_delta_set = false;
+ bool match_delta = false;
std::string patched_image_filename;
std::string patched_image_location;
bool dump_timings = kIsDebugBuild;
@@ -1189,7 +1196,11 @@ static int patchoat(int argc, char **argv) {
base_delta_set = true;
base_delta = base_offset - orig_base_offset;
} else if (!patched_image_filename.empty()) {
+ if (have_image_files) {
+ Usage("--patched-image-location should not be used when patching other images");
+ }
base_delta_set = true;
+ match_delta = true;
std::string error_msg;
if (!ReadBaseDelta(patched_image_filename.c_str(), &base_delta, &error_msg)) {
Usage(error_msg.c_str(), patched_image_filename.c_str());
@@ -1307,6 +1318,32 @@ static int patchoat(int argc, char **argv) {
return EXIT_FAILURE;
}
+ if (match_delta) {
+ CHECK(!have_image_files); // We will not do this with images.
+ std::string error_msg;
+ // Figure out what the current delta is so we can match it to the desired delta.
+ std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat.get(), PROT_READ, MAP_PRIVATE,
+ &error_msg));
+ off_t current_delta = 0;
+ if (elf.get() == nullptr) {
+ LOG(ERROR) << "unable to open oat file " << input_oat->GetPath() << " : " << error_msg;
+ cleanup(false);
+ return EXIT_FAILURE;
+ } else if (!ReadOatPatchDelta(elf.get(), &current_delta, &error_msg)) {
+ LOG(ERROR) << "Unable to get current delta: " << error_msg;
+ cleanup(false);
+ return EXIT_FAILURE;
+ }
+ // Before this line base_delta is the desired final delta. We need it to be the actual amount to
+ // change everything by. We subtract the current delta from it to make it this.
+ base_delta -= current_delta;
+ if (!IsAligned<kPageSize>(base_delta)) {
+ LOG(ERROR) << "Given image file was relocated by an illegal delta";
+ cleanup(false);
+ return false;
+ }
+ }
+
if (debug) {
LOG(INFO) << "moving offset by " << base_delta
<< " (0x" << std::hex << base_delta << ") bytes or "
@@ -1333,18 +1370,18 @@ static int patchoat(int argc, char **argv) {
new_oat_out);
// The order here doesn't matter. If the first one is successfully saved and the second one
// erased, ImageSpace will still detect a problem and not use the files.
- ret = ret && FinishFile(output_image.get(), ret);
- ret = ret && FinishFile(output_oat.get(), ret);
+ ret = FinishFile(output_image.get(), ret);
+ ret = FinishFile(output_oat.get(), ret);
} else if (have_oat_files) {
TimingLogger::ScopedTiming pt("patch oat", &timings);
ret = PatchOat::Patch(input_oat.get(), base_delta, output_oat.get(), &timings,
output_oat_fd >= 0, // was it opened from FD?
new_oat_out);
- ret = ret && FinishFile(output_oat.get(), ret);
+ ret = FinishFile(output_oat.get(), ret);
} else if (have_image_files) {
TimingLogger::ScopedTiming pt("patch image", &timings);
ret = PatchOat::Patch(input_image_location, base_delta, output_image.get(), isa, &timings);
- ret = ret && FinishFile(output_image.get(), ret);
+ ret = FinishFile(output_image.get(), ret);
} else {
CHECK(false);
ret = true;
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 43cdaea97b..87ecc618eb 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -163,13 +163,6 @@ class PatchOat {
return ret;
}
- // Look up the oat header from any elf file.
- static const OatHeader* GetOatHeader(const ElfFile* elf_file);
-
- // Templatized version to actually look up the oat header
- template <typename ElfFileImpl>
- static const OatHeader* GetOatHeader(const ElfFileImpl* elf_file);
-
// Walks through the old image and patches the mmap'd copy of it to the new offset. It does not
// change the heap.
class PatchVisitor {
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index bb3c72c433..40bb9e1d9b 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -63,6 +63,15 @@ inline void ArtMethod::SetDeclaringClass(mirror::Class* new_declaring_class) {
declaring_class_ = GcRoot<mirror::Class>(new_declaring_class);
}
+inline bool ArtMethod::CASDeclaringClass(mirror::Class* expected_class,
+ mirror::Class* desired_class) {
+ GcRoot<mirror::Class> expected_root(expected_class);
+ GcRoot<mirror::Class> desired_root(desired_class);
+ return reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&declaring_class_)->
+ CompareExchangeStrongSequentiallyConsistent(
+ expected_root, desired_root);
+}
+
inline uint32_t ArtMethod::GetAccessFlags() {
DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
GetDeclaringClass()->IsErroneous());
@@ -497,7 +506,7 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor) {
inline void ArtMethod::CopyFrom(const ArtMethod* src, size_t image_pointer_size) {
memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
- ObjectSize(image_pointer_size));
+ Size(image_pointer_size));
declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass());
dex_cache_resolved_methods_ = GcRoot<mirror::PointerArray>(
const_cast<ArtMethod*>(src)->GetDexCacheResolvedMethods());
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 90352b7c08..1afd056655 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -67,6 +67,9 @@ class ArtMethod FINAL {
void SetDeclaringClass(mirror::Class *new_declaring_class)
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CASDeclaringClass(mirror::Class* expected_class, mirror::Class* desired_class)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
static MemberOffset DeclaringClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
}
@@ -504,12 +507,19 @@ class ArtMethod FINAL {
bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
SHARED_REQUIRES(Locks::mutator_lock_);
- // Size of an instance of this object.
- static size_t ObjectSize(size_t pointer_size) {
+ // Size of an instance of this native class.
+ static size_t Size(size_t pointer_size) {
return RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size) +
(sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
}
+ // Alignment of an instance of this native class.
+ static size_t Alignment(size_t pointer_size) {
+ // The ArtMethod alignment is the same as image pointer size. This differs from
+ // alignof(ArtMethod) if cross-compiling with pointer_size != sizeof(void*).
+ return pointer_size;
+ }
+
void CopyFrom(const ArtMethod* src, size_t image_pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index 6f45dc8209..1b0d774419 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -29,21 +29,28 @@ namespace art {
template<typename T>
static constexpr int CLZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
- // TODO: assert unsigned. There is currently many uses with signed values.
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4]
"T too large, must be smaller than long long");
- return (sizeof(T) == sizeof(uint32_t))
- ? __builtin_clz(x) // TODO: __builtin_clz[ll] has undefined behavior for x=0
- : __builtin_clzll(x);
+ return
+ DCHECK_CONSTEXPR(x != 0, "x must not be zero", T(0))
+ (sizeof(T) == sizeof(uint32_t))
+ ? __builtin_clz(x)
+ : __builtin_clzll(x);
}
template<typename T>
static constexpr int CTZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
- // TODO: assert unsigned. There is currently many uses with signed values.
- return (sizeof(T) == sizeof(uint32_t))
- ? __builtin_ctz(x)
- : __builtin_ctzll(x);
+ // It is not unreasonable to ask for trailing zeros in a negative number. As such, do not check
+ // that T is an unsigned type.
+ static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4]
+ "T too large, must be smaller than long long");
+ return
+ DCHECK_CONSTEXPR(x != 0, "x must not be zero", T(0))
+ (sizeof(T) == sizeof(uint32_t))
+ ? __builtin_ctz(x)
+ : __builtin_ctzll(x);
}
template<typename T>
@@ -158,6 +165,9 @@ static inline bool IsAlignedParam(T x, int n) {
#define DCHECK_ALIGNED(value, alignment) \
DCHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<const void*>(value)
+#define CHECK_ALIGNED_PARAM(value, alignment) \
+ CHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value)
+
#define DCHECK_ALIGNED_PARAM(value, alignment) \
DCHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f19263d757..c179c64491 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1213,9 +1213,8 @@ void ClassLinker::InitFromImage() {
if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
const ImageHeader& header = space->GetImageHeader();
const ImageSection& methods = header.GetMethodsSection();
- const size_t art_method_size = ArtMethod::ObjectSize(image_pointer_size_);
SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_);
- methods.VisitPackedArtMethods(&visitor, space->Begin(), art_method_size);
+ methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
}
// reinit class_roots_
@@ -2294,9 +2293,11 @@ LengthPrefixedArray<ArtField>* ClassLinker::AllocArtFieldArray(Thread* self, siz
if (length == 0) {
return nullptr;
}
- auto* ret = new(Runtime::Current()->GetLinearAlloc()->Alloc(
- self, LengthPrefixedArray<ArtField>::ComputeSize(length))) LengthPrefixedArray<ArtField>(
- length);
+ // If the ArtField alignment changes, review all uses of LengthPrefixedArray<ArtField>.
+ static_assert(alignof(ArtField) == 4, "ArtField alignment is expected to be 4.");
+ size_t storage_size = LengthPrefixedArray<ArtField>::ComputeSize(length);
+ void* array_storage = Runtime::Current()->GetLinearAlloc()->Alloc(self, storage_size);
+ auto* ret = new(array_storage) LengthPrefixedArray<ArtField>(length);
CHECK(ret != nullptr);
std::uninitialized_fill_n(&ret->At(0), length, ArtField());
return ret;
@@ -2306,13 +2307,15 @@ LengthPrefixedArray<ArtMethod>* ClassLinker::AllocArtMethodArray(Thread* self, s
if (length == 0) {
return nullptr;
}
- const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_);
- auto* ret = new (Runtime::Current()->GetLinearAlloc()->Alloc(
- self, LengthPrefixedArray<ArtMethod>::ComputeSize(length, method_size)))
- LengthPrefixedArray<ArtMethod>(length);
+ const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
+ const size_t method_size = ArtMethod::Size(image_pointer_size_);
+ const size_t storage_size =
+ LengthPrefixedArray<ArtMethod>::ComputeSize(length, method_size, method_alignment);
+ void* array_storage = Runtime::Current()->GetLinearAlloc()->Alloc(self, storage_size);
+ auto* ret = new (array_storage) LengthPrefixedArray<ArtMethod>(length);
CHECK(ret != nullptr);
for (size_t i = 0; i < length; ++i) {
- new(reinterpret_cast<void*>(&ret->At(i, method_size))) ArtMethod;
+ new(reinterpret_cast<void*>(&ret->At(i, method_size, method_alignment))) ArtMethod;
}
return ret;
}
@@ -4689,7 +4692,8 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
const bool have_interfaces = interfaces.Get() != nullptr;
const size_t num_interfaces =
have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces();
- const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_);
+ const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
+ const size_t method_size = ArtMethod::Size(image_pointer_size_);
if (num_interfaces == 0) {
if (super_ifcount == 0) {
// Class implements no interfaces.
@@ -4914,7 +4918,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
// matter which direction we go. We walk it backward anyway.)
for (k = input_array_length - 1; k >= 0; --k) {
ArtMethod* vtable_method = input_virtual_methods != nullptr ?
- &input_virtual_methods->At(k, method_size) :
+ &input_virtual_methods->At(k, method_size, method_alignment) :
input_vtable_array->GetElementPtrSize<ArtMethod*>(k, image_pointer_size_);
ArtMethod* vtable_method_for_name_comparison =
vtable_method->GetInterfaceMethodIfProxy(image_pointer_size_);
@@ -4975,10 +4979,14 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
// where GCs could attempt to mark stale pointers due to memcpy. And since we overwrite the
// realloced memory with out->CopyFrom, we are guaranteed to have objects in the to space since
// CopyFrom has internal read barriers.
- const size_t old_size = old_virtuals != nullptr ?
- LengthPrefixedArray<ArtMethod>::ComputeSize(old_method_count, method_size) : 0u;
+ const size_t old_size = old_virtuals != nullptr
+ ? LengthPrefixedArray<ArtMethod>::ComputeSize(old_method_count,
+ method_size,
+ method_alignment)
+ : 0u;
const size_t new_size = LengthPrefixedArray<ArtMethod>::ComputeSize(new_method_count,
- method_size);
+ method_size,
+ method_alignment);
auto* virtuals = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
runtime->GetLinearAlloc()->Realloc(self, old_virtuals, old_size, new_size));
if (UNLIKELY(virtuals == nullptr)) {
@@ -4989,7 +4997,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
ScopedArenaUnorderedMap<ArtMethod*, ArtMethod*> move_table(allocator.Adapter());
if (virtuals != old_virtuals) {
// Maps from heap allocated miranda method to linear alloc miranda method.
- StrideIterator<ArtMethod> out = virtuals->Begin(method_size);
+ StrideIterator<ArtMethod> out = virtuals->Begin(method_size, method_alignment);
// Copy over the old methods + miranda methods.
for (auto& m : klass->GetVirtualMethods(image_pointer_size_)) {
move_table.emplace(&m, &*out);
@@ -4999,7 +5007,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
++out;
}
}
- StrideIterator<ArtMethod> out(virtuals->Begin(method_size) + old_method_count);
+ StrideIterator<ArtMethod> out(virtuals->Begin(method_size, method_alignment) + old_method_count);
// Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
// we want the roots of the miranda methods to get visited.
for (ArtMethod* mir_method : miranda_methods) {
@@ -5022,7 +5030,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
self->AssertPendingOOMException();
return false;
}
- out = StrideIterator<ArtMethod>(virtuals->Begin(method_size) + old_method_count);
+ out = virtuals->Begin(method_size, method_alignment) + old_method_count;
size_t vtable_pos = old_vtable_count;
for (size_t i = old_method_count; i < new_method_count; ++i) {
// Leave the declaring class alone as type indices are relative to it
@@ -5893,8 +5901,10 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFi
}
ArtMethod* ClassLinker::CreateRuntimeMethod() {
- const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_);
- ArtMethod* method = &AllocArtMethodArray(Thread::Current(), 1)->At(0, method_size);
+ const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
+ const size_t method_size = ArtMethod::Size(image_pointer_size_);
+ LengthPrefixedArray<ArtMethod>* method_array = AllocArtMethodArray(Thread::Current(), 1);
+ ArtMethod* method = &method_array->At(0, method_size, method_alignment);
CHECK(method != nullptr);
method->SetDexMethodIndex(DexFile::kDexNoIndex);
CHECK(method->IsRuntimeMethod());
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 9fd8c87435..723ee74eb6 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1868,7 +1868,8 @@ const File& ElfFile::GetFile() const {
DELEGATE_TO_IMPL(GetFile);
}
-bool ElfFile::GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size) {
+bool ElfFile::GetSectionOffsetAndSize(const char* section_name, uint64_t* offset,
+ uint64_t* size) const {
if (elf32_.get() == nullptr) {
CHECK(elf64_.get() != nullptr);
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index 48cb4b8b2e..1188c97658 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -60,7 +60,7 @@ class ElfFile {
const File& GetFile() const;
- bool GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size);
+ bool GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size) const;
uint64_t FindSymbolAddress(unsigned section_type,
const std::string& symbol_name,
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index eaf26bc462..eaf33f6b7f 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -298,7 +298,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
interface_method->GetArtMethod(), sizeof(void*));
auto* virtual_methods = proxy_class->GetVirtualMethodsPtr();
size_t num_virtuals = proxy_class->NumVirtualMethods();
- size_t method_size = ArtMethod::ObjectSize(sizeof(void*));
+ size_t method_size = ArtMethod::Size(sizeof(void*));
int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) -
reinterpret_cast<uintptr_t>(virtual_methods)) / method_size;
CHECK_LT(throws_index, static_cast<int>(num_virtuals));
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 47f9b1b88e..c3a962737f 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -331,7 +331,7 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
// TODO: Check linear alloc and image.
- DCHECK_ALIGNED(ArtMethod::ObjectSize(sizeof(void*)), sizeof(void*))
+ DCHECK_ALIGNED(ArtMethod::Size(sizeof(void*)), sizeof(void*))
<< "ArtMethod is not pointer aligned";
if (method_obj == nullptr || !IsAligned<sizeof(void*)>(method_obj)) {
VLOG(signals) << "no method";
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c11c134326..fc2a801b7f 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -614,7 +614,9 @@ void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
for (size_t i = 0; i < count; ++i) {
auto* root = roots[i];
auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
- MarkObject(&ref);
+ // The root can be in the to-space since we may visit the declaring class of an ArtMethod
+ // multiple times if it is on the call stack.
+ MarkObjectIfNotInToSpace(&ref);
if (*root != ref.AsMirrorPtr()) {
*root = ref.AsMirrorPtr();
}
@@ -624,7 +626,7 @@ void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED) {
for (size_t i = 0; i < count; ++i) {
- MarkObject(roots[i]);
+ MarkObjectIfNotInToSpace(roots[i]);
}
}
diff --git a/runtime/image.cc b/runtime/image.cc
index ba1e58bb2c..2586959e55 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '8', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '9', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
@@ -153,19 +153,21 @@ void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base)
for (size_t i = 0; i < array->Length(); ++i) {
visitor->Visit(&array->At(i, sizeof(ArtField)));
}
- pos += array->ComputeSize(array->Length(), sizeof(ArtField));
+ pos += array->ComputeSize(array->Length());
}
}
void ImageSection::VisitPackedArtMethods(ArtMethodVisitor* visitor,
uint8_t* base,
- size_t method_size) const {
+ size_t pointer_size) const {
+ const size_t method_alignment = ArtMethod::Alignment(pointer_size);
+ const size_t method_size = ArtMethod::Size(pointer_size);
for (size_t pos = 0; pos < Size(); ) {
auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + Offset() + pos);
for (size_t i = 0; i < array->Length(); ++i) {
- visitor->Visit(&array->At(i, method_size));
+ visitor->Visit(&array->At(i, method_size, method_alignment));
}
- pos += array->ComputeSize(array->Length(), method_size);
+ pos += array->ComputeSize(array->Length(), method_size, method_alignment);
}
}
diff --git a/runtime/image.h b/runtime/image.h
index eb26f7f9b6..1a0d8fd92f 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -65,7 +65,7 @@ class PACKED(4) ImageSection {
}
// Visit ArtMethods in the section starting at base.
- void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t method_size) const;
+ void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const;
// Visit ArtMethods in the section starting at base.
void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;
diff --git a/runtime/leb128.h b/runtime/leb128.h
index 14683d4063..976936d639 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -101,7 +101,7 @@ static inline int32_t DecodeSignedLeb128(const uint8_t** data) {
static inline uint32_t UnsignedLeb128Size(uint32_t data) {
// bits_to_encode = (data != 0) ? 32 - CLZ(x) : 1 // 32 - CLZ(data | 1)
// bytes = ceil(bits_to_encode / 7.0); // (6 + bits_to_encode) / 7
- uint32_t x = 6 + 32 - CLZ(data | 1);
+ uint32_t x = 6 + 32 - CLZ(data | 1U);
// Division by 7 is done by (x * 37) >> 8 where 37 = ceil(256 / 7).
// This works for 0 <= x < 256 / (7 * 37 - 256), i.e. 0 <= x <= 85.
return (x * 37) >> 8;
@@ -111,7 +111,7 @@ static inline uint32_t UnsignedLeb128Size(uint32_t data) {
static inline uint32_t SignedLeb128Size(int32_t data) {
// Like UnsignedLeb128Size(), but we need one bit beyond the highest bit that differs from sign.
data = data ^ (data >> 31);
- uint32_t x = 1 /* we need to encode the sign bit */ + 6 + 32 - CLZ(data | 1);
+ uint32_t x = 1 /* we need to encode the sign bit */ + 6 + 32 - CLZ(data | 1U);
return (x * 37) >> 8;
}
diff --git a/runtime/length_prefixed_array.h b/runtime/length_prefixed_array.h
index 2b2e8d34d2..d9bc656673 100644
--- a/runtime/length_prefixed_array.h
+++ b/runtime/length_prefixed_array.h
@@ -21,6 +21,8 @@
#include "linear_alloc.h"
#include "stride_iterator.h"
+#include "base/bit_utils.h"
+#include "base/casts.h"
#include "base/iteration_range.h"
namespace art {
@@ -28,29 +30,35 @@ namespace art {
template<typename T>
class LengthPrefixedArray {
public:
- explicit LengthPrefixedArray(uint64_t length) : length_(length) {}
+ explicit LengthPrefixedArray(size_t length)
+ : length_(dchecked_integral_cast<uint32_t>(length)) {}
- T& At(size_t index, size_t element_size = sizeof(T)) {
+ T& At(size_t index, size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
DCHECK_LT(index, length_);
- return *reinterpret_cast<T*>(&data_[0] + index * element_size);
+ return AtUnchecked(index, element_size, alignment);
}
- StrideIterator<T> Begin(size_t element_size = sizeof(T)) {
- return StrideIterator<T>(reinterpret_cast<T*>(&data_[0]), element_size);
+ StrideIterator<T> Begin(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
+ return StrideIterator<T>(&AtUnchecked(0, element_size, alignment), element_size);
}
- StrideIterator<T> End(size_t element_size = sizeof(T)) {
- return StrideIterator<T>(reinterpret_cast<T*>(&data_[0] + element_size * length_),
- element_size);
+ StrideIterator<T> End(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
+ return StrideIterator<T>(&AtUnchecked(length_, element_size, alignment), element_size);
}
- static size_t OffsetOfElement(size_t index, size_t element_size = sizeof(T)) {
- return offsetof(LengthPrefixedArray<T>, data_) + index * element_size;
+ static size_t OffsetOfElement(size_t index,
+ size_t element_size = sizeof(T),
+ size_t alignment = alignof(T)) {
+ DCHECK_ALIGNED_PARAM(element_size, alignment);
+ return RoundUp(offsetof(LengthPrefixedArray<T>, data), alignment) + index * element_size;
}
- // Alignment is the caller's responsibility.
- static size_t ComputeSize(size_t num_elements, size_t element_size = sizeof(T)) {
- return OffsetOfElement(num_elements, element_size);
+ static size_t ComputeSize(size_t num_elements,
+ size_t element_size = sizeof(T),
+ size_t alignment = alignof(T)) {
+ size_t result = OffsetOfElement(num_elements, element_size, alignment);
+ DCHECK_ALIGNED_PARAM(result, alignment);
+ return result;
}
uint64_t Length() const {
@@ -58,21 +66,26 @@ class LengthPrefixedArray {
}
// Update the length but does not reallocate storage.
- void SetLength(uint64_t length) {
- length_ = length;
+ void SetLength(size_t length) {
+ length_ = dchecked_integral_cast<uint32_t>(length);
}
private:
- uint64_t length_; // 64 bits for 8 byte alignment of data_.
- uint8_t data_[0];
+ T& AtUnchecked(size_t index, size_t element_size, size_t alignment) {
+ return *reinterpret_cast<T*>(
+ reinterpret_cast<uintptr_t>(this) + OffsetOfElement(index, element_size, alignment));
+ }
+
+ uint32_t length_;
+ uint8_t data[0];
};
// Returns empty iteration range if the array is null.
template<typename T>
IterationRange<StrideIterator<T>> MakeIterationRangeFromLengthPrefixedArray(
- LengthPrefixedArray<T>* arr, size_t element_size) {
+ LengthPrefixedArray<T>* arr, size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
return arr != nullptr ?
- MakeIterationRange(arr->Begin(element_size), arr->End(element_size)) :
+ MakeIterationRange(arr->Begin(element_size, alignment), arr->End(element_size, alignment)) :
MakeEmptyIterationRange(StrideIterator<T>(nullptr, 0));
}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 887e204a44..ac9cb09731 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -92,14 +92,18 @@ inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, size_t pointer_size)
CheckPointerSize(pointer_size);
auto* methods = GetDirectMethodsPtrUnchecked();
DCHECK(methods != nullptr);
- return &methods->At(i, ArtMethod::ObjectSize(pointer_size));
+ return &methods->At(i,
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
}
inline ArtMethod* Class::GetDirectMethod(size_t i, size_t pointer_size) {
CheckPointerSize(pointer_size);
auto* methods = GetDirectMethodsPtr();
DCHECK(methods != nullptr);
- return &methods->At(i, ArtMethod::ObjectSize(pointer_size));
+ return &methods->At(i,
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
}
template<VerifyObjectFlags kVerifyFlags>
@@ -133,7 +137,9 @@ inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, size_t pointer_size
CheckPointerSize(pointer_size);
LengthPrefixedArray<ArtMethod>* methods = GetVirtualMethodsPtrUnchecked();
DCHECK(methods != nullptr);
- return &methods->At(i, ArtMethod::ObjectSize(pointer_size));
+ return &methods->At(i,
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
}
inline PointerArray* Class::GetVTable() {
@@ -837,29 +843,31 @@ void mirror::Class::VisitNativeRoots(Visitor& visitor, size_t pointer_size) {
inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(size_t pointer_size) {
CheckPointerSize(pointer_size);
return MakeIterationRangeFromLengthPrefixedArray(GetDirectMethodsPtrUnchecked(),
- ArtMethod::ObjectSize(pointer_size));
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
}
inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(size_t pointer_size) {
CheckPointerSize(pointer_size);
return MakeIterationRangeFromLengthPrefixedArray(GetVirtualMethodsPtrUnchecked(),
- ArtMethod::ObjectSize(pointer_size));
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
}
inline IterationRange<StrideIterator<ArtField>> Class::GetIFields() {
- return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtr(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtr());
}
inline IterationRange<StrideIterator<ArtField>> Class::GetSFields() {
- return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtr(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtr());
}
inline IterationRange<StrideIterator<ArtField>> Class::GetIFieldsUnchecked() {
- return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtrUnchecked(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtrUnchecked());
}
inline IterationRange<StrideIterator<ArtField>> Class::GetSFieldsUnchecked() {
- return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked());
}
inline MemberOffset Class::EmbeddedImTableOffset(size_t pointer_size) {
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 1ca98e50d8..c337e91cf8 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -208,7 +208,7 @@ ALWAYS_INLINE static inline ArtField* FindFieldByName(
}
}
if (kIsDebugBuild) {
- for (ArtField& field : MakeIterationRangeFromLengthPrefixedArray(fields, sizeof(ArtField))) {
+ for (ArtField& field : MakeIterationRangeFromLengthPrefixedArray(fields)) {
CHECK_NE(field.GetName(), name->ToModifiedUtf8());
}
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index d54a7a6aa8..a33e150b93 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2419,6 +2419,7 @@ class ReferenceMapVisitor : public StackVisitor {
void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = shadow_frame->GetMethod();
+ VisitDeclaringClass(m);
DCHECK(m != nullptr);
size_t num_regs = shadow_frame->NumberOfVRegs();
if (m->IsNative() || shadow_frame->HasReferenceArray()) {
@@ -2459,10 +2460,25 @@ class ReferenceMapVisitor : public StackVisitor {
}
private:
+ // Visiting the declaring class is necessary so that we don't unload the class of a method that
+ // is executing. We need to ensure that the code stays mapped.
+ void VisitDeclaringClass(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* klass = method->GetDeclaringClassNoBarrier();
+ // klass can be null for runtime methods.
+ if (klass != nullptr) {
+ mirror::Object* new_ref = klass;
+ visitor_(&new_ref, -1, this);
+ if (new_ref != klass) {
+ method->CASDeclaringClass(klass, new_ref->AsClass());
+ }
+ }
+ }
+
void VisitQuickFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
- auto* cur_quick_frame = GetCurrentQuickFrame();
+ ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
DCHECK(cur_quick_frame != nullptr);
- auto* m = *cur_quick_frame;
+ ArtMethod* m = *cur_quick_frame;
+ VisitDeclaringClass(m);
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 20512f9765..8aa1189a95 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -30,6 +30,7 @@
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "dex_file-inl.h"
+#include "dex_instruction.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
@@ -1452,4 +1453,375 @@ std::string PrettyDescriptor(Primitive::Type type) {
return PrettyDescriptor(Primitive::Descriptor(type));
}
+static void DumpMethodCFGImpl(const DexFile* dex_file,
+ uint32_t dex_method_idx,
+ const DexFile::CodeItem* code_item,
+ std::ostream& os) {
+ os << "digraph {\n";
+ os << " # /* " << PrettyMethod(dex_method_idx, *dex_file, true) << " */\n";
+
+ std::set<uint32_t> dex_pc_is_branch_target;
+ {
+ // Go and populate.
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ if (inst->IsBranch()) {
+ dex_pc_is_branch_target.insert(dex_pc + inst->GetTargetOffset());
+ } else if (inst->IsSwitch()) {
+ const uint16_t* insns = code_item->insns_ + dex_pc;
+ int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ const uint16_t* switch_insns = insns + switch_offset;
+ uint32_t switch_count = switch_insns[1];
+ int32_t targets_offset;
+ if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
+ /* 0=sig, 1=count, 2/3=firstKey */
+ targets_offset = 4;
+ } else {
+ /* 0=sig, 1=count, 2..count*2 = keys */
+ targets_offset = 2 + 2 * switch_count;
+ }
+ for (uint32_t targ = 0; targ < switch_count; targ++) {
+ int32_t offset =
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
+ dex_pc_is_branch_target.insert(dex_pc + offset);
+ }
+ }
+ }
+ }
+
+ // Create nodes for "basic blocks."
+ std::map<uint32_t, uint32_t> dex_pc_to_node_id; // This only has entries for block starts.
+ std::map<uint32_t, uint32_t> dex_pc_to_incl_id; // This has entries for all dex pcs.
+
+ {
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ bool first_in_block = true;
+ bool force_new_block = false;
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ if (dex_pc == 0 ||
+ (dex_pc_is_branch_target.find(dex_pc) != dex_pc_is_branch_target.end()) ||
+ force_new_block) {
+ uint32_t id = dex_pc_to_node_id.size();
+ if (id > 0) {
+ // End last node.
+ os << "}\"];\n";
+ }
+ // Start next node.
+ os << " node" << id << " [shape=record,label=\"{";
+ dex_pc_to_node_id.insert(std::make_pair(dex_pc, id));
+ first_in_block = true;
+ force_new_block = false;
+ }
+
+ // Register instruction.
+ dex_pc_to_incl_id.insert(std::make_pair(dex_pc, dex_pc_to_node_id.size() - 1));
+
+ // Print instruction.
+ if (!first_in_block) {
+ os << " | ";
+ } else {
+ first_in_block = false;
+ }
+
+ // Dump the instruction. Need to escape '"', '<', '>', '{' and '}'.
+ os << "<" << "p" << dex_pc << ">";
+ os << " 0x" << std::hex << dex_pc << std::dec << ": ";
+ std::string inst_str = inst->DumpString(dex_file);
+ size_t cur_start = 0; // It's OK to start at zero, instruction dumps don't start with chars
+ // we need to escape.
+ while (cur_start != std::string::npos) {
+ size_t next_escape = inst_str.find_first_of("\"{}<>", cur_start + 1);
+ if (next_escape == std::string::npos) {
+ os << inst_str.substr(cur_start, inst_str.size() - cur_start);
+ break;
+ } else {
+ os << inst_str.substr(cur_start, next_escape - cur_start);
+ // Escape all necessary characters.
+ while (next_escape < inst_str.size()) {
+ char c = inst_str.at(next_escape);
+ if (c == '"' || c == '{' || c == '}' || c == '<' || c == '>') {
+ os << '\\' << c;
+ } else {
+ break;
+ }
+ next_escape++;
+ }
+ if (next_escape >= inst_str.size()) {
+ next_escape = std::string::npos;
+ }
+ cur_start = next_escape;
+ }
+ }
+
+ // Force a new block for some fall-throughs and some instructions that terminate the "local"
+ // control flow.
+ force_new_block = inst->IsSwitch() || inst->IsBasicBlockEnd();
+ }
+ // Close last node.
+ if (dex_pc_to_node_id.size() > 0) {
+ os << "}\"];\n";
+ }
+ }
+
+ // Create edges between them.
+ {
+ std::ostringstream regular_edges;
+ std::ostringstream taken_edges;
+ std::ostringstream exception_edges;
+
+ // Common set of exception edges.
+ std::set<uint32_t> exception_targets;
+
+ // These blocks (given by the first dex pc) need exception per dex-pc handling in a second
+ // pass. In the first pass we try and see whether we can use a common set of edges.
+ std::set<uint32_t> blocks_with_detailed_exceptions;
+
+ {
+ uint32_t last_node_id = std::numeric_limits<uint32_t>::max();
+ uint32_t old_dex_pc = 0;
+ uint32_t block_start_dex_pc = std::numeric_limits<uint32_t>::max();
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ old_dex_pc = dex_pc, dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ {
+ auto it = dex_pc_to_node_id.find(dex_pc);
+ if (it != dex_pc_to_node_id.end()) {
+ if (!exception_targets.empty()) {
+ // It seems the last block had common exception handlers. Add the exception edges now.
+ uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
+ for (uint32_t handler_pc : exception_targets) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << node_id
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+ }
+ exception_targets.clear();
+ }
+
+ block_start_dex_pc = dex_pc;
+
+ // Seems to be a fall-through, connect to last_node_id. May be spurious edges for things
+ // like switch data.
+ uint32_t old_last = last_node_id;
+ last_node_id = it->second;
+ if (old_last != std::numeric_limits<uint32_t>::max()) {
+ regular_edges << " node" << old_last << ":p" << old_dex_pc
+ << " -> node" << last_node_id << ":p" << dex_pc
+ << ";\n";
+ }
+ }
+
+ // Look at the exceptions of the first entry.
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ exception_targets.insert(catch_it.GetHandlerAddress());
+ }
+ }
+
+ // Handle instruction.
+
+ // Branch: something with at most two targets.
+ if (inst->IsBranch()) {
+ const int32_t offset = inst->GetTargetOffset();
+ const bool conditional = !inst->IsUnconditional();
+
+ auto target_it = dex_pc_to_node_id.find(dex_pc + offset);
+ if (target_it != dex_pc_to_node_id.end()) {
+ taken_edges << " node" << last_node_id << ":p" << dex_pc
+ << " -> node" << target_it->second << ":p" << (dex_pc + offset)
+ << ";\n";
+ }
+ if (!conditional) {
+ // No fall-through.
+ last_node_id = std::numeric_limits<uint32_t>::max();
+ }
+ } else if (inst->IsSwitch()) {
+ // TODO: Iterate through all switch targets.
+ const uint16_t* insns = code_item->insns_ + dex_pc;
+ /* make sure the start of the switch is in range */
+ int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ /* offset to switch table is a relative branch-style offset */
+ const uint16_t* switch_insns = insns + switch_offset;
+ uint32_t switch_count = switch_insns[1];
+ int32_t targets_offset;
+ if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
+ /* 0=sig, 1=count, 2/3=firstKey */
+ targets_offset = 4;
+ } else {
+ /* 0=sig, 1=count, 2..count*2 = keys */
+ targets_offset = 2 + 2 * switch_count;
+ }
+ /* make sure the end of the switch is in range */
+ /* verify each switch target */
+ for (uint32_t targ = 0; targ < switch_count; targ++) {
+ int32_t offset =
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
+ int32_t abs_offset = dex_pc + offset;
+ auto target_it = dex_pc_to_node_id.find(abs_offset);
+ if (target_it != dex_pc_to_node_id.end()) {
+ // TODO: value label.
+ taken_edges << " node" << last_node_id << ":p" << dex_pc
+ << " -> node" << target_it->second << ":p" << (abs_offset)
+ << ";\n";
+ }
+ }
+ }
+
+      // Exception edges. If this is not the first instruction in the block, check whether its
+      // handlers match the block's common set; if not, defer to the detailed second pass.
+      if (block_start_dex_pc != dex_pc) {
+ std::set<uint32_t> current_handler_pcs;
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ current_handler_pcs.insert(catch_it.GetHandlerAddress());
+ }
+ if (current_handler_pcs != exception_targets) {
+ exception_targets.clear(); // Clear so we don't do something at the end.
+ blocks_with_detailed_exceptions.insert(block_start_dex_pc);
+ }
+ }
+
+ if (inst->IsReturn() ||
+ (inst->Opcode() == Instruction::THROW) ||
+ (inst->IsBranch() && inst->IsUnconditional())) {
+ // No fall-through.
+ last_node_id = std::numeric_limits<uint32_t>::max();
+ }
+ }
+ // Finish up the last block, if it had common exceptions.
+ if (!exception_targets.empty()) {
+ // It seems the last block had common exception handlers. Add the exception edges now.
+ uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
+ for (uint32_t handler_pc : exception_targets) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << node_id
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+ }
+ exception_targets.clear();
+ }
+ }
+
+ // Second pass for detailed exception blocks.
+ // TODO
+  // Walk each flagged block instruction by instruction and emit its exception edges.
+ for (uint32_t dex_pc : blocks_with_detailed_exceptions) {
+ const Instruction* inst = Instruction::At(&code_item->insns_[dex_pc]);
+ uint32_t this_node_id = dex_pc_to_incl_id.find(dex_pc)->second;
+ while (true) {
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ if (catch_it.HasNext()) {
+ std::set<uint32_t> handled_targets;
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ uint32_t handler_pc = catch_it.GetHandlerAddress();
+ auto it = handled_targets.find(handler_pc);
+ if (it == handled_targets.end()) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << this_node_id << ":p" << dex_pc
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+
+ // Mark as done.
+ handled_targets.insert(handler_pc);
+ }
+ }
+ }
+ if (inst->IsBasicBlockEnd()) {
+ break;
+ }
+
+ // Loop update. Have a break-out if the next instruction is a branch target and thus in
+ // another block.
+ dex_pc += inst->SizeInCodeUnits();
+ if (dex_pc >= code_item->insns_size_in_code_units_) {
+ break;
+ }
+ if (dex_pc_to_node_id.find(dex_pc) != dex_pc_to_node_id.end()) {
+ break;
+ }
+ inst = inst->Next();
+ }
+ }
+
+ // Write out the sub-graphs to make edges styled.
+ os << "\n";
+ os << " subgraph regular_edges {\n";
+ os << " edge [color=\"#000000\",weight=.3,len=3];\n\n";
+ os << " " << regular_edges.str() << "\n";
+ os << " }\n\n";
+
+ os << " subgraph taken_edges {\n";
+ os << " edge [color=\"#00FF00\",weight=.3,len=3];\n\n";
+ os << " " << taken_edges.str() << "\n";
+ os << " }\n\n";
+
+ os << " subgraph exception_edges {\n";
+ os << " edge [color=\"#FF0000\",weight=.3,len=3];\n\n";
+ os << " " << exception_edges.str() << "\n";
+ os << " }\n\n";
+ }
+
+ os << "}\n";
+}
+
+void DumpMethodCFG(ArtMethod* method, std::ostream& os) {
+ const DexFile* dex_file = method->GetDexFile();
+ const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+
+ DumpMethodCFGImpl(dex_file, method->GetDexMethodIndex(), code_item, os);
+}
+
+void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os) {
+ // This is painful, we need to find the code item. That means finding the class, and then
+ // iterating the table.
+ if (dex_method_idx >= dex_file->NumMethodIds()) {
+ os << "Could not find method-idx.";
+ return;
+ }
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx);
+
+ const DexFile::ClassDef* class_def = dex_file->FindClassDef(method_id.class_idx_);
+ if (class_def == nullptr) {
+ os << "Could not find class-def.";
+ return;
+ }
+
+ const uint8_t* class_data = dex_file->GetClassData(*class_def);
+ if (class_data == nullptr) {
+ os << "No class data.";
+ return;
+ }
+
+ ClassDataItemIterator it(*dex_file, class_data);
+ // Skip fields
+ while (it.HasNextStaticField() || it.HasNextInstanceField()) {
+ it.Next();
+ }
+
+ // Find method, and dump it.
+ while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ if (method_idx == dex_method_idx) {
+ DumpMethodCFGImpl(dex_file, dex_method_idx, it.GetMethodCodeItem(), os);
+ return;
+ }
+ it.Next();
+ }
+
+ // Otherwise complain.
+ os << "Something went wrong, didn't find the method in the class data.";
+}
+
} // namespace art
diff --git a/runtime/utils.h b/runtime/utils.h
index 4fa5f5a539..d1be51aff7 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -324,6 +324,9 @@ static inline constexpr bool ValidPointerSize(size_t pointer_size) {
return pointer_size == 4 || pointer_size == 8;
}
+void DumpMethodCFG(ArtMethod* method, std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 16615340bd..1828b91e2a 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -53,6 +53,9 @@ static constexpr bool kTimeVerifyMethod = !kIsDebugBuild;
static constexpr bool gDebugVerify = false;
// TODO: Add a constant to method_verifier to turn on verbose logging?
+// On VLOG(verifier), should we dump the whole state when we run into a hard failure?
+static constexpr bool kDumpRegLinesOnHardFailureIfVLOG = true;
+
void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
uint32_t insns_size, uint16_t registers_size,
MethodVerifier* verifier) {
@@ -638,6 +641,12 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
have_pending_hard_failure_ = true;
+ if (VLOG_IS_ON(verifier) && kDumpRegLinesOnHardFailureIfVLOG) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::ostringstream oss;
+ Dump(oss);
+ LOG(ERROR) << oss.str();
+ }
break;
}
}
@@ -1034,8 +1043,8 @@ bool MethodVerifier::CheckArrayData(uint32_t cur_offset) {
DCHECK_LT(cur_offset, insn_count);
/* make sure the start of the array data table is in range */
- array_data_offset = insns[1] | (((int32_t) insns[2]) << 16);
- if ((int32_t) cur_offset + array_data_offset < 0 ||
+ array_data_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ if (static_cast<int32_t>(cur_offset) + array_data_offset < 0 ||
cur_offset + array_data_offset + 2 >= insn_count) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid array data start: at " << cur_offset
<< ", data offset " << array_data_offset
@@ -1147,8 +1156,9 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) {
DCHECK_LT(cur_offset, insn_count);
const uint16_t* insns = code_item_->insns_ + cur_offset;
/* make sure the start of the switch is in range */
- int32_t switch_offset = insns[1] | ((int32_t) insns[2]) << 16;
- if ((int32_t) cur_offset + switch_offset < 0 || cur_offset + switch_offset + 2 > insn_count) {
+ int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ if (static_cast<int32_t>(cur_offset) + switch_offset < 0 ||
+ cur_offset + switch_offset + 2 > insn_count) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch start: at " << cur_offset
<< ", switch offset " << switch_offset
<< ", count " << insn_count;
@@ -1204,8 +1214,9 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) {
if (keys_offset > 0 && switch_count > 1) {
int32_t last_key = switch_insns[keys_offset] | (switch_insns[keys_offset + 1] << 16);
for (uint32_t targ = 1; targ < switch_count; targ++) {
- int32_t key = (int32_t) switch_insns[keys_offset + targ * 2] |
- (int32_t) (switch_insns[keys_offset + targ * 2 + 1] << 16);
+ int32_t key =
+ static_cast<int32_t>(switch_insns[keys_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[keys_offset + targ * 2 + 1] << 16);
if (key <= last_key) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid packed switch: last key=" << last_key
<< ", this=" << key;
@@ -1216,11 +1227,11 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) {
}
/* verify each switch target */
for (uint32_t targ = 0; targ < switch_count; targ++) {
- int32_t offset = (int32_t) switch_insns[targets_offset + targ * 2] |
- (int32_t) (switch_insns[targets_offset + targ * 2 + 1] << 16);
+ int32_t offset = static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
int32_t abs_offset = cur_offset + offset;
if (abs_offset < 0 ||
- abs_offset >= (int32_t) insn_count ||
+ abs_offset >= static_cast<int32_t>(insn_count) ||
!insn_flags_[abs_offset].IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset
<< " (-> " << reinterpret_cast<void*>(abs_offset) << ") at "
@@ -1319,7 +1330,7 @@ void MethodVerifier::Dump(VariableIndentationOutputStream* vios) {
ScopedIndentation indent1(vios);
const Instruction* inst = Instruction::At(code_item_->insns_);
for (size_t dex_pc = 0; dex_pc < code_item_->insns_size_in_code_units_;
- dex_pc += inst->SizeInCodeUnits()) {
+ dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
if (reg_line != nullptr) {
vios->Stream() << reg_line->Dump(this) << "\n";
@@ -1331,7 +1342,6 @@ void MethodVerifier::Dump(VariableIndentationOutputStream* vios) {
vios->Stream() << inst->DumpHex(5) << " ";
}
vios->Stream() << inst->DumpString(dex_file_) << "\n";
- inst = inst->Next();
}
}
@@ -2139,7 +2149,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
} else {
// Now verify if the element width in the table matches the element width declared in
// the array
- const uint16_t* array_data = insns + (insns[1] | (((int32_t) insns[2]) << 16));
+ const uint16_t* array_data =
+ insns + (insns[1] | (static_cast<int32_t>(insns[2]) << 16));
if (array_data[0] != Instruction::kArrayDataSignature) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid magic for array-data";
} else {
@@ -3077,7 +3088,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* just need to walk through and tag the targets.
*/
if ((opcode_flags & Instruction::kSwitch) != 0) {
- int offset_to_switch = insns[1] | (((int32_t) insns[2]) << 16);
+ int offset_to_switch = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
const uint16_t* switch_insns = insns + offset_to_switch;
int switch_count = switch_insns[1];
int offset_to_targets, targ;
@@ -3098,7 +3109,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
/* offsets are 32-bit, and only partly endian-swapped */
offset = switch_insns[offset_to_targets + targ * 2] |
- (((int32_t) switch_insns[offset_to_targets + targ * 2 + 1]) << 16);
+ (static_cast<int32_t>(switch_insns[offset_to_targets + targ * 2 + 1]) << 16);
abs_offset = work_insn_idx_ + offset;
DCHECK_LT(abs_offset, code_item_->insns_size_in_code_units_);
if (!CheckNotMoveExceptionOrMoveResult(code_item_->insns_, abs_offset)) {
@@ -3938,7 +3949,24 @@ void MethodVerifier::VerifyAPut(const Instruction* inst,
if (array_type.IsZero()) {
// Null array type; this code path will fail at runtime.
// Still check that the given value matches the instruction's type.
- work_line_->VerifyRegisterType(this, inst->VRegA_23x(), insn_type);
+      // Note: this is, as usual, complicated by the fact that the instruction isn't fully typed
+ // and fits multiple register types.
+ const RegType* modified_reg_type = &insn_type;
+ if ((modified_reg_type == &reg_types_.Integer()) ||
+ (modified_reg_type == &reg_types_.LongLo())) {
+ // May be integer or float | long or double. Overwrite insn_type accordingly.
+ const RegType& value_type = work_line_->GetRegisterType(this, inst->VRegA_23x());
+ if (modified_reg_type == &reg_types_.Integer()) {
+ if (&value_type == &reg_types_.Float()) {
+ modified_reg_type = &value_type;
+ }
+ } else {
+ if (&value_type == &reg_types_.DoubleLo()) {
+ modified_reg_type = &value_type;
+ }
+ }
+ }
+ work_line_->VerifyRegisterType(this, inst->VRegA_23x(), *modified_reg_type);
} else if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
} else {
diff --git a/test/004-ThreadStress/src/Main.java b/test/004-ThreadStress/src/Main.java
index 1db7cc8385..7acd950e68 100644
--- a/test/004-ThreadStress/src/Main.java
+++ b/test/004-ThreadStress/src/Main.java
@@ -546,6 +546,9 @@ public class Main implements Runnable {
operation.perform();
i = (i + 1) % operations.length;
}
+ } catch (OutOfMemoryError e) {
+        // Catch OutOfMemoryErrors since these can cause the test to fail if they print
+ // the stack trace after "Finishing worker".
} finally {
if (DEBUG) {
System.out.println("Finishing ThreadStress Daemon for " + id);
diff --git a/test/107-int-math2/src/Main.java b/test/107-int-math2/src/Main.java
index 6a6227cee5..0c91d4438d 100644
--- a/test/107-int-math2/src/Main.java
+++ b/test/107-int-math2/src/Main.java
@@ -412,7 +412,7 @@ class Main extends IntMathBase {
*/
static int lit8Test(int x) {
- int[] results = new int[8];
+ int[] results = new int[9];
/* try to generate op-int/lit8" instructions */
results[0] = x + 10;
@@ -423,6 +423,7 @@ class Main extends IntMathBase {
results[5] = x & 10;
results[6] = x | -10;
results[7] = x ^ -10;
+ results[8] = x * -256;
int minInt = -2147483648;
int result = minInt / -1;
if (result != minInt) {return 1; }
@@ -434,6 +435,7 @@ class Main extends IntMathBase {
if (results[5] != 8) {return 7; }
if (results[6] != -1) {return 8; }
if (results[7] != 55563) {return 9; }
+ if (results[8] != 14222080) {return 10; }
return 0;
}
diff --git a/test/526-long-regalloc/expected.txt b/test/526-long-regalloc/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/526-long-regalloc/expected.txt
diff --git a/test/526-long-regalloc/info.txt b/test/526-long-regalloc/info.txt
new file mode 100644
index 0000000000..a5ce1bc011
--- /dev/null
+++ b/test/526-long-regalloc/info.txt
@@ -0,0 +1,2 @@
+Regression test for optimizing that used to trip when allocating a register
+pair under certain circumstances.
diff --git a/test/526-long-regalloc/src/Main.java b/test/526-long-regalloc/src/Main.java
new file mode 100644
index 0000000000..e8b3096d06
--- /dev/null
+++ b/test/526-long-regalloc/src/Main.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+ public static void main(String[] args) {
+ foo();
+ }
+
+ public static void foo() {
+ int a = myField1; // esi
+ int b = myField2; // edi
+ $noinline$bar(); // makes allocation of a and b to be callee-save registers
+ int c = myField3; // ecx
+ int e = myField4; // ebx
+ int f = myField5; // edx
+ long d = a == 42 ? myLongField1 : 42L; // Will call AllocateBlockedReg -> edx/ebx
+
+ // At this point, the register allocator used to be in a bogus state, where the low
+ // part of the interval was in the active set, but not the high part.
+
+ long i = myLongField1; // Will call TrySplitNonPairOrUnalignedPairIntervalAt -> Failing DCHECK
+
+ // Use esi and edi first to not have d allocated to them.
+ myField2 = a;
+ myField3 = b;
+
+ // The following sequence of instructions are making the AllocateBlockedReg call
+ // for allocating the d variable misbehave: allocation of the low interval would split
+ // both low and high interval at the fixed use; therefore the allocation of the high interval
+ // would not see the register use, and think the interval can just be spilled and not be
+ // put in the active set, even though it is holding a register.
+ myField1 = (int)d; // stack use
+ myLongField3 = (long) myField2; // edx fixed use
+ myLongField2 = d; // register use
+
+ // Ensure the HInstruction mapping to i, c, e, and f have a live range.
+ myLongField1 = i;
+ myField4 = c;
+ myField5 = e;
+ myField6 = f;
+ }
+
+ public static long $noinline$bar() {
+ if (doThrow) throw new Error();
+ return 42;
+ }
+
+ public static boolean doThrow = false;
+
+ public static int myField1 = 0;
+ public static int myField2 = 0;
+ public static int myField3 = 0;
+ public static int myField4 = 0;
+ public static int myField5 = 0;
+ public static int myField6 = 0;
+ public static long myLongField1 = 0L;
+ public static long myLongField2 = 0L;
+ public static long myLongField3 = 0L;
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index e2101a66e5..dd37cdbaf5 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -39,4 +39,6 @@ b/22411633 (5)
b/22777307
b/22881413
b/20843113
+b/23201502 (float)
+b/23201502 (double)
Done!
diff --git a/test/800-smali/smali/b_23201502.smali b/test/800-smali/smali/b_23201502.smali
new file mode 100644
index 0000000000..d958938abf
--- /dev/null
+++ b/test/800-smali/smali/b_23201502.smali
@@ -0,0 +1,23 @@
+.class public LB23201502;
+
+.super Ljava/lang/Object;
+
+.method public static runFloat()V
+ .registers 3
+ const v0, 0 # Null array.
+ const v1, 0 # 0 index into array.
+ const v2, 0 # 0 value, will be turned into float.
+ int-to-float v2, v2 # Definitely make v2 float.
+ aput v2 , v0, v1 # Put into null array.
+ return-void
+.end method
+
+.method public static runDouble()V
+ .registers 4
+ const v0, 0 # Null array.
+ const v1, 0 # 0 index into array.
+ const v2, 0 # 0 value, will be turned into double.
+ int-to-double v2, v2 # Definitely make v2+v3 double.
+ aput-wide v2 , v0, v1 # Put into null array.
+ return-void
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 3c88040f0f..b481a1dbc4 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -123,6 +123,10 @@ public class Main {
null));
testCases.add(new TestCase("b/22881413", "B22881413", "run", null, null, null));
testCases.add(new TestCase("b/20843113", "B20843113", "run", null, null, null));
+ testCases.add(new TestCase("b/23201502 (float)", "B23201502", "runFloat", null,
+ new NullPointerException(), null));
+ testCases.add(new TestCase("b/23201502 (double)", "B23201502", "runDouble", null,
+ new NullPointerException(), null));
}
public void runTests() {