Merge "Improve the graph visualizer's output for constant locations."
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index d4dd978..2471f79 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -31,10 +31,6 @@
}
uint32_t ArmBaseRelativePatcher::ReserveSpaceEnd(uint32_t offset) {
- // NOTE: The final thunk can be reserved from InitCodeMethodVisitor::EndClass() while it
- // may be written early by WriteCodeMethodVisitor::VisitMethod() for a deduplicated chunk
- // of code. To avoid any alignment discrepancies for the final chunk, we always align the
- // offset after reserving of writing any chunk.
uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
bool needs_thunk = ReserveSpaceProcessPatches(aligned_offset,
MethodReference(nullptr, 0u),
@@ -46,7 +42,7 @@
unprocessed_patches_.clear();
thunk_locations_.push_back(aligned_offset);
- offset = CompiledMethod::AlignCode(aligned_offset + thunk_code_.size(), instruction_set_);
+ offset = aligned_offset + thunk_code_.size();
}
return offset;
}
@@ -65,13 +61,7 @@
if (UNLIKELY(!WriteRelCallThunk(out, ArrayRef<const uint8_t>(thunk_code_)))) {
return 0u;
}
- uint32_t thunk_end_offset = aligned_offset + thunk_code_.size();
- // Align after writing chunk, see the ReserveSpace() above.
- offset = CompiledMethod::AlignCode(thunk_end_offset, instruction_set_);
- aligned_code_delta = offset - thunk_end_offset;
- if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
- return 0u;
- }
+ offset = aligned_offset + thunk_code_.size();
}
return offset;
}
@@ -92,7 +82,7 @@
MethodReference method_ref,
uint32_t max_extra_space) {
uint32_t quick_code_size = compiled_method->GetQuickCode().size();
- uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
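+ // Align the code that follows the OatQuickMethodHeader rather than the header itself.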
+ uint32_t quick_code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
uint32_t next_aligned_offset = compiled_method->AlignCode(quick_code_offset + quick_code_size);
// Adjust for extra space required by the subclass.
next_aligned_offset = compiled_method->AlignCode(next_aligned_offset + max_extra_space);
@@ -106,9 +96,9 @@
if (needs_thunk) {
// A single thunk will cover all pending patches.
unprocessed_patches_.clear();
- uint32_t thunk_location = compiled_method->AlignCode(offset);
+ uint32_t thunk_location = CompiledMethod::AlignCode(offset, instruction_set_);
thunk_locations_.push_back(thunk_location);
- offset = CompiledMethod::AlignCode(thunk_location + thunk_code_.size(), instruction_set_);
+ offset = thunk_location + thunk_code_.size();
}
}
for (const LinkerPatch& patch : compiled_method->GetPatches()) {
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index a8078e3..eace3d4 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -48,18 +48,18 @@
const ArrayRef<const LinkerPatch>& method3_patches,
uint32_t distance_without_thunks) {
CHECK_EQ(distance_without_thunks % kArmAlignment, 0u);
- const uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kThumb2) + sizeof(OatQuickMethodHeader);
+ uint32_t method1_offset =
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
// We want to put method3 at a very precise offset.
const uint32_t method3_offset = method1_offset + distance_without_thunks;
- CHECK_ALIGNED(method3_offset - sizeof(OatQuickMethodHeader), kArmAlignment);
+ CHECK_ALIGNED(method3_offset, kArmAlignment);
// Calculate size of method2 so that we put method3 at the correct place.
+ const uint32_t method1_end = method1_offset + method1_code.size();
const uint32_t method2_offset =
- CompiledCode::AlignCode(method1_offset + method1_code.size(), kThumb2) +
- sizeof(OatQuickMethodHeader);
+ method1_end + CodeAlignmentSize(method1_end) + sizeof(OatQuickMethodHeader);
const uint32_t method2_size = (method3_offset - sizeof(OatQuickMethodHeader) - method2_offset);
std::vector<uint8_t> method2_raw_code(method2_size);
ArrayRef<const uint8_t> method2_code(method2_raw_code);
@@ -78,8 +78,11 @@
if (result3.second == method3_offset + 1 /* thumb mode */) {
return false; // No thunk.
} else {
- uint32_t aligned_thunk_size = CompiledCode::AlignCode(ThunkSize(), kThumb2);
- CHECK_EQ(result3.second, method3_offset + aligned_thunk_size + 1 /* thumb mode */);
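+ // The thunk is placed at the aligned offset where method3's header would otherwise
+ // start; the real header follows the thunk after any code-alignment padding.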
+ uint32_t thunk_end =
+ CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader), kThumb2) +
+ ThunkSize();
+ uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
+ CHECK_EQ(result3.second, header_offset + sizeof(OatQuickMethodHeader) + 1 /* thumb mode */);
return true; // Thunk present.
}
}
@@ -352,9 +355,12 @@
uint32_t method1_offset = GetMethodOffset(1u);
uint32_t method3_offset = GetMethodOffset(3u);
+ ASSERT_TRUE(IsAligned<kArmAlignment>(method3_offset));
uint32_t method3_header_offset = method3_offset - sizeof(OatQuickMethodHeader);
- ASSERT_TRUE(IsAligned<kArmAlignment>(method3_header_offset));
- uint32_t thunk_offset = method3_header_offset - CompiledCode::AlignCode(ThunkSize(), kThumb2);
+ uint32_t thunk_offset =
+ RoundDown(method3_header_offset - ThunkSize(), GetInstructionSetAlignment(kThumb2));
+ DCHECK_EQ(thunk_offset + ThunkSize() + CodeAlignmentSize(thunk_offset + ThunkSize()),
+ method3_header_offset);
ASSERT_TRUE(IsAligned<kArmAlignment>(thunk_offset));
uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1 + 4u /* PC adjustment */);
ASSERT_EQ(diff & 1u, 0u);
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index fdd14be..4c8788e 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -83,7 +83,7 @@
// Now that we have the actual offset where the code will be placed, locate the ADRP insns
// that actually require the thunk.
- uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
+ uint32_t quick_code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
uint32_t thunk_offset = compiled_method->AlignCode(quick_code_offset + code.size());
DCHECK(compiled_method != nullptr);
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 09729fd..573de73 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -67,36 +67,39 @@
const ArrayRef<const LinkerPatch>& last_method_patches,
uint32_t distance_without_thunks) {
CHECK_EQ(distance_without_thunks % kArm64Alignment, 0u);
- const uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
+ uint32_t method1_offset =
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
- const uint32_t gap_start =
- CompiledCode::AlignCode(method1_offset + method1_code.size(), kArm64);
+ const uint32_t gap_start = method1_offset + method1_code.size();
// We want to put method3 at a very precise offset.
const uint32_t last_method_offset = method1_offset + distance_without_thunks;
+ CHECK_ALIGNED(last_method_offset, kArm64Alignment);
const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader);
- CHECK_ALIGNED(gap_end, kArm64Alignment);
- // Fill the gap with intermediate methods in chunks of 2MiB and the last in [2MiB, 4MiB).
+ // Fill the gap with intermediate methods in chunks of 2MiB and the first in [2MiB, 4MiB).
// (This allows deduplicating the small chunks to avoid using 256MiB of memory for +-128MiB
- // offsets by this test.)
+ // offsets by this test. Making the first chunk bigger makes it easy to give all intermediate
+ // methods the same end alignment, so the thunk insertion adds a predictable size as long
+ // as it comes after the first chunk.)
uint32_t method_idx = 2u;
constexpr uint32_t kSmallChunkSize = 2 * MB;
std::vector<uint8_t> gap_code;
- size_t gap_size = gap_end - gap_start;
- for (; gap_size >= 2u * kSmallChunkSize; gap_size -= kSmallChunkSize) {
- uint32_t chunk_code_size = kSmallChunkSize - sizeof(OatQuickMethodHeader);
+ uint32_t gap_size = gap_end - gap_start;
+ uint32_t num_small_chunks = std::max(gap_size / kSmallChunkSize, 1u) - 1u;
+ uint32_t chunk_start = gap_start;
+ uint32_t chunk_size = gap_size - num_small_chunks * kSmallChunkSize;
+ for (uint32_t i = 0; i <= num_small_chunks; ++i) { // num_small_chunks+1 iterations.
+ uint32_t chunk_code_size =
+ chunk_size - CodeAlignmentSize(chunk_start) - sizeof(OatQuickMethodHeader);
gap_code.resize(chunk_code_size, 0u);
AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
ArrayRef<const LinkerPatch>());
method_idx += 1u;
+ chunk_start += chunk_size;
+ chunk_size = kSmallChunkSize; // For all but the first chunk.
+ DCHECK_EQ(CodeAlignmentSize(gap_end), CodeAlignmentSize(chunk_start));
}
- uint32_t chunk_code_size = gap_size - sizeof(OatQuickMethodHeader);
- gap_code.resize(chunk_code_size, 0u);
- AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
- ArrayRef<const LinkerPatch>());
- method_idx += 1u;
// Add the last method and link
AddCompiledMethod(MethodRef(method_idx), last_method_code, last_method_patches);
@@ -109,8 +112,9 @@
// There may be a thunk before method2.
if (last_result.second != last_method_offset) {
// Thunk present. Check that there's only one.
- uint32_t aligned_thunk_size = CompiledCode::AlignCode(ThunkSize(), kArm64);
- CHECK_EQ(last_result.second, last_method_offset + aligned_thunk_size);
+ uint32_t thunk_end = CompiledCode::AlignCode(gap_end, kArm64) + ThunkSize();
+ uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
+ CHECK_EQ(last_result.second, header_offset + sizeof(OatQuickMethodHeader));
}
return method_idx;
}
@@ -341,7 +345,7 @@
uint32_t dex_cache_arrays_begin,
uint32_t element_offset) {
uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
ASSERT_LT(method1_offset, adrp_offset);
CHECK_ALIGNED(adrp_offset, 4u);
uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
@@ -391,7 +395,7 @@
bool has_thunk,
uint32_t string_offset) {
uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
ASSERT_LT(method1_offset, adrp_offset);
CHECK_ALIGNED(adrp_offset, 4u);
uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
@@ -614,10 +618,12 @@
uint32_t method1_offset = GetMethodOffset(1u);
uint32_t last_method_offset = GetMethodOffset(last_method_idx);
+ ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_offset));
uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader);
- ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_header_offset));
- uint32_t thunk_offset = last_method_header_offset - CompiledCode::AlignCode(ThunkSize(), kArm64);
- ASSERT_TRUE(IsAligned<kArm64Alignment>(thunk_offset));
+ uint32_t thunk_offset =
+ RoundDown(last_method_header_offset - ThunkSize(), GetInstructionSetAlignment(kArm64));
+ DCHECK_EQ(thunk_offset + ThunkSize() + CodeAlignmentSize(thunk_offset + ThunkSize()),
+ last_method_header_offset);
uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
CHECK_ALIGNED(diff, 4u);
ASSERT_LT(diff, 128 * MB);
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index ec69107..d21f33e 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -98,6 +98,14 @@
patches));
}
+ uint32_t CodeAlignmentSize(uint32_t header_offset_to_align) {
+ // We want to align the code rather than the preheader.
+ uint32_t unaligned_code_offset = header_offset_to_align + sizeof(OatQuickMethodHeader);
+ uint32_t aligned_code_offset =
+ CompiledMethod::AlignCode(unaligned_code_offset, instruction_set_);
+ return aligned_code_offset - unaligned_code_offset;
+ }
+
void Link() {
// Reserve space.
static_assert(kTrampolineOffset == 0u, "Unexpected trampoline offset.");
@@ -106,9 +114,8 @@
for (auto& compiled_method : compiled_methods_) {
offset = patcher_->ReserveSpace(offset, compiled_method.get(), compiled_method_refs_[idx]);
- uint32_t aligned_offset = compiled_method->AlignCode(offset);
- uint32_t aligned_code_delta = aligned_offset - offset;
- offset += aligned_code_delta;
+ uint32_t alignment_size = CodeAlignmentSize(offset);
+ offset += alignment_size;
offset += sizeof(OatQuickMethodHeader);
uint32_t quick_code_offset = offset + compiled_method->CodeDelta();
@@ -136,11 +143,10 @@
for (auto& compiled_method : compiled_methods_) {
offset = patcher_->WriteThunks(&out_, offset);
- uint32_t aligned_offset = compiled_method->AlignCode(offset);
- uint32_t aligned_code_delta = aligned_offset - offset;
- CHECK_LE(aligned_code_delta, sizeof(kPadding));
- out_.WriteFully(kPadding, aligned_code_delta);
- offset += aligned_code_delta;
+ uint32_t alignment_size = CodeAlignmentSize(offset);
+ CHECK_LE(alignment_size, sizeof(kPadding));
+ out_.WriteFully(kPadding, alignment_size);
+ offset += alignment_size;
out_.WriteFully(dummy_header, sizeof(OatQuickMethodHeader));
offset += sizeof(OatQuickMethodHeader);
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index f20c715..8273b15 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -87,6 +87,13 @@
OatHeader* const oat_header_;
};
+inline uint32_t CodeAlignmentSize(uint32_t header_offset, const CompiledMethod& compiled_method) {
+ // We want to align the code rather than the preheader.
+ uint32_t unaligned_code_offset = header_offset + sizeof(OatQuickMethodHeader);
+ uint32_t aligned_code_offset = compiled_method.AlignCode(unaligned_code_offset);
+ return aligned_code_offset - unaligned_code_offset;
+}
+
} // anonymous namespace
// Defines the location of the raw dex file to write.
@@ -817,8 +824,8 @@
uint32_t thumb_offset) {
offset_ = writer_->relative_patcher_->ReserveSpace(
offset_, compiled_method, MethodReference(dex_file_, it.GetMemberIndex()));
- offset_ = compiled_method->AlignCode(offset_);
- DCHECK_ALIGNED_PARAM(offset_,
+ offset_ += CodeAlignmentSize(offset_, *compiled_method);
+ DCHECK_ALIGNED_PARAM(offset_ + sizeof(OatQuickMethodHeader),
GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
return offset_ + sizeof(OatQuickMethodHeader) + thumb_offset;
}
@@ -1011,17 +1018,16 @@
ReportWriteFailure("relative call thunk", it);
return false;
}
- uint32_t aligned_offset = compiled_method->AlignCode(offset_);
- uint32_t aligned_code_delta = aligned_offset - offset_;
- if (aligned_code_delta != 0) {
- if (!writer_->WriteCodeAlignment(out, aligned_code_delta)) {
+ uint32_t alignment_size = CodeAlignmentSize(offset_, *compiled_method);
+ if (alignment_size != 0) {
+ if (!writer_->WriteCodeAlignment(out, alignment_size)) {
ReportWriteFailure("code alignment padding", it);
return false;
}
- offset_ += aligned_code_delta;
+ offset_ += alignment_size;
DCHECK_OFFSET_();
}
- DCHECK_ALIGNED_PARAM(offset_,
+ DCHECK_ALIGNED_PARAM(offset_ + sizeof(OatQuickMethodHeader),
GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
DCHECK_EQ(method_offsets.code_offset_,
offset_ + sizeof(OatQuickMethodHeader) + compiled_method->CodeDelta())
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index c18b793..cd7a90e 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1289,6 +1289,44 @@
void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
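+// Emits a VCMP for a float/double comparison, using the compare-with-zero encoding
+// (vcmpsz/vcmpdz) when the right-hand side is the constant +0.0 or -0.0.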
+void InstructionCodeGeneratorARM::GenerateVcmp(HInstruction* instruction) {
+ Primitive::Type type = instruction->InputAt(0)->GetType();
+ Location lhs_loc = instruction->GetLocations()->InAt(0);
+ Location rhs_loc = instruction->GetLocations()->InAt(1);
+ if (rhs_loc.IsConstant()) {
+ // 0.0 is the only immediate that can be encoded directly in
+ // a VCMP instruction.
+ //
+ // Both the JLS (section 15.20.1) and the JVMS (section 6.5)
+ // specify that in a floating-point comparison, positive zero
+ // and negative zero are considered equal, so we can use the
+ // literal 0.0 for both cases here.
+ //
+ // Note however that some methods (Float.equals, Float.compare,
+ // Float.compareTo, Double.equals, Double.compare,
+ // Double.compareTo, Math.max, Math.min, StrictMath.max,
+ // StrictMath.min) consider 0.0 to be (strictly) greater than
+ // -0.0. So if we ever translate calls to these methods into a
+ // HCompare instruction, we must handle the -0.0 case with
+ // care here.
+ DCHECK(rhs_loc.GetConstant()->IsArithmeticZero());
+ if (type == Primitive::kPrimFloat) {
+ __ vcmpsz(lhs_loc.AsFpuRegister<SRegister>());
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ vcmpdz(FromLowSToD(lhs_loc.AsFpuRegisterPairLow<SRegister>()));
+ }
+ } else {
+ if (type == Primitive::kPrimFloat) {
+ __ vcmps(lhs_loc.AsFpuRegister<SRegister>(), rhs_loc.AsFpuRegister<SRegister>());
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ vcmpd(FromLowSToD(lhs_loc.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(rhs_loc.AsFpuRegisterPairLow<SRegister>()));
+ }
+ }
+}
+
void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label ATTRIBUTE_UNUSED) {
@@ -1389,22 +1427,14 @@
Label* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
Label* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
- LocationSummary* locations = condition->GetLocations();
- Location left = locations->InAt(0);
- Location right = locations->InAt(1);
-
Primitive::Type type = condition->InputAt(0)->GetType();
switch (type) {
case Primitive::kPrimLong:
GenerateLongComparesAndJumps(condition, true_target, false_target);
break;
case Primitive::kPrimFloat:
- __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
- GenerateFPJumps(condition, true_target, false_target);
- break;
case Primitive::kPrimDouble:
- __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
- FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
+ GenerateVcmp(condition);
GenerateFPJumps(condition, true_target, false_target);
break;
default:
@@ -1585,7 +1615,7 @@
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
if (!cond->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -1632,12 +1662,8 @@
GenerateLongComparesAndJumps(cond, &true_label, &false_label);
break;
case Primitive::kPrimFloat:
- __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
- GenerateFPJumps(cond, &true_label, &false_label);
- break;
case Primitive::kPrimDouble:
- __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
- FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
+ GenerateVcmp(cond);
GenerateFPJumps(cond, &true_label, &false_label);
break;
}
@@ -3654,7 +3680,7 @@
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, ArithmeticZeroOrFpuRegister(compare->InputAt(1)));
locations->SetOut(Location::RequiresRegister());
break;
}
@@ -3699,12 +3725,7 @@
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
__ LoadImmediate(out, 0);
- if (type == Primitive::kPrimFloat) {
- __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
- } else {
- __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
- FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
- }
+ GenerateVcmp(compare);
__ vmstat(); // transfer FP status register to ARM APSR.
less_cond = ARMFPCondition(kCondLT, compare->IsGtBias());
break;
@@ -3998,6 +4019,17 @@
}
}
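+// Returns a constant location when the input is an arithmetic zero (the only immediate
+// that VCMP can encode), and an FPU register requirement otherwise.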
+Location LocationsBuilderARM::ArithmeticZeroOrFpuRegister(HInstruction* input) {
+ DCHECK(input->GetType() == Primitive::kPrimDouble || input->GetType() == Primitive::kPrimFloat)
+ << input->GetType();
+ if ((input->IsFloatConstant() && (input->AsFloatConstant()->IsArithmeticZero())) ||
+ (input->IsDoubleConstant() && (input->AsDoubleConstant()->IsArithmeticZero()))) {
+ return Location::ConstantLocation(input->AsConstant());
+ } else {
+ return Location::RequiresFpuRegister();
+ }
+}
+
Location LocationsBuilderARM::ArmEncodableConstantOrRegister(HInstruction* constant,
Opcode opcode) {
DCHECK(!Primitive::IsFloatingPointType(constant->GetType()));
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index f9fcabd..fa7709b 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -180,6 +180,7 @@
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+ Location ArithmeticZeroOrFpuRegister(HInstruction* input);
Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);
bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode);
@@ -281,6 +282,7 @@
void GenerateCompareTestAndBranch(HCondition* condition,
Label* true_target,
Label* false_target);
+ void GenerateVcmp(HInstruction* instruction);
void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 240936c..1b5fa85 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -243,7 +243,7 @@
}
Arm64Assembler* GetAssembler() const { return assembler_; }
- vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
private:
void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
@@ -364,7 +364,7 @@
private:
Arm64Assembler* GetAssembler() const;
vixl::aarch64::MacroAssembler* GetVIXLAssembler() const {
- return GetAssembler()->vixl_masm_;
+ return GetAssembler()->GetVIXLAssembler();
}
CodeGeneratorARM64* const codegen_;
@@ -413,7 +413,7 @@
HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
- vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
// Emit a write barrier.
void MarkGCCard(vixl::aarch64::Register object,
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 06d1148..e3a9d27 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -26,7 +26,6 @@
#include "mirror/string.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
-#include "utils/arm64/constants_arm64.h"
using namespace vixl::aarch64; // NOLINT(build/namespaces)
@@ -62,14 +61,14 @@
} // namespace
MacroAssembler* IntrinsicCodeGeneratorARM64::GetVIXLAssembler() {
- return codegen_->GetAssembler()->vixl_masm_;
+ return codegen_->GetVIXLAssembler();
}
ArenaAllocator* IntrinsicCodeGeneratorARM64::GetAllocator() {
return codegen_->GetGraph()->GetArena();
}
-#define __ codegen->GetAssembler()->vixl_masm_->
+#define __ codegen->GetVIXLAssembler()->
static void MoveFromReturnRegister(Location trg,
Primitive::Type type,
@@ -782,7 +781,7 @@
DCHECK((type == Primitive::kPrimInt) ||
(type == Primitive::kPrimLong) ||
(type == Primitive::kPrimNot));
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Location base_loc = locations->InAt(1);
Register base = WRegisterFrom(base_loc); // Object pointer.
Location offset_loc = locations->InAt(2);
@@ -916,7 +915,7 @@
bool is_volatile,
bool is_ordered,
CodeGeneratorARM64* codegen) {
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
Register offset = XRegisterFrom(locations->InAt(2)); // Long offset.
@@ -1035,7 +1034,7 @@
}
static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM64* codegen) {
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Register out = WRegisterFrom(locations->Out()); // Boolean result.
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index d82caf5..dc1f24a 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -28,7 +28,7 @@
#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
-#define ___ vixl_masm_->
+#define ___ vixl_masm_.
#endif
void Arm64Assembler::FinalizeCode() {
@@ -39,16 +39,16 @@
}
size_t Arm64Assembler::CodeSize() const {
- return vixl_masm_->GetBufferCapacity() - vixl_masm_->GetRemainingBufferSpace();
+ return vixl_masm_.GetBufferCapacity() - vixl_masm_.GetRemainingBufferSpace();
}
const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
- return vixl_masm_->GetStartAddress<uint8_t*>();
+ return vixl_masm_.GetStartAddress<uint8_t*>();
}
void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
// Copy the instructions from the buffer.
- MemoryRegion from(vixl_masm_->GetStartAddress<void*>(), CodeSize());
+ MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
region.CopyFrom(0, from);
}
@@ -86,7 +86,7 @@
} else {
// temp = rd + value
// rd = cond ? temp : rn
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(rd), reg_x(rn));
Register temp = temps.AcquireX();
___ Add(temp, reg_x(rn), value);
@@ -183,7 +183,7 @@
}
void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset64 tr_offs) {
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
Register temp = temps.AcquireX();
___ Mov(temp, reg_x(SP));
___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
@@ -207,7 +207,7 @@
// temp = value
// rd = cond ? temp : rd
if (value != 0) {
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(dest));
Register temp = temps.AcquireX();
___ Mov(temp, value);
@@ -314,7 +314,7 @@
Arm64ManagedRegister base = m_base.AsArm64();
CHECK(dst.IsXRegister() && base.IsXRegister());
// Remove dst and base from the temp list - higher level API uses IP1, IP0.
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}
@@ -528,7 +528,7 @@
CHECK(base.IsXRegister()) << base;
CHECK(scratch.IsXRegister()) << scratch;
// Remove base and scratch from the temp list - higher level API uses IP1, IP0.
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
___ Br(reg_x(scratch.AsXRegister()));
@@ -621,7 +621,7 @@
}
void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
Register temp = temps.AcquireX();
@@ -653,7 +653,7 @@
void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
int size = registers.GetRegisterSizeInBytes();
- const Register sp = vixl_masm_->StackPointer();
+ const Register sp = vixl_masm_.StackPointer();
// Since we are operating on register pairs, we would like to align on
// double the standard size; on the other hand, we don't want to insert
// an extra store, which will happen if the number of registers is even.
@@ -681,7 +681,7 @@
void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
int size = registers.GetRegisterSizeInBytes();
- const Register sp = vixl_masm_->StackPointer();
+ const Register sp = vixl_masm_.StackPointer();
// Be consistent with the logic for spilling registers.
if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
const CPURegister& dst0 = registers.PopLowestIndex();
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 24b7982..b8434b9 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -23,7 +23,6 @@
#include "base/arena_containers.h"
#include "base/logging.h"
-#include "constants_arm64.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
#include "offsets.h"
@@ -84,16 +83,13 @@
class Arm64Assembler FINAL : public Assembler {
public:
- // We indicate the size of the initial code generation buffer to the VIXL
- // assembler. From there we it will automatically manage the buffer.
explicit Arm64Assembler(ArenaAllocator* arena)
: Assembler(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)),
- vixl_masm_(new vixl::aarch64::MacroAssembler(kArm64BaseBufferSize)) {}
+ exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
- virtual ~Arm64Assembler() {
- delete vixl_masm_;
- }
+ virtual ~Arm64Assembler() {}
+
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
// Finalize the code.
void FinalizeCode() OVERRIDE;
@@ -287,9 +283,8 @@
// List of exception blocks to generate at the end of the code cache.
ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
- public:
- // Vixl assembler.
- vixl::aarch64::MacroAssembler* const vixl_masm_;
+ // VIXL assembler.
+ vixl::aarch64::MacroAssembler vixl_masm_;
// Used for testing.
friend class Arm64ManagedRegister_VixlRegisters_Test;
diff --git a/compiler/utils/arm64/constants_arm64.h b/compiler/utils/arm64/constants_arm64.h
deleted file mode 100644
index 01e8be9..0000000
--- a/compiler/utils/arm64/constants_arm64.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
-#define ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
-
-#include <stdint.h>
-#include <iosfwd>
-#include "arch/arm64/registers_arm64.h"
-#include "base/casts.h"
-#include "base/logging.h"
-#include "globals.h"
-
-// TODO: Extend this file by adding missing functionality.
-
-namespace art {
-namespace arm64 {
-
-constexpr size_t kArm64BaseBufferSize = 4096;
-
-} // namespace arm64
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h
index f7d74d2..7378a0a 100644
--- a/compiler/utils/arm64/managed_register_arm64.h
+++ b/compiler/utils/arm64/managed_register_arm64.h
@@ -17,8 +17,8 @@
#ifndef ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_
#define ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_
+#include "arch/arm64/registers_arm64.h"
#include "base/logging.h"
-#include "constants_arm64.h"
#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 6c43e86..4f0e144 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -941,17 +941,11 @@
opcode << (op != 0 ? "vsqrt" : "vneg") << (S != 0 ? ".f64" : ".f32");
args << d << ", " << m;
} else if (op5 == 4) {
- opcode << "vcmp" << (S != 0 ? ".f64" : ".f32");
+ opcode << "vcmp" << ((op != 0) ? "e" : "") << (S != 0 ? ".f64" : ".f32");
args << d << ", " << m;
- if (op != 0) {
- args << " (quiet nan)";
- }
} else if (op5 == 5) {
- opcode << "vcmpe" << (S != 0 ? ".f64" : ".f32");
+ opcode << "vcmp" << ((op != 0) ? "e" : "") << (S != 0 ? ".f64" : ".f32");
args << d << ", #0.0";
- if (op != 0) {
- args << " (quiet nan)";
- }
if ((instr & 0x2f) != 0) {
args << " (UNPREDICTABLE)";
}
diff --git a/runtime/arch/mips/thread_mips.cc b/runtime/arch/mips/thread_mips.cc
index 06d6211..0a9ab7a 100644
--- a/runtime/arch/mips/thread_mips.cc
+++ b/runtime/arch/mips/thread_mips.cc
@@ -25,7 +25,7 @@
void Thread::InitCpu() {
CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k32>().Int32Value());
CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k32>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k64>().Int32Value());
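+ // Check against the 32-bit thread layout; this erroneously used the 64-bit offset.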
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k32>().Int32Value());
}
void Thread::CleanupCpu() {
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 16087a5..061babd 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -63,7 +63,9 @@
{'0', '3', '5', '\0'},
// Dex version 036 skipped because of an old dalvik bug on some versions of android where dex
// files with that version number would erroneously be accepted and run.
- {'0', '3', '7', '\0'}
+ {'0', '3', '7', '\0'},
+ // Dex version 038: Android "O" and beyond.
+ {'0', '3', '8', '\0'}
};
bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg) {
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 3dffe4b..2eca495 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -63,7 +63,7 @@
static const uint32_t kClassDefinitionOrderEnforcedVersion = 37;
static const uint8_t kDexMagic[];
- static constexpr size_t kNumDexVersions = 2;
+ static constexpr size_t kNumDexVersions = 3;
static constexpr size_t kDexVersionLen = 4;
static const uint8_t kDexMagicVersions[kNumDexVersions][kDexVersionLen];
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 4f8e6f1..616c2a0 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -133,8 +133,40 @@
"AAACAAAAQAEAAAEgAAACAAAAVAEAAAYgAAACAAAAiAEAAAEQAAABAAAAqAEAAAIgAAAPAAAArgEA"
"AAMgAAACAAAAiAIAAAQgAAADAAAAlAIAAAAgAAACAAAAqwIAAAAQAAABAAAAxAIAAA==";
-static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
- const char* location) {
+// kRawDex38 and 39 are dex'ed versions of the following Java source:
+//
+// public class Main {
+// public static void main(String[] foo) {
+// }
+// }
+//
+// The dex file was manually edited to change its dex version code to 38
+// or 39, respectively.
+static const char kRawDex38[] =
+ "ZGV4CjAzOAC4OovJlJ1089ikzK6asMf/f8qp3Kve5VsgAgAAcAAAAHhWNBIAAAAAAAAAAIwBAAAI"
+ "AAAAcAAAAAQAAACQAAAAAgAAAKAAAAAAAAAAAAAAAAMAAAC4AAAAAQAAANAAAAAwAQAA8AAAACIB"
+ "AAAqAQAAMgEAAEYBAABRAQAAVAEAAFgBAABtAQAAAQAAAAIAAAAEAAAABgAAAAQAAAACAAAAAAAA"
+ "AAUAAAACAAAAHAEAAAAAAAAAAAAAAAABAAcAAAABAAAAAAAAAAAAAAABAAAAAQAAAAAAAAADAAAA"
+ "AAAAAH4BAAAAAAAAAQABAAEAAABzAQAABAAAAHAQAgAAAA4AAQABAAAAAAB4AQAAAQAAAA4AAAAB"
+ "AAAAAwAGPGluaXQ+AAZMTWFpbjsAEkxqYXZhL2xhbmcvT2JqZWN0OwAJTWFpbi5qYXZhAAFWAAJW"
+ "TAATW0xqYXZhL2xhbmcvU3RyaW5nOwAEbWFpbgABAAcOAAMBAAcOAAAAAgAAgYAE8AEBCYgCDAAA"
+ "AAAAAAABAAAAAAAAAAEAAAAIAAAAcAAAAAIAAAAEAAAAkAAAAAMAAAACAAAAoAAAAAUAAAADAAAA"
+ "uAAAAAYAAAABAAAA0AAAAAEgAAACAAAA8AAAAAEQAAABAAAAHAEAAAIgAAAIAAAAIgEAAAMgAAAC"
+ "AAAAcwEAAAAgAAABAAAAfgEAAAAQAAABAAAAjAEAAA==";
+
+static const char kRawDex39[] =
+ "ZGV4CjAzOQC4OovJlJ1089ikzK6asMf/f8qp3Kve5VsgAgAAcAAAAHhWNBIAAAAAAAAAAIwBAAAI"
+ "AAAAcAAAAAQAAACQAAAAAgAAAKAAAAAAAAAAAAAAAAMAAAC4AAAAAQAAANAAAAAwAQAA8AAAACIB"
+ "AAAqAQAAMgEAAEYBAABRAQAAVAEAAFgBAABtAQAAAQAAAAIAAAAEAAAABgAAAAQAAAACAAAAAAAA"
+ "AAUAAAACAAAAHAEAAAAAAAAAAAAAAAABAAcAAAABAAAAAAAAAAAAAAABAAAAAQAAAAAAAAADAAAA"
+ "AAAAAH4BAAAAAAAAAQABAAEAAABzAQAABAAAAHAQAgAAAA4AAQABAAAAAAB4AQAAAQAAAA4AAAAB"
+ "AAAAAwAGPGluaXQ+AAZMTWFpbjsAEkxqYXZhL2xhbmcvT2JqZWN0OwAJTWFpbi5qYXZhAAFWAAJW"
+ "TAATW0xqYXZhL2xhbmcvU3RyaW5nOwAEbWFpbgABAAcOAAMBAAcOAAAAAgAAgYAE8AEBCYgCDAAA"
+ "AAAAAAABAAAAAAAAAAEAAAAIAAAAcAAAAAIAAAAEAAAAkAAAAAMAAAACAAAAoAAAAAUAAAADAAAA"
+ "uAAAAAYAAAABAAAA0AAAAAEgAAACAAAA8AAAAAEQAAABAAAAHAEAAAIgAAAIAAAAIgEAAAMgAAAC"
+ "AAAAcwEAAAAgAAABAAAAfgEAAAAQAAABAAAAjAEAAA==";
+
+static void DecodeAndWriteDexFile(const char* base64, const char* location) {
// decode base64
CHECK(base64 != nullptr);
size_t length;
@@ -150,7 +182,11 @@
if (file->FlushCloseOrErase() != 0) {
PLOG(FATAL) << "Could not flush and close test file.";
}
- file.reset();
+}
+
+static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
+ const char* location) {
+ DecodeAndWriteDexFile(base64, location);
// read dex file
ScopedObjectAccess soa(Thread::Current());
@@ -197,6 +233,27 @@
EXPECT_EQ(header.checksum_, raw->GetLocationChecksum());
}
+TEST_F(DexFileTest, Version38Accepted) {
+ ScratchFile tmp;
+ std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kRawDex38, tmp.GetFilename().c_str()));
+ ASSERT_TRUE(raw.get() != nullptr);
+
+ const DexFile::Header& header = raw->GetHeader();
+ EXPECT_EQ(38u, header.GetVersion());
+}
+
+TEST_F(DexFileTest, Version39Rejected) {
+ ScratchFile tmp;
+ const char* location = tmp.GetFilename().c_str();
+ DecodeAndWriteDexFile(kRawDex39, location);
+
+ ScopedObjectAccess soa(Thread::Current());
+ static constexpr bool kVerifyChecksum = true;
+ std::string error_msg;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+}
+
TEST_F(DexFileTest, GetLocationChecksum) {
ScopedObjectAccess soa(Thread::Current());
std::unique_ptr<const DexFile> raw(OpenTestDexFile("Main"));
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 8f8b667..8d7d70d 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -26,7 +26,8 @@
# The path where build-only targets will be output, e.g.
# out/target/product/generic_x86_64/obj/PACKAGING/art-run-tests_intermediates/DATA
-art_run_tests_dir := $(call intermediates-dir-for,PACKAGING,art-run-tests)/DATA
+art_run_tests_build_dir := $(call intermediates-dir-for,JAVA_LIBRARIES,art-run-tests)/DATA
+art_run_tests_install_dir := $(call intermediates-dir-for,PACKAGING,art-run-tests)/DATA
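+# Tests are built into the JAVA_LIBRARIES intermediates and then copied into the PACKAGING
+# intermediates that LOCAL_PICKUP_FILES references below.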
# A generated list of prerequisites that call 'run-test --build-only'; the actual prerequisite is
# an empty file touched in the intermediate directory.
@@ -49,7 +50,8 @@
# Helper to create individual build targets for tests. Must be called with $(eval).
# $(1): the test number
define define-build-art-run-test
- dmart_target := $(art_run_tests_dir)/art-run-tests/$(1)/touch
+ dmart_target := $(art_run_tests_build_dir)/art-run-tests/$(1)/touch
+ dmart_install_target := $(art_run_tests_install_dir)/art-run-tests/$(1)/touch
run_test_options = --build-only
ifeq ($(ART_TEST_QUIET),true)
run_test_options += --quiet
@@ -67,8 +69,13 @@
$(LOCAL_PATH)/run-test $$(PRIVATE_RUN_TEST_OPTIONS) --output-path $$(abspath $$(dir $$@)) $(1)
$(hide) touch $$@
- TEST_ART_RUN_TEST_BUILD_RULES += $$(dmart_target)
+$$(dmart_install_target): $$(dmart_target)
+ $(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@)
+ $(hide) cp $$(dir $$<)/* $$(dir $$@)/
+
+ TEST_ART_RUN_TEST_BUILD_RULES += $$(dmart_install_target)
dmart_target :=
+ dmart_install_target :=
run_test_options :=
endef
$(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call define-build-art-run-test,$(test))))
@@ -78,12 +85,13 @@
LOCAL_MODULE := art-run-tests
LOCAL_ADDITIONAL_DEPENDENCIES := $(TEST_ART_RUN_TEST_BUILD_RULES)
# The build system uses this flag to pick up files generated by declare-make-art-run-test.
-LOCAL_PICKUP_FILES := $(art_run_tests_dir)
+LOCAL_PICKUP_FILES := $(art_run_tests_install_dir)
include $(BUILD_PHONY_PACKAGE)
# Clear temp vars.
-art_run_tests_dir :=
+art_run_tests_build_dir :=
+art_run_tests_install_dir :=
define-build-art-run-test :=
TEST_ART_RUN_TEST_BUILD_RULES :=