summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--compiler/dex/gvn_dead_code_elimination.cc18
-rw-r--r--compiler/dex/gvn_dead_code_elimination_test.cc105
-rw-r--r--compiler/dex/quick/mips/target_mips.cc49
-rw-r--r--compiler/image_writer.cc10
-rw-r--r--compiler/image_writer.h3
-rw-r--r--compiler/optimizing/code_generator_arm.cc3
-rw-r--r--compiler/optimizing/code_generator_arm64.cc4
-rw-r--r--compiler/optimizing/code_generator_mips64.cc3
-rw-r--r--compiler/optimizing/code_generator_x86.cc6
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc3
-rw-r--r--compiler/optimizing/inliner.cc6
-rw-r--r--compiler/optimizing/nodes.h2
-rw-r--r--runtime/class_linker.cc5
-rw-r--r--runtime/gc/heap.cc2
-rw-r--r--runtime/jit/jit_code_cache_test.cc2
-rw-r--r--runtime/length_prefixed_array.h10
-rw-r--r--runtime/mirror/class.h3
-rw-r--r--runtime/stride_iterator.h13
-rw-r--r--runtime/verifier/reg_type_cache.cc4
-rw-r--r--test/800-smali/smali/b_22881413.smali4
20 files changed, 207 insertions, 48 deletions
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index d29b865ce9..4de3410616 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -715,6 +715,7 @@ void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_
// Try to find a MOVE to a vreg that wasn't changed since check_change.
uint16_t value_name =
data->wide_def ? lvn_->GetSregValueWide(dest_s_reg) : lvn_->GetSregValue(dest_s_reg);
+ uint32_t dest_v_reg = mir_graph_->SRegToVReg(dest_s_reg);
for (size_t c = check_change + 1u, size = vreg_chains_.NumMIRs(); c != size; ++c) {
MIRData* d = vreg_chains_.GetMIRData(c);
if (d->is_move && d->wide_def == data->wide_def &&
@@ -731,8 +732,21 @@ void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_
if (!vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg, mir_graph_) &&
(!d->wide_def ||
!vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg + 1, mir_graph_))) {
- RecordPassKillMoveByRenamingSrcDef(check_change, c);
- return;
+ // If the move's destination vreg changed, check if the vreg we're trying
+ // to rename is unused after that change.
+ uint16_t dest_change = vreg_chains_.FindFirstChangeAfter(new_dest_v_reg, c);
+ if (d->wide_def) {
+ uint16_t dest_change_high = vreg_chains_.FindFirstChangeAfter(new_dest_v_reg + 1, c);
+ if (dest_change_high != kNPos &&
+ (dest_change == kNPos || dest_change_high < dest_change)) {
+ dest_change = dest_change_high;
+ }
+ }
+ if (dest_change == kNPos ||
+ !vreg_chains_.IsVRegUsed(dest_change + 1u, size, dest_v_reg, mir_graph_)) {
+ RecordPassKillMoveByRenamingSrcDef(check_change, c);
+ return;
+ }
}
}
}
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
index 6ba91b64b6..4df0a8b98d 100644
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -1933,6 +1933,78 @@ TEST_F(GvnDeadCodeEliminationTestDiamond, LongOverlaps1) {
}
}
+TEST_F(GvnDeadCodeEliminationTestSimple, LongOverlaps2) {
+ static const MIRDef mirs[] = {
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
+ DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
+ DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 4u, 2u),
+ };
+
+ // The last insn should overlap the first and second.
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 1, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 0, 2, 4 };
+ MarkAsWideSRegs(wide_sregs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ EXPECT_EQ(value_names_[0], value_names_[1]);
+ EXPECT_EQ(value_names_[0], value_names_[2]);
+
+ static const bool eliminated[] = {
+ false, true, true,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the CONST_WIDE registers have been correctly renamed.
+ MIR* const_wide = &mirs_[0];
+ ASSERT_EQ(2u, const_wide->ssa_rep->num_defs);
+ EXPECT_EQ(4, const_wide->ssa_rep->defs[0]);
+ EXPECT_EQ(5, const_wide->ssa_rep->defs[1]);
+ EXPECT_EQ(1u, const_wide->dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, LongOverlaps3) {
+ static const MIRDef mirs[] = {
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
+ DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
+ DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 4u, 2u),
+ };
+
+ // The last insn should overlap the first and second.
+ static const int32_t sreg_to_vreg_map[] = { 2, 3, 0, 1, 1, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 0, 2, 4 };
+ MarkAsWideSRegs(wide_sregs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ EXPECT_EQ(value_names_[0], value_names_[1]);
+ EXPECT_EQ(value_names_[0], value_names_[2]);
+
+ static const bool eliminated[] = {
+ false, true, true,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that the CONST_WIDE registers have been correctly renamed.
+ MIR* const_wide = &mirs_[0];
+ ASSERT_EQ(2u, const_wide->ssa_rep->num_defs);
+ EXPECT_EQ(4, const_wide->ssa_rep->defs[0]);
+ EXPECT_EQ(5, const_wide->ssa_rep->defs[1]);
+ EXPECT_EQ(1u, const_wide->dalvikInsn.vA);
+}
+
TEST_F(GvnDeadCodeEliminationTestSimple, MixedOverlaps1) {
static const MIRDef mirs[] = {
DEF_CONST(3, Instruction::CONST, 0u, 1000u),
@@ -2093,4 +2165,37 @@ TEST_F(GvnDeadCodeEliminationTestSimple, ArrayLengthThrows) {
}
}
+TEST_F(GvnDeadCodeEliminationTestSimple, Dependency) {
+ static const MIRDef mirs[] = {
+ DEF_MOVE(3, Instruction::MOVE, 5u, 1u), // move v5,v1
+ DEF_MOVE(3, Instruction::MOVE, 6u, 1u), // move v12,v1
+ DEF_MOVE(3, Instruction::MOVE, 7u, 0u), // move v13,v0
+ DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 8u, 2u), // move v0_1,v2_3
+ DEF_MOVE(3, Instruction::MOVE, 10u, 6u), // move v3,v12
+ DEF_MOVE(3, Instruction::MOVE, 11u, 4u), // move v2,v4
+ DEF_MOVE(3, Instruction::MOVE, 12u, 7u), // move v4,v13
+ DEF_MOVE(3, Instruction::MOVE, 13, 11u), // move v12,v2
+ DEF_MOVE(3, Instruction::MOVE, 14u, 10u), // move v2,v3
+ DEF_MOVE(3, Instruction::MOVE, 15u, 5u), // move v3,v5
+ DEF_MOVE(3, Instruction::MOVE, 16u, 12u), // move v5,v4
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 12, 13, 0, 1, 3, 2, 4, 12, 2, 3, 5 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 2, 8 };
+ MarkAsWideSRegs(wide_sregs);
+ PerformGVN_DCE();
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false, false, true, true, false, false,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+}
+
} // namespace art
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index b098bc2b5d..ec4bad778c 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -49,9 +49,11 @@ static constexpr RegStorage reserved_regs_arr_32[] =
static constexpr RegStorage core_temps_arr_32[] =
{rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32, rs_rT2_32, rs_rT3_32,
rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rT8};
-static constexpr RegStorage sp_temps_arr_32[] =
+static constexpr RegStorage sp_fr0_temps_arr_32[] =
{rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
+static constexpr RegStorage sp_fr1_temps_arr_32[] =
+ {rs_rF0, rs_rF2, rs_rF4, rs_rF6, rs_rF8, rs_rF10, rs_rF12, rs_rF14};
static constexpr RegStorage dp_fr0_temps_arr_32[] =
{rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
rs_rD7_fr0};
@@ -130,7 +132,8 @@ static constexpr ArrayRef<const RegStorage> dp_fr0_regs_32(dp_fr0_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_fr1_regs_32(dp_fr1_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
-static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> sp_fr0_temps_32(sp_fr0_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> sp_fr1_temps_32(sp_fr1_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_fr0_temps_32(dp_fr0_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_fr1_temps_32(dp_fr1_temps_arr_32);
@@ -591,22 +594,22 @@ void MipsMir2Lir::ClobberCallerSave() {
Clobber(rs_rFP);
Clobber(rs_rRA);
Clobber(rs_rF0);
- Clobber(rs_rF1);
Clobber(rs_rF2);
- Clobber(rs_rF3);
Clobber(rs_rF4);
- Clobber(rs_rF5);
Clobber(rs_rF6);
- Clobber(rs_rF7);
Clobber(rs_rF8);
- Clobber(rs_rF9);
Clobber(rs_rF10);
- Clobber(rs_rF11);
Clobber(rs_rF12);
- Clobber(rs_rF13);
Clobber(rs_rF14);
- Clobber(rs_rF15);
if (fpuIs32Bit_) {
+ Clobber(rs_rF1);
+ Clobber(rs_rF3);
+ Clobber(rs_rF5);
+ Clobber(rs_rF7);
+ Clobber(rs_rF9);
+ Clobber(rs_rF11);
+ Clobber(rs_rF13);
+ Clobber(rs_rF15);
Clobber(rs_rD0_fr0);
Clobber(rs_rD1_fr0);
Clobber(rs_rD2_fr0);
@@ -717,24 +720,26 @@ void MipsMir2Lir::CompilerInitializeRegAlloc() {
fpuIs32Bit_ ? dp_fr0_regs_32 : dp_fr1_regs_32,
reserved_regs_32, empty_pool, // reserved64
core_temps_32, empty_pool, // core64_temps
- sp_temps_32,
+ fpuIs32Bit_ ? sp_fr0_temps_32 : sp_fr1_temps_32,
fpuIs32Bit_ ? dp_fr0_temps_32 : dp_fr1_temps_32));
// Alias single precision floats to appropriate half of overlapping double.
for (RegisterInfo* info : reg_pool_->sp_regs_) {
int sp_reg_num = info->GetReg().GetRegNum();
int dp_reg_num = sp_reg_num & ~1;
- RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
- RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
- // Double precision register's master storage should refer to itself.
- DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
- // Redirect single precision's master storage to master.
- info->SetMaster(dp_reg_info);
- // Singles should show a single 32-bit mask bit, at first referring to the low half.
- DCHECK_EQ(info->StorageMask(), 0x1U);
- if (sp_reg_num & 1) {
- // For odd singles, change to user the high word of the backing double.
- info->SetStorageMask(0x2);
+ if (fpuIs32Bit_ || (sp_reg_num == dp_reg_num)) {
+ RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+ RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+ // Double precision register's master storage should refer to itself.
+ DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+ // Redirect single precision's master storage to master.
+ info->SetMaster(dp_reg_info);
+ // Singles should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ if (sp_reg_num & 1) {
+      // For odd singles, change to use the high word of the backing double.
+ info->SetStorageMask(0x2);
+ }
}
}
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 4ce3129f55..a03ff755ab 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -972,8 +972,10 @@ void ImageWriter::CalculateNewObjectOffsets() {
size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
native_object_relocations_.emplace(&image_method_array_,
NativeObjectRelocation { offset, image_method_type });
- CHECK_EQ(sizeof(image_method_array_), 8u);
- offset += sizeof(image_method_array_);
+ const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize(
+ 0, ArtMethod::ObjectSize(target_ptr_size_));
+ CHECK_ALIGNED(array_size, 8u);
+ offset += array_size;
for (auto* m : image_methods_) {
CHECK(m != nullptr);
CHECK(m->IsRuntimeMethod());
@@ -1203,7 +1205,7 @@ void ImageWriter::FixupPointerArray(mirror::Object* dst, mirror::PointerArray* a
if (elem != nullptr) {
auto it = native_object_relocations_.find(elem);
if (it == native_object_relocations_.end()) {
- if (true) {
+ if (it->second.IsArtMethodRelocation()) {
auto* method = reinterpret_cast<ArtMethod*>(elem);
LOG(FATAL) << "No relocation entry for ArtMethod " << PrettyMethod(method) << " @ "
<< method << " idx=" << i << "/" << num_elements << " with declaring class "
@@ -1300,8 +1302,8 @@ void* ImageWriter::NativeLocationInImage(void* obj) {
return nullptr;
}
auto it = native_object_relocations_.find(obj);
- const NativeObjectRelocation& relocation = it->second;
CHECK(it != native_object_relocations_.end()) << obj;
+ const NativeObjectRelocation& relocation = it->second;
return reinterpret_cast<void*>(image_begin_ + relocation.offset);
}
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index eb6aa6f346..f4e10cc6ea 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -381,7 +381,8 @@ class ImageWriter FINAL {
// Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
- // Fake length prefixed array for image methods.
+ // Fake length prefixed array for image methods. This array does not contain the actual
+ // ArtMethods. We only use it for the header and relocation addresses.
LengthPrefixedArray<ArtMethod> image_method_array_;
// Counters for measurements, used for logging only.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index d89d2b2dda..6c0292c551 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2789,6 +2789,9 @@ void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction)
Location value = locations->InAt(0);
switch (instruction->GetType()) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
case Primitive::kPrimInt: {
if (value.IsRegister()) {
__ CompareAndBranchIfZero(value.AsRegister<Register>(), slow_path->GetEntryLabel());
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 7fab5cfcaf..b44c5ba9f8 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2012,8 +2012,8 @@ void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction
Primitive::Type type = instruction->GetType();
- if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
- LOG(FATAL) << "Unexpected type " << type << "for DivZeroCheck.";
+ if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
+ LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
return;
}
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index b6d67de181..b6ebeb4977 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1908,8 +1908,9 @@ void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instructio
Primitive::Type type = instruction->GetType();
- if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
+ if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
+ return;
}
if (value.IsConstant()) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 5ffab33190..4efdbb922e 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2995,6 +2995,9 @@ void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
switch (instruction->GetType()) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
case Primitive::kPrimInt: {
locations->SetInAt(0, Location::Any());
break;
@@ -3022,6 +3025,9 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction)
Location value = locations->InAt(0);
switch (instruction->GetType()) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
case Primitive::kPrimInt: {
if (value.IsRegister()) {
__ testl(value.AsRegister<Register>(), value.AsRegister<Register>());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 2b5fcbd71c..6991414e62 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -3179,6 +3179,9 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio
Location value = locations->InAt(0);
switch (instruction->GetType()) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
case Primitive::kPrimInt: {
if (value.IsRegister()) {
__ testl(value.AsRegister<CpuRegister>(), value.AsRegister<CpuRegister>());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 01065959d8..4c746798be 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -276,12 +276,12 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
nullptr,
caller_compilation_unit_.GetClassLoader(),
class_linker,
- *resolved_method->GetDexFile(),
+ callee_dex_file,
code_item,
resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
- resolved_method->GetDexMethodIndex(),
+ method_index,
resolved_method->GetAccessFlags(),
- nullptr);
+ compiler_driver_->GetVerifiedMethod(&callee_dex_file, method_index));
bool requires_ctor_barrier = false;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 814cebb99d..ca2c9989b0 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3298,6 +3298,8 @@ class HDivZeroCheck : public HExpression<1> {
SetRawInputAt(0, value);
}
+ Primitive::Type GetType() const OVERRIDE { return InputAt(0)->GetType(); }
+
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 48dc88d2b1..0886e327d9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -4973,8 +4973,8 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
LengthPrefixedArray<ArtMethod>::ComputeSize(old_method_count, method_size) : 0u;
const size_t new_size = LengthPrefixedArray<ArtMethod>::ComputeSize(new_method_count,
method_size);
- auto* virtuals = new(runtime->GetLinearAlloc()->Realloc(
- self, old_virtuals, old_size, new_size))LengthPrefixedArray<ArtMethod>(new_method_count);
+ auto* virtuals = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
+ runtime->GetLinearAlloc()->Realloc(self, old_virtuals, old_size, new_size));
if (UNLIKELY(virtuals == nullptr)) {
self->AssertPendingOOMException();
self->EndAssertNoThreadSuspension(old_cause);
@@ -5002,6 +5002,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
move_table.emplace(mir_method, &*out);
++out;
}
+ virtuals->SetLength(new_method_count);
UpdateClassVirtualMethods(klass.Get(), virtuals);
// Done copying methods, they are all roots in the class now, so we can end the no thread
// suspension assert.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 59e39df9ee..e9d9065e56 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2912,7 +2912,7 @@ class VerifyReferenceCardVisitor {
if (!obj->IsObjectArray()) {
mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
CHECK(klass != nullptr);
- for (ArtField& field : is_static ? klass->GetSFields() : klass->GetIFields()) {
+ for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
if (field.GetOffset().Int32Value() == offset.Int32Value()) {
LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
<< PrettyField(&field);
diff --git a/runtime/jit/jit_code_cache_test.cc b/runtime/jit/jit_code_cache_test.cc
index 555ad7c1d3..a6cbb710af 100644
--- a/runtime/jit/jit_code_cache_test.cc
+++ b/runtime/jit/jit_code_cache_test.cc
@@ -50,7 +50,7 @@ TEST_F(JitCodeCacheTest, TestCoverage) {
ASSERT_TRUE(code_cache->ContainsCodePtr(reserved_code));
ASSERT_EQ(code_cache->NumMethods(), 1u);
ClassLinker* const cl = Runtime::Current()->GetClassLinker();
- ArtMethod* method = &cl->AllocArtMethodArray(soa.Self(), 1)->At(0, 0);
+ ArtMethod* method = &cl->AllocArtMethodArray(soa.Self(), 1)->At(0);
ASSERT_FALSE(code_cache->ContainsMethod(method));
method->SetEntryPointFromQuickCompiledCode(reserved_code);
ASSERT_TRUE(code_cache->ContainsMethod(method));
diff --git a/runtime/length_prefixed_array.h b/runtime/length_prefixed_array.h
index 82176e376d..2b2e8d34d2 100644
--- a/runtime/length_prefixed_array.h
+++ b/runtime/length_prefixed_array.h
@@ -48,16 +48,22 @@ class LengthPrefixedArray {
return offsetof(LengthPrefixedArray<T>, data_) + index * element_size;
}
+ // Alignment is the caller's responsibility.
static size_t ComputeSize(size_t num_elements, size_t element_size = sizeof(T)) {
- return sizeof(LengthPrefixedArray<T>) + num_elements * element_size;
+ return OffsetOfElement(num_elements, element_size);
}
uint64_t Length() const {
return length_;
}
+  // Updates the length but does not reallocate storage.
+ void SetLength(uint64_t length) {
+ length_ = length;
+ }
+
private:
- uint64_t length_; // 64 bits for padding reasons.
+ uint64_t length_; // 64 bits for 8 byte alignment of data_.
uint8_t data_[0];
};
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index f13893658c..513ab37033 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1205,7 +1205,8 @@ class MANAGED Class FINAL : public Object {
// listed in ifields; fields declared by a superclass are listed in
// the superclass's Class.ifields.
//
- // ArtField arrays are allocated as an array of fields, and not an array of fields pointers.
+ // ArtFields are allocated as a length prefixed ArtField array, and not an array of pointers to
+ // ArtFields.
uint64_t ifields_;
// Static fields length-prefixed array.
diff --git a/runtime/stride_iterator.h b/runtime/stride_iterator.h
index c69f30ed5f..a9da51ba29 100644
--- a/runtime/stride_iterator.h
+++ b/runtime/stride_iterator.h
@@ -31,7 +31,7 @@ class StrideIterator : public std::iterator<std::forward_iterator_tag, T> {
StrideIterator(T* ptr, size_t stride)
: ptr_(reinterpret_cast<uintptr_t>(ptr)),
- stride_(reinterpret_cast<uintptr_t>(stride)) {}
+ stride_(stride) {}
bool operator==(const StrideIterator& other) const {
DCHECK_EQ(stride_, other.stride_);
@@ -48,17 +48,22 @@ class StrideIterator : public std::iterator<std::forward_iterator_tag, T> {
}
StrideIterator operator++(int) {
- auto temp = *this;
+ StrideIterator<T> temp = *this;
ptr_ += stride_;
return temp;
}
StrideIterator operator+(ssize_t delta) const {
- auto temp = *this;
- temp.ptr_ += static_cast<ssize_t>(stride_) * delta;
+ StrideIterator<T> temp = *this;
+ temp += delta;
return temp;
}
+ StrideIterator& operator+=(ssize_t delta) {
+ ptr_ += static_cast<ssize_t>(stride_) * delta;
+ return *this;
+ }
+
T& operator*() const {
return *reinterpret_cast<T*>(ptr_);
}
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 5e1feb808f..e14306c0ae 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -325,7 +325,7 @@ const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegT
const UnresolvedMergedType* left_merge = down_cast<const UnresolvedMergedType*>(&left);
types.Copy(&left_merge->GetUnresolvedTypes());
left_resolved = &left_merge->GetResolvedPart();
- } else if (left.IsUnresolvedReference()) {
+ } else if (left.IsUnresolvedTypes()) {
types.SetBit(left.GetId());
left_resolved = &Zero();
} else {
@@ -337,7 +337,7 @@ const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegT
const UnresolvedMergedType* right_merge = down_cast<const UnresolvedMergedType*>(&right);
types.Union(&right_merge->GetUnresolvedTypes());
right_resolved = &right_merge->GetResolvedPart();
- } else if (right.IsUnresolvedReference()) {
+ } else if (right.IsUnresolvedTypes()) {
types.SetBit(right.GetId());
right_resolved = &Zero();
} else {
diff --git a/test/800-smali/smali/b_22881413.smali b/test/800-smali/smali/b_22881413.smali
index f624734353..29dd82a358 100644
--- a/test/800-smali/smali/b_22881413.smali
+++ b/test/800-smali/smali/b_22881413.smali
@@ -47,6 +47,10 @@
# v8 = int, v9 = boolean, v10 = boolean
sget-object v0, LB22881413;->unresBase0:La/b/c/dBase0;
+
+# Test an UnresolvedUninitializedReference type.
+ new-instance v0, La/b/c/dBaseInit;
+
const v1, 0
const v2, 0