Diffstat (limited to 'compiler')
-rw-r--r-- | compiler/dex/gvn_dead_code_elimination.cc      |  18
-rw-r--r-- | compiler/dex/gvn_dead_code_elimination_test.cc | 105
-rw-r--r-- | compiler/dex/quick/mips/target_mips.cc         |  49
-rw-r--r-- | compiler/image_writer.cc                       |  10
-rw-r--r-- | compiler/image_writer.h                        |   3
-rw-r--r-- | compiler/optimizing/code_generator_arm.cc      |   3
-rw-r--r-- | compiler/optimizing/code_generator_arm64.cc    |   4
-rw-r--r-- | compiler/optimizing/code_generator_mips64.cc   |   3
-rw-r--r-- | compiler/optimizing/code_generator_x86.cc      |   6
-rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc   |   3
-rw-r--r-- | compiler/optimizing/inliner.cc                 |   6
-rw-r--r-- | compiler/optimizing/nodes.h                    |   2
12 files changed, 177 insertions, 35 deletions
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index d29b865ce9..4de3410616 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -715,6 +715,7 @@ void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_
   // Try to find a MOVE to a vreg that wasn't changed since check_change.
   uint16_t value_name =
       data->wide_def ? lvn_->GetSregValueWide(dest_s_reg) : lvn_->GetSregValue(dest_s_reg);
+  uint32_t dest_v_reg = mir_graph_->SRegToVReg(dest_s_reg);
   for (size_t c = check_change + 1u, size = vreg_chains_.NumMIRs(); c != size; ++c) {
     MIRData* d = vreg_chains_.GetMIRData(c);
     if (d->is_move && d->wide_def == data->wide_def &&
@@ -731,8 +732,21 @@ void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_
       if (!vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg, mir_graph_) &&
          (!d->wide_def ||
           !vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg + 1, mir_graph_))) {
-        RecordPassKillMoveByRenamingSrcDef(check_change, c);
-        return;
+        // If the move's destination vreg changed, check if the vreg we're trying
+        // to rename is unused after that change.
+        uint16_t dest_change = vreg_chains_.FindFirstChangeAfter(new_dest_v_reg, c);
+        if (d->wide_def) {
+          uint16_t dest_change_high = vreg_chains_.FindFirstChangeAfter(new_dest_v_reg + 1, c);
+          if (dest_change_high != kNPos &&
+              (dest_change == kNPos || dest_change_high < dest_change)) {
+            dest_change = dest_change_high;
+          }
+        }
+        if (dest_change == kNPos ||
+            !vreg_chains_.IsVRegUsed(dest_change + 1u, size, dest_v_reg, mir_graph_)) {
+          RecordPassKillMoveByRenamingSrcDef(check_change, c);
+          return;
+        }
       }
     }
   }
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
index 6ba91b64b6..4df0a8b98d 100644
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -1933,6 +1933,78 @@ TEST_F(GvnDeadCodeEliminationTestDiamond, LongOverlaps1) {
   }
 }
 
+TEST_F(GvnDeadCodeEliminationTestSimple, LongOverlaps2) {
+  static const MIRDef mirs[] = {
+      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
+      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
+      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 4u, 2u),
+  };
+
+  // The last insn should overlap the first and second.
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 1, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareMIRs(mirs);
+  static const int32_t wide_sregs[] = { 0, 2, 4 };
+  MarkAsWideSRegs(wide_sregs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  EXPECT_EQ(value_names_[0], value_names_[1]);
+  EXPECT_EQ(value_names_[0], value_names_[2]);
+
+  static const bool eliminated[] = {
+      false, true, true,
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the CONST_WIDE registers have been correctly renamed.
+  MIR* const_wide = &mirs_[0];
+  ASSERT_EQ(2u, const_wide->ssa_rep->num_defs);
+  EXPECT_EQ(4, const_wide->ssa_rep->defs[0]);
+  EXPECT_EQ(5, const_wide->ssa_rep->defs[1]);
+  EXPECT_EQ(1u, const_wide->dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, LongOverlaps3) {
+  static const MIRDef mirs[] = {
+      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
+      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
+      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 4u, 2u),
+  };
+
+  // The last insn should overlap the first and second.
+  static const int32_t sreg_to_vreg_map[] = { 2, 3, 0, 1, 1, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareMIRs(mirs);
+  static const int32_t wide_sregs[] = { 0, 2, 4 };
+  MarkAsWideSRegs(wide_sregs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  EXPECT_EQ(value_names_[0], value_names_[1]);
+  EXPECT_EQ(value_names_[0], value_names_[2]);
+
+  static const bool eliminated[] = {
+      false, true, true,
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the CONST_WIDE registers have been correctly renamed.
+  MIR* const_wide = &mirs_[0];
+  ASSERT_EQ(2u, const_wide->ssa_rep->num_defs);
+  EXPECT_EQ(4, const_wide->ssa_rep->defs[0]);
+  EXPECT_EQ(5, const_wide->ssa_rep->defs[1]);
+  EXPECT_EQ(1u, const_wide->dalvikInsn.vA);
+}
+
 TEST_F(GvnDeadCodeEliminationTestSimple, MixedOverlaps1) {
   static const MIRDef mirs[] = {
       DEF_CONST(3, Instruction::CONST, 0u, 1000u),
@@ -2093,4 +2165,37 @@ TEST_F(GvnDeadCodeEliminationTestSimple, ArrayLengthThrows) {
   }
 }
 
+TEST_F(GvnDeadCodeEliminationTestSimple, Dependancy) {
+  static const MIRDef mirs[] = {
+      DEF_MOVE(3, Instruction::MOVE, 5u, 1u),             // move v5,v1
+      DEF_MOVE(3, Instruction::MOVE, 6u, 1u),             // move v12,v1
+      DEF_MOVE(3, Instruction::MOVE, 7u, 0u),             // move v13,v0
+      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 8u, 2u),   // move v0_1,v2_3
+      DEF_MOVE(3, Instruction::MOVE, 10u, 6u),            // move v3,v12
+      DEF_MOVE(3, Instruction::MOVE, 11u, 4u),            // move v2,v4
+      DEF_MOVE(3, Instruction::MOVE, 12u, 7u),            // move v4,v13
+      DEF_MOVE(3, Instruction::MOVE, 13, 11u),            // move v12,v2
+      DEF_MOVE(3, Instruction::MOVE, 14u, 10u),           // move v2,v3
+      DEF_MOVE(3, Instruction::MOVE, 15u, 5u),            // move v3,v5
+      DEF_MOVE(3, Instruction::MOVE, 16u, 12u),           // move v5,v4
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 12, 13, 0, 1, 3, 2, 4, 12, 2, 3, 5 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareMIRs(mirs);
+  static const int32_t wide_sregs[] = { 2, 8 };
+  MarkAsWideSRegs(wide_sregs);
+  PerformGVN_DCE();
+
+  static const bool eliminated[] = {
+      false, false, false, false, false, false, false, true, true, false, false,
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+}
+
 }  // namespace art
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index b098bc2b5d..ec4bad778c 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -49,9 +49,11 @@ static constexpr RegStorage reserved_regs_arr_32[] =
 static constexpr RegStorage core_temps_arr_32[] =
     {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32, rs_rT2_32, rs_rT3_32,
      rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rT8};
-static constexpr RegStorage sp_temps_arr_32[] =
+static constexpr RegStorage sp_fr0_temps_arr_32[] =
     {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
      rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
+static constexpr RegStorage sp_fr1_temps_arr_32[] =
+    {rs_rF0, rs_rF2, rs_rF4, rs_rF6, rs_rF8, rs_rF10, rs_rF12, rs_rF14};
 static constexpr RegStorage dp_fr0_temps_arr_32[] =
     {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
      rs_rD7_fr0};
@@ -130,7 +132,8 @@ static constexpr ArrayRef<const RegStorage> dp_fr0_regs_32(dp_fr0_regs_arr_32);
 static constexpr ArrayRef<const RegStorage> dp_fr1_regs_32(dp_fr1_regs_arr_32);
 static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
 static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
-static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> sp_fr0_temps_32(sp_fr0_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> sp_fr1_temps_32(sp_fr1_temps_arr_32);
 static constexpr ArrayRef<const RegStorage> dp_fr0_temps_32(dp_fr0_temps_arr_32);
 static constexpr ArrayRef<const RegStorage> dp_fr1_temps_32(dp_fr1_temps_arr_32);
@@ -591,22 +594,22 @@ void MipsMir2Lir::ClobberCallerSave() {
   Clobber(rs_rFP);
   Clobber(rs_rRA);
   Clobber(rs_rF0);
-  Clobber(rs_rF1);
   Clobber(rs_rF2);
-  Clobber(rs_rF3);
   Clobber(rs_rF4);
-  Clobber(rs_rF5);
   Clobber(rs_rF6);
-  Clobber(rs_rF7);
   Clobber(rs_rF8);
-  Clobber(rs_rF9);
   Clobber(rs_rF10);
-  Clobber(rs_rF11);
   Clobber(rs_rF12);
-  Clobber(rs_rF13);
   Clobber(rs_rF14);
-  Clobber(rs_rF15);
   if (fpuIs32Bit_) {
+    Clobber(rs_rF1);
+    Clobber(rs_rF3);
+    Clobber(rs_rF5);
+    Clobber(rs_rF7);
+    Clobber(rs_rF9);
+    Clobber(rs_rF11);
+    Clobber(rs_rF13);
+    Clobber(rs_rF15);
     Clobber(rs_rD0_fr0);
     Clobber(rs_rD1_fr0);
     Clobber(rs_rD2_fr0);
@@ -717,24 +720,26 @@ void MipsMir2Lir::CompilerInitializeRegAlloc() {
                                             fpuIs32Bit_ ? dp_fr0_regs_32 : dp_fr1_regs_32,
                                             reserved_regs_32, empty_pool,  // reserved64
                                             core_temps_32, empty_pool,  // core64_temps
-                                            sp_temps_32,
+                                            fpuIs32Bit_ ? sp_fr0_temps_32 : sp_fr1_temps_32,
                                             fpuIs32Bit_ ? dp_fr0_temps_32 : dp_fr1_temps_32));
 
   // Alias single precision floats to appropriate half of overlapping double.
   for (RegisterInfo* info : reg_pool_->sp_regs_) {
     int sp_reg_num = info->GetReg().GetRegNum();
     int dp_reg_num = sp_reg_num & ~1;
-    RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
-    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
-    // Double precision register's master storage should refer to itself.
-    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
-    // Redirect single precision's master storage to master.
-    info->SetMaster(dp_reg_info);
-    // Singles should show a single 32-bit mask bit, at first referring to the low half.
-    DCHECK_EQ(info->StorageMask(), 0x1U);
-    if (sp_reg_num & 1) {
-      // For odd singles, change to user the high word of the backing double.
-      info->SetStorageMask(0x2);
+    if (fpuIs32Bit_ || (sp_reg_num == dp_reg_num)) {
+      RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+      RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+      // Double precision register's master storage should refer to itself.
+      DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+      // Redirect single precision's master storage to master.
+      info->SetMaster(dp_reg_info);
+      // Singles should show a single 32-bit mask bit, at first referring to the low half.
+      DCHECK_EQ(info->StorageMask(), 0x1U);
+      if (sp_reg_num & 1) {
+        // For odd singles, change to user the high word of the backing double.
+        info->SetStorageMask(0x2);
+      }
     }
   }
 }
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 4ce3129f55..a03ff755ab 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -972,8 +972,10 @@ void ImageWriter::CalculateNewObjectOffsets() {
   size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
   native_object_relocations_.emplace(&image_method_array_,
                                      NativeObjectRelocation { offset, image_method_type });
-  CHECK_EQ(sizeof(image_method_array_), 8u);
-  offset += sizeof(image_method_array_);
+  const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize(
+      0, ArtMethod::ObjectSize(target_ptr_size_));
+  CHECK_ALIGNED(array_size, 8u);
+  offset += array_size;
   for (auto* m : image_methods_) {
     CHECK(m != nullptr);
     CHECK(m->IsRuntimeMethod());
@@ -1203,7 +1205,7 @@ void ImageWriter::FixupPointerArray(mirror::Object* dst, mirror::PointerArray* a
     if (elem != nullptr) {
       auto it = native_object_relocations_.find(elem);
       if (it == native_object_relocations_.end()) {
-        if (true) {
+        if (it->second.IsArtMethodRelocation()) {
           auto* method = reinterpret_cast<ArtMethod*>(elem);
           LOG(FATAL) << "No relocation entry for ArtMethod " << PrettyMethod(method) << " @ "
                      << method << " idx=" << i << "/" << num_elements << " with declaring class "
@@ -1300,8 +1302,8 @@ void* ImageWriter::NativeLocationInImage(void* obj) {
     return nullptr;
   }
   auto it = native_object_relocations_.find(obj);
-  const NativeObjectRelocation& relocation = it->second;
   CHECK(it != native_object_relocations_.end()) << obj;
+  const NativeObjectRelocation& relocation = it->second;
   return reinterpret_cast<void*>(image_begin_ + relocation.offset);
 }
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index eb6aa6f346..f4e10cc6ea 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -381,7 +381,8 @@ class ImageWriter FINAL {
   // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
   ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
 
-  // Fake length prefixed array for image methods.
+  // Fake length prefixed array for image methods. This array does not contain the actual
+  // ArtMethods. We only use it for the header and relocation addresses.
   LengthPrefixedArray<ArtMethod> image_method_array_;
 
   // Counters for measurements, used for logging only.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index d89d2b2dda..6c0292c551 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2789,6 +2789,9 @@ void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction)
   Location value = locations->InAt(0);
 
   switch (instruction->GetType()) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
     case Primitive::kPrimInt: {
       if (value.IsRegister()) {
         __ CompareAndBranchIfZero(value.AsRegister<Register>(), slow_path->GetEntryLabel());
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 7fab5cfcaf..b44c5ba9f8 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2012,8 +2012,8 @@ void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction
 
   Primitive::Type type = instruction->GetType();
 
-  if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
-    LOG(FATAL) << "Unexpected type " << type << "for DivZeroCheck.";
+  if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
+    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
     return;
   }
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index b6d67de181..b6ebeb4977 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1908,8 +1908,9 @@ void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instructio
 
   Primitive::Type type = instruction->GetType();
 
-  if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
+  if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
     LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
+    return;
   }
 
   if (value.IsConstant()) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 5ffab33190..4efdbb922e 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2995,6 +2995,9 @@ void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   switch (instruction->GetType()) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
     case Primitive::kPrimInt: {
       locations->SetInAt(0, Location::Any());
       break;
@@ -3022,6 +3025,9 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction)
   Location value = locations->InAt(0);
 
   switch (instruction->GetType()) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
     case Primitive::kPrimInt: {
       if (value.IsRegister()) {
         __ testl(value.AsRegister<Register>(), value.AsRegister<Register>());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a0f45ed73e..1585104789 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -3161,6 +3161,9 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio
   Location value = locations->InAt(0);
 
   switch (instruction->GetType()) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
     case Primitive::kPrimInt: {
       if (value.IsRegister()) {
         __ testl(value.AsRegister<CpuRegister>(), value.AsRegister<CpuRegister>());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 01065959d8..4c746798be 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -276,12 +276,12 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
       nullptr,
       caller_compilation_unit_.GetClassLoader(),
       class_linker,
-      *resolved_method->GetDexFile(),
+      callee_dex_file,
       code_item,
       resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
-      resolved_method->GetDexMethodIndex(),
+      method_index,
       resolved_method->GetAccessFlags(),
-      nullptr);
+      compiler_driver_->GetVerifiedMethod(&callee_dex_file, method_index));
 
   bool requires_ctor_barrier = false;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 814cebb99d..ca2c9989b0 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3298,6 +3298,8 @@ class HDivZeroCheck : public HExpression<1> {
     SetRawInputAt(0, value);
   }
 
+  Primitive::Type GetType() const OVERRIDE { return InputAt(0)->GetType(); }
+
   bool CanBeMoved() const OVERRIDE { return true; }
 
   bool InstructionDataEquals(HInstruction* other) const OVERRIDE {