Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/verified_method.cc                   |   5
-rw-r--r--  compiler/driver/compiler_driver-inl.h             |   4
-rw-r--r--  compiler/driver/compiler_driver.cc                |  12
-rw-r--r--  compiler/elf_writer_debug.cc                      |   4
-rw-r--r--  compiler/image_test.cc                            |   2
-rw-r--r--  compiler/image_writer.cc                          |  82
-rw-r--r--  compiler/image_writer.h                           |  15
-rw-r--r--  compiler/oat_writer.cc                            |   2
-rw-r--r--  compiler/optimizing/code_generator_arm.cc         | 162
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc      |   2
-rw-r--r--  compiler/optimizing/code_generator_mips64.h       |   9
-rw-r--r--  compiler/optimizing/code_generator_x86.cc         |   2
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc      |  12
-rw-r--r--  compiler/optimizing/graph_checker.cc              |  37
-rw-r--r--  compiler/optimizing/graph_visualizer.cc           |  15
-rw-r--r--  compiler/optimizing/inliner.cc                    |  17
-rw-r--r--  compiler/optimizing/nodes.cc                      | 166
-rw-r--r--  compiler/optimizing/nodes.h                       |  58
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc        |  16
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc |  24
-rw-r--r--  compiler/optimizing/reference_type_propagation.h  |   2
-rw-r--r--  compiler/optimizing/register_allocator.cc         |   6
-rw-r--r--  compiler/optimizing/ssa_builder.cc                |   3
23 files changed, 418 insertions(+), 239 deletions(-)
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 8eb37cf3dc..0355f116f1 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -313,8 +313,9 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier
concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual(
abstract_method, pointer_size);
}
- if (concrete_method == nullptr || concrete_method->IsAbstract()) {
- // In cases where concrete_method is not found, or is abstract, continue to the next invoke.
+ if (concrete_method == nullptr || !concrete_method->IsInvokable()) {
+ // In cases where concrete_method is not found, or is not invokable, continue to the next
+ // invoke.
continue;
}
if (reg_type.IsPreciseReference() || concrete_method->IsFinal() ||
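A minimal sketch of the predicate swap above: `IsInvokable()` is assumed to be a strictly stronger requirement than `!IsAbstract()`, also rejecting other states a method can be in without having code of its own. The extra state below is illustrative, not the actual ArtMethod API.

// Hedged sketch, not ART source: why `!IsAbstract()` and `IsInvokable()` differ.
struct Method {
  bool is_abstract;
  bool is_default_conflicting;  // assumed extra non-invokable state
};

bool IsInvokable(const Method& m) {
  // A method with no real code must not be devirtualized to.
  return !m.is_abstract && !m.is_default_conflicting;
}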
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 14ba81d193..10841e6700 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -329,7 +329,7 @@ inline int CompilerDriver::IsFastInvoke(
resolved_method->GetMethodIndex() < methods_class->GetVTableLength() &&
(methods_class->GetVTableEntry(
resolved_method->GetMethodIndex(), pointer_size) == resolved_method) &&
- !resolved_method->IsAbstract();
+ resolved_method->IsInvokable();
if (can_sharpen_virtual_based_on_type || can_sharpen_super_based_on_type) {
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
@@ -374,7 +374,7 @@ inline int CompilerDriver::IsFastInvoke(
class_loader, nullptr, kVirtual);
}
CHECK(called_method != nullptr);
- CHECK(!called_method->IsAbstract());
+ CHECK(called_method->IsInvokable());
int stats_flags = kFlagMethodResolved;
GetCodeAndMethodForDirectCall(/*out*/invoke_type,
kDirect, // Sharp type
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index aa5e411ba8..bf3a8658da 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1552,7 +1552,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
*type = sharp_type;
}
} else {
- auto* image_space = heap->GetImageSpace();
+ auto* image_space = heap->GetBootImageSpace();
bool method_in_image = false;
if (image_space != nullptr) {
const auto& method_section = image_space->GetImageHeader().GetMethodsSection();
@@ -2034,8 +2034,14 @@ class VerifyClassVisitor : public CompilationVisitor {
Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
soa.Self(), dex_file, false)));
std::string error_msg;
- if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader,
- &class_def, true, &error_msg) ==
+ if (verifier::MethodVerifier::VerifyClass(soa.Self(),
+ &dex_file,
+ dex_cache,
+ class_loader,
+ &class_def,
+ true /* allow soft failures */,
+ true /* log hard failures */,
+ &error_msg) ==
verifier::MethodVerifier::kHardFailure) {
LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
<< " because: " << error_msg;
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index 90db7eb41a..73262333b6 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -451,12 +451,12 @@ void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
opcodes.EndSequence();
WriteDebugLineTable(directories, files, opcodes, &debug_line, &debug_line_patches);
}
+ builder->WriteSection(".debug_line", &debug_line);
+ builder->WritePatches(".debug_line.oat_patches", &debug_line_patches);
builder->WriteSection(".debug_info", &debug_info);
builder->WritePatches(".debug_info.oat_patches", &debug_info_patches);
builder->WriteSection(".debug_abbrev", &debug_abbrev);
builder->WriteSection(".debug_str", &debug_str);
- builder->WriteSection(".debug_line", &debug_line);
- builder->WritePatches(".debug_line.oat_patches", &debug_line_patches);
}
// Explicit instantiations
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index a38e1f54c0..6df15279a0 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -181,7 +181,7 @@ TEST_F(ImageTest, WriteRead) {
ASSERT_TRUE(heap->HasImageSpace());
ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace());
- gc::space::ImageSpace* image_space = heap->GetImageSpace();
+ gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
ASSERT_TRUE(image_space != nullptr);
ASSERT_LE(image_space->Size(), image_file_size);
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 0c85323805..3f18d9aa0f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -372,9 +372,9 @@ void ImageWriter::AddMethodPointerArray(mirror::PointerArray* arr) {
DCHECK(arr != nullptr);
if (kIsDebugBuild) {
for (size_t i = 0, len = arr->GetLength(); i < len; i++) {
- auto* method = arr->GetElementPtrSize<ArtMethod*>(i, target_ptr_size_);
+ ArtMethod* method = arr->GetElementPtrSize<ArtMethod*>(i, target_ptr_size_);
if (method != nullptr && !method->IsRuntimeMethod()) {
- auto* klass = method->GetDeclaringClass();
+ mirror::Class* klass = method->GetDeclaringClass();
CHECK(klass == nullptr || KeepClass(klass))
<< PrettyClass(klass) << " should be a kept class";
}
@@ -514,7 +514,7 @@ bool ImageWriter::IsImageBinSlotAssigned(mirror::Object* object) const {
size_t offset = lock_word.ForwardingAddress();
BinSlot bin_slot(offset);
DCHECK_LT(bin_slot.GetIndex(), bin_slot_sizes_[bin_slot.GetBin()])
- << "bin slot offset should not exceed the size of that bin";
+ << "bin slot offset should not exceed the size of that bin";
}
return true;
}
@@ -537,8 +537,13 @@ bool ImageWriter::AllocMemory() {
const size_t length = RoundUp(image_objects_offset_begin_ + GetBinSizeSum() + intern_table_bytes_,
kPageSize);
std::string error_msg;
- image_.reset(MemMap::MapAnonymous("image writer image", nullptr, length, PROT_READ | PROT_WRITE,
- false, false, &error_msg));
+ image_.reset(MemMap::MapAnonymous("image writer image",
+ nullptr,
+ length,
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
if (UNLIKELY(image_.get() == nullptr)) {
LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
return false;
@@ -547,7 +552,9 @@ bool ImageWriter::AllocMemory() {
// Create the image bitmap, only needs to cover mirror object section which is up to image_end_.
CHECK_LE(image_end_, length);
image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create(
- "image bitmap", image_->Begin(), RoundUp(image_end_, kPageSize)));
+ "image bitmap",
+ image_->Begin(),
+ RoundUp(image_end_, kPageSize)));
if (image_bitmap_.get() == nullptr) {
LOG(ERROR) << "Failed to allocate memory for image bitmap";
return false;
@@ -905,8 +912,8 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
size_t& offset = bin_slot_sizes_[kBinArtField];
DCHECK(!IsInBootImage(cur_fields));
native_object_relocations_.emplace(
- cur_fields, NativeObjectRelocation {
- offset, kNativeObjectRelocationTypeArtFieldArray });
+ cur_fields,
+ NativeObjectRelocation {offset, kNativeObjectRelocationTypeArtFieldArray });
offset += header_size;
// Forward individual fields so that we can quickly find where they belong.
for (size_t i = 0, count = cur_fields->size(); i < count; ++i) {
@@ -917,7 +924,8 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
<< " already assigned " << PrettyField(field) << " static=" << field->IsStatic();
DCHECK(!IsInBootImage(field));
native_object_relocations_.emplace(
- field, NativeObjectRelocation {offset, kNativeObjectRelocationTypeArtField });
+ field,
+ NativeObjectRelocation {offset, kNativeObjectRelocationTypeArtField });
offset += sizeof(ArtField);
}
}
@@ -940,8 +948,9 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
any_dirty = any_dirty || WillMethodBeDirty(&m);
++count;
}
- NativeObjectRelocationType type = any_dirty ? kNativeObjectRelocationTypeArtMethodDirty :
- kNativeObjectRelocationTypeArtMethodClean;
+ NativeObjectRelocationType type = any_dirty
+ ? kNativeObjectRelocationTypeArtMethodDirty
+ : kNativeObjectRelocationTypeArtMethodClean;
Bin bin_type = BinTypeForNativeRelocationType(type);
// Forward the entire array at once, but header first.
const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0,
@@ -1124,8 +1133,9 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size_));
// Add method section.
auto* methods_section = &sections[ImageHeader::kSectionArtMethods];
- *methods_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtMethodClean] +
- bin_slot_sizes_[kBinArtMethodDirty]);
+ *methods_section = ImageSection(cur_pos,
+ bin_slot_sizes_[kBinArtMethodClean] +
+ bin_slot_sizes_[kBinArtMethodDirty]);
CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
cur_pos = methods_section->End();
// Add dex cache arrays section.
@@ -1156,12 +1166,17 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
CHECK_EQ(AlignUp(image_begin_ + image_end, kPageSize), oat_file_begin) <<
"Oat file should be right after the image.";
// Create the header.
- new (image_->Begin()) ImageHeader(
- PointerToLowMemUInt32(image_begin_), image_end,
- sections, image_roots_address_, oat_file_->GetOatHeader().GetChecksum(),
- PointerToLowMemUInt32(oat_file_begin), PointerToLowMemUInt32(oat_data_begin_),
- PointerToLowMemUInt32(oat_data_end), PointerToLowMemUInt32(oat_file_end), target_ptr_size_,
- compile_pic_);
+ new (image_->Begin()) ImageHeader(PointerToLowMemUInt32(image_begin_),
+ image_end,
+ sections,
+ image_roots_address_,
+ oat_file_->GetOatHeader().GetChecksum(),
+ PointerToLowMemUInt32(oat_file_begin),
+ PointerToLowMemUInt32(oat_data_begin_),
+ PointerToLowMemUInt32(oat_data_end),
+ PointerToLowMemUInt32(oat_file_end),
+ target_ptr_size_,
+ compile_pic_);
}
ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
@@ -1371,14 +1386,16 @@ class FixupVisitor {
// Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
// image.
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
- offset, image_writer_->GetImageAddress(ref));
+ offset,
+ image_writer_->GetImageAddress(ref));
}
// java.lang.ref.Reference visitor.
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
- mirror::Reference::ReferentOffset(), image_writer_->GetImageAddress(ref->GetReferent()));
+ mirror::Reference::ReferentOffset(),
+ image_writer_->GetImageAddress(ref->GetReferent()));
}
protected:
@@ -1572,7 +1589,7 @@ const uint8_t* ImageWriter::GetOatAddress(OatAddress type) const {
// If we are compiling an app image, we need to use the stubs of the boot image.
if (compile_app_image_) {
// Use the current image pointers.
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
DCHECK(image_space != nullptr);
const OatFile* oat_file = image_space->GetOatFile();
CHECK(oat_file != nullptr);
@@ -1604,7 +1621,7 @@ const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method, bool* quick_is_inter
DCHECK(!method->IsResolutionMethod()) << PrettyMethod(method);
DCHECK(!method->IsImtConflictMethod()) << PrettyMethod(method);
DCHECK(!method->IsImtUnimplementedMethod()) << PrettyMethod(method);
- DCHECK(!method->IsAbstract()) << PrettyMethod(method);
+ DCHECK(method->IsInvokable()) << PrettyMethod(method);
DCHECK(!IsInBootImage(method)) << PrettyMethod(method);
// Use original code if it exists. Otherwise, set the code pointer to the resolution
@@ -1651,7 +1668,7 @@ const uint8_t* ImageWriter::GetQuickEntryPoint(ArtMethod* method) {
// We assume all methods have code. If they don't currently then we set them to use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
- if (UNLIKELY(method->IsAbstract())) {
+ if (UNLIKELY(!method->IsInvokable())) {
return GetOatAddress(kOatAddressQuickToInterpreterBridge);
} else {
bool quick_is_interpreted;
@@ -1697,7 +1714,7 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) {
// We assume all methods have code. If they don't currently then we set them to use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
- if (UNLIKELY(orig->IsAbstract())) {
+ if (UNLIKELY(!orig->IsInvokable())) {
copy->SetEntryPointFromQuickCompiledCodePtrSize(
GetOatAddress(kOatAddressQuickToInterpreterBridge), target_ptr_size_);
} else {
@@ -1727,8 +1744,10 @@ static OatHeader* GetOatHeaderFromElf(ElfFile* elf) {
void ImageWriter::SetOatChecksumFromElfFile(File* elf_file) {
std::string error_msg;
- std::unique_ptr<ElfFile> elf(ElfFile::Open(elf_file, PROT_READ|PROT_WRITE,
- MAP_SHARED, &error_msg));
+ std::unique_ptr<ElfFile> elf(ElfFile::Open(elf_file,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ &error_msg));
if (elf.get() == nullptr) {
LOG(FATAL) << "Unable open oat file: " << error_msg;
return;
@@ -1771,10 +1790,11 @@ uint32_t ImageWriter::BinSlot::GetIndex() const {
uint8_t* ImageWriter::GetOatFileBegin() const {
DCHECK_GT(intern_table_bytes_, 0u);
- size_t native_sections_size =
- bin_slot_sizes_[kBinArtField] + bin_slot_sizes_[kBinArtMethodDirty] +
- bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinDexCacheArray] +
- intern_table_bytes_;
+ size_t native_sections_size = bin_slot_sizes_[kBinArtField] +
+ bin_slot_sizes_[kBinArtMethodDirty] +
+ bin_slot_sizes_[kBinArtMethodClean] +
+ bin_slot_sizes_[kBinDexCacheArray] +
+ intern_table_bytes_;
return image_begin_ + RoundUp(image_end_ + native_sections_size, kPageSize);
}
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 120de97620..a0a785e21c 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -308,8 +308,11 @@ class ImageWriter FINAL {
SHARED_REQUIRES(Locks::mutator_lock_);
void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
SHARED_REQUIRES(Locks::mutator_lock_);
- void FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, mirror::Class* klass,
- Bin array_type) SHARED_REQUIRES(Locks::mutator_lock_);
+ void FixupPointerArray(mirror::Object* dst,
+ mirror::PointerArray* arr,
+ mirror::Class* klass,
+ Bin array_type)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Get quick code for non-resolution/imt_conflict/abstract method.
const uint8_t* GetQuickCode(ArtMethod* method, bool* quick_is_interpreted)
@@ -331,8 +334,12 @@ class ImageWriter FINAL {
void AssignMethodOffset(ArtMethod* method, NativeObjectRelocationType type)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Return true if klass is loaded by the boot class loader but not in the boot image.
bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ // Return true if klass depends on a live boot class loader non-image class. We want to prune
+ // these classes since we do not want any boot class loader classes in the image. This means
+ // that we also cannot have any classes which refer to such boot class loader non-image classes.
bool ContainsBootClassLoaderNonImageClass(mirror::Class* klass)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -394,7 +401,7 @@ class ImageWriter FINAL {
const bool compile_pic_;
const bool compile_app_image_;
- // Boot image space for fast lookups.
+ // Cache the boot image space in this class for faster lookups.
gc::space::ImageSpace* boot_image_space_;
// Size of pointers on the target architecture.
@@ -432,7 +439,7 @@ class ImageWriter FINAL {
uint64_t dirty_methods_;
uint64_t clean_methods_;
- // Prune class memoization table.
+ // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
std::unordered_map<mirror::Class*, bool> prune_class_memo_;
friend class ContainsBootClassLoaderNonImageClassVisitor;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 3f2271ef11..40a3f14f93 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -899,7 +899,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
// NOTE: We're using linker patches for app->boot references when the image can
// be relocated and therefore we need to emit .oat_patches. We're not using this
// for app->app references, so check that the method is an image method.
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
size_t method_offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
CHECK(image_space->GetImageHeader().GetMethodsSection().Contains(method_offset));
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 6d05293277..54c6cc8890 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2694,7 +2694,7 @@ void LocationsBuilderARM::VisitDiv(HDiv* div) {
case Primitive::kPrimInt: {
if (div->InputAt(1)->IsConstant()) {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
+ locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
if (abs_imm <= 1) {
@@ -2818,7 +2818,7 @@ void LocationsBuilderARM::VisitRem(HRem* rem) {
case Primitive::kPrimInt: {
if (rem->InputAt(1)->IsConstant()) {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
+ locations->SetInAt(1, Location::ConstantLocation(rem->InputAt(1)->AsConstant()));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
if (abs_imm <= 1) {
@@ -2989,17 +2989,29 @@ void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
switch (op->GetResultType()) {
case Primitive::kPrimInt: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
- // Make the output overlap, as it will be used to hold the masked
- // second input.
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ if (op->InputAt(1)->IsConstant()) {
+ locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant()));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Make the output overlap, as it will be used to hold the masked
+ // second input.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ }
break;
}
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ if (op->InputAt(1)->IsConstant()) {
+ locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant()));
+ // For simplicity, use kOutputOverlap even though we only require that low registers
+ // don't clash with high registers, which the register allocator currently guarantees.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ }
break;
}
default:
@@ -3020,9 +3032,9 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
case Primitive::kPrimInt: {
Register out_reg = out.AsRegister<Register>();
Register first_reg = first.AsRegister<Register>();
- // Arm doesn't mask the shift count so we need to do it ourselves.
if (second.IsRegister()) {
Register second_reg = second.AsRegister<Register>();
+ // Arm doesn't mask the shift count so we need to do it ourselves.
__ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
if (op->IsShl()) {
__ Lsl(out_reg, first_reg, out_reg);
@@ -3050,57 +3062,103 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
Register o_h = out.AsRegisterPairHigh<Register>();
Register o_l = out.AsRegisterPairLow<Register>();
- Register temp = locations->GetTemp(0).AsRegister<Register>();
-
Register high = first.AsRegisterPairHigh<Register>();
Register low = first.AsRegisterPairLow<Register>();
- Register second_reg = second.AsRegister<Register>();
-
- if (op->IsShl()) {
- __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
- // Shift the high part
- __ Lsl(o_h, high, o_l);
- // Shift the low part and `or` what overflew on the high part
- __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
- __ Lsr(temp, low, temp);
- __ orr(o_h, o_h, ShifterOperand(temp));
- // If the shift is > 32 bits, override the high part
- __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
- __ it(PL);
- __ Lsl(o_h, low, temp, PL);
- // Shift the low part
- __ Lsl(o_l, low, o_l);
- } else if (op->IsShr()) {
- __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
- // Shift the low part
- __ Lsr(o_l, low, o_h);
- // Shift the high part and `or` what underflew on the low part
- __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
- __ Lsl(temp, high, temp);
- __ orr(o_l, o_l, ShifterOperand(temp));
- // If the shift is > 32 bits, override the low part
- __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
- __ it(PL);
- __ Asr(o_l, high, temp, PL);
- // Shift the high part
- __ Asr(o_h, high, o_h);
+ if (second.IsRegister()) {
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+
+ Register second_reg = second.AsRegister<Register>();
+
+ if (op->IsShl()) {
+ __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
+ // Shift the high part
+ __ Lsl(o_h, high, o_l);
+ // Shift the low part and `or` what overflew on the high part
+ __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
+ __ Lsr(temp, low, temp);
+ __ orr(o_h, o_h, ShifterOperand(temp));
+ // If the shift is > 32 bits, override the high part
+ __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
+ __ it(PL);
+ __ Lsl(o_h, low, temp, PL);
+ // Shift the low part
+ __ Lsl(o_l, low, o_l);
+ } else if (op->IsShr()) {
+ __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
+ // Shift the low part
+ __ Lsr(o_l, low, o_h);
+ // Shift the high part and `or` what underflew on the low part
+ __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
+ __ Lsl(temp, high, temp);
+ __ orr(o_l, o_l, ShifterOperand(temp));
+ // If the shift is > 32 bits, override the low part
+ __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
+ __ it(PL);
+ __ Asr(o_l, high, temp, PL);
+ // Shift the high part
+ __ Asr(o_h, high, o_h);
+ } else {
+ __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
+ // same as Shr except we use `Lsr`s and not `Asr`s
+ __ Lsr(o_l, low, o_h);
+ __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
+ __ Lsl(temp, high, temp);
+ __ orr(o_l, o_l, ShifterOperand(temp));
+ __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
+ __ it(PL);
+ __ Lsr(o_l, high, temp, PL);
+ __ Lsr(o_h, high, o_h);
+ }
} else {
- __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
- // same as Shr except we use `Lsr`s and not `Asr`s
- __ Lsr(o_l, low, o_h);
- __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
- __ Lsl(temp, high, temp);
- __ orr(o_l, o_l, ShifterOperand(temp));
- __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
- __ it(PL);
- __ Lsr(o_l, high, temp, PL);
- __ Lsr(o_h, high, o_h);
+ // Register allocator doesn't create partial overlap.
+ DCHECK_NE(o_l, high);
+ DCHECK_NE(o_h, low);
+ int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
+ uint32_t shift_value = static_cast<uint32_t>(cst & kMaxLongShiftValue);
+ if (shift_value > 32) {
+ if (op->IsShl()) {
+ __ Lsl(o_h, low, shift_value - 32);
+ __ LoadImmediate(o_l, 0);
+ } else if (op->IsShr()) {
+ __ Asr(o_l, high, shift_value - 32);
+ __ Asr(o_h, high, 31);
+ } else {
+ __ Lsr(o_l, high, shift_value - 32);
+ __ LoadImmediate(o_h, 0);
+ }
+ } else if (shift_value == 32) {
+ if (op->IsShl()) {
+ __ mov(o_h, ShifterOperand(low));
+ __ LoadImmediate(o_l, 0);
+ } else if (op->IsShr()) {
+ __ mov(o_l, ShifterOperand(high));
+ __ Asr(o_h, high, 31);
+ } else {
+ __ mov(o_l, ShifterOperand(high));
+ __ LoadImmediate(o_h, 0);
+ }
+ } else { // shift_value < 32
+ if (op->IsShl()) {
+ __ Lsl(o_h, high, shift_value);
+ __ orr(o_h, o_h, ShifterOperand(low, LSR, 32 - shift_value));
+ __ Lsl(o_l, low, shift_value);
+ } else if (op->IsShr()) {
+ __ Lsr(o_l, low, shift_value);
+ __ orr(o_l, o_l, ShifterOperand(high, LSL, 32 - shift_value));
+ __ Asr(o_h, high, shift_value);
+ } else {
+ __ Lsr(o_l, low, shift_value);
+ __ orr(o_l, o_l, ShifterOperand(high, LSL, 32 - shift_value));
+ __ Lsr(o_h, high, shift_value);
+ }
+ }
}
break;
}
default:
LOG(FATAL) << "Unexpected operation type " << type;
+ UNREACHABLE();
}
}
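The new constant-shift path above decomposes a 64-bit shift into 32-bit half operations with three cases. A hedged C++ restatement of that lowering (assuming the shift amount is already masked to 0..63 and the zero-shift case is handled elsewhere; right shifts of negative int32_t are assumed arithmetic, as on ARM):

#include <cstdint>

struct Pair { uint32_t lo, hi; };  // the two halves of a 64-bit value

Pair ShlLong(Pair in, uint32_t shift) {
  if (shift > 32) return { 0u, in.lo << (shift - 32) };  // low half fully shifted out
  if (shift == 32) return { 0u, in.lo };                 // halves swap roles
  return { in.lo << shift,                               // shift < 32
           (in.hi << shift) | (in.lo >> (32 - shift)) }; // carry low bits into high
}

Pair ShrLong(Pair in, uint32_t shift) {  // arithmetic shift
  uint32_t sign = static_cast<uint32_t>(static_cast<int32_t>(in.hi) >> 31);
  if (shift > 32) {
    return { static_cast<uint32_t>(static_cast<int32_t>(in.hi) >> (shift - 32)), sign };
  }
  if (shift == 32) return { in.hi, sign };
  return { (in.lo >> shift) | (in.hi << (32 - shift)),
           static_cast<uint32_t>(static_cast<int32_t>(in.hi) >> shift) };
}

Pair UShrLong(Pair in, uint32_t shift) {  // logical shift: same as ShrLong with Lsr
  if (shift > 32) return { in.hi >> (shift - 32), 0u };
  if (shift == 32) return { in.hi, 0u };
  return { (in.lo >> shift) | (in.hi << (32 - shift)), in.hi >> shift };
}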
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index b36a04216d..9b78dec6c4 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2977,7 +2977,7 @@ void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
CodeGenerator::CreateLoadClassLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(A0));
+ calling_convention.GetReturnLocation(cls->GetType()));
}
void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 58c6e0fa83..ac3162f736 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -119,9 +119,12 @@ class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::RegisterLocation(V0);
}
- Location GetSetValueLocation(
- Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE {
- return is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1);
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterLocation(A2)
+ : (is_instance
+ ? Location::RegisterLocation(A2)
+ : Location::RegisterLocation(A1));
}
Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::FpuRegisterLocation(F0);
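A compact restatement of the corrected register choice, with the registers reduced to tags. The rationale in the comment is an assumption about the MIPS64 calling convention, not taken from the source:

enum Reg { A1, A2 };

// Hedged sketch: the stored value goes in A2 except in the narrow static
// case; a 64-bit value is assumed to need the higher register even when
// the set is static.
Reg SetValueReg(bool is_64bit, bool is_instance) {
  if (is_64bit) return A2;
  return is_instance ? A2 : A1;
}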
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d5d6c210bf..0147b010f2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3024,7 +3024,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
DCHECK_EQ(EAX, first.AsRegister<Register>());
DCHECK_EQ(is_div ? EAX : EDX, out.AsRegister<Register>());
- if (instruction->InputAt(1)->IsIntConstant()) {
+ if (second.IsConstant()) {
int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
if (imm == 0) {
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 9754043f32..02e5dab3d4 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -123,20 +123,21 @@ void HDeadCodeElimination::RemoveDeadBlocks() {
}
// If we removed at least one block, we need to recompute the full
- // dominator tree.
+ // dominator tree and try block membership.
if (removed_one_or_more_blocks) {
graph_->ClearDominanceInformation();
graph_->ComputeDominanceInformation();
+ graph_->ComputeTryBlockInformation();
}
// Connect successive blocks created by dead branches. Order does not matter.
for (HReversePostOrderIterator it(*graph_); !it.Done();) {
HBasicBlock* block = it.Current();
- if (block->IsEntryBlock() || block->GetSuccessors().size() != 1u) {
+ if (block->IsEntryBlock() || !block->GetLastInstruction()->IsGoto()) {
it.Advance();
continue;
}
- HBasicBlock* successor = block->GetSuccessors()[0];
+ HBasicBlock* successor = block->GetSingleSuccessor();
if (successor->IsExitBlock() || successor->GetPredecessors().size() != 1u) {
it.Advance();
continue;
@@ -176,10 +177,7 @@ void HDeadCodeElimination::RemoveDeadInstructions() {
}
void HDeadCodeElimination::Run() {
- if (!graph_->HasTryCatch()) {
- // TODO: Update dead block elimination and enable for try/catch.
- RemoveDeadBlocks();
- }
+ RemoveDeadBlocks();
SsaRedundantPhiElimination(graph_).Run();
RemoveDeadInstructions();
}
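The rewritten merge test above keys on the block's last instruction rather than its successor count. A minimal sketch of the condition, assuming a block ending in an unconditional Goto has exactly one successor:

#include <vector>

struct Block {
  bool is_entry = false;
  bool is_exit = false;
  bool ends_with_goto = false;
  std::vector<Block*> successors;
  std::vector<Block*> predecessors;
};

// Hedged sketch of the check in RemoveDeadBlocks(): only a block that falls
// through unconditionally can be glued to its successor, and only if that
// successor has no other way in and is not the exit block.
bool CanMergeWithSingleSuccessor(const Block* b) {
  if (b->is_entry || !b->ends_with_goto) return false;
  const Block* succ = b->successors[0];  // a Goto implies one successor
  return !succ->is_exit && succ->predecessors.size() == 1u;
}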
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 0d7c796837..5814d7556f 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -163,12 +163,12 @@ void GraphChecker::VisitBoundsCheck(HBoundsCheck* check) {
}
void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) {
- // Ensure that all exception handlers are catch blocks and that handlers
- // are not listed multiple times.
+ ArrayRef<HBasicBlock* const> handlers = try_boundary->GetExceptionHandlers();
+
+ // Ensure that all exception handlers are catch blocks.
// Note that a normal-flow successor may be a catch block before CFG
// simplification. We only test normal-flow successors in SsaChecker.
- for (HExceptionHandlerIterator it(*try_boundary); !it.Done(); it.Advance()) {
- HBasicBlock* handler = it.Current();
+ for (HBasicBlock* handler : handlers) {
if (!handler->IsCatchBlock()) {
AddError(StringPrintf("Block %d with %s:%d has exceptional successor %d which "
"is not a catch block.",
@@ -177,9 +177,13 @@ void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) {
try_boundary->GetId(),
handler->GetBlockId()));
}
- if (current_block_->HasSuccessor(handler, it.CurrentSuccessorIndex() + 1)) {
- AddError(StringPrintf("Exception handler block %d of %s:%d is listed multiple times.",
- handler->GetBlockId(),
+ }
+
+ // Ensure that handlers are not listed multiple times.
+ for (size_t i = 0, e = handlers.size(); i < e; ++i) {
+ if (ContainsElement(handlers, handlers[i], i + 1)) {
+ AddError(StringPrintf("Exception handler block %d of %s:%d is listed multiple times.",
+ handlers[i]->GetBlockId(),
try_boundary->DebugName(),
try_boundary->GetId()));
}
@@ -371,17 +375,14 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
// Ensure that catch blocks are not normal successors, and normal blocks are
// never exceptional successors.
- const size_t num_normal_successors = block->NumberOfNormalSuccessors();
- for (size_t j = 0; j < num_normal_successors; ++j) {
- HBasicBlock* successor = block->GetSuccessors()[j];
+ for (HBasicBlock* successor : block->GetNormalSuccessors()) {
if (successor->IsCatchBlock()) {
AddError(StringPrintf("Catch block %d is a normal successor of block %d.",
successor->GetBlockId(),
block->GetBlockId()));
}
}
- for (size_t j = num_normal_successors, e = block->GetSuccessors().size(); j < e; ++j) {
- HBasicBlock* successor = block->GetSuccessors()[j];
+ for (HBasicBlock* successor : block->GetExceptionalSuccessors()) {
if (!successor->IsCatchBlock()) {
AddError(StringPrintf("Normal block %d is an exceptional successor of block %d.",
successor->GetBlockId(),
@@ -393,10 +394,14 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
// block with multiple successors to a block with multiple
// predecessors). Exceptional edges are synthesized and hence
// not accounted for.
- if (block->NumberOfNormalSuccessors() > 1) {
- for (size_t j = 0, e = block->NumberOfNormalSuccessors(); j < e; ++j) {
- HBasicBlock* successor = block->GetSuccessors()[j];
- if (successor->GetPredecessors().size() > 1) {
+ if (block->GetSuccessors().size() > 1) {
+ for (HBasicBlock* successor : block->GetNormalSuccessors()) {
+ if (successor->IsExitBlock() &&
+ block->IsSingleTryBoundary() &&
+ block->GetPredecessors().size() == 1u &&
+ block->GetSinglePredecessor()->GetLastInstruction()->IsThrow()) {
+ // Allowed critical edge Throw->TryBoundary->Exit.
+ } else if (successor->GetPredecessors().size() > 1) {
AddError(StringPrintf("Critical edge between blocks %d and %d.",
block->GetBlockId(),
successor->GetBlockId()));
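The duplicate-handler loop above scans the remainder of the list for each handler, which is quadratic but cheap for short handler lists. A standalone sketch of the same scan (ContainsElement(range, value, start) is assumed to behave like std::find from `start` onwards):

#include <cstddef>
#include <vector>

// Return the indices of entries that occur again later in `handlers`.
template <typename T>
std::vector<std::size_t> FindDuplicates(const std::vector<T>& handlers) {
  std::vector<std::size_t> duplicated;
  for (std::size_t i = 0, e = handlers.size(); i < e; ++i) {
    for (std::size_t j = i + 1; j < e; ++j) {
      if (handlers[j] == handlers[i]) {
        duplicated.push_back(i);
        break;
      }
    }
  }
  return duplicated;
}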
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 4111671a9b..2b7790184a 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -24,6 +24,7 @@
#include "code_generator.h"
#include "dead_code_elimination.h"
#include "disassembler.h"
+#include "inliner.h"
#include "licm.h"
#include "nodes.h"
#include "optimization.h"
@@ -252,8 +253,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void PrintSuccessors(HBasicBlock* block) {
AddIndent();
output_ << "successors";
- for (size_t i = 0; i < block->NumberOfNormalSuccessors(); ++i) {
- HBasicBlock* successor = block->GetSuccessors()[i];
+ for (HBasicBlock* successor : block->GetNormalSuccessors()) {
output_ << " \"B" << successor->GetBlockId() << "\" ";
}
output_<< std::endl;
@@ -262,8 +262,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void PrintExceptionHandlers(HBasicBlock* block) {
AddIndent();
output_ << "xhandlers";
- for (size_t i = block->NumberOfNormalSuccessors(); i < block->GetSuccessors().size(); ++i) {
- HBasicBlock* handler = block->GetSuccessors()[i];
+ for (HBasicBlock* handler : block->GetExceptionalSuccessors()) {
output_ << " \"B" << handler->GetBlockId() << "\" ";
}
if (block->IsExitBlock() &&
@@ -424,11 +423,6 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
return strcmp(pass_name_, name) == 0;
}
- bool IsReferenceTypePropagationPass() {
- return strstr(pass_name_, ReferenceTypePropagation::kReferenceTypePropagationPassName)
- != nullptr;
- }
-
void PrintInstruction(HInstruction* instruction) {
output_ << instruction->DebugName();
if (instruction->InputCount() > 0) {
@@ -492,7 +486,8 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
} else {
StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId();
}
- } else if (IsReferenceTypePropagationPass()
+ } else if ((IsPass(ReferenceTypePropagation::kReferenceTypePropagationPassName)
+ || IsPass(HInliner::kInlinerPassName))
&& (instruction->GetType() == Primitive::kPrimNot)) {
ReferenceTypeInfo info = instruction->IsLoadClass()
? instruction->AsLoadClass()->GetLoadedClassRTI()
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 353881e47a..0363f203b2 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -148,7 +148,7 @@ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resol
// the target method. Since we check above the exact type of the receiver,
// the only reason this can happen is an IncompatibleClassChangeError.
return nullptr;
- } else if (resolved_method->IsAbstract()) {
+ } else if (!resolved_method->IsInvokable()) {
// The information we had on the receiver was not enough to find
// the target method. Since we check above the exact type of the receiver,
// the only reason this can happen is an IncompatibleClassChangeError.
@@ -406,8 +406,8 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
&type_propagation,
&sharpening,
&simplify,
- &dce,
&fold,
+ &dce,
};
for (size_t i = 0; i < arraysize(optimizations); ++i) {
@@ -534,6 +534,7 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
ReferenceTypeInfo::Create(obj_handle, false /* is_exact */));
}
+ // Check the integrity of reference types and run another type propagation if needed.
if ((return_replacement != nullptr)
&& (return_replacement->GetType() == Primitive::kPrimNot)) {
if (!return_replacement->GetReferenceTypeInfo().IsValid()) {
@@ -544,10 +545,20 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
DCHECK(return_replacement->IsPhi());
size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
ReferenceTypeInfo::TypeHandle return_handle =
- handles_->NewHandle(resolved_method->GetReturnType(true /* resolve */, pointer_size));
+ handles_->NewHandle(resolved_method->GetReturnType(true /* resolve */, pointer_size));
return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
return_handle, return_handle->CannotBeAssignedFromOtherTypes() /* is_exact */));
}
+
+ // If the return type is a refinement of the declared type run the type propagation again.
+ ReferenceTypeInfo return_rti = return_replacement->GetReferenceTypeInfo();
+ ReferenceTypeInfo invoke_rti = invoke_instruction->GetReferenceTypeInfo();
+ if (invoke_rti.IsStrictSupertypeOf(return_rti)
+ || (return_rti.IsExact() && !invoke_rti.IsExact())
+ || !return_replacement->CanBeNull()) {
+ ReferenceTypePropagation rtp_fixup(graph_, handles_);
+ rtp_fixup.Run();
+ }
}
return true;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 83f53d791b..73a44ee2cb 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -362,7 +362,11 @@ void HGraph::ComputeTryBlockInformation() {
HBasicBlock* first_predecessor = block->GetPredecessors()[0];
DCHECK(!block->IsLoopHeader() || !block->GetLoopInformation()->IsBackEdge(*first_predecessor));
const HTryBoundary* try_entry = first_predecessor->ComputeTryEntryOfSuccessors();
- if (try_entry != nullptr) {
+ if (try_entry != nullptr &&
+ (block->GetTryCatchInformation() == nullptr ||
+ try_entry != &block->GetTryCatchInformation()->GetTryEntry())) {
+ // We are either setting try block membership for the first time or it
+ // has changed.
block->SetTryCatchInformation(new (arena_) TryCatchInformation(*try_entry));
}
}
@@ -381,8 +385,9 @@ void HGraph::SimplifyCFG() {
// Only split normal-flow edges. We cannot split exceptional edges as they
// are synthesized (approximate real control flow), and we do not need to
// anyway. Moves that would be inserted there are performed by the runtime.
- for (size_t j = 0, e = block->NumberOfNormalSuccessors(); j < e; ++j) {
- HBasicBlock* successor = block->GetSuccessors()[j];
+ ArrayRef<HBasicBlock* const> normal_successors = block->GetNormalSuccessors();
+ for (size_t j = 0, e = normal_successors.size(); j < e; ++j) {
+ HBasicBlock* successor = normal_successors[j];
DCHECK(!successor->IsCatchBlock());
if (successor == exit_block_) {
// Throw->TryBoundary->Exit. Special case which we do not want to split
@@ -391,7 +396,11 @@ void HGraph::SimplifyCFG() {
DCHECK(block->GetSinglePredecessor()->GetLastInstruction()->IsThrow());
} else if (successor->GetPredecessors().size() > 1) {
SplitCriticalEdge(block, successor);
- --j;
+ // SplitCriticalEdge could have invalidated the `normal_successors`
+ // ArrayRef. We must re-acquire it.
+ normal_successors = block->GetNormalSuccessors();
+ DCHECK_EQ(normal_successors[j]->GetSingleSuccessor(), successor);
+ DCHECK_EQ(e, normal_successors.size());
}
}
}
@@ -1086,6 +1095,8 @@ HConstant* HBinaryOperation::TryStaticEvaluation() const {
} else if (GetRight()->IsLongConstant()) {
return Evaluate(GetLeft()->AsLongConstant(), GetRight()->AsLongConstant());
}
+ } else if (GetLeft()->IsNullConstant() && GetRight()->IsNullConstant()) {
+ return Evaluate(GetLeft()->AsNullConstant(), GetRight()->AsNullConstant());
}
return nullptr;
}
@@ -1325,17 +1336,38 @@ bool HBasicBlock::HasSinglePhi() const {
return !GetPhis().IsEmpty() && GetFirstPhi()->GetNext() == nullptr;
}
+ArrayRef<HBasicBlock* const> HBasicBlock::GetNormalSuccessors() const {
+ if (EndsWithTryBoundary()) {
+ // The normal-flow successor of HTryBoundary is always stored at index zero.
+ DCHECK_EQ(successors_[0], GetLastInstruction()->AsTryBoundary()->GetNormalFlowSuccessor());
+ return ArrayRef<HBasicBlock* const>(successors_).SubArray(0u, 1u);
+ } else {
+ // All successors of blocks not ending with TryBoundary are normal.
+ return ArrayRef<HBasicBlock* const>(successors_);
+ }
+}
+
+ArrayRef<HBasicBlock* const> HBasicBlock::GetExceptionalSuccessors() const {
+ if (EndsWithTryBoundary()) {
+ return GetLastInstruction()->AsTryBoundary()->GetExceptionHandlers();
+ } else {
+ // Blocks not ending with TryBoundary do not have exceptional successors.
+ return ArrayRef<HBasicBlock* const>();
+ }
+}
+
bool HTryBoundary::HasSameExceptionHandlersAs(const HTryBoundary& other) const {
- if (GetBlock()->GetSuccessors().size() != other.GetBlock()->GetSuccessors().size()) {
+ ArrayRef<HBasicBlock* const> handlers1 = GetExceptionHandlers();
+ ArrayRef<HBasicBlock* const> handlers2 = other.GetExceptionHandlers();
+
+ size_t length = handlers1.size();
+ if (length != handlers2.size()) {
return false;
}
// Exception handlers need to be stored in the same order.
- for (HExceptionHandlerIterator it1(*this), it2(other);
- !it1.Done();
- it1.Advance(), it2.Advance()) {
- DCHECK(!it2.Done());
- if (it1.Current() != it2.Current()) {
+ for (size_t i = 0; i < length; ++i) {
+ if (handlers1[i] != handlers2[i]) {
return false;
}
}
@@ -1388,7 +1420,7 @@ void HBasicBlock::DisconnectAndDelete() {
// iteration.
DCHECK(dominated_blocks_.empty());
- // Remove the block from all loops it is included in.
+ // (1) Remove the block from all loops it is included in.
for (HLoopInformationOutwardIterator it(*this); !it.Done(); it.Advance()) {
HLoopInformation* loop_info = it.Current();
loop_info->Remove(this);
@@ -1400,17 +1432,34 @@ void HBasicBlock::DisconnectAndDelete() {
}
}
- // Disconnect the block from its predecessors and update their control-flow
- // instructions.
+ // (2) Disconnect the block from its predecessors and update their
+ // control-flow instructions.
for (HBasicBlock* predecessor : predecessors_) {
HInstruction* last_instruction = predecessor->GetLastInstruction();
+ if (last_instruction->IsTryBoundary() && !IsCatchBlock()) {
+ // This block is the only normal-flow successor of the TryBoundary which
+ // makes `predecessor` dead. Since DCE removes blocks in post order,
+ // exception handlers of this TryBoundary were already visited and any
+ // remaining handlers therefore must be live. We remove `predecessor` from
+ // their list of predecessors.
+ DCHECK_EQ(last_instruction->AsTryBoundary()->GetNormalFlowSuccessor(), this);
+ while (predecessor->GetSuccessors().size() > 1) {
+ HBasicBlock* handler = predecessor->GetSuccessors()[1];
+ DCHECK(handler->IsCatchBlock());
+ predecessor->RemoveSuccessor(handler);
+ handler->RemovePredecessor(predecessor);
+ }
+ }
+
predecessor->RemoveSuccessor(this);
uint32_t num_pred_successors = predecessor->GetSuccessors().size();
if (num_pred_successors == 1u) {
// If we have one successor after removing one, then we must have
- // had an HIf or HPackedSwitch, as they have more than one successor.
- // Replace those with a HGoto.
- DCHECK(last_instruction->IsIf() || last_instruction->IsPackedSwitch());
+ // had an HIf, HPackedSwitch or HTryBoundary, as they have more than one
+ // successor. Replace those with a HGoto.
+ DCHECK(last_instruction->IsIf() ||
+ last_instruction->IsPackedSwitch() ||
+ (last_instruction->IsTryBoundary() && IsCatchBlock()));
predecessor->RemoveInstruction(last_instruction);
predecessor->AddInstruction(new (graph_->GetArena()) HGoto(last_instruction->GetDexPc()));
} else if (num_pred_successors == 0u) {
@@ -1419,15 +1468,17 @@ void HBasicBlock::DisconnectAndDelete() {
// SSAChecker fails unless it is not removed during the pass too.
predecessor->RemoveInstruction(last_instruction);
} else {
- // There are multiple successors left. This must come from a HPackedSwitch
- // and we are in the middle of removing the HPackedSwitch. Like above, leave
- // this alone, and the SSAChecker will fail if it is not removed as well.
- DCHECK(last_instruction->IsPackedSwitch());
+ // There are multiple successors left. The removed block might be a successor
+ // of a PackedSwitch which will be completely removed (perhaps replaced with
+ // a Goto), or we are deleting a catch block from a TryBoundary. In either
+ // case, leave `last_instruction` as is for now.
+ DCHECK(last_instruction->IsPackedSwitch() ||
+ (last_instruction->IsTryBoundary() && IsCatchBlock()));
}
}
predecessors_.clear();
- // Disconnect the block from its successors and update their phis.
+ // (3) Disconnect the block from its successors and update their phis.
for (HBasicBlock* successor : successors_) {
// Delete this block from the list of predecessors.
size_t this_index = successor->GetPredecessorIndexOf(this);
@@ -1437,30 +1488,57 @@ void HBasicBlock::DisconnectAndDelete() {
// dominator of `successor` which violates the order DCHECKed at the top.
DCHECK(!successor->predecessors_.empty());
- // Remove this block's entries in the successor's phis.
- if (successor->predecessors_.size() == 1u) {
- // The successor has just one predecessor left. Replace phis with the only
- // remaining input.
- for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
- HPhi* phi = phi_it.Current()->AsPhi();
- phi->ReplaceWith(phi->InputAt(1 - this_index));
- successor->RemovePhi(phi);
- }
- } else {
- for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
- phi_it.Current()->AsPhi()->RemoveInputAt(this_index);
+ // Remove this block's entries in the successor's phis. Skip exceptional
+ // successors because catch phi inputs do not correspond to predecessor
+ // blocks but to throwing instructions. Their inputs will be updated in step (4).
+ if (!successor->IsCatchBlock()) {
+ if (successor->predecessors_.size() == 1u) {
+ // The successor has just one predecessor left. Replace phis with the only
+ // remaining input.
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ HPhi* phi = phi_it.Current()->AsPhi();
+ phi->ReplaceWith(phi->InputAt(1 - this_index));
+ successor->RemovePhi(phi);
+ }
+ } else {
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ phi_it.Current()->AsPhi()->RemoveInputAt(this_index);
+ }
}
}
}
successors_.clear();
+ // (4) Remove instructions and phis. Instructions should have no remaining uses
+ // except in catch phis. If an instruction is used by a catch phi at `index`,
+ // remove the `index`-th input of all phis in the catch block since they are
+ // guaranteed dead. Note that we may miss dead inputs this way but the
+ // graph will always remain consistent.
+ for (HBackwardInstructionIterator it(GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* insn = it.Current();
+ while (insn->HasUses()) {
+ DCHECK(IsTryBlock());
+ HUseListNode<HInstruction*>* use = insn->GetUses().GetFirst();
+ size_t use_index = use->GetIndex();
+ HBasicBlock* user_block = use->GetUser()->GetBlock();
+ DCHECK(use->GetUser()->IsPhi() && user_block->IsCatchBlock());
+ for (HInstructionIterator phi_it(user_block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ phi_it.Current()->AsPhi()->RemoveInputAt(use_index);
+ }
+ }
+
+ RemoveInstruction(insn);
+ }
+ for (HInstructionIterator it(GetPhis()); !it.Done(); it.Advance()) {
+ RemovePhi(it.Current()->AsPhi());
+ }
+
// Disconnect from the dominator.
dominator_->RemoveDominatedBlock(this);
SetDominator(nullptr);
- // Delete from the graph. The function safely deletes remaining instructions
- // and updates the reverse post order.
- graph_->DeleteDeadBlock(this);
+ // Delete from the graph, update reverse post order.
+ graph_->DeleteDeadEmptyBlock(this);
SetGraph(nullptr);
}
@@ -1507,7 +1585,7 @@ void HBasicBlock::MergeWith(HBasicBlock* other) {
other->predecessors_.clear();
// Delete `other` from the graph. The function updates reverse post order.
- graph_->DeleteDeadBlock(other);
+ graph_->DeleteDeadEmptyBlock(other);
other->SetGraph(nullptr);
}
@@ -1571,19 +1649,14 @@ static void MakeRoomFor(ArenaVector<HBasicBlock*>* blocks,
std::copy_backward(blocks->begin() + after + 1u, blocks->begin() + old_size, blocks->end());
}
-void HGraph::DeleteDeadBlock(HBasicBlock* block) {
+void HGraph::DeleteDeadEmptyBlock(HBasicBlock* block) {
DCHECK_EQ(block->GetGraph(), this);
DCHECK(block->GetSuccessors().empty());
DCHECK(block->GetPredecessors().empty());
DCHECK(block->GetDominatedBlocks().empty());
DCHECK(block->GetDominator() == nullptr);
-
- for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- block->RemoveInstruction(it.Current());
- }
- for (HBackwardInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- block->RemovePhi(it.Current()->AsPhi());
- }
+ DCHECK(block->GetInstructions().IsEmpty());
+ DCHECK(block->GetPhis().IsEmpty());
if (block->IsExitBlock()) {
exit_block_ = nullptr;
@@ -1686,6 +1759,9 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
// (2) the reverse post order of that graph,
// (3) the potential loop information they are now in,
// (4) try block membership.
+ // Note that we do not need to update catch phi inputs because they
+ // correspond to the register file of the outer method which the inlinee
+ // cannot modify.
// We don't add the entry block, the exit block, and the first block, which
// has been merged with `at`.
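The new GetNormalSuccessors()/GetExceptionalSuccessors() pair partitions one successor list without copying, relying on the invariant that a TryBoundary block keeps its single normal successor at index 0 and its handlers at indices 1..n. A sketch with a minimal span type standing in for art::ArrayRef:

#include <cstddef>
#include <vector>

// Hedged stand-in for ArrayRef: a non-owning view over contiguous elements.
template <typename T>
struct Span {
  const T* data;
  std::size_t size;
  Span<T> SubArray(std::size_t pos, std::size_t len) const { return { data + pos, len }; }
};

struct Block {
  std::vector<Block*> successors;
  bool ends_with_try_boundary = false;

  Span<Block*> NormalSuccessors() const {
    Span<Block*> all{ successors.data(), successors.size() };
    // TryBoundary: only index 0 is normal flow; otherwise all are.
    return ends_with_try_boundary ? all.SubArray(0, 1) : all;
  }
  Span<Block*> ExceptionalSuccessors() const {
    Span<Block*> all{ successors.data(), successors.size() };
    // Handlers occupy indices 1..n of a TryBoundary block; otherwise none.
    return ends_with_try_boundary ? all.SubArray(1, successors.size() - 1)
                                  : Span<Block*>{ nullptr, 0 };
  }
};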
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 4c4d0f268d..2878ac9899 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -35,6 +35,7 @@
#include "mirror/class.h"
#include "offsets.h"
#include "primitive.h"
+#include "utils/array_ref.h"
namespace art {
@@ -240,8 +241,9 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// put deoptimization instructions, etc.
void TransformLoopHeaderForBCE(HBasicBlock* header);
- // Removes `block` from the graph.
- void DeleteDeadBlock(HBasicBlock* block);
+ // Removes `block` from the graph. Assumes `block` has been disconnected from
+ // other blocks and has no instructions or phis.
+ void DeleteDeadEmptyBlock(HBasicBlock* block);
// Splits the edge between `block` and `successor` while preserving the
// indices in the predecessor/successor lists. If there are multiple edges
@@ -659,6 +661,9 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
return successors_;
}
+ ArrayRef<HBasicBlock* const> GetNormalSuccessors() const;
+ ArrayRef<HBasicBlock* const> GetExceptionalSuccessors() const;
+
bool HasSuccessor(const HBasicBlock* block, size_t start_from = 0u) {
return ContainsElement(successors_, block, start_from);
}
@@ -809,12 +814,6 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
return GetPredecessorIndexOf(predecessor) == idx;
}
- // Returns the number of non-exceptional successors. SsaChecker ensures that
- // these are stored at the beginning of the successor list.
- size_t NumberOfNormalSuccessors() const {
- return EndsWithTryBoundary() ? 1 : GetSuccessors().size();
- }
-
// Create a new block between this block and its predecessors. The new block
// is added to the graph, all predecessor edges are relinked to it and an edge
// is created to `this`. Returns the new empty block. Reverse post order or
@@ -1732,6 +1731,13 @@ class ReferenceTypeInfo : ValueObject {
return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
+ bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ DCHECK(rti.IsValid());
+ return GetTypeHandle().Get() != rti.GetTypeHandle().Get() &&
+ GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
+ }
+
// Returns true if the type information provide the same amount of details.
// Note that it does not mean that the instructions have the same actual type
// (because the type can be the result of a merge).
@@ -2397,6 +2403,10 @@ class HTryBoundary : public HTemplateInstruction<0> {
// Returns the block's non-exceptional successor (index zero).
HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessors()[0]; }
+ ArrayRef<HBasicBlock* const> GetExceptionHandlers() const {
+ return ArrayRef<HBasicBlock* const>(GetBlock()->GetSuccessors()).SubArray(1u);
+ }
+
// Returns whether `handler` is among its exception handlers (non-zero index
// successors).
bool HasExceptionHandler(const HBasicBlock& handler) const {
@@ -2424,25 +2434,6 @@ class HTryBoundary : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HTryBoundary);
};
-// Iterator over exception handlers of a given HTryBoundary, i.e. over
-// exceptional successors of its basic block.
-class HExceptionHandlerIterator : public ValueObject {
- public:
- explicit HExceptionHandlerIterator(const HTryBoundary& try_boundary)
- : block_(*try_boundary.GetBlock()), index_(block_.NumberOfNormalSuccessors()) {}
-
- bool Done() const { return index_ == block_.GetSuccessors().size(); }
- HBasicBlock* Current() const { return block_.GetSuccessors()[index_]; }
- size_t CurrentSuccessorIndex() const { return index_; }
- void Advance() { ++index_; }
-
- private:
- const HBasicBlock& block_;
- size_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HExceptionHandlerIterator);
-};
-
// Deoptimize to interpreter, upon checking a condition.
class HDeoptimize : public HTemplateInstruction<1> {
public:
@@ -2611,6 +2602,11 @@ class HBinaryOperation : public HExpression<2> {
VLOG(compiler) << DebugName() << " is not defined for the (long, int) case.";
return nullptr;
}
+ virtual HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
+ HNullConstant* y ATTRIBUTE_UNUSED) const {
+ VLOG(compiler) << DebugName() << " is not defined for the (null, null) case.";
+ return nullptr;
+ }
// Returns an input that can legally be used as the right input and is
// constant, or null.
@@ -2701,6 +2697,10 @@ class HEqual : public HCondition {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
+ HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(1);
+ }
DECLARE_INSTRUCTION(Equal);
@@ -2733,6 +2733,10 @@ class HNotEqual : public HCondition {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
+ HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(0);
+ }
DECLARE_INSTRUCTION(NotEqual);
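The new (null, null) Evaluate arms let TryStaticEvaluation() fold comparisons whose inputs are both the null constant: equality always yields 1, inequality always 0. A trivial runnable sketch of that folding:

#include <cassert>

enum class Cond { kEqual, kNotEqual };

// Hedged sketch: with both operands known to be null, the result is a
// compile-time int constant.
int EvaluateNullNull(Cond c) {
  return c == Cond::kEqual ? 1 : 0;
}

int main() {
  assert(EvaluateNullNull(Cond::kEqual) == 1);     // as in HEqual::Evaluate(null, null)
  assert(EvaluateNullNull(Cond::kNotEqual) == 0);  // as in HNotEqual::Evaluate(null, null)
  return 0;
}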
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6e85a82849..2be0680561 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -405,20 +405,9 @@ static void MaybeRunInliner(HGraph* graph,
if (!should_inline) {
return;
}
-
- ArenaAllocator* arena = graph->GetArena();
- HInliner* inliner = new (arena) HInliner(
+ HInliner* inliner = new (graph->GetArena()) HInliner(
graph, codegen, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
- ReferenceTypePropagation* type_propagation =
- new (arena) ReferenceTypePropagation(graph, handles,
- "reference_type_propagation_after_inlining");
-
- HOptimization* optimizations[] = {
- inliner,
- // Run another type propagation phase: inlining will open up more opportunities
- // to remove checkcast/instanceof and null checks.
- type_propagation,
- };
+ HOptimization* optimizations[] = { inliner };
RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
}
@@ -530,6 +519,7 @@ static void RunOptimizations(HGraph* graph,
// pipeline for all methods.
if (graph->HasTryCatch()) {
HOptimization* optimizations2[] = {
+ boolean_simplify,
side_effects,
gvn,
dce2,
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 659da068a9..ecc085b985 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -99,17 +99,9 @@ ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
}
}
-void ReferenceTypePropagation::Run() {
- // To properly propagate type info we need to visit in the dominator-based order.
- // Reverse post order guarantees a node's dominators are visited first.
- // We take advantage of this order in `VisitBasicBlock`.
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- VisitBasicBlock(it.Current());
- }
- ProcessWorklist();
-
+void ReferenceTypePropagation::ValidateTypes() {
+ // TODO: move this to the graph checker.
if (kIsDebugBuild) {
- // TODO: move this to the graph checker.
ScopedObjectAccess soa(Thread::Current());
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
@@ -135,6 +127,18 @@ void ReferenceTypePropagation::Run() {
}
}
+void ReferenceTypePropagation::Run() {
+ // To properly propagate type info we need to visit in the dominator-based order.
+ // Reverse post order guarantees a node's dominators are visited first.
+ // We take advantage of this order in `VisitBasicBlock`.
+ for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+ VisitBasicBlock(it.Current());
+ }
+
+ ProcessWorklist();
+ ValidateTypes();
+}
+
void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
RTPVisitor visitor(graph_,
handles_,
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 5493601adc..5c05592726 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -56,6 +56,8 @@ class ReferenceTypePropagation : public HOptimization {
ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void ValidateTypes();
+
StackHandleScopeCollection* handles_;
ArenaVector<HInstruction*> worklist_;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index ef22c816a0..d399bc2d7a 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1525,7 +1525,7 @@ void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block,
DCHECK(IsValidDestination(destination)) << destination;
if (source.Equals(destination)) return;
- DCHECK_EQ(block->NumberOfNormalSuccessors(), 1u);
+ DCHECK_EQ(block->GetNormalSuccessors().size(), 1u);
HInstruction* last = block->GetLastInstruction();
// We insert moves at exit for phi predecessors and connecting blocks.
// A block ending with an if or a packed switch cannot branch to a block
@@ -1752,7 +1752,7 @@ void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
// If `from` has only one successor, we can put the moves at the exit of it. Otherwise
// we need to put the moves at the entry of `to`.
- if (from->NumberOfNormalSuccessors() == 1) {
+ if (from->GetNormalSuccessors().size() == 1) {
InsertParallelMoveAtExitOf(from,
interval->GetParent()->GetDefinedBy(),
source->ToLocation(),
@@ -1894,7 +1894,7 @@ void RegisterAllocator::Resolve() {
HInstruction* phi = inst_it.Current();
for (size_t i = 0, e = current->GetPredecessors().size(); i < e; ++i) {
HBasicBlock* predecessor = current->GetPredecessors()[i];
- DCHECK_EQ(predecessor->NumberOfNormalSuccessors(), 1u);
+ DCHECK_EQ(predecessor->GetNormalSuccessors().size(), 1u);
HInstruction* input = phi->InputAt(i);
Location source = input->GetLiveInterval()->GetLocationAt(
predecessor->GetLifetimeEnd() - 1);
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 4565590bc3..5190eb3b26 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -660,8 +660,7 @@ void SsaBuilder::VisitInstruction(HInstruction* instruction) {
if (instruction->CanThrowIntoCatchBlock()) {
const HTryBoundary& try_entry =
instruction->GetBlock()->GetTryCatchInformation()->GetTryEntry();
- for (HExceptionHandlerIterator it(try_entry); !it.Done(); it.Advance()) {
- HBasicBlock* catch_block = it.Current();
+ for (HBasicBlock* catch_block : try_entry.GetExceptionHandlers()) {
ArenaVector<HInstruction*>* handler_locals = GetLocalsFor(catch_block);
DCHECK_EQ(handler_locals->size(), current_locals_->size());
for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
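Across this change, the bespoke Done()/Advance() iterator (the deleted HExceptionHandlerIterator) is replaced by an accessor returning a begin()/end() range, so call sites become plain range-for loops. A small sketch of the before/after shape, with the accessor below an assumption rather than the real API:

#include <vector>

struct Handler { int id; };

// Assumed accessor in the shape of HTryBoundary::GetExceptionHandlers().
std::vector<Handler*> GetExceptionHandlers() { return {}; }

void VisitHandlers() {
  // Before: for (HExceptionHandlerIterator it(try_entry); !it.Done(); it.Advance())
  //           Use(it.Current());
  // After: a plain range-for over the returned handler range.
  for (Handler* handler : GetExceptionHandlers()) {
    (void)handler;  // visit each catch block
  }
}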