Diffstat (limited to 'compiler')
-rw-r--r--  compiler/common_compiler_test.cc                  |   1
-rw-r--r--  compiler/dex/verification_results.cc              |   2
-rw-r--r--  compiler/dex/verified_method.cc                   |   5
-rw-r--r--  compiler/driver/compiled_method_storage_test.cc   |   1
-rw-r--r--  compiler/driver/compiler_driver.cc                |  39
-rw-r--r--  compiler/driver/compiler_driver.h                 |   2
-rw-r--r--  compiler/image_writer.cc                          | 229
-rw-r--r--  compiler/image_writer.h                           |  24
-rw-r--r--  compiler/jit/jit_compiler.cc                      |   3
-rw-r--r--  compiler/jni/jni_compiler_test.cc                 |  13
-rw-r--r--  compiler/linker/relative_patcher_test.h           |   1
-rw-r--r--  compiler/oat_test.cc                              |   1
-rw-r--r--  compiler/oat_writer.cc                            |  14
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc   |  11
-rw-r--r--  compiler/optimizing/code_generator_arm.cc         |   4
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc       |   4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc         |  16
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc      |  15
-rw-r--r--  compiler/optimizing/graph_checker.cc              |   9
-rw-r--r--  compiler/optimizing/graph_checker.h               |   1
-rw-r--r--  compiler/optimizing/inliner.cc                    |  49
-rw-r--r--  compiler/optimizing/intrinsics.h                  |   4
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc             |  52
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc           |  41
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc            |   4
-rw-r--r--  compiler/optimizing/nodes.cc                      |   1
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc |  30
-rw-r--r--  compiler/optimizing/sharpening.cc                 |  10
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h       |  33
29 files changed, 412 insertions(+), 207 deletions(-)
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index f75a252df2..bf29e1c31d 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -180,6 +180,7 @@ void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind,
isa,
instruction_set_features_.get(),
/* boot_image */ true,
+ /* app_image */ false,
GetImageClasses(),
GetCompiledClasses(),
GetCompiledMethods(),
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 1491a183f1..606302bd78 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -60,7 +60,7 @@ void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method
// TODO: Investigate why are we doing the work again for this method and try to avoid it.
LOG(WARNING) << "Method processed more than once: "
<< PrettyMethod(ref.dex_method_index, *ref.dex_file);
- if (!Runtime::Current()->UseJit()) {
+ if (!Runtime::Current()->UseJitCompilation()) {
DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
}
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 5c0253c29e..bace014713 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -54,7 +54,8 @@ const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_ve
}
// Only need dequicken info for JIT so far.
- if (Runtime::Current()->UseJit() && !verified_method->GenerateDequickenMap(method_verifier)) {
+ if (Runtime::Current()->UseJitCompilation() &&
+ !verified_method->GenerateDequickenMap(method_verifier)) {
return nullptr;
}
}
@@ -72,7 +73,7 @@ const MethodReference* VerifiedMethod::GetDevirtTarget(uint32_t dex_pc) const {
}
const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const {
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
auto it = dequicken_map_.find(dex_pc);
return (it != dequicken_map_.end()) ? &it->second : nullptr;
}
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 9e0c22c68c..6863f42d11 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -36,6 +36,7 @@ TEST(CompiledMethodStorage, Deduplicate) {
/* instruction_set_ */ kNone,
/* instruction_set_features */ nullptr,
/* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index be82956e76..1ab1d31f09 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -341,6 +341,7 @@ CompilerDriver::CompilerDriver(
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
bool boot_image,
+ bool app_image,
std::unordered_set<std::string>* image_classes,
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
@@ -363,6 +364,7 @@ CompilerDriver::CompilerDriver(
compiled_methods_(MethodTable::key_compare()),
non_relative_linker_patch_count_(0u),
boot_image_(boot_image),
+ app_image_(app_image),
image_classes_(image_classes),
classes_to_compile_(compiled_classes),
methods_to_compile_(compiled_methods),
@@ -473,7 +475,7 @@ static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
const DexFile& dex_file, const DexFile::ClassDef& class_def)
SHARED_REQUIRES(Locks::mutator_lock_) {
auto* const runtime = Runtime::Current();
- if (runtime->UseJit() || driver.GetCompilerOptions().VerifyAtRuntime()) {
+ if (runtime->UseJitCompilation() || driver.GetCompilerOptions().VerifyAtRuntime()) {
// Verify at runtime shouldn't dex to dex since we didn't resolve of verify.
return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
}
@@ -945,7 +947,7 @@ bool CompilerDriver::ShouldVerifyClassBasedOnProfile(const DexFile& dex_file,
class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
public:
- ResolveCatchBlockExceptionsClassVisitor(
+ explicit ResolveCatchBlockExceptionsClassVisitor(
std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
: exceptions_to_resolve_(exceptions_to_resolve) {}
@@ -1268,7 +1270,7 @@ void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
bool CompilerDriver::CanAssumeClassIsLoaded(mirror::Class* klass) {
Runtime* runtime = Runtime::Current();
if (!runtime->IsAotCompiler()) {
- DCHECK(runtime->UseJit());
+ DCHECK(runtime->UseJitCompilation());
// Having the klass reference here implies that the klass is already loaded.
return true;
}
@@ -1289,7 +1291,7 @@ bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(Handle<mirror::DexCache> d
if ((IsBootImage() &&
IsImageClass(dex_cache->GetDexFile()->StringDataByIdx(
dex_cache->GetDexFile()->GetTypeId(type_idx).descriptor_idx_))) ||
- Runtime::Current()->UseJit()) {
+ Runtime::Current()->UseJitCompilation()) {
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
result = (resolved_class != nullptr);
}
@@ -1307,7 +1309,7 @@ bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file,
// See also Compiler::ResolveDexFile
bool result = false;
- if (IsBootImage() || Runtime::Current()->UseJit()) {
+ if (IsBootImage() || Runtime::Current()->UseJitCompilation()) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
@@ -1319,7 +1321,7 @@ bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file,
result = true;
} else {
// Just check whether the dex cache already has the string.
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
result = (dex_cache->GetResolvedString(string_idx) != nullptr);
}
}
@@ -1427,7 +1429,7 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i
} else {
return false;
}
- } else if (runtime->UseJit() && !heap->IsMovableObject(resolved_class)) {
+ } else if (runtime->UseJitCompilation() && !heap->IsMovableObject(resolved_class)) {
*is_type_initialized = resolved_class->IsInitialized();
// If the class may move around, then don't embed it as a direct pointer.
*use_direct_type_ptr = true;
@@ -1604,7 +1606,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
}
}
}
- if (runtime->UseJit()) {
+ if (runtime->UseJitCompilation()) {
// If we are the JIT, then don't allow a direct call to the interpreter bridge since this will
// never be updated even after we compile the method.
if (cl->IsQuickToInterpreterBridge(
@@ -1636,7 +1638,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
bool must_use_direct_pointers = false;
mirror::DexCache* dex_cache = declaring_class->GetDexCache();
if (target_method->dex_file == dex_cache->GetDexFile() &&
- !(runtime->UseJit() && dex_cache->GetResolvedMethod(
+ !(runtime->UseJitCompilation() && dex_cache->GetResolvedMethod(
method->GetDexMethodIndex(), pointer_size) == nullptr)) {
target_method->dex_method_index = method->GetDexMethodIndex();
} else {
@@ -1673,7 +1675,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
break;
}
}
- if (method_in_image || compiling_boot || runtime->UseJit()) {
+ if (method_in_image || compiling_boot || runtime->UseJitCompilation()) {
// We know we must be able to get to the method in the image, so use that pointer.
// In the case where we are the JIT, we can always use direct pointers since we know where
// the method and its code are / will be. We don't sharpen to interpreter bridge since we
@@ -2440,9 +2442,12 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader,
context.ForAll(0, dex_file.NumClassDefs(), &visitor, init_thread_count);
}
-class InitializeArrayClassVisitor : public ClassVisitor {
+class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor {
public:
virtual bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
+ return true;
+ }
if (klass->IsArrayClass()) {
StackHandleScope<1> hs(Thread::Current());
Runtime::Current()->GetClassLinker()->EnsureInitialized(hs.Self(),
@@ -2450,6 +2455,10 @@ class InitializeArrayClassVisitor : public ClassVisitor {
true,
true);
}
+ // Create the conflict tables.
+ if (!klass->IsTemp() && klass->ShouldHaveEmbeddedImtAndVTable()) {
+ Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass);
+ }
return true;
}
};
@@ -2462,13 +2471,15 @@ void CompilerDriver::InitializeClasses(jobject class_loader,
CHECK(dex_file != nullptr);
InitializeClasses(class_loader, *dex_file, dex_files, timings);
}
- {
+ if (boot_image_ || app_image_) {
// Make sure that we call EnsureIntiailized on all the array classes to call
// SetVerificationAttempted so that the access flags are set. If we do not do this they get
// changed at runtime resulting in more dirty image pages.
+ // Also create conflict tables.
+ // Only useful if we are compiling an image (image_classes_ is not null).
ScopedObjectAccess soa(Thread::Current());
- InitializeArrayClassVisitor visitor;
- Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
+ InitializeArrayClassesAndCreateConflictTablesVisitor visitor;
+ Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&visitor);
}
if (IsBootImage()) {
// Prune garbage objects created during aborted transactions.
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index d63dffa49a..19a1ecc494 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -92,6 +92,7 @@ class CompilerDriver {
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
bool boot_image,
+ bool app_image,
std::unordered_set<std::string>* image_classes,
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
@@ -652,6 +653,7 @@ class CompilerDriver {
size_t non_relative_linker_patch_count_ GUARDED_BY(compiled_methods_lock_);
const bool boot_image_;
+ const bool app_image_;
// If image_ is true, specifies the classes that will be included in the image.
// Note if image_classes_ is null, all classes are included in the image.
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 8bb462c667..00ff522c9a 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -653,8 +653,7 @@ bool ImageWriter::AllocMemory() {
for (ImageInfo& image_info : image_infos_) {
ImageSection unused_sections[ImageHeader::kSectionCount];
const size_t length = RoundUp(
- image_info.CreateImageSections(target_ptr_size_, unused_sections),
- kPageSize);
+ image_info.CreateImageSections(unused_sections), kPageSize);
std::string error_msg;
image_info.image_.reset(MemMap::MapAnonymous("image writer image",
@@ -1214,6 +1213,20 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
AssignMethodOffset(&m, type, oat_index);
}
(any_dirty ? dirty_methods_ : clean_methods_) += num_methods;
+
+ // Assign offsets for all runtime methods in the IMT since these may hold conflict tables
+ // live.
+ if (as_klass->ShouldHaveEmbeddedImtAndVTable()) {
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ ArtMethod* imt_method = as_klass->GetEmbeddedImTableEntry(i, target_ptr_size_);
+ DCHECK(imt_method != nullptr);
+ if (imt_method->IsRuntimeMethod() &&
+ !IsInBootImage(imt_method) &&
+ !NativeRelocationAssigned(imt_method)) {
+ AssignMethodOffset(imt_method, kNativeObjectRelocationTypeRuntimeMethod, oat_index);
+ }
+ }
+ }
}
} else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
@@ -1237,13 +1250,37 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
}
}
+bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
+ return native_object_relocations_.find(ptr) != native_object_relocations_.end();
+}
+
+void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
+ // No offset, or already assigned.
+ if (table == nullptr || NativeRelocationAssigned(table)) {
+ return;
+ }
+ CHECK(!IsInBootImage(table));
+ // If the method is a conflict method we also want to assign the conflict table offset.
+ ImageInfo& image_info = GetImageInfo(oat_index);
+ const size_t size = table->ComputeSize(target_ptr_size_);
+ native_object_relocations_.emplace(
+ table,
+ NativeObjectRelocation {
+ oat_index,
+ image_info.bin_slot_sizes_[kBinIMTConflictTable],
+ kNativeObjectRelocationTypeIMTConflictTable});
+ image_info.bin_slot_sizes_[kBinIMTConflictTable] += size;
+}
+
void ImageWriter::AssignMethodOffset(ArtMethod* method,
NativeObjectRelocationType type,
size_t oat_index) {
DCHECK(!IsInBootImage(method));
- auto it = native_object_relocations_.find(method);
- CHECK(it == native_object_relocations_.end()) << "Method " << method << " already assigned "
+ CHECK(!NativeRelocationAssigned(method)) << "Method " << method << " already assigned "
<< PrettyMethod(method);
+ if (method->IsRuntimeMethod()) {
+ TryAssignConflictTableOffset(method->GetImtConflictTable(target_ptr_size_), oat_index);
+ }
ImageInfo& image_info = GetImageInfo(oat_index);
size_t& offset = image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(type)];
native_object_relocations_.emplace(method, NativeObjectRelocation { oat_index, offset, type });
@@ -1292,8 +1329,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
// know where image_roots is going to end up
image_objects_offset_begin_ = RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment
- // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
- heap->VisitObjects(WalkFieldsCallback, this);
+ const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
// Write the image runtime methods.
image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod();
image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod();
@@ -1303,31 +1339,19 @@ void ImageWriter::CalculateNewObjectOffsets() {
runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
image_methods_[ImageHeader::kRefsAndArgsSaveMethod] =
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
-
- // Add room for fake length prefixed array for holding the image methods.
- const auto image_method_type = kNativeObjectRelocationTypeArtMethodArrayClean;
- auto it = native_object_relocations_.find(&image_method_array_);
- CHECK(it == native_object_relocations_.end());
- ImageInfo& default_image_info = GetImageInfo(GetDefaultOatIndex());
- size_t& offset =
- default_image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
- if (!compile_app_image_) {
- native_object_relocations_.emplace(&image_method_array_,
- NativeObjectRelocation { GetDefaultOatIndex(), offset, image_method_type });
- }
- size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
- const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize(
- 0, ArtMethod::Size(target_ptr_size_), method_alignment);
- CHECK_ALIGNED_PARAM(array_size, method_alignment);
- offset += array_size;
+ // Visit image methods first to have the main runtime methods in the first image.
for (auto* m : image_methods_) {
CHECK(m != nullptr);
CHECK(m->IsRuntimeMethod());
DCHECK_EQ(compile_app_image_, IsInBootImage(m)) << "Trampolines should be in boot image";
if (!IsInBootImage(m)) {
- AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean, GetDefaultOatIndex());
+ AssignMethodOffset(m, kNativeObjectRelocationTypeRuntimeMethod, GetDefaultOatIndex());
}
}
+
+ // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
+ heap->VisitObjects(WalkFieldsCallback, this);
+
// Calculate size of the dex cache arrays slot and prepare offsets.
PrepareDexCacheArraySlots();
@@ -1346,15 +1370,22 @@ void ImageWriter::CalculateNewObjectOffsets() {
for (ImageInfo& image_info : image_infos_) {
size_t bin_offset = image_objects_offset_begin_;
for (size_t i = 0; i != kBinSize; ++i) {
+ switch (i) {
+ case kBinArtMethodClean:
+ case kBinArtMethodDirty: {
+ bin_offset = RoundUp(bin_offset, method_alignment);
+ break;
+ }
+ case kBinIMTConflictTable: {
+ bin_offset = RoundUp(bin_offset, target_ptr_size_);
+ break;
+ }
+ default: {
+ // Normal alignment.
+ }
+ }
image_info.bin_slot_offsets_[i] = bin_offset;
bin_offset += image_info.bin_slot_sizes_[i];
- if (i == kBinArtField) {
- static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields.");
- static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4.");
- DCHECK_ALIGNED(bin_offset, 4u);
- DCHECK(method_alignment == 4u || method_alignment == 8u);
- bin_offset = RoundUp(bin_offset, method_alignment);
- }
}
// NOTE: There may be additional padding between the bin slots and the intern table.
DCHECK_EQ(image_info.image_end_,
@@ -1367,9 +1398,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
image_info.image_begin_ = global_image_begin_ + image_offset;
image_info.image_offset_ = image_offset;
ImageSection unused_sections[ImageHeader::kSectionCount];
- image_info.image_size_ = RoundUp(
- image_info.CreateImageSections(target_ptr_size_, unused_sections),
- kPageSize);
+ image_info.image_size_ = RoundUp(image_info.CreateImageSections(unused_sections), kPageSize);
// There should be no gaps until the next image.
image_offset += image_info.image_size_;
}
@@ -1396,42 +1425,52 @@ void ImageWriter::CalculateNewObjectOffsets() {
// Note that image_info.image_end_ is left at end of used mirror object section.
}
-size_t ImageWriter::ImageInfo::CreateImageSections(size_t target_ptr_size,
- ImageSection* out_sections) const {
+size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const {
DCHECK(out_sections != nullptr);
+
+ // Do not round up any sections here that are represented by the bins since it will break
+ // offsets.
+
// Objects section
- auto* objects_section = &out_sections[ImageHeader::kSectionObjects];
+ ImageSection* objects_section = &out_sections[ImageHeader::kSectionObjects];
*objects_section = ImageSection(0u, image_end_);
- size_t cur_pos = objects_section->End();
+
// Add field section.
- auto* field_section = &out_sections[ImageHeader::kSectionArtFields];
- *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]);
+ ImageSection* field_section = &out_sections[ImageHeader::kSectionArtFields];
+ *field_section = ImageSection(bin_slot_offsets_[kBinArtField], bin_slot_sizes_[kBinArtField]);
CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset());
- cur_pos = field_section->End();
- // Round up to the alignment the required by the method section.
- cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size));
+
// Add method section.
- auto* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
- *methods_section = ImageSection(cur_pos,
- bin_slot_sizes_[kBinArtMethodClean] +
- bin_slot_sizes_[kBinArtMethodDirty]);
- CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
- cur_pos = methods_section->End();
+ ImageSection* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
+ *methods_section = ImageSection(
+ bin_slot_offsets_[kBinArtMethodClean],
+ bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]);
+
+ // Conflict tables section.
+ ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
+ *imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable],
+ bin_slot_sizes_[kBinIMTConflictTable]);
+
+ // Runtime methods section.
+ ImageSection* runtime_methods_section = &out_sections[ImageHeader::kSectionRuntimeMethods];
+ *runtime_methods_section = ImageSection(bin_slot_offsets_[kBinRuntimeMethod],
+ bin_slot_sizes_[kBinRuntimeMethod]);
+
// Add dex cache arrays section.
- auto* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
- *dex_cache_arrays_section = ImageSection(cur_pos, bin_slot_sizes_[kBinDexCacheArray]);
- CHECK_EQ(bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset());
- cur_pos = dex_cache_arrays_section->End();
+ ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
+ *dex_cache_arrays_section = ImageSection(bin_slot_offsets_[kBinDexCacheArray],
+ bin_slot_sizes_[kBinDexCacheArray]);
+
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
- cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
+ size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t));
// Calculate the size of the interned strings.
- auto* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
+ ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
*interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
cur_pos = interned_strings_section->End();
// Round up to the alignment the class table expects. See HashSet::WriteToMemory.
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
// Calculate the size of the class table section.
- auto* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
+ ImageSection* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
*class_table_section = ImageSection(cur_pos, class_table_bytes_);
cur_pos = class_table_section->End();
// Image end goes right before the start of the image bitmap.
@@ -1446,7 +1485,7 @@ void ImageWriter::CreateHeader(size_t oat_index) {
// Create the image sections.
ImageSection sections[ImageHeader::kSectionCount];
- const size_t image_end = image_info.CreateImageSections(target_ptr_size_, sections);
+ const size_t image_end = image_info.CreateImageSections(sections);
// Finally bitmap section.
const size_t bitmap_bytes = image_info.image_bitmap_->Size();
@@ -1531,8 +1570,20 @@ class FixupRootVisitor : public RootVisitor {
ImageWriter* const image_writer_;
};
+void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
+ const size_t count = orig->NumEntries(target_ptr_size_);
+ for (size_t i = 0; i < count; ++i) {
+ ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_);
+ ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_);
+ copy->SetInterfaceMethod(i, target_ptr_size_, NativeLocationInImage(interface_method));
+ copy->SetImplementationMethod(i,
+ target_ptr_size_,
+ NativeLocationInImage(implementation_method));
+ }
+}
+
void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
- ImageInfo& image_info = GetImageInfo(oat_index);
+ const ImageInfo& image_info = GetImageInfo(oat_index);
// Copy ArtFields and methods to their locations and update the array for convenience.
for (auto& pair : native_object_relocations_) {
NativeObjectRelocation& relocation = pair.second;
@@ -1550,6 +1601,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass()));
break;
}
+ case kNativeObjectRelocationTypeRuntimeMethod:
case kNativeObjectRelocationTypeArtMethodClean:
case kNativeObjectRelocationTypeArtMethodDirty: {
CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first),
@@ -1575,26 +1627,22 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
case kNativeObjectRelocationTypeDexCacheArray:
// Nothing to copy here, everything is done in FixupDexCache().
break;
+ case kNativeObjectRelocationTypeIMTConflictTable: {
+ auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
+ CopyAndFixupImtConflictTable(
+ orig_table,
+ new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_));
+ break;
+ }
}
}
// Fixup the image method roots.
auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
- const ImageSection& methods_section = image_header->GetMethodsSection();
for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
ArtMethod* method = image_methods_[i];
CHECK(method != nullptr);
- // Only place runtime methods in the image of the default oat file.
- if (method->IsRuntimeMethod() && oat_index != GetDefaultOatIndex()) {
- continue;
- }
if (!IsInBootImage(method)) {
- auto it = native_object_relocations_.find(method);
- CHECK(it != native_object_relocations_.end()) << "No forwarding for " << PrettyMethod(method);
- NativeObjectRelocation& relocation = it->second;
- CHECK(methods_section.Contains(relocation.offset)) << relocation.offset << " not in "
- << methods_section;
- CHECK(relocation.IsArtMethodRelocation()) << relocation.type;
- method = reinterpret_cast<ArtMethod*>(global_image_begin_ + it->second.offset);
+ method = NativeLocationInImage(method);
}
image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), method);
}
@@ -2057,24 +2105,28 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
// The resolution method has a special trampoline to call.
Runtime* runtime = Runtime::Current();
- if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
- copy->SetEntryPointFromQuickCompiledCodePtrSize(
- GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_);
- } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
- orig == runtime->GetImtUnimplementedMethod())) {
- copy->SetEntryPointFromQuickCompiledCodePtrSize(
- GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_);
- } else if (UNLIKELY(orig->IsRuntimeMethod())) {
- bool found_one = false;
- for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) {
- auto idx = static_cast<Runtime::CalleeSaveType>(i);
- if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
- found_one = true;
- break;
+ if (orig->IsRuntimeMethod()) {
+ ImtConflictTable* orig_table = orig->GetImtConflictTable(target_ptr_size_);
+ if (orig_table != nullptr) {
+ // Special IMT conflict method, normal IMT conflict method or unimplemented IMT method.
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(
+ GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_);
+ copy->SetImtConflictTable(NativeLocationInImage(orig_table), target_ptr_size_);
+ } else if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(
+ GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_);
+ } else {
+ bool found_one = false;
+ for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) {
+ auto idx = static_cast<Runtime::CalleeSaveType>(i);
+ if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
+ found_one = true;
+ break;
+ }
}
+ CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig);
+ CHECK(copy->IsRuntimeMethod());
}
- CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig);
- CHECK(copy->IsRuntimeMethod());
} else {
// We assume all methods have code. If they don't currently then we set them to the use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
@@ -2141,6 +2193,10 @@ ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocat
return kBinArtMethodDirty;
case kNativeObjectRelocationTypeDexCacheArray:
return kBinDexCacheArray;
+ case kNativeObjectRelocationTypeRuntimeMethod:
+ return kBinRuntimeMethod;
+ case kNativeObjectRelocationTypeIMTConflictTable:
+ return kBinIMTConflictTable;
}
UNREACHABLE();
}
@@ -2242,7 +2298,6 @@ ImageWriter::ImageWriter(
compile_app_image_(compile_app_image),
target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
image_infos_(oat_filenames.size()),
- image_method_array_(ImageHeader::kImageMethodsCount),
dirty_methods_(0u),
clean_methods_(0u),
image_storage_mode_(image_storage_mode),
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 0cb6aea9b2..51976c511f 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -169,6 +169,10 @@ class ImageWriter FINAL {
// ArtMethods may be dirty if the class has native methods or a declaring class that isn't
// initialized.
kBinArtMethodDirty,
+ // Conflict tables (clean).
+ kBinIMTConflictTable,
+ // Runtime methods (always clean, do not have a length prefix array).
+ kBinRuntimeMethod,
// Dex cache arrays have a special slot for PC-relative addressing. Since they are
// huge, and as such their dirtiness is not important for the clean/dirty separation,
// we arbitrarily keep them at the end of the native data.
@@ -186,6 +190,8 @@ class ImageWriter FINAL {
kNativeObjectRelocationTypeArtMethodArrayClean,
kNativeObjectRelocationTypeArtMethodDirty,
kNativeObjectRelocationTypeArtMethodArrayDirty,
+ kNativeObjectRelocationTypeRuntimeMethod,
+ kNativeObjectRelocationTypeIMTConflictTable,
kNativeObjectRelocationTypeDexCacheArray,
};
friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
@@ -240,7 +246,7 @@ class ImageWriter FINAL {
// Create the image sections into the out sections variable, returns the size of the image
// excluding the bitmap.
- size_t CreateImageSections(size_t target_ptr_size, ImageSection* out_sections) const;
+ size_t CreateImageSections(ImageSection* out_sections) const;
std::unique_ptr<MemMap> image_; // Memory mapped for generating the image.
@@ -395,6 +401,8 @@ class ImageWriter FINAL {
void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupClass(mirror::Class* orig, mirror::Class* copy)
SHARED_REQUIRES(Locks::mutator_lock_);
void FixupObject(mirror::Object* orig, mirror::Object* copy)
@@ -425,6 +433,11 @@ class ImageWriter FINAL {
size_t oat_index)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
+ // relocation.
+ void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Return true if klass is loaded by the boot class loader but not in the boot image.
bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -481,6 +494,9 @@ class ImageWriter FINAL {
// remove duplicates in the multi image and app image case.
mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);
+ // Return true if there already exists a native allocation for an object.
+ bool NativeRelocationAssigned(void* ptr) const;
+
const CompilerDriver& compiler_driver_;
// Beginning target image address for the first image.
@@ -517,16 +533,14 @@ class ImageWriter FINAL {
bool IsArtMethodRelocation() const {
return type == kNativeObjectRelocationTypeArtMethodClean ||
- type == kNativeObjectRelocationTypeArtMethodDirty;
+ type == kNativeObjectRelocationTypeArtMethodDirty ||
+ type == kNativeObjectRelocationTypeRuntimeMethod;
}
};
std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
// Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
- // Fake length prefixed array for image methods. This array does not contain the actual
- // ArtMethods. We only use it for the header and relocation addresses.
- LengthPrefixedArray<ArtMethod> image_method_array_;
// Counters for measurements, used for logging only.
uint64_t dirty_methods_;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 5de9842d90..c2d7ff7795 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -155,7 +155,8 @@ JitCompiler::JitCompiler() {
Compiler::kOptimizing,
instruction_set,
instruction_set_features_.get(),
- /* image */ false,
+ /* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index cf836a9c9f..251dc39864 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -31,6 +31,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/stack_trace_element.h"
+#include "nativeloader/native_loader.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
@@ -53,6 +54,11 @@ class JniCompilerTest : public CommonCompilerTest {
check_generic_jni_ = false;
}
+ void TearDown() OVERRIDE {
+ android::ResetNativeLoader();
+ CommonCompilerTest::TearDown();
+ }
+
void SetCheckGenericJni(bool generic) {
check_generic_jni_ = generic;
}
@@ -93,10 +99,12 @@ class JniCompilerTest : public CommonCompilerTest {
// Start runtime.
Thread::Current()->TransitionFromSuspendedToRunnable();
bool started = runtime_->Start();
+ android::InitializeNativeLoader();
CHECK(started);
}
// JNI operations after runtime start.
env_ = Thread::Current()->GetJniEnv();
+ library_search_path_ = env_->NewStringUTF("");
jklass_ = env_->FindClass("MyClassNatives");
ASSERT_TRUE(jklass_ != nullptr) << method_name << " " << method_sig;
@@ -168,6 +176,7 @@ class JniCompilerTest : public CommonCompilerTest {
void StackArgsSignExtendedMips64Impl();
JNIEnv* env_;
+ jstring library_search_path_;
jmethodID jmethod_;
bool check_generic_jni_;
};
@@ -220,7 +229,7 @@ void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() {
std::string reason;
ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
- LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason))
+ LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason))
<< reason;
jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24);
@@ -235,7 +244,7 @@ void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() {
std::string reason;
ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
- LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason))
+ LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason))
<< reason;
jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42);
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index c07de79984..ec69107d8f 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -51,6 +51,7 @@ class RelativePatcherTest : public testing::Test {
instruction_set,
/* instruction_set_features*/ nullptr,
/* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 73b16d5b46..5b192846ba 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -112,6 +112,7 @@ class OatTest : public CommonCompilerTest {
insn_set,
insn_features_.get(),
/* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index e804beef0d..8da9f06dd9 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1127,17 +1127,23 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
return target_offset;
}
- mirror::Class* GetTargetType(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
- mirror::DexCache* dex_cache = (dex_file_ == patch.TargetTypeDexFile())
+ mirror::DexCache* GetDexCache(const DexFile* target_dex_file)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ return (target_dex_file == dex_file_)
? dex_cache_
- : class_linker_->FindDexCache(Thread::Current(), *patch.TargetTypeDexFile());
+ : class_linker_->FindDexCache(Thread::Current(), *target_dex_file);
+ }
+
+ mirror::Class* GetTargetType(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::DexCache* dex_cache = GetDexCache(patch.TargetTypeDexFile());
mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
CHECK(type != nullptr);
return type;
}
mirror::String* GetTargetString(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
- mirror::String* string = dex_cache_->GetResolvedString(patch.TargetStringIndex());
+ mirror::DexCache* dex_cache = GetDexCache(patch.TargetStringDexFile());
+ mirror::String* string = dex_cache->GetResolvedString(patch.TargetStringIndex());
DCHECK(string != nullptr);
DCHECK(writer_->HasBootImage() ||
Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string));
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 659c6f8497..b65e98a120 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -552,7 +552,11 @@ class BCEVisitor : public HGraphVisitor {
DCHECK(!IsAddedBlock(block));
first_index_bounds_check_map_.clear();
HGraphVisitor::VisitBasicBlock(block);
- AddComparesWithDeoptimization(block);
+ // We should never deoptimize from an osr method, otherwise we might wrongly optimize
+ // code dominated by the deoptimization.
+ if (!GetGraph()->IsCompilingOsr()) {
+ AddComparesWithDeoptimization(block);
+ }
}
void Finish() {
@@ -1358,6 +1362,11 @@ class BCEVisitor : public HGraphVisitor {
if (loop->IsIrreducible()) {
return false;
}
+ // We should never deoptimize from an osr method, otherwise we might wrongly optimize
+ // code dominated by the deoptimization.
+ if (GetGraph()->IsCompilingOsr()) {
+ return false;
+ }
// A try boundary preheader is hard to handle.
// TODO: remove this restriction.
if (loop->GetPreHeader()->GetLastInstruction()->IsTryBoundary()) {
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 45d23fe516..197e473473 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5183,10 +5183,10 @@ HLoadString::LoadKind CodeGeneratorARM::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
// We disable pc-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
// TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e8e6b68975..9680f2bf45 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4010,10 +4010,10 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index e73e880308..6dc480bbee 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3303,17 +3303,6 @@ void InstructionCodeGeneratorX86::GenerateDivRemWithAnyConstant(HBinaryOperation
int shift;
CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
- NearLabel ndiv;
- NearLabel end;
- // If numerator is 0, the result is 0, no computation needed.
- __ testl(eax, eax);
- __ j(kNotEqual, &ndiv);
-
- __ xorl(out, out);
- __ jmp(&end);
-
- __ Bind(&ndiv);
-
// Save the numerator.
__ movl(num, eax);
@@ -3348,7 +3337,6 @@ void InstructionCodeGeneratorX86::GenerateDivRemWithAnyConstant(HBinaryOperation
} else {
__ movl(eax, edx);
}
- __ Bind(&end);
}
void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instruction) {
@@ -5977,7 +5965,7 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind(
DCHECK(GetCompilerOptions().GetCompilePic());
FALLTHROUGH_INTENDED;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit()); // Note: boot image is also non-JIT.
+ DCHECK(!Runtime::Current()->UseJitCompilation()); // Note: boot image is also non-JIT.
// We disable pc-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
// TODO: Create as many X86ComputeBaseMethodAddress instructions as needed for methods
@@ -5989,7 +5977,7 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5576d839c3..96ec09c2a8 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -3390,16 +3390,6 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat
__ movl(numerator, eax);
- NearLabel no_div;
- NearLabel end;
- __ testl(eax, eax);
- __ j(kNotEqual, &no_div);
-
- __ xorl(out, out);
- __ jmp(&end);
-
- __ Bind(&no_div);
-
__ movl(eax, Immediate(magic));
__ imull(numerator);
@@ -3425,7 +3415,6 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat
} else {
__ movl(eax, edx);
}
- __ Bind(&end);
} else {
int64_t imm = second.GetConstant()->AsLongConstant()->GetValue();
@@ -5413,10 +5402,10 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
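
[Note] The x86 and x86-64 hunks above drop the explicit zero-numerator fast path from GenerateDivRemWithAnyConstant. Below is a minimal host-side C++ sketch of magic-number constant division (an illustration of the general technique, not the generated assembly; the helper name and parameters are invented for the example). It shows that a zero numerator already yields a zero quotient and remainder, so the deleted branch was not needed for correctness.

    #include <cstdint>

    // Signed division by a constant via a precomputed magic number and shift
    // (Hacker's Delight style), mirroring the emitted sequence: widening
    // multiply, optional add/sub of the numerator, arithmetic shift, sign fix.
    int32_t MagicDivide(int32_t numerator, int32_t divisor, int32_t magic, int shift) {
      int64_t product = static_cast<int64_t>(magic) * numerator;
      int32_t quotient = static_cast<int32_t>(product >> 32);  // high half (edx after imull)
      if (divisor > 0 && magic < 0) {
        quotient += numerator;
      } else if (divisor < 0 && magic > 0) {
        quotient -= numerator;
      }
      quotient >>= shift;                                       // arithmetic shift right
      quotient += static_cast<uint32_t>(quotient) >> 31;        // round toward zero
      // With numerator == 0 every term above is 0, so the quotient (and the
      // remainder numerator - quotient * divisor) is 0 without a special case.
      return quotient;
    }
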
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 96837a8266..968e26724d 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -258,6 +258,15 @@ void GraphChecker::VisitBoundsCheck(HBoundsCheck* check) {
VisitInstruction(check);
}
+void GraphChecker::VisitDeoptimize(HDeoptimize* deopt) {
+ if (GetGraph()->IsCompilingOsr()) {
+ AddError(StringPrintf("A graph compiled OSR cannot have a HDeoptimize instruction"));
+ }
+
+ // Perform the instruction base checks too.
+ VisitInstruction(deopt);
+}
+
void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) {
ArrayRef<HBasicBlock* const> handlers = try_boundary->GetExceptionHandlers();
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 83b198474c..3060c80073 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -57,6 +57,7 @@ class GraphChecker : public HGraphDelegateVisitor {
void VisitCheckCast(HCheckCast* check) OVERRIDE;
void VisitCondition(HCondition* op) OVERRIDE;
void VisitConstant(HConstant* instruction) OVERRIDE;
+ void VisitDeoptimize(HDeoptimize* instruction) OVERRIDE;
void VisitIf(HIf* instruction) OVERRIDE;
void VisitInstanceOf(HInstanceOf* check) OVERRIDE;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index ff4b9a765c..59de895182 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -308,7 +308,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
// Check if we can use an inline cache.
ArtMethod* caller = graph_->GetArtMethod();
- if (Runtime::Current()->UseJit()) {
+ if (Runtime::Current()->UseJitCompilation()) {
// Under JIT, we should always know the caller.
DCHECK(caller != nullptr);
ScopedProfilingInfoInlineUse spiis(caller, soa.Self());
@@ -322,7 +322,13 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
return false;
} else if (ic.IsMonomorphic()) {
MaybeRecordStat(kMonomorphicCall);
- return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic);
+ if (outermost_graph_->IsCompilingOsr()) {
+ // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
+ // interpreter and it may have seen different receiver types.
+ return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
+ } else {
+ return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic);
+ }
} else if (ic.IsPolymorphic()) {
MaybeRecordStat(kPolymorphicCall);
return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
@@ -510,6 +516,11 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
bool deoptimize = all_targets_inlined &&
(i != InlineCache::kIndividualCacheSize - 1) &&
(ic.GetTypeAt(i + 1) == nullptr);
+
+ if (outermost_graph_->IsCompilingOsr()) {
+ // We do not support HDeoptimize in OSR methods.
+ deoptimize = false;
+ }
HInstruction* compare = AddTypeGuard(
receiver, cursor, bb_cursor, class_index, is_referrer, invoke_instruction, deoptimize);
if (deoptimize) {
@@ -623,7 +634,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
const InlineCache& ic) {
// This optimization only works under JIT for now.
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
if (graph_->GetInstructionSet() == kMips64) {
// TODO: Support HClassTableGet for mips64.
return false;
@@ -672,7 +683,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- if (!TryInlineAndReplace(invoke_instruction, actual_method, /* do_rtp */ false)) {
+ HInstruction* return_replacement = nullptr;
+ if (!TryBuildAndInline(invoke_instruction, actual_method, &return_replacement)) {
return false;
}
@@ -701,9 +713,6 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
}
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(class_table_get, constant);
- HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
- compare, invoke_instruction->GetDexPc());
- // TODO: Extend reference type propagation to understand the guard.
if (cursor != nullptr) {
bb_cursor->InsertInstructionAfter(receiver_class, cursor);
} else {
@@ -711,8 +720,20 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
}
bb_cursor->InsertInstructionAfter(class_table_get, receiver_class);
bb_cursor->InsertInstructionAfter(compare, class_table_get);
- bb_cursor->InsertInstructionAfter(deoptimize, compare);
- deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
+
+ if (outermost_graph_->IsCompilingOsr()) {
+ CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction);
+ } else {
+ // TODO: Extend reference type propagation to understand the guard.
+ HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
+ compare, invoke_instruction->GetDexPc());
+ bb_cursor->InsertInstructionAfter(deoptimize, compare);
+ deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
+ if (return_replacement != nullptr) {
+ invoke_instruction->ReplaceWith(return_replacement);
+ }
+ invoke_instruction->GetBlock()->RemoveInstruction(invoke_instruction);
+ }
// Run type propagation to get the guard typed.
ReferenceTypePropagation rtp_fixup(graph_,
@@ -744,6 +765,12 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
HInstruction** return_replacement) {
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
+ if (method->IsProxyMethod()) {
+ VLOG(compiler) << "Method " << PrettyMethod(method)
+ << " is not inlined because of unimplemented inline support for proxy methods.";
+ return false;
+ }
+
// Check whether we're allowed to inline. The outermost compilation unit is the relevant
// dex file here (though the transitivity of an inline chain would allow checking the calller).
if (!compiler_driver_->MayInline(method->GetDexFile(),
@@ -802,7 +829,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
if (!method->GetDeclaringClass()->IsVerified()) {
uint16_t class_def_idx = method->GetDeclaringClass()->GetDexClassDefIndex();
- if (Runtime::Current()->UseJit() ||
+ if (Runtime::Current()->UseJitCompilation() ||
!compiler_driver_->IsMethodVerifiedWithoutFailures(
method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
@@ -1265,6 +1292,8 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
size_t HInliner::RunOptimizations(HGraph* callee_graph,
const DexFile::CodeItem* code_item,
const DexCompilationUnit& dex_compilation_unit) {
+ // Note: if the outermost_graph_ is being compiled OSR, we should not run any
+ // optimization that could lead to a HDeoptimize. The following optimizations do not.
HDeadCodeElimination dce(callee_graph, stats_);
HConstantFolding fold(callee_graph);
HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 863dd1c6f6..39a1313ba0 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -30,6 +30,10 @@ class DexFile;
// Temporary measure until we have caught up with the Java 7 definition of Math.round. b/26327751
static constexpr bool kRoundIsPlusPointFive = false;
+// Positive floating-point infinities.
+static constexpr uint32_t kPositiveInfinityFloat = 0x7f800000U;
+static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000);
+
// Recognize intrinsics from HInvoke nodes.
class IntrinsicsRecognizer : public HOptimization {
public:
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 86b7bc138c..146fea1fe0 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1985,6 +1985,56 @@ void IntrinsicCodeGeneratorARM::VisitStringGetCharsNoCheck(HInvoke* invoke) {
__ Bind(&done);
}
+void IntrinsicLocationsBuilderARM::VisitFloatIsInfinite(HInvoke* invoke) {
+ CreateFPToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitFloatIsInfinite(HInvoke* invoke) {
+ ArmAssembler* const assembler = GetAssembler();
+ LocationSummary* const locations = invoke->GetLocations();
+ const Register out = locations->Out().AsRegister<Register>();
+ // Shifting left by 1 bit makes the value encodable as an immediate operand;
+ // we don't care about the sign bit anyway.
+ constexpr uint32_t infinity = kPositiveInfinityFloat << 1U;
+
+ __ vmovrs(out, locations->InAt(0).AsFpuRegister<SRegister>());
+ // We don't care about the sign bit, so shift left.
+ __ Lsl(out, out, 1);
+ __ eor(out, out, ShifterOperand(infinity));
+ // If the result is 0, then it has 32 leading zeros, and less than that otherwise.
+ __ clz(out, out);
+ // Any number less than 32 logically shifted right by 5 bits results in 0;
+ // the same operation on 32 yields 1.
+ __ Lsr(out, out, 5);
+}
+
+void IntrinsicLocationsBuilderARM::VisitDoubleIsInfinite(HInvoke* invoke) {
+ CreateFPToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitDoubleIsInfinite(HInvoke* invoke) {
+ ArmAssembler* const assembler = GetAssembler();
+ LocationSummary* const locations = invoke->GetLocations();
+ const Register out = locations->Out().AsRegister<Register>();
+ // The highest 32 bits of double precision positive infinity separated into
+ // two constants encodable as immediate operands.
+ constexpr uint32_t infinity_high = 0x7f000000U;
+ constexpr uint32_t infinity_high2 = 0x00f00000U;
+
+ static_assert((infinity_high | infinity_high2) == static_cast<uint32_t>(kPositiveInfinityDouble >> 32U),
+ "The constants do not add up to the high 32 bits of double precision positive infinity.");
+ __ vmovrrd(IP, out, FromLowSToD(locations->InAt(0).AsFpuRegisterPairLow<SRegister>()));
+ __ eor(out, out, ShifterOperand(infinity_high));
+ __ eor(out, out, ShifterOperand(infinity_high2));
+ // We don't care about the sign bit, so shift left.
+ __ orr(out, IP, ShifterOperand(out, LSL, 1));
+ // If the result is 0, then it has 32 leading zeros, and less than that otherwise.
+ __ clz(out, out);
+ // Any number less than 32 logically shifted right by 5 bits results in 0;
+ // the same operation on 32 yields 1.
+ __ Lsr(out, out, 5);
+}
+
UNIMPLEMENTED_INTRINSIC(ARM, IntegerBitCount)
UNIMPLEMENTED_INTRINSIC(ARM, LongBitCount)
UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
@@ -2001,8 +2051,6 @@ UNIMPLEMENTED_INTRINSIC(ARM, MathRoundFloat) // Could be done by changing rou
UNIMPLEMENTED_INTRINSIC(ARM, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARM, SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ARM, ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(ARM, FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(ARM, DoubleIsInfinite)
UNIMPLEMENTED_INTRINSIC(ARM, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit)
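
[Note] The new ARM IsInfinite intrinsics above test Float.isInfinite / Double.isInfinite without branches: shift the sign bit out, XOR against the correspondingly shifted infinity bit pattern, then map "result is zero" to 1 via CLZ followed by a logical shift right by 5. A small host-side C++ sketch of the float variant (illustration only, not ART code; __builtin_clz assumes a GCC/Clang toolchain):

    #include <cstdint>
    #include <cstring>

    // Branch-free Float.isInfinite, mirroring the ARM sequence (Lsl #1, eor, clz, Lsr #5).
    int FloatIsInfinite(float value) {
      uint32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));                  // bit-cast, like vmovrs
      constexpr uint32_t kPositiveInfinityFloat = 0x7f800000u;   // as defined in intrinsics.h
      uint32_t x = (bits << 1) ^ (kPositiveInfinityFloat << 1);  // sign bit discarded
      // x == 0 exactly for +/-infinity: clz(0) is 32 and 32 >> 5 is 1, while any
      // nonzero x has at most 31 leading zeros and 31 >> 5 is 0.
      int leading_zeros = (x == 0) ? 32 : __builtin_clz(x);
      return leading_zeros >> 5;
    }

For example, FloatIsInfinite(std::numeric_limits<float>::infinity()) returns 1, while FloatIsInfinite(42.0f) or a NaN input returns 0. The double variant applies the same idea to the high word, folding the low word in with an OR before the CLZ.
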
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index bf79767822..1d8229674c 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2289,9 +2289,46 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+static void GenIsInfinite(LocationSummary* locations,
+ bool is64bit,
+ vixl::MacroAssembler* masm) {
+ Operand infinity;
+ Register out;
+
+ if (is64bit) {
+ infinity = kPositiveInfinityDouble;
+ out = XRegisterFrom(locations->Out());
+ } else {
+ infinity = kPositiveInfinityFloat;
+ out = WRegisterFrom(locations->Out());
+ }
+
+ const Register zero = vixl::Assembler::AppropriateZeroRegFor(out);
+
+ MoveFPToInt(locations, is64bit, masm);
+ __ Eor(out, out, infinity);
+ // We don't care about the sign bit, so shift left.
+ __ Cmp(zero, Operand(out, LSL, 1));
+ __ Cset(out, eq);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitFloatIsInfinite(HInvoke* invoke) {
+ CreateFPToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) {
+ GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
+ CreateFPToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
+ GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(ARM64, FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(ARM64, DoubleIsInfinite)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
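Both new location builders reuse the CreateFPToIntLocations helper that already exists in this file; for orientation, such a helper conventionally requests an FPU register input and a core register output, roughly as in the sketch below (an assumption about its shape, not a quote of the actual code):

static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());  // the FP value to classify
  locations->SetOut(Location::RequiresRegister());         // the boolean result
}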
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 19c6a225ac..46195c104a 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2283,10 +2283,10 @@ static void GenIsInfinite(LocationSummary* locations,
// If one, or more, of the exponent bits is zero, then the number can't be infinite.
if (type == Primitive::kPrimDouble) {
__ MoveFromFpuHigh(TMP, in);
- __ LoadConst32(AT, 0x7FF00000);
+ __ LoadConst32(AT, High32Bits(kPositiveInfinityDouble));
} else {
__ Mfc1(TMP, in);
- __ LoadConst32(AT, 0x7F800000);
+ __ LoadConst32(AT, kPositiveInfinityFloat);
}
__ Xor(TMP, TMP, AT);
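The substitution is value-preserving: kPositiveInfinityFloat is the single-precision infinity pattern 0x7F800000 and the high 32 bits of kPositiveInfinityDouble are 0x7FF00000. A compile-time check in the spirit of the static_assert added to intrinsics_arm.cc above (a sketch, assuming both constants remain usable in constant expressions here) would pin that down:

static_assert(kPositiveInfinityFloat == 0x7f800000u,
              "kPositiveInfinityFloat must match the old 0x7F800000 literal.");
static_assert(static_cast<uint32_t>(kPositiveInfinityDouble >> 32U) == 0x7ff00000u,
              "The high word of kPositiveInfinityDouble must match the old 0x7FF00000 literal.");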
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index fe75451ad2..6703695484 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2218,6 +2218,7 @@ ReferenceTypeInfo ReferenceTypeInfo::Create(TypeHandle type_handle, bool is_exac
ScopedObjectAccess soa(Thread::Current());
DCHECK(IsValidHandle(type_handle));
DCHECK(!type_handle->IsErroneous());
+ DCHECK(!type_handle->IsArrayClass() || !type_handle->GetComponentType()->IsErroneous());
if (!is_exact) {
DCHECK(!type_handle->CannotBeAssignedFromOtherTypes())
<< "Callers of ReferenceTypeInfo::Create should ensure is_exact is properly computed";
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 04c9ff9d6d..f2394f605a 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -46,6 +46,13 @@ static inline ReferenceTypeInfo::TypeHandle GetRootHandle(StackHandleScopeCollec
return *cache;
}
+// Returns true if klass is admissible to the propagation: non-null and non-erroneous.
+// For an array type, we also check if the component type is admissible.
+static bool IsAdmissible(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+ return klass != nullptr && !klass->IsErroneous() &&
+ (!klass->IsArrayClass() || IsAdmissible(klass->GetComponentType()));
+}
+
ReferenceTypeInfo::TypeHandle ReferenceTypePropagation::HandleCache::GetObjectClassHandle() {
return GetRootHandle(handles_, ClassLinker::kJavaLangObject, &object_class_handle_);
}
@@ -453,15 +460,10 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
}
instr->SetReferenceTypeInfo(
ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
- } else if (klass != nullptr) {
- if (klass->IsErroneous()) {
- // Set inexact object type for erroneous types.
- instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti());
- } else {
- ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass);
- is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes();
- instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact));
- }
+ } else if (IsAdmissible(klass)) {
+ ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass);
+ is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes();
+ instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact));
} else {
instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti());
}
@@ -563,7 +565,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitLoadClass(HLoadClass* instr) {
instr->GetDexFile(),
instr->GetTypeIndex(),
hint_dex_cache_);
- if (resolved_class != nullptr && !resolved_class->IsErroneous()) {
+ if (IsAdmissible(resolved_class)) {
instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(
handle_cache_->NewHandle(resolved_class), /* is_exact */ true));
}
@@ -664,12 +666,6 @@ void ReferenceTypePropagation::VisitPhi(HPhi* phi) {
}
if (phi->GetBlock()->IsLoopHeader()) {
- if (!is_first_run_ && graph_->IsCompilingOsr()) {
- // Don't update the type of a loop phi when compiling OSR: we may have done
- // speculative optimizations dominating that phi, that do not hold at the
- // point the interpreter jumps to that loop header.
- return;
- }
// Set the initial type for the phi. Use the non back edge input for reaching
// a fixed point faster.
HInstruction* first_input = phi->InputAt(0);
@@ -742,7 +738,7 @@ void ReferenceTypePropagation::UpdateArrayGet(HArrayGet* instr, HandleCache* han
}
Handle<mirror::Class> handle = parent_rti.GetTypeHandle();
- if (handle->IsObjectArrayClass() && !handle->GetComponentType()->IsErroneous()) {
+ if (handle->IsObjectArrayClass() && IsAdmissible(handle->GetComponentType())) {
ReferenceTypeInfo::TypeHandle component_handle =
handle_cache->NewHandle(handle->GetComponentType());
bool is_exact = component_handle->CannotBeAssignedFromOtherTypes();
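The recursion in IsAdmissible only goes as deep as the array's dimensionality, stripping one component type per step. An equivalent iterative form, shown purely as an illustration of the check (the name IsAdmissibleIterative is hypothetical):

static bool IsAdmissibleIterative(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
  // Peel array dimensions until a non-array component (or a bad class) is reached.
  while (klass != nullptr && !klass->IsErroneous() && klass->IsArrayClass()) {
    klass = klass->GetComponentType();
  }
  return klass != nullptr && !klass->IsErroneous();
}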
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 7a1bb316e4..08bd35f14a 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -99,7 +99,7 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
if (direct_method != 0u) { // Should we use a direct pointer to the method?
// Note: For JIT, kDirectAddressWithFixup doesn't make sense at all and while
// kDirectAddress would be fine for image methods, we don't support it at the moment.
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
if (direct_method != static_cast<uintptr_t>(-1)) { // Is the method pointer known now?
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
method_load_data = direct_method;
@@ -109,7 +109,7 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
} else { // Use dex cache.
DCHECK_EQ(target_method.dex_file, &graph_->GetDexFile());
if (use_pc_relative_instructions) { // Can we use PC-relative access to the dex cache arrays?
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen_->GetInstructionSet()),
&graph_->GetDexFile());
@@ -121,7 +121,7 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
if (direct_code != 0u) { // Should we use a direct pointer to the code?
// Note: For JIT, kCallPCRelative and kCallDirectWithFixup don't make sense at all and
// while kCallDirect would be fine for image methods, we don't support it at the moment.
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
if (direct_code != static_cast<uintptr_t>(-1)) { // Is the code pointer known now?
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
direct_code_ptr = direct_code;
@@ -174,7 +174,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
if (compiler_driver_->IsBootImage()) {
// Compiling boot image. Resolve the string and allocate it if needed.
- DCHECK(!runtime->UseJit());
+ DCHECK(!runtime->UseJitCompilation());
mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr);
if (!compiler_driver_->GetSupportBootImageFixup()) {
@@ -187,7 +187,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
? HLoadString::LoadKind::kBootImageLinkTimePcRelative
: HLoadString::LoadKind::kBootImageLinkTimeAddress;
}
- } else if (runtime->UseJit()) {
+ } else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
mirror::String* string = dex_cache->GetResolvedString(string_index);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 40dab74a23..1141fd1c76 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1003,6 +1003,15 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
void AddBackEdgeUses(const HBasicBlock& block_at_use) {
DCHECK(block_at_use.IsInLoop());
+ if (block_at_use.GetGraph()->HasIrreducibleLoops()) {
+ // Linear order may not be well formed when irreducible loops are present,
+ // i.e. loop blocks may not be adjacent and a back edge may not be last,
+ // which violates assumptions made in this method.
+ return;
+ }
+
+ DCHECK(IsLinearOrderWellFormed(*block_at_use.GetGraph()));
+
// Add synthesized uses at the back edge of loops to help the register allocator.
// Note that this method is called in decreasing liveness order, to facilitate adding
// uses at the head of the `first_use_` linked list. Because below
@@ -1027,30 +1036,12 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
if ((first_use_ != nullptr) && (first_use_->GetPosition() <= back_edge_use_position)) {
// There was a use already seen in this loop. Therefore the previous call to `AddUse`
// already inserted the backedge use. We can stop going outward.
- if (kIsDebugBuild) {
- if (!HasSynthesizeUseAt(back_edge_use_position)) {
- // There exists a use prior to `back_edge_use_position` but there is
- // no synthesized use at the back edge. This can happen in the presence
- // of irreducible loops, when blocks of the loop are not adjacent in
- // linear order, i.e. when there is an out-of-loop block between
- // `block_at_use` and `back_edge_position` that uses this interval.
- DCHECK(block_at_use.GetGraph()->HasIrreducibleLoops());
- DCHECK(!IsLinearOrderWellFormed(*block_at_use.GetGraph()));
- }
- }
+ DCHECK(HasSynthesizeUseAt(back_edge_use_position));
break;
}
- if (last_in_new_list != nullptr &&
- back_edge_use_position <= last_in_new_list->GetPosition()) {
- // Loops are not properly nested in the linear order, i.e. the back edge
- // of an outer loop preceeds blocks of an inner loop. This can happen
- // in the presence of irreducible loops.
- DCHECK(block_at_use.GetGraph()->HasIrreducibleLoops());
- DCHECK(!IsLinearOrderWellFormed(*block_at_use.GetGraph()));
- // We must bail out, otherwise we would generate an unsorted use list.
- break;
- }
+ DCHECK(last_in_new_list == nullptr ||
+ back_edge_use_position > last_in_new_list->GetPosition());
UsePosition* new_use = new (allocator_) UsePosition(
/* user */ nullptr,