Diffstat (limited to 'compiler')
 compiler/dex/gvn_dead_code_elimination.cc     |   2
 compiler/driver/compiler_options.h            |   4
 compiler/image_writer.cc                      | 249
 compiler/image_writer.h                       |  39
 compiler/optimizing/code_generator.h          |   2
 compiler/optimizing/code_generator_arm.cc     |  14
 compiler/optimizing/code_generator_arm64.cc   |  14
 compiler/optimizing/code_generator_mips64.cc  |  14
 compiler/optimizing/code_generator_x86.cc     | 143
 compiler/optimizing/code_generator_x86.h      |   6
 compiler/optimizing/code_generator_x86_64.cc  | 133
 compiler/optimizing/code_generator_x86_64.h   |   6
 compiler/optimizing/instruction_simplifier.cc |  24
 compiler/optimizing/register_allocator.cc     |   2
 14 files changed, 458 insertions(+), 194 deletions(-)
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index 044989e2a9..d29b865ce9 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -74,7 +74,7 @@ inline void GvnDeadCodeElimination::MIRData::RemovePrevChange(int v_reg, MIRData
GvnDeadCodeElimination::VRegChains::VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc)
: num_vregs_(num_vregs),
vreg_data_(alloc->AllocArray<VRegValue>(num_vregs, kArenaAllocMisc)),
- vreg_high_words_(num_vregs, false, Allocator::GetNoopAllocator(),
+ vreg_high_words_(false, Allocator::GetNoopAllocator(),
BitVector::BitsToWords(num_vregs),
alloc->AllocArray<uint32_t>(BitVector::BitsToWords(num_vregs))),
mir_data_(alloc->Adapter()) {
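
Note: the hunk above switches vreg_high_words_ to the BitVector constructor that takes caller-provided storage, with the backing words carved out of the scoped arena (alloc->AllocArray<uint32_t>(...)) rather than allocated by the bit vector itself. A minimal, self-contained sketch of that pattern follows; FixedBitVector is an illustrative stand-in, not ART's actual BitVector:

    #include <cstdint>
    #include <cstring>

    // Bit vector over caller-owned storage: the container never allocates,
    // mirroring the constructor shape used in the hunk above.
    class FixedBitVector {
     public:
      FixedBitVector(uint32_t storage_size, uint32_t* storage)
          : storage_size_(storage_size), storage_(storage) {
        std::memset(storage_, 0, storage_size_ * sizeof(uint32_t));
      }
      void SetBit(uint32_t idx) { storage_[idx / 32u] |= 1u << (idx % 32u); }
      bool IsBitSet(uint32_t idx) const {
        return (storage_[idx / 32u] >> (idx % 32u)) & 1u;
      }
      static uint32_t BitsToWords(uint32_t bits) { return (bits + 31u) / 32u; }

     private:
      uint32_t storage_size_;
      uint32_t* storage_;
    };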
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 17b19dd51e..d2a90ec87f 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -54,6 +54,10 @@ class CompilerOptions FINAL {
static const size_t kDefaultInlineDepthLimit = 3;
static const size_t kDefaultInlineMaxCodeUnits = 18;
+ // Default inlining settings when the space filter is used.
+ static constexpr size_t kSpaceFilterInlineDepthLimit = 3;
+ static constexpr size_t kSpaceFilterInlineMaxCodeUnits = 10;
+
CompilerOptions();
~CompilerOptions();
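
Note: these constants tighten inlining when compiling with the space filter (max code units drops from 18 to 10). A hedged sketch of how such constants would be consumed; the Filter enum and LimitsFor helper here are hypothetical, not the actual CompilerOptions API:

    #include <cstddef>

    enum class Filter { kDefault, kSpace };

    struct InlineLimits {
      size_t depth_limit;
      size_t max_code_units;
    };

    // Select tighter limits when optimizing for space.
    InlineLimits LimitsFor(Filter filter) {
      if (filter == Filter::kSpace) {
        return {3, 10};  // kSpaceFilterInlineDepthLimit / ...MaxCodeUnits above
      }
      return {3, 18};    // kDefaultInlineDepthLimit / ...MaxCodeUnits above
    }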
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 0b2607761c..4ce3129f55 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -825,35 +825,68 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
field_offset = MemberOffset(field_offset.Uint32Value() +
sizeof(mirror::HeapReference<mirror::Object>));
}
- // Visit and assign offsets for fields.
+ // Visit and assign offsets for fields and field arrays.
auto* as_klass = h_obj->AsClass();
- ArtField* fields[] = { as_klass->GetSFields(), as_klass->GetIFields() };
- size_t num_fields[] = { as_klass->NumStaticFields(), as_klass->NumInstanceFields() };
- for (size_t i = 0; i < 2; ++i) {
- for (size_t j = 0; j < num_fields[i]; ++j) {
- auto* field = fields[i] + j;
- auto it = native_object_reloc_.find(field);
- CHECK(it == native_object_reloc_.end()) << "Field at index " << i << ":" << j
- << " already assigned " << PrettyField(field);
- native_object_reloc_.emplace(
- field, NativeObjectReloc { bin_slot_sizes_[kBinArtField], kBinArtField });
- bin_slot_sizes_[kBinArtField] += sizeof(ArtField);
+ LengthPrefixedArray<ArtField>* fields[] = {
+ as_klass->GetSFieldsPtr(), as_klass->GetIFieldsPtr(),
+ };
+ for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
+ // Total array length including header.
+ if (cur_fields != nullptr) {
+ const size_t header_size = LengthPrefixedArray<ArtField>::ComputeSize(0);
+ // Forward the entire array at once.
+ auto it = native_object_relocations_.find(cur_fields);
+ CHECK(it == native_object_relocations_.end()) << "Field array " << cur_fields
+ << " already forwarded";
+ size_t& offset = bin_slot_sizes_[kBinArtField];
+ native_object_relocations_.emplace(
+ cur_fields, NativeObjectRelocation {
+ offset, kNativeObjectRelocationTypeArtFieldArray });
+ offset += header_size;
+ // Forward individual fields so that we can quickly find where they belong.
+ for (size_t i = 0, count = cur_fields->Length(); i < count; ++i) {
+        // Need to forward arrays separately from the fields they contain.
+ ArtField* field = &cur_fields->At(i);
+ auto it2 = native_object_relocations_.find(field);
+ CHECK(it2 == native_object_relocations_.end()) << "Field at index=" << i
+ << " already assigned " << PrettyField(field) << " static=" << field->IsStatic();
+ native_object_relocations_.emplace(
+ field, NativeObjectRelocation {offset, kNativeObjectRelocationTypeArtField });
+ offset += sizeof(ArtField);
+ }
}
}
// Visit and assign offsets for methods.
- IterationRange<StrideIterator<ArtMethod>> method_arrays[] = {
- as_klass->GetDirectMethods(target_ptr_size_),
- as_klass->GetVirtualMethods(target_ptr_size_)
+ LengthPrefixedArray<ArtMethod>* method_arrays[] = {
+ as_klass->GetDirectMethodsPtr(), as_klass->GetVirtualMethodsPtr(),
};
- for (auto& array : method_arrays) {
+ for (LengthPrefixedArray<ArtMethod>* array : method_arrays) {
+ if (array == nullptr) {
+ continue;
+ }
bool any_dirty = false;
size_t count = 0;
- for (auto& m : array) {
+ const size_t method_size = ArtMethod::ObjectSize(target_ptr_size_);
+ auto iteration_range = MakeIterationRangeFromLengthPrefixedArray(array, method_size);
+ for (auto& m : iteration_range) {
any_dirty = any_dirty || WillMethodBeDirty(&m);
++count;
}
- for (auto& m : array) {
- AssignMethodOffset(&m, any_dirty ? kBinArtMethodDirty : kBinArtMethodClean);
+ NativeObjectRelocationType type = any_dirty ? kNativeObjectRelocationTypeArtMethodDirty :
+ kNativeObjectRelocationTypeArtMethodClean;
+ Bin bin_type = BinTypeForNativeRelocationType(type);
+ // Forward the entire array at once, but header first.
+ const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0, method_size);
+ auto it = native_object_relocations_.find(array);
+ CHECK(it == native_object_relocations_.end()) << "Method array " << array
+ << " already forwarded";
+ size_t& offset = bin_slot_sizes_[bin_type];
+ native_object_relocations_.emplace(array, NativeObjectRelocation { offset,
+ any_dirty ? kNativeObjectRelocationTypeArtMethodArrayDirty :
+ kNativeObjectRelocationTypeArtMethodArrayClean });
+ offset += header_size;
+ for (auto& m : iteration_range) {
+ AssignMethodOffset(&m, type);
}
(any_dirty ? dirty_methods_ : clean_methods_) += count;
}
@@ -871,12 +904,13 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
}
}
-void ImageWriter::AssignMethodOffset(ArtMethod* method, Bin bin) {
- auto it = native_object_reloc_.find(method);
- CHECK(it == native_object_reloc_.end()) << "Method " << method << " already assigned "
+void ImageWriter::AssignMethodOffset(ArtMethod* method, NativeObjectRelocationType type) {
+ auto it = native_object_relocations_.find(method);
+ CHECK(it == native_object_relocations_.end()) << "Method " << method << " already assigned "
<< PrettyMethod(method);
- native_object_reloc_.emplace(method, NativeObjectReloc { bin_slot_sizes_[bin], bin });
- bin_slot_sizes_[bin] += ArtMethod::ObjectSize(target_ptr_size_);
+ size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(type)];
+ native_object_relocations_.emplace(method, NativeObjectRelocation { offset, type });
+ offset += ArtMethod::ObjectSize(target_ptr_size_);
}
void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) {
@@ -930,10 +964,20 @@ void ImageWriter::CalculateNewObjectOffsets() {
runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
image_methods_[ImageHeader::kRefsAndArgsSaveMethod] =
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+
+ // Add room for fake length prefixed array.
+ const auto image_method_type = kNativeObjectRelocationTypeArtMethodArrayClean;
+ auto it = native_object_relocations_.find(&image_method_array_);
+ CHECK(it == native_object_relocations_.end());
+ size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
+ native_object_relocations_.emplace(&image_method_array_,
+ NativeObjectRelocation { offset, image_method_type });
+ CHECK_EQ(sizeof(image_method_array_), 8u);
+ offset += sizeof(image_method_array_);
for (auto* m : image_methods_) {
CHECK(m != nullptr);
CHECK(m->IsRuntimeMethod());
- AssignMethodOffset(m, kBinArtMethodDirty);
+ AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean);
}
// Calculate cumulative bin slot sizes.
@@ -953,10 +997,10 @@ void ImageWriter::CalculateNewObjectOffsets() {
image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots.Get()));
// Update the native relocations by adding their bin sums.
- for (auto& pair : native_object_reloc_) {
- auto& native_reloc = pair.second;
- native_reloc.offset += image_objects_offset_begin_ +
- bin_slot_previous_sizes_[native_reloc.bin_type];
+ for (auto& pair : native_object_relocations_) {
+ NativeObjectRelocation& relocation = pair.second;
+ Bin bin_type = BinTypeForNativeRelocationType(relocation.type);
+ relocation.offset += image_objects_offset_begin_ + bin_slot_previous_sizes_[bin_type];
}
// Calculate how big the intern table will be after being serialized.
@@ -1025,8 +1069,8 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
}
ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
- auto it = native_object_reloc_.find(method);
- CHECK(it != native_object_reloc_.end()) << PrettyMethod(method) << " @ " << method;
+ auto it = native_object_relocations_.find(method);
+ CHECK(it != native_object_relocations_.end()) << PrettyMethod(method) << " @ " << method;
CHECK_GE(it->second.offset, image_end_) << "ArtMethods should be after Objects";
return reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset);
}
@@ -1064,20 +1108,34 @@ class FixupRootVisitor : public RootVisitor {
void ImageWriter::CopyAndFixupNativeData() {
// Copy ArtFields and methods to their locations and update the array for convenience.
- for (auto& pair : native_object_reloc_) {
- auto& native_reloc = pair.second;
- if (native_reloc.bin_type == kBinArtField) {
- auto* dest = image_->Begin() + native_reloc.offset;
- DCHECK_GE(dest, image_->Begin() + image_end_);
- memcpy(dest, pair.first, sizeof(ArtField));
- reinterpret_cast<ArtField*>(dest)->SetDeclaringClass(
- GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass()));
- } else {
- CHECK(IsArtMethodBin(native_reloc.bin_type)) << native_reloc.bin_type;
- auto* dest = image_->Begin() + native_reloc.offset;
- DCHECK_GE(dest, image_->Begin() + image_end_);
- CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first),
- reinterpret_cast<ArtMethod*>(dest));
+ for (auto& pair : native_object_relocations_) {
+ NativeObjectRelocation& relocation = pair.second;
+ auto* dest = image_->Begin() + relocation.offset;
+ DCHECK_GE(dest, image_->Begin() + image_end_);
+ switch (relocation.type) {
+ case kNativeObjectRelocationTypeArtField: {
+ memcpy(dest, pair.first, sizeof(ArtField));
+ reinterpret_cast<ArtField*>(dest)->SetDeclaringClass(
+ GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass()));
+ break;
+ }
+ case kNativeObjectRelocationTypeArtMethodClean:
+ case kNativeObjectRelocationTypeArtMethodDirty: {
+ CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first),
+ reinterpret_cast<ArtMethod*>(dest));
+ break;
+ }
+ // For arrays, copy just the header since the elements will get copied by their corresponding
+ // relocations.
+ case kNativeObjectRelocationTypeArtFieldArray: {
+ memcpy(dest, pair.first, LengthPrefixedArray<ArtField>::ComputeSize(0));
+ break;
+ }
+ case kNativeObjectRelocationTypeArtMethodArrayClean:
+ case kNativeObjectRelocationTypeArtMethodArrayDirty: {
+ memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(0));
+ break;
+ }
}
}
// Fixup the image method roots.
@@ -1086,12 +1144,12 @@ void ImageWriter::CopyAndFixupNativeData() {
for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
auto* m = image_methods_[i];
CHECK(m != nullptr);
- auto it = native_object_reloc_.find(m);
- CHECK(it != native_object_reloc_.end()) << "No fowarding for " << PrettyMethod(m);
- auto& native_reloc = it->second;
- CHECK(methods_section.Contains(native_reloc.offset)) << native_reloc.offset << " not in "
+ auto it = native_object_relocations_.find(m);
+    CHECK(it != native_object_relocations_.end()) << "No forwarding for " << PrettyMethod(m);
+ NativeObjectRelocation& relocation = it->second;
+ CHECK(methods_section.Contains(relocation.offset)) << relocation.offset << " not in "
<< methods_section;
- CHECK(IsArtMethodBin(native_reloc.bin_type)) << native_reloc.bin_type;
+ CHECK(relocation.IsArtMethodRelocation()) << relocation.type;
auto* dest = reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset);
image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), dest);
}
@@ -1143,9 +1201,9 @@ void ImageWriter::FixupPointerArray(mirror::Object* dst, mirror::PointerArray* a
for (size_t i = 0, count = num_elements; i < count; ++i) {
auto* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_);
if (elem != nullptr) {
- auto it = native_object_reloc_.find(elem);
- if (it == native_object_reloc_.end()) {
- if (IsArtMethodBin(array_type)) {
+ auto it = native_object_relocations_.find(elem);
+ if (it == native_object_relocations_.end()) {
+ if (true) {
auto* method = reinterpret_cast<ArtMethod*>(elem);
LOG(FATAL) << "No relocation entry for ArtMethod " << PrettyMethod(method) << " @ "
<< method << " idx=" << i << "/" << num_elements << " with declaring class "
@@ -1237,51 +1295,38 @@ class FixupClassVisitor FINAL : public FixupVisitor {
}
};
+void* ImageWriter::NativeLocationInImage(void* obj) {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ auto it = native_object_relocations_.find(obj);
+  CHECK(it != native_object_relocations_.end()) << obj;
+  const NativeObjectRelocation& relocation = it->second;
+ return reinterpret_cast<void*>(image_begin_ + relocation.offset);
+}
+
void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
- // Copy and fix up ArtFields in the class.
- ArtField* fields[2] = { orig->GetSFields(), orig->GetIFields() };
- size_t num_fields[2] = { orig->NumStaticFields(), orig->NumInstanceFields() };
// Update the field arrays.
- for (size_t i = 0; i < 2; ++i) {
- if (num_fields[i] == 0) {
- CHECK(fields[i] == nullptr);
- continue;
- }
- auto it = native_object_reloc_.find(fields[i]);
- CHECK(it != native_object_reloc_.end()) << PrettyClass(orig) << " : " << PrettyField(fields[i]);
- auto* image_fields = reinterpret_cast<ArtField*>(image_begin_ + it->second.offset);
- if (i == 0) {
- copy->SetSFieldsUnchecked(image_fields);
- } else {
- copy->SetIFieldsUnchecked(image_fields);
- }
- }
- // Update direct / virtual method arrays.
- auto* direct_methods = orig->GetDirectMethodsPtr();
- if (direct_methods != nullptr) {
- auto it = native_object_reloc_.find(direct_methods);
- CHECK(it != native_object_reloc_.end()) << PrettyClass(orig);
- copy->SetDirectMethodsPtrUnchecked(
- reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset));
- }
- auto* virtual_methods = orig->GetVirtualMethodsPtr();
- if (virtual_methods != nullptr) {
- auto it = native_object_reloc_.find(virtual_methods);
- CHECK(it != native_object_reloc_.end()) << PrettyClass(orig);
- copy->SetVirtualMethodsPtr(
- reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset));
- }
+ copy->SetSFieldsPtrUnchecked(reinterpret_cast<LengthPrefixedArray<ArtField>*>(
+ NativeLocationInImage(orig->GetSFieldsPtr())));
+ copy->SetIFieldsPtrUnchecked(reinterpret_cast<LengthPrefixedArray<ArtField>*>(
+ NativeLocationInImage(orig->GetIFieldsPtr())));
+ // Update direct and virtual method arrays.
+ copy->SetDirectMethodsPtrUnchecked(reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
+ NativeLocationInImage(orig->GetDirectMethodsPtr())));
+ copy->SetVirtualMethodsPtr(reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
+ NativeLocationInImage(orig->GetVirtualMethodsPtr())));
// Fix up embedded tables.
if (orig->ShouldHaveEmbeddedImtAndVTable()) {
for (int32_t i = 0; i < orig->GetEmbeddedVTableLength(); ++i) {
- auto it = native_object_reloc_.find(orig->GetEmbeddedVTableEntry(i, target_ptr_size_));
- CHECK(it != native_object_reloc_.end()) << PrettyClass(orig);
+ auto it = native_object_relocations_.find(orig->GetEmbeddedVTableEntry(i, target_ptr_size_));
+ CHECK(it != native_object_relocations_.end()) << PrettyClass(orig);
copy->SetEmbeddedVTableEntryUnchecked(
i, reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset), target_ptr_size_);
}
for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- auto it = native_object_reloc_.find(orig->GetEmbeddedImTableEntry(i, target_ptr_size_));
- CHECK(it != native_object_reloc_.end()) << PrettyClass(orig);
+ auto it = native_object_relocations_.find(orig->GetEmbeddedImTableEntry(i, target_ptr_size_));
+ CHECK(it != native_object_relocations_.end()) << PrettyClass(orig);
copy->SetEmbeddedImTableEntry(
i, reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset), target_ptr_size_);
}
@@ -1322,11 +1367,16 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
auto* dest = down_cast<mirror::AbstractMethod*>(copy);
auto* src = down_cast<mirror::AbstractMethod*>(orig);
ArtMethod* src_method = src->GetArtMethod();
- auto it = native_object_reloc_.find(src_method);
- CHECK(it != native_object_reloc_.end()) << "Missing relocation for AbstractMethod.artMethod "
- << PrettyMethod(src_method);
+ auto it = native_object_relocations_.find(src_method);
+ CHECK(it != native_object_relocations_.end())
+ << "Missing relocation for AbstractMethod.artMethod " << PrettyMethod(src_method);
dest->SetArtMethod(
reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset));
+ } else if (!klass->IsArrayClass() && klass->IsSubClass(down_cast<mirror::Class*>(
+ Thread::Current()->DecodeJObject(WellKnownClasses::java_lang_ClassLoader)))) {
+ // If src is a ClassLoader, set the class table to null so that it gets recreated by the
+ // ClassLoader.
+ down_cast<mirror::ClassLoader*>(copy)->SetClassTable(nullptr);
}
FixupVisitor visitor(this, copy);
orig->VisitReferences<true /*visit class*/>(visitor, visitor);
@@ -1504,4 +1554,19 @@ uint8_t* ImageWriter::GetOatFileBegin() const {
bin_slot_sizes_[kBinArtMethodClean] + intern_table_bytes_, kPageSize);
}
+ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocationType type) {
+ switch (type) {
+ case kNativeObjectRelocationTypeArtField:
+ case kNativeObjectRelocationTypeArtFieldArray:
+ return kBinArtField;
+ case kNativeObjectRelocationTypeArtMethodClean:
+ case kNativeObjectRelocationTypeArtMethodArrayClean:
+ return kBinArtMethodClean;
+ case kNativeObjectRelocationTypeArtMethodDirty:
+ case kNativeObjectRelocationTypeArtMethodArrayDirty:
+ return kBinArtMethodDirty;
+ }
+ UNREACHABLE();
+}
+
} // namespace art
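
Note: the image_writer.cc changes above replace per-object forwarding of fields and methods with forwarding of whole length-prefixed arrays: the array header is assigned a bin offset first, each element gets the offset immediately after it, and CopyAndFixupNativeData later copies only the header for array relocations because the elements are copied by their own entries. A self-contained model of that bookkeeping (toy types, not ART's LengthPrefixedArray):

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    // Length header followed inline by the elements.
    template <typename T>
    struct ToyLengthPrefixedArray {
      uint64_t length;
      static size_t ComputeSize(size_t num_elements) {
        return sizeof(uint64_t) + num_elements * sizeof(T);
      }
      T* ElementAt(size_t i) {
        return reinterpret_cast<T*>(this + 1) + i;
      }
    };

    struct Relocation { size_t offset; };

    // Mirrors WalkFieldsInOrder: record the array at the current bin offset,
    // advance past the header, then record each element in order.
    template <typename T>
    void ForwardArray(ToyLengthPrefixedArray<T>* array, size_t& bin_offset,
                      std::unordered_map<void*, Relocation>& relocations) {
      relocations.emplace(array, Relocation{bin_offset});
      bin_offset += ToyLengthPrefixedArray<T>::ComputeSize(0);  // header only
      for (uint64_t i = 0; i < array->length; ++i) {
        relocations.emplace(array->ElementAt(i), Relocation{bin_offset});
        bin_offset += sizeof(T);
      }
    }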
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index cabd918354..eb6aa6f346 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -30,6 +30,7 @@
#include "base/macros.h"
#include "driver/compiler_driver.h"
#include "gc/space/space.h"
+#include "length_prefixed_array.h"
#include "lock_word.h"
#include "mem_map.h"
#include "oat_file.h"
@@ -54,7 +55,8 @@ class ImageWriter FINAL {
quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
bin_slot_sizes_(), bin_slot_previous_sizes_(), bin_slot_count_(),
- intern_table_bytes_(0u), dirty_methods_(0u), clean_methods_(0u) {
+ intern_table_bytes_(0u), image_method_array_(ImageHeader::kImageMethodsCount),
+ dirty_methods_(0u), clean_methods_(0u) {
CHECK_NE(image_begin, 0U);
std::fill(image_methods_, image_methods_ + arraysize(image_methods_), nullptr);
}
@@ -129,9 +131,18 @@ class ImageWriter FINAL {
// Number of bins which are for mirror objects.
kBinMirrorCount = kBinArtField,
};
-
friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);
+ enum NativeObjectRelocationType {
+ kNativeObjectRelocationTypeArtField,
+ kNativeObjectRelocationTypeArtFieldArray,
+ kNativeObjectRelocationTypeArtMethodClean,
+ kNativeObjectRelocationTypeArtMethodArrayClean,
+ kNativeObjectRelocationTypeArtMethodDirty,
+ kNativeObjectRelocationTypeArtMethodArrayDirty,
+ };
+ friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
+
static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
// uint32 = typeof(lockword_)
// Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
@@ -204,10 +215,6 @@ class ImageWriter FINAL {
return offset == 0u ? nullptr : oat_data_begin_ + offset;
}
- static bool IsArtMethodBin(Bin bin) {
- return bin == kBinArtMethodClean || bin == kBinArtMethodDirty;
- }
-
// Returns true if the class was in the original requested image classes list.
bool IsImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -284,7 +291,12 @@ class ImageWriter FINAL {
bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
// Assign the offset for an ArtMethod.
- void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_REQUIRES(Locks::mutator_lock_);
+ void AssignMethodOffset(ArtMethod* method, NativeObjectRelocationType type)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
+
+ void* NativeLocationInImage(void* obj);
const CompilerDriver& compiler_driver_;
@@ -356,14 +368,21 @@ class ImageWriter FINAL {
// ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
// have one entry per art field for convenience. ArtFields are placed right after the end of the
// image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
- struct NativeObjectReloc {
+ struct NativeObjectRelocation {
uintptr_t offset;
- Bin bin_type;
+ NativeObjectRelocationType type;
+
+ bool IsArtMethodRelocation() const {
+ return type == kNativeObjectRelocationTypeArtMethodClean ||
+ type == kNativeObjectRelocationTypeArtMethodDirty;
+ }
};
- std::unordered_map<void*, NativeObjectReloc> native_object_reloc_;
+ std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
// Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
+ // Fake length prefixed array for image methods.
+ LengthPrefixedArray<ArtMethod> image_method_array_;
// Counters for measurements, used for logging only.
uint64_t dirty_methods_;
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index eb63b49884..540da1c866 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -98,6 +98,8 @@ class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
return saved_fpu_stack_offsets_[reg];
}
+ virtual bool IsFatal() const { return false; }
+
virtual const char* GetDescription() const = 0;
protected:
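
Note: IsFatal() lets a throwing slow path opt out of the usual "calls on slow path" location constraint: if the slow path never returns, no registers are live across the call, so the location summary remains coherent. The per-architecture InvokeRuntime hunks below all DCHECK this rule; here is a self-contained restatement with simplified names (not the actual art::LocationSummary states):

    #include <cassert>

    enum class CallKind { kNoCall, kCallOnSlowPath, kCall };

    struct SlowPath {
      virtual ~SlowPath() {}
      virtual bool IsFatal() const { return false; }  // hook added above
    };

    void CheckInvokeRuntimeCoherency(CallKind kind, const SlowPath* slow_path) {
      if (slow_path == nullptr) {
        // A main-path runtime call must have been announced as a call.
        assert(kind == CallKind::kCall);
      } else {
        // A slow-path call is coherent if the summary only calls on the slow
        // path, or if the slow path throws and never returns.
        assert(kind == CallKind::kCallOnSlowPath || slow_path->IsFatal());
      }
    }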
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index b0a4ce2b60..d89d2b2dda 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -69,6 +69,8 @@ class NullCheckSlowPathARM : public SlowPathCodeARM {
QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }
private:
@@ -87,6 +89,8 @@ class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }
private:
@@ -161,6 +165,8 @@ class BoundsCheckSlowPathARM : public SlowPathCodeARM {
QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }
private:
@@ -947,6 +953,14 @@ void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
+ // Ensure that the call kind indication given to the register allocator is
+ // coherent with the runtime call generated.
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetLocations()->WillCall());
+ } else {
+ DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
+ }
+
__ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
__ blx(LR);
RecordPcInfo(instruction, dex_pc, slow_path);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index bbde7e8efd..7fab5cfcaf 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -212,6 +212,8 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }
private:
@@ -234,6 +236,8 @@ class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }
private:
@@ -344,6 +348,8 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }
private:
@@ -1097,6 +1103,14 @@ void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
+ // Ensure that the call kind indication given to the register allocator is
+ // coherent with the runtime call generated.
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetLocations()->WillCall());
+ } else {
+ DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
+ }
+
BlockPoolsScope block_pools(GetVIXLAssembler());
__ Ldr(lr, MemOperand(tr, entry_point_offset));
__ Blr(lr);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index a5bad654c9..b6d67de181 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -138,6 +138,8 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }
private:
@@ -162,6 +164,8 @@ class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }
private:
@@ -278,6 +282,8 @@ class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }
private:
@@ -971,6 +977,14 @@ void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
+ // Ensure that the call kind indication given to the register allocator is
+ // coherent with the runtime call generated.
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetLocations()->WillCall());
+ } else {
+ DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
+ }
+
// TODO: anything related to T9/GP/GOT/PIC/.so's?
__ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
__ Jalr(T9);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index fdef06b116..5ffab33190 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -45,17 +45,23 @@ static constexpr int kC2ConditionMask = 0x400;
static constexpr int kFakeReturnRegister = Register(8);
#define __ down_cast<X86Assembler*>(codegen->GetAssembler())->
+#define QUICK_ENTRY_POINT(x) Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, x))
class NullCheckSlowPathX86 : public SlowPathCodeX86 {
public:
explicit NullCheckSlowPathX86(HNullCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowNullPointer)));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86"; }
private:
@@ -68,11 +74,16 @@ class DivZeroCheckSlowPathX86 : public SlowPathCodeX86 {
explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowDivZero)));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86"; }
private:
@@ -124,10 +135,14 @@ class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
length_location_,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds)));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86"; }
private:
@@ -147,8 +162,10 @@ class SuspendCheckSlowPathX86 : public SlowPathCodeX86 {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend)));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
@@ -190,8 +207,10 @@ class LoadStringSlowPathX86 : public SlowPathCodeX86 {
InvokeRuntimeCallingConvention calling_convention;
__ movl(calling_convention.GetRegisterAt(0), Immediate(instruction_->GetStringIndex()));
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString)));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
RestoreLiveRegisters(codegen, locations);
@@ -224,10 +243,9 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
InvokeRuntimeCallingConvention calling_convention;
__ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
- __ fs()->call(Address::Absolute(do_clinit_
- ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)
- : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType)));
- RecordPcInfo(codegen, at_, dex_pc_);
+ x86_codegen->InvokeRuntime(do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType),
+ at_, dex_pc_, this);
// Move the class to the desired location.
Location out = locations->Out();
@@ -291,11 +309,16 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize,
- pInstanceofNonTrivial)));
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
} else {
DCHECK(instruction_->IsCheckCast());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
-    RecordPcInfo(codegen, instruction_, dex_pc_);
@@ -324,9 +347,13 @@ class DeoptimizationSlowPathX86 : public SlowPathCodeX86 {
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pDeoptimize)));
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
// No need to restore live registers.
DCHECK(instruction_->IsDeoptimize());
HDeoptimize* deoptimize = instruction_->AsDeoptimize();
@@ -398,6 +425,27 @@ size_t CodeGeneratorX86::RestoreFloatingPointRegister(size_t stack_index, uint32
return GetFloatingPointSpillSlotSize();
}
+void CodeGeneratorX86::InvokeRuntime(Address entry_point,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
+ // Ensure that the call kind indication given to the register allocator is
+ // coherent with the runtime call generated.
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetLocations()->WillCall());
+ } else {
+ DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
+ }
+
+ __ fs()->call(entry_point);
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ DCHECK(instruction->IsSuspendCheck()
+ || instruction->IsBoundsCheck()
+ || instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
+ || !IsLeafMethod());
+}
+
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
const X86InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options)
@@ -2015,14 +2063,18 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimFloat:
// Processing a Dex `float-to-long' instruction.
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pF2l)));
- codegen_->RecordPcInfo(conversion, conversion->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
+ conversion,
+ conversion->GetDexPc(),
+ nullptr);
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-long' instruction.
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pD2l)));
- codegen_->RecordPcInfo(conversion, conversion->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
+ conversion,
+ conversion->GetDexPc(),
+ nullptr);
break;
default:
@@ -2779,9 +2831,15 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
DCHECK_EQ(EDX, out.AsRegisterPairHigh<Register>());
if (is_div) {
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLdiv)));
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
} else {
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLmod)));
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
uint32_t dex_pc = is_div
? instruction->AsDiv()->GetDexPc()
@@ -3233,9 +3291,11 @@ void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
__ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex()));
    // Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())));
-
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(
+ Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
DCHECK(!codegen_->IsLeafMethod());
}
@@ -3255,9 +3315,11 @@ void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
    // Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())));
-
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(
+ Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
DCHECK(!codegen_->IsLeafMethod());
}
@@ -4160,8 +4222,10 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
DCHECK(!codegen_->IsLeafMethod());
    // Note: if heap poisoning is enabled, pAputObject takes care
// of poisoning the reference.
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAputObject)));
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
break;
}
@@ -4723,8 +4787,10 @@ void LocationsBuilderX86::VisitThrow(HThrow* instruction) {
}
void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pDeliverException)));
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
@@ -4835,10 +4901,11 @@ void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction)
}
void InstructionCodeGeneratorX86::VisitMonitorOperation(HMonitorOperation* instruction) {
- __ fs()->call(Address::Absolute(instruction->IsEnter()
- ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLockObject)
- : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pUnlockObject)));
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject)
+ : QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
void LocationsBuilderX86::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 65d6e0a6c4..2e3d4d4bf7 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -232,6 +232,12 @@ class CodeGeneratorX86 : public CodeGenerator {
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(Address entry_point,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path);
+
size_t GetWordSize() const OVERRIDE {
return kX86WordSize;
}
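
Note: callers pass the slow path when the runtime call is emitted from inside it, or nullptr for a main-path call; both shapes appear verbatim in the code_generator_x86.cc hunks above, e.g.:

    // From a slow path (pass `this` so the coherency DCHECK can consult IsFatal()):
    x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
                               instruction_, instruction_->GetDexPc(), this);
    // From the main code path (no slow path involved):
    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                            instruction, instruction->GetDexPc(), nullptr);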
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 4fe93f99b0..2b5fcbd71c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -47,18 +47,23 @@ static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15
static constexpr int kC2ConditionMask = 0x400;
#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())->
+#define QUICK_ENTRY_POINT(x) Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, x), true)
class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowNullPointer), true));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86_64"; }
private:
@@ -71,12 +76,16 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowDivZero), true));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86_64"; }
private:
@@ -127,8 +136,10 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pTestSuspend), true));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
@@ -166,6 +177,7 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
length_location_(length_location) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
@@ -177,11 +189,12 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
length_location_,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- __ gs()->call(Address::Absolute(
- QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ instruction_, instruction_->GetDexPc(), this);
}
+ bool IsFatal() const OVERRIDE { return true; }
+
const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86_64"; }
private:
@@ -211,10 +224,9 @@ class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 {
InvokeRuntimeCallingConvention calling_convention;
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
- __ gs()->call(Address::Absolute((do_clinit_
- ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
- : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)), true));
- RecordPcInfo(codegen, at_, dex_pc_);
+ x64_codegen->InvokeRuntime(do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType),
+ at_, dex_pc_, this);
Location out = locations->Out();
// Move the class to the desired location.
@@ -261,9 +273,10 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
InvokeRuntimeCallingConvention calling_convention;
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
Immediate(instruction_->GetStringIndex()));
- __ gs()->call(Address::Absolute(
- QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pResolveString), true));
- RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
RestoreLiveRegisters(codegen, locations);
__ jmp(GetExitLabel());
@@ -309,14 +322,17 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction_,
+ dex_pc_,
+ this);
} else {
DCHECK(instruction_->IsCheckCast());
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true));
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction_,
+ dex_pc_,
+ this);
}
- RecordPcInfo(codegen, instruction_, dex_pc_);
if (instruction_->IsInstanceOf()) {
x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
@@ -343,14 +359,15 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCodeX86_64 {
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pDeoptimize), true));
DCHECK(instruction_->IsDeoptimize());
HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- codegen->RecordPcInfo(instruction_, dex_pc, this);
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ deoptimize,
+ deoptimize->GetDexPc(),
+ this);
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
@@ -463,6 +480,27 @@ size_t CodeGeneratorX86_64::RestoreFloatingPointRegister(size_t stack_index, uin
return kX86_64WordSize;
}
+void CodeGeneratorX86_64::InvokeRuntime(Address entry_point,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
+ // Ensure that the call kind indication given to the register allocator is
+ // coherent with the runtime call generated.
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetLocations()->WillCall());
+ } else {
+ DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal());
+ }
+
+ __ gs()->call(entry_point);
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ DCHECK(instruction->IsSuspendCheck()
+ || instruction->IsBoundsCheck()
+ || instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
+ || !IsLeafMethod());
+}
+
static constexpr int kNumberOfCpuRegisterPairs = 0;
// Use a fake return address register to mimic Quick.
static constexpr Register kFakeReturnRegister = Register(kLastCpuRegister + 1);
@@ -3292,11 +3330,14 @@ void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction)
instruction->GetTypeIndex());
    // Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- __ gs()->call(
- Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true));
+
+ codegen_->InvokeRuntime(
+ Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) {
@@ -3316,11 +3357,13 @@ void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
    // Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- __ gs()->call(
- Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true));
+ codegen_->InvokeRuntime(
+ Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) {
@@ -4007,10 +4050,11 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
DCHECK_EQ(value_type, Primitive::kPrimNot);
    // Note: if heap poisoning is enabled, pAputObject takes care
// of poisoning the reference.
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAputObject),
- true));
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
break;
}
@@ -4557,9 +4601,10 @@ void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) {
}
void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pDeliverException), true));
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -4669,11 +4714,11 @@ void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instructio
}
void InstructionCodeGeneratorX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
- __ gs()->call(Address::Absolute(instruction->IsEnter()
- ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pLockObject)
- : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pUnlockObject),
- true));
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject)
+ : QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
}
void LocationsBuilderX86_64::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 4b90381f00..3b3915f2ae 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -232,6 +232,12 @@ class CodeGeneratorX86_64 : public CodeGenerator {
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(Address entry_point,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path);
+
size_t GetWordSize() const OVERRIDE {
return kX86_64WordSize;
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index d3911456fb..931a1c3cb7 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -438,9 +438,14 @@ void InstructionSimplifierVisitor::VisitAdd(HAdd* instruction) {
// ADD dst, src, 0
// with
// src
- instruction->ReplaceWith(input_other);
- instruction->GetBlock()->RemoveInstruction(instruction);
- return;
+ // Note that we cannot optimize `x + 0.0` to `x` for floating-point. When
+    // `x` is `-0.0`, the former expression yields `0.0`, while the latter
+ // yields `-0.0`.
+ if (Primitive::IsIntegralType(instruction->GetType())) {
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
}
HInstruction* left = instruction->GetLeft();
@@ -800,21 +805,24 @@ void InstructionSimplifierVisitor::VisitSub(HSub* instruction) {
HConstant* input_cst = instruction->GetConstantRight();
HInstruction* input_other = instruction->GetLeastConstantLeft();
+ Primitive::Type type = instruction->GetType();
+ if (Primitive::IsFloatingPointType(type)) {
+ return;
+ }
+
if ((input_cst != nullptr) && input_cst->IsZero()) {
// Replace code looking like
// SUB dst, src, 0
// with
// src
+    // Note that we cannot optimize `x - (-0.0)` to `x` for floating-point. When
+    // `x` is `-0.0`, the former expression yields `0.0`, while the latter
+    // yields `-0.0`.
instruction->ReplaceWith(input_other);
instruction->GetBlock()->RemoveInstruction(instruction);
return;
}
- Primitive::Type type = instruction->GetType();
- if (!Primitive::IsIntegralType(type)) {
- return;
- }
-
HBasicBlock* block = instruction->GetBlock();
ArenaAllocator* allocator = GetGraph()->GetArena();
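
Note: the sign-of-zero hazard the new comments describe can be checked directly; under IEEE 754 round-to-nearest, adding zeros of opposite sign produces +0.0. A runnable demonstration:

    #include <cassert>
    #include <cmath>

    int main() {
      double x = -0.0;
      assert(std::signbit(x));         // x is -0.0
      assert(!std::signbit(x + 0.0));  // x + 0.0 is +0.0: folding to `x` flips the sign
      // The subtraction analogue: (-0.0) - (-0.0) is +0.0, not -0.0.
      assert(!std::signbit(x - (-0.0)));
      return 0;
    }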
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 72ddabe559..de625301df 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -248,7 +248,7 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
bool core_register = (instruction->GetType() != Primitive::kPrimDouble)
&& (instruction->GetType() != Primitive::kPrimFloat);
- if (locations->CanCall()) {
+ if (locations->NeedsSafepoint()) {
if (codegen_->IsLeafMethod()) {
// TODO: We do this here because we do not want the suspend check to artificially
// create live registers. We should find another place, but this is currently the
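
Note: the register_allocator.cc change swaps CanCall() for NeedsSafepoint() when deciding whether an instruction's registers need the conservative treatment. To the best of our reading these predicates coincide in ART (NeedsSafepoint() is defined in terms of CanCall()), so this reads as an intent clarification; a toy model under that assumption:

    enum class CallKind { kNoCall, kCallOnSlowPath, kCall };

    struct ToyLocationSummary {
      CallKind call_kind;
      bool CanCall() const { return call_kind != CallKind::kNoCall; }
      // Any instruction that may call can reach a GC safepoint; naming the
      // predicate after the safepoint makes the allocator's reason explicit.
      bool NeedsSafepoint() const { return CanCall(); }
    };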