-rw-r--r--  compiler/dex/verified_method.cc  2
-rw-r--r--  compiler/dex/verified_method.h  2
-rw-r--r--  compiler/image_writer.cc  190
-rw-r--r--  compiler/image_writer.h  26
-rw-r--r--  compiler/optimizing/code_generator.cc  70
-rw-r--r--  compiler/optimizing/code_generator.h  7
-rw-r--r--  compiler/optimizing/stack_map_stream.cc  12
-rw-r--r--  compiler/optimizing/stack_map_stream.h  13
-rw-r--r--  oatdump/oatdump.cc  22
-rw-r--r--  patchoat/patchoat.cc  40
-rw-r--r--  patchoat/patchoat.h  9
-rw-r--r--  runtime/base/hash_set.h  100
-rw-r--r--  runtime/base/mutex.h  1
-rw-r--r--  runtime/class_linker.cc  2
-rw-r--r--  runtime/debugger.cc  269
-rw-r--r--  runtime/debugger.h  86
-rw-r--r--  runtime/dex_file.h  17
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h  1
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc  5
-rw-r--r--  runtime/gc/accounting/space_bitmap.h  7
-rw-r--r--  runtime/gc/heap.cc  21
-rw-r--r--  runtime/gc/heap.h  1
-rw-r--r--  runtime/gc/space/image_space.cc  8
-rw-r--r--  runtime/hprof/hprof.cc  30
-rw-r--r--  runtime/image.cc  2
-rw-r--r--  runtime/image.h  1
-rw-r--r--  runtime/intern_table.cc  80
-rw-r--r--  runtime/intern_table.h  28
-rw-r--r--  runtime/interpreter/interpreter_common.cc  5
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc  2
-rw-r--r--  runtime/jdwp/jdwp.h  2
-rw-r--r--  runtime/jdwp/jdwp_event.cc  19
-rw-r--r--  runtime/jdwp/jdwp_handler.cc  139
-rw-r--r--  runtime/jdwp/jdwp_main.cc  13
-rw-r--r--  runtime/jdwp/jdwp_priv.h  31
-rw-r--r--  runtime/mirror/class-inl.h  2
-rw-r--r--  runtime/mirror/string-inl.h  8
-rw-r--r--  runtime/mirror/string.h  2
-rw-r--r--  runtime/native/java_lang_Class.cc  4
-rw-r--r--  runtime/read_barrier-inl.h  2
-rw-r--r--  runtime/runtime.cc  4
-rw-r--r--  runtime/runtime_options.def  13
-rw-r--r--  runtime/stack.h  16
-rw-r--r--  runtime/thread.cc  5
-rw-r--r--  runtime/thread.h  11
-rw-r--r--  runtime/thread_list.cc  27
-rw-r--r--  test/098-ddmc/src/Main.java  16
-rw-r--r--  test/Android.run-test.mk  14
-rwxr-xr-x  tools/buildbot-build.sh  4
-rwxr-xr-x  tools/run-libcore-tests.sh  3
50 files changed, 891 insertions, 503 deletions
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index ac7a4a7758..6d485986f6 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -98,7 +98,7 @@ bool VerifiedMethod::GenerateGcMap(verifier::MethodVerifier* method_verifier) {
}
size_t ref_bitmap_bytes = RoundUp(ref_bitmap_bits, kBitsPerByte) / kBitsPerByte;
// There are 2 bytes to encode the number of entries.
- if (num_entries >= 65536) {
+ if (num_entries > std::numeric_limits<uint16_t>::max()) {
LOG(WARNING) << "Cannot encode GC map for method with " << num_entries << " entries: "
<< PrettyMethod(method_verifier->GetMethodReference().dex_method_index,
*method_verifier->GetMethodReference().dex_file);
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 242e3dfe6e..07f9a9bd9f 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -120,7 +120,7 @@ class VerifiedMethod {
DequickenMap dequicken_map_;
SafeCastSet safe_cast_set_;
- bool has_verification_failures_;
+ bool has_verification_failures_ = false;
// Copy of mapping generated by verifier of dex PCs of string init invocations
// to the set of other registers that the receiver has been copied into.
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 32bde8e3b4..73e121f1cd 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -110,10 +110,6 @@ bool ImageWriter::PrepareImageAddressSpace() {
CheckNoDexObjects();
}
- if (!AllocMemory()) {
- return false;
- }
-
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
CheckNonImageClassesRemoved();
@@ -123,6 +119,12 @@ bool ImageWriter::PrepareImageAddressSpace() {
CalculateNewObjectOffsets();
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+ // This needs to happen after CalculateNewObjectOffsets since it relies on intern_table_bytes_ and
+ // bin size sums being calculated.
+ if (!AllocMemory()) {
+ return false;
+ }
+
return true;
}
@@ -205,7 +207,7 @@ bool ImageWriter::Write(const std::string& image_filename,
}
// Write out the image bitmap at the page aligned start of the image end.
- const auto& bitmap_section = image_header->GetImageSection(ImageHeader::kSectionImageBitmap);
+ const ImageSection& bitmap_section = image_header->GetImageSection(ImageHeader::kSectionImageBitmap);
CHECK_ALIGNED(bitmap_section.Offset(), kPageSize);
if (!image_file->Write(reinterpret_cast<char*>(image_bitmap_->Begin()),
bitmap_section.Size(), bitmap_section.Offset())) {
@@ -222,26 +224,10 @@ bool ImageWriter::Write(const std::string& image_filename,
return true;
}
-void ImageWriter::SetImageOffset(mirror::Object* object,
- ImageWriter::BinSlot bin_slot,
- size_t offset) {
+void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
DCHECK(object != nullptr);
DCHECK_NE(offset, 0U);
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(image_->Begin() + offset);
- DCHECK_ALIGNED(obj, kObjectAlignment);
- static size_t max_offset = 0;
- max_offset = std::max(max_offset, offset);
- image_bitmap_->Set(obj); // Mark the obj as mutated, since we will end up changing it.
- {
- // Remember the object-inside-of-the-image's hash code so we can restore it after the copy.
- auto hash_it = saved_hashes_map_.find(bin_slot);
- if (hash_it != saved_hashes_map_.end()) {
- std::pair<BinSlot, uint32_t> slot_hash = *hash_it;
- saved_hashes_.push_back(std::make_pair(obj, slot_hash.second));
- saved_hashes_map_.erase(hash_it);
- }
- }
// The object is already deflated from when we set the bin slot. Just overwrite the lock word.
object->SetLockWord(LockWord::FromForwardingAddress(offset), false);
DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u);
@@ -262,7 +248,7 @@ void ImageWriter::AssignImageOffset(mirror::Object* object, ImageWriter::BinSlot
size_t new_offset = image_objects_offset_begin_ + previous_bin_sizes + bin_slot.GetIndex();
DCHECK_ALIGNED(new_offset, kObjectAlignment);
- SetImageOffset(object, bin_slot, new_offset);
+ SetImageOffset(object, new_offset);
DCHECK_LT(new_offset, image_end_);
}
@@ -302,14 +288,14 @@ void ImageWriter::SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) {
// No hash, don't need to save it.
break;
case LockWord::kHashCode:
- saved_hashes_map_[bin_slot] = lw.GetHashCode();
+ DCHECK(saved_hashcode_map_.find(object) == saved_hashcode_map_.end());
+ saved_hashcode_map_.emplace(object, lw.GetHashCode());
break;
default:
LOG(FATAL) << "Unreachable.";
UNREACHABLE();
}
- object->SetLockWord(LockWord::FromForwardingAddress(static_cast<uint32_t>(bin_slot)),
- false);
+ object->SetLockWord(LockWord::FromForwardingAddress(bin_slot.Uint32Value()), false);
DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u);
DCHECK(IsImageBinSlotAssigned(object));
}
@@ -487,11 +473,8 @@ void ImageWriter::AssignImageBinSlot(mirror::Object* object) {
++bin_slot_count_[bin];
- DCHECK_LT(GetBinSizeSum(), image_->Size());
-
// Grow the image closer to the end by the object we just assigned.
image_end_ += offset_delta;
- DCHECK_LT(image_end_, image_->Size());
}
bool ImageWriter::WillMethodBeDirty(ArtMethod* m) const {
@@ -535,10 +518,8 @@ ImageWriter::BinSlot ImageWriter::GetImageBinSlot(mirror::Object* object) const
}
bool ImageWriter::AllocMemory() {
- auto* runtime = Runtime::Current();
- const size_t heap_size = runtime->GetHeap()->GetTotalMemory();
- // Add linear alloc usage since we need to have room for the ArtFields.
- const size_t length = RoundUp(heap_size + runtime->GetLinearAlloc()->GetUsedMemory(), kPageSize);
+ const size_t length = RoundUp(image_objects_offset_begin_ + GetBinSizeSum() + intern_table_bytes_,
+ kPageSize);
std::string error_msg;
image_.reset(MemMap::MapAnonymous("image writer image", nullptr, length, PROT_READ | PROT_WRITE,
false, false, &error_msg));
@@ -547,9 +528,10 @@ bool ImageWriter::AllocMemory() {
return false;
}
- // Create the image bitmap.
- image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create("image bitmap", image_->Begin(),
- RoundUp(length, kPageSize)));
+ // Create the image bitmap; it only needs to cover the mirror object section, which ends at image_end_.
+ CHECK_LE(image_end_, length);
+ image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create(
+ "image bitmap", image_->Begin(), RoundUp(image_end_, kPageSize)));
if (image_bitmap_.get() == nullptr) {
LOG(ERROR) << "Failed to allocate memory for image bitmap";
return false;
@@ -569,42 +551,6 @@ bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
return true;
}
-// Collect all the java.lang.String in the heap and put them in the output strings_ array.
-class StringCollector {
- public:
- StringCollector(Handle<mirror::ObjectArray<mirror::String>> strings, size_t index)
- : strings_(strings), index_(index) {
- }
- static void Callback(Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- auto* collector = reinterpret_cast<StringCollector*>(arg);
- if (obj->GetClass()->IsStringClass()) {
- collector->strings_->SetWithoutChecks<false>(collector->index_++, obj->AsString());
- }
- }
- size_t GetIndex() const {
- return index_;
- }
-
- private:
- Handle<mirror::ObjectArray<mirror::String>> strings_;
- size_t index_;
-};
-
-// Compare strings based on length, used for sorting strings by length / reverse length.
-class LexicographicalStringComparator {
- public:
- bool operator()(const mirror::HeapReference<mirror::String>& lhs,
- const mirror::HeapReference<mirror::String>& rhs) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::String* lhs_s = lhs.AsMirrorPtr();
- mirror::String* rhs_s = rhs.AsMirrorPtr();
- uint16_t* lhs_begin = lhs_s->GetValue();
- uint16_t* rhs_begin = rhs_s->GetValue();
- return std::lexicographical_compare(lhs_begin, lhs_begin + lhs_s->GetLength(),
- rhs_begin, rhs_begin + rhs_s->GetLength());
- }
-};
-
void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
if (!obj->GetClass()->IsStringClass()) {
return;
@@ -769,7 +715,8 @@ void ImageWriter::CalculateObjectBinSlots(Object* obj) {
DCHECK_EQ(obj, obj->AsString()->Intern());
return;
}
- mirror::String* const interned = obj->AsString()->Intern();
+ mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrong(
+ obj->AsString()->Intern());
if (obj != interned) {
if (!IsImageBinSlotAssigned(interned)) {
// interned obj is after us, allocate its location early
@@ -965,7 +912,6 @@ void ImageWriter::CalculateNewObjectOffsets() {
// know where image_roots is going to end up
image_end_ += RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment
- DCHECK_LT(image_end_, image_->Size());
image_objects_offset_begin_ = image_end_;
// Prepare bin slots for dex cache arrays.
PrepareDexCacheArraySlots();
@@ -997,7 +943,6 @@ void ImageWriter::CalculateNewObjectOffsets() {
// Transform each object's bin slot into an offset which will be used to do the final copy.
heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
- DCHECK(saved_hashes_map_.empty()); // All binslot hashes should've been put into vector by now.
DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
@@ -1010,6 +955,11 @@ void ImageWriter::CalculateNewObjectOffsets() {
bin_slot_previous_sizes_[native_reloc.bin_type];
}
+ // Calculate how big the intern table will be after being serialized.
+ auto* const intern_table = Runtime::Current()->GetInternTable();
+ CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
+ intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+
// Note that image_end_ is left at end of used mirror object section.
}
@@ -1039,6 +989,10 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtMethodClean],
methods_section->Offset());
cur_pos = methods_section->End();
+ // Calculate the size of the interned strings.
+ auto* interned_strings_section = &sections[ImageHeader::kSectionInternedStrings];
+ *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
+ cur_pos = interned_strings_section->End();
// Finally bitmap section.
const size_t bitmap_bytes = image_bitmap_->Size();
auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
@@ -1046,16 +1000,19 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
cur_pos = bitmap_section->End();
if (kIsDebugBuild) {
size_t idx = 0;
- for (auto& section : sections) {
+ for (const ImageSection& section : sections) {
LOG(INFO) << static_cast<ImageHeader::ImageSections>(idx) << " " << section;
++idx;
}
LOG(INFO) << "Methods: clean=" << clean_methods_ << " dirty=" << dirty_methods_;
}
+ const size_t image_end = static_cast<uint32_t>(interned_strings_section->End());
+ CHECK_EQ(AlignUp(image_begin_ + image_end, kPageSize), oat_file_begin) <<
+ "Oat file should be right after the image.";
// Create the header.
new (image_->Begin()) ImageHeader(
- PointerToLowMemUInt32(image_begin_), static_cast<uint32_t>(methods_section->End()), sections,
- image_roots_address_, oat_file_->GetOatHeader().GetChecksum(),
+ PointerToLowMemUInt32(image_begin_), image_end,
+ sections, image_roots_address_, oat_file_->GetOatHeader().GetChecksum(),
PointerToLowMemUInt32(oat_file_begin), PointerToLowMemUInt32(oat_data_begin_),
PointerToLowMemUInt32(oat_data_end), PointerToLowMemUInt32(oat_file_end), target_ptr_size_,
compile_pic_);
@@ -1068,6 +1025,37 @@ ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
return reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset);
}
+class FixupRootVisitor : public RootVisitor {
+ public:
+ explicit FixupRootVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {
+ }
+
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ *roots[i] = ImageAddress(*roots[i]);
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr()));
+ }
+ }
+
+ private:
+ ImageWriter* const image_writer_;
+
+ mirror::Object* ImageAddress(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const size_t offset = image_writer_->GetImageOffset(obj);
+ auto* const dest = reinterpret_cast<Object*>(image_writer_->image_begin_ + offset);
+ VLOG(compiler) << "Update root from " << obj << " to " << dest;
+ return dest;
+ }
+};
+
void ImageWriter::CopyAndFixupNativeData() {
// Copy ArtFields and methods to their locations and update the array for convenience.
for (auto& pair : native_object_reloc_) {
@@ -1088,7 +1076,7 @@ void ImageWriter::CopyAndFixupNativeData() {
}
// Fixup the image method roots.
auto* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
- const auto& methods_section = image_header->GetMethodsSection();
+ const ImageSection& methods_section = image_header->GetMethodsSection();
for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
auto* m = image_methods_[i];
CHECK(m != nullptr);
@@ -1101,18 +1089,35 @@ void ImageWriter::CopyAndFixupNativeData() {
auto* dest = reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset);
image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), dest);
}
+ // Write the intern table into the image.
+ const ImageSection& intern_table_section = image_header->GetImageSection(
+ ImageHeader::kSectionInternedStrings);
+ InternTable* const intern_table = Runtime::Current()->GetInternTable();
+ uint8_t* const memory_ptr = image_->Begin() + intern_table_section.Offset();
+ const size_t intern_table_bytes = intern_table->WriteToMemory(memory_ptr);
+ // Fixup the pointers in the newly written intern table to contain image addresses.
+ InternTable temp_table;
+ // Note that we require that ReadFromMemory does not make an internal copy of the elements so
+ // that VisitRoots() updates the serialized memory directly rather than a copy.
+ // This also relies on visit roots not doing any verification which could fail after we update
+ // the roots to be the image addresses.
+ temp_table.ReadFromMemory(memory_ptr);
+ CHECK_EQ(temp_table.Size(), intern_table->Size());
+ FixupRootVisitor visitor(this);
+ temp_table.VisitRoots(&visitor, kVisitRootFlagAllRoots);
+ CHECK_EQ(intern_table_bytes, intern_table_bytes_);
}
void ImageWriter::CopyAndFixupObjects() {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->VisitObjects(CopyAndFixupObjectsCallback, this);
// Fix up the objects that previously had hash codes.
- for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {
+ for (const auto& hash_pair : saved_hashcode_map_) {
Object* obj = hash_pair.first;
DCHECK_EQ(obj->GetLockWord<kVerifyNone>(false).ReadBarrierState(), 0U);
obj->SetLockWord<kVerifyNone>(LockWord::FromHashCode(hash_pair.second, 0U), false);
}
- saved_hashes_.clear();
+ saved_hashcode_map_.clear();
}
void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
@@ -1155,18 +1160,22 @@ void ImageWriter::FixupPointerArray(mirror::Object* dst, mirror::PointerArray* a
}
void ImageWriter::CopyAndFixupObject(Object* obj) {
- // see GetLocalAddress for similar computation
size_t offset = GetImageOffset(obj);
auto* dst = reinterpret_cast<Object*>(image_->Begin() + offset);
- const uint8_t* src = reinterpret_cast<const uint8_t*>(obj);
+ DCHECK_LT(offset, image_end_);
+ const auto* src = reinterpret_cast<const uint8_t*>(obj);
+
+ image_bitmap_->Set(dst); // Mark the obj as live.
- size_t n = obj->SizeOf();
+ const size_t n = obj->SizeOf();
DCHECK_LE(offset + n, image_->Size());
memcpy(dst, src, n);
// Write in a hash code of objects which have inflated monitors or a hash code in their monitor
// word.
- dst->SetLockWord(LockWord::Default(), false);
+ const auto it = saved_hashcode_map_.find(obj);
+ dst->SetLockWord(it != saved_hashcode_map_.end() ?
+ LockWord::FromHashCode(it->second, 0u) : LockWord::Default(), false);
FixupObject(obj, dst);
}
@@ -1176,7 +1185,7 @@ class FixupVisitor {
FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
}
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
// Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
@@ -1186,7 +1195,7 @@ class FixupVisitor {
}
// java.lang.ref.Reference visitor.
- void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+ void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
@@ -1490,4 +1499,11 @@ uint32_t ImageWriter::BinSlot::GetIndex() const {
return lockword_ & ~kBinMask;
}
+uint8_t* ImageWriter::GetOatFileBegin() const {
+ DCHECK_GT(intern_table_bytes_, 0u);
+ return image_begin_ + RoundUp(
+ image_end_ + bin_slot_sizes_[kBinArtField] + bin_slot_sizes_[kBinArtMethodDirty] +
+ bin_slot_sizes_[kBinArtMethodClean] + intern_table_bytes_, kPageSize);
+}
+
} // namespace art
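The intern table handling added above follows a write-then-fixup pattern: the table is serialized into its image section, re-read in place without copying, and its roots are then rewritten to image addresses. Below is a minimal standalone sketch of that pattern using hypothetical stand-in types (FlatTable, Obj); it is not ART's InternTable or FixupRootVisitor, just the same idea in compilable form.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <utility>
#include <vector>

struct Obj { int payload; };

// A toy "table" of object pointers that can be written to and re-read from flat memory.
class FlatTable {
 public:
  explicit FlatTable(std::vector<Obj*> roots = {}) : roots_(std::move(roots)) {}

  // Writes the element count followed by the raw pointers. With ptr == nullptr only the
  // size is computed, mirroring the WriteToMemory(nullptr) call used to pre-size the section.
  size_t WriteToMemory(uint8_t* ptr) const {
    size_t offset = 0;
    uint64_t count = roots_.size();
    if (ptr != nullptr) std::memcpy(ptr + offset, &count, sizeof(count));
    offset += sizeof(count);
    for (Obj* root : roots_) {
      if (ptr != nullptr) std::memcpy(ptr + offset, &root, sizeof(root));
      offset += sizeof(root);
    }
    return offset;
  }

  // Points directly at the serialized memory (no copy), so visiting the roots rewrites
  // the serialized bytes themselves.
  void ReadFromMemory(uint8_t* ptr) {
    uint64_t count;
    std::memcpy(&count, ptr, sizeof(count));
    external_ = reinterpret_cast<Obj**>(ptr + sizeof(count));
    external_count_ = count;
  }

  template <typename Visitor>
  void VisitRoots(Visitor&& visitor) {
    for (size_t i = 0; i < external_count_; ++i) visitor(&external_[i]);
  }

 private:
  std::vector<Obj*> roots_;
  Obj** external_ = nullptr;
  size_t external_count_ = 0;
};

int main() {
  Obj a{1}, b{2};
  FlatTable table({&a, &b});
  std::vector<uint8_t> image(table.WriteToMemory(nullptr));  // pre-size, like intern_table_bytes_
  table.WriteToMemory(image.data());

  // Fix up the serialized pointers in place, e.g. rebase them by a relocation delta.
  FlatTable temp;
  temp.ReadFromMemory(image.data());
  ptrdiff_t delta = 0x1000;  // hypothetical relocation delta
  temp.VisitRoots([delta](Obj** root) {
    *root = reinterpret_cast<Obj*>(reinterpret_cast<uintptr_t>(*root) + delta);
  });
  std::printf("serialized %zu bytes\n", image.size());
}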
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index a35d6ad9c9..9d45ce2bd4 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -54,7 +54,7 @@ class ImageWriter FINAL {
quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
bin_slot_sizes_(), bin_slot_previous_sizes_(), bin_slot_count_(),
- dirty_methods_(0u), clean_methods_(0u) {
+ intern_table_bytes_(0u), dirty_methods_(0u), clean_methods_(0u) {
CHECK_NE(image_begin, 0U);
std::fill(image_methods_, image_methods_ + arraysize(image_methods_), nullptr);
}
@@ -84,11 +84,7 @@ class ImageWriter FINAL {
image_begin_ + RoundUp(sizeof(ImageHeader), kObjectAlignment) + it->second + offset);
}
- uint8_t* GetOatFileBegin() const {
- return image_begin_ + RoundUp(
- image_end_ + bin_slot_sizes_[kBinArtField] + bin_slot_sizes_[kBinArtMethodDirty] +
- bin_slot_sizes_[kBinArtMethodClean], kPageSize);
- }
+ uint8_t* GetOatFileBegin() const;
bool Write(const std::string& image_filename, const std::string& oat_filename,
const std::string& oat_location)
@@ -158,7 +154,7 @@ class ImageWriter FINAL {
// The offset in bytes from the beginning of the bin. Aligned to object size.
uint32_t GetIndex() const;
// Pack into a single uint32_t, for storing into a lock word.
- explicit operator uint32_t() const { return lockword_; }
+ uint32_t Uint32Value() const { return lockword_; }
// Comparison operator for map support
bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; }
@@ -170,7 +166,7 @@ class ImageWriter FINAL {
// We use the lock word to store the offset of the object in the image.
void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetImageOffset(mirror::Object* object, BinSlot bin_slot, size_t offset)
+ void SetImageOffset(mirror::Object* object, size_t offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsImageOffsetAssigned(mirror::Object* object) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -330,11 +326,9 @@ class ImageWriter FINAL {
// The start offsets of the dex cache arrays.
SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
- // Saved hashes (objects are inside of the image so that they don't move).
- std::vector<std::pair<mirror::Object*, uint32_t>> saved_hashes_;
-
- // Saved hashes (objects are bin slots to inside of the image, not yet allocated an address).
- std::map<BinSlot, uint32_t> saved_hashes_map_;
+ // Saved hash codes. We use these to restore lockwords which were temporarily used to have
+ // forwarding addresses as well as copying over hash codes.
+ std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;
// Beginning target oat address for the pointers from the output image to its oat file.
const uint8_t* oat_data_begin_;
@@ -360,6 +354,9 @@ class ImageWriter FINAL {
size_t bin_slot_previous_sizes_[kBinSize]; // Number of bytes in previous bins.
size_t bin_slot_count_[kBinSize]; // Number of objects in a bin
+ // Cached size of the intern table for when we allocate memory.
+ size_t intern_table_bytes_;
+
// ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
// have one entry per art field for convenience. ArtFields are placed right after the end of the
// image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
@@ -376,8 +373,9 @@ class ImageWriter FINAL {
uint64_t dirty_methods_;
uint64_t clean_methods_;
- friend class FixupVisitor;
friend class FixupClassVisitor;
+ friend class FixupRootVisitor;
+ friend class FixupVisitor;
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};
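With the interned strings section added, GetOatFileBegin() (moved to the .cc file above) places the oat file at the page-aligned end of the image: objects, ArtFields, ArtMethods, then the serialized intern table. A back-of-the-envelope sketch of that arithmetic, with made-up section sizes rather than real image numbers:

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t kPageSize = 4096;
constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

int main() {
  uintptr_t image_begin = 0x70000000;
  size_t image_end = 5 * 1024 * 1024 + 123;   // end of the mirror object section (example)
  size_t art_field_bytes = 200 * 1024;        // kBinArtField bytes (example)
  size_t art_method_bytes = 800 * 1024;       // dirty + clean ArtMethod bins (example)
  size_t intern_table_bytes = 64 * 1024;      // serialized intern table (example)

  uintptr_t oat_file_begin = image_begin +
      RoundUp(image_end + art_field_bytes + art_method_bytes + intern_table_bytes, kPageSize);
  std::printf("oat file would begin at %#zx\n", static_cast<size_t>(oat_file_begin));
}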
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 049b3e3a40..130f0e970f 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -508,19 +508,14 @@ void CodeGenerator::BuildNativeGCMap(
dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
- uint32_t max_native_offset = 0;
- for (size_t i = 0; i < pc_infos_.Size(); i++) {
- uint32_t native_offset = pc_infos_.Get(i).native_pc;
- if (native_offset > max_native_offset) {
- max_native_offset = native_offset;
- }
- }
-
- GcMapBuilder builder(data, pc_infos_.Size(), max_native_offset, dex_gc_map.RegWidth());
- for (size_t i = 0; i < pc_infos_.Size(); i++) {
- struct PcInfo pc_info = pc_infos_.Get(i);
- uint32_t native_offset = pc_info.native_pc;
- uint32_t dex_pc = pc_info.dex_pc;
+ uint32_t max_native_offset = stack_map_stream_.ComputeMaxNativePcOffset();
+
+ size_t num_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
+ GcMapBuilder builder(data, num_stack_maps, max_native_offset, dex_gc_map.RegWidth());
+ for (size_t i = 0; i != num_stack_maps; ++i) {
+ const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
+ uint32_t native_offset = stack_map_entry.native_pc_offset;
+ uint32_t dex_pc = stack_map_entry.dex_pc;
const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
builder.AddEntry(native_offset, references);
@@ -528,17 +523,17 @@ void CodeGenerator::BuildNativeGCMap(
}
void CodeGenerator::BuildSourceMap(DefaultSrcMap* src_map) const {
- for (size_t i = 0; i < pc_infos_.Size(); i++) {
- struct PcInfo pc_info = pc_infos_.Get(i);
- uint32_t pc2dex_offset = pc_info.native_pc;
- int32_t pc2dex_dalvik_offset = pc_info.dex_pc;
+ for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+ const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
+ uint32_t pc2dex_offset = stack_map_entry.native_pc_offset;
+ int32_t pc2dex_dalvik_offset = stack_map_entry.dex_pc;
src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
}
}
void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
uint32_t pc2dex_data_size = 0u;
- uint32_t pc2dex_entries = pc_infos_.Size();
+ uint32_t pc2dex_entries = stack_map_stream_.GetNumberOfStackMaps();
uint32_t pc2dex_offset = 0u;
int32_t pc2dex_dalvik_offset = 0;
uint32_t dex2pc_data_size = 0u;
@@ -547,11 +542,11 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
int32_t dex2pc_dalvik_offset = 0;
for (size_t i = 0; i < pc2dex_entries; i++) {
- struct PcInfo pc_info = pc_infos_.Get(i);
- pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
- pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
- pc2dex_offset = pc_info.native_pc;
- pc2dex_dalvik_offset = pc_info.dex_pc;
+ const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
+ pc2dex_data_size += UnsignedLeb128Size(stack_map_entry.native_pc_offset - pc2dex_offset);
+ pc2dex_data_size += SignedLeb128Size(stack_map_entry.dex_pc - pc2dex_dalvik_offset);
+ pc2dex_offset = stack_map_entry.native_pc_offset;
+ pc2dex_dalvik_offset = stack_map_entry.dex_pc;
}
// Walk over the blocks and find which ones correspond to catch block entries.
@@ -586,12 +581,12 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
dex2pc_dalvik_offset = 0u;
for (size_t i = 0; i < pc2dex_entries; i++) {
- struct PcInfo pc_info = pc_infos_.Get(i);
- DCHECK(pc2dex_offset <= pc_info.native_pc);
- write_pos = EncodeUnsignedLeb128(write_pos, pc_info.native_pc - pc2dex_offset);
- write_pos = EncodeSignedLeb128(write_pos, pc_info.dex_pc - pc2dex_dalvik_offset);
- pc2dex_offset = pc_info.native_pc;
- pc2dex_dalvik_offset = pc_info.dex_pc;
+ const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
+ DCHECK(pc2dex_offset <= stack_map_entry.native_pc_offset);
+ write_pos = EncodeUnsignedLeb128(write_pos, stack_map_entry.native_pc_offset - pc2dex_offset);
+ write_pos = EncodeSignedLeb128(write_pos, stack_map_entry.dex_pc - pc2dex_dalvik_offset);
+ pc2dex_offset = stack_map_entry.native_pc_offset;
+ pc2dex_dalvik_offset = stack_map_entry.dex_pc;
}
for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
@@ -617,9 +612,9 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
auto it = table.PcToDexBegin();
auto it2 = table.DexToPcBegin();
for (size_t i = 0; i < pc2dex_entries; i++) {
- struct PcInfo pc_info = pc_infos_.Get(i);
- CHECK_EQ(pc_info.native_pc, it.NativePcOffset());
- CHECK_EQ(pc_info.dex_pc, it.DexPc());
+ const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
+ CHECK_EQ(stack_map_entry.native_pc_offset, it.NativePcOffset());
+ CHECK_EQ(stack_map_entry.dex_pc, it.DexPc());
++it;
}
for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
@@ -695,14 +690,11 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
}
// Collect PC infos for the mapping table.
- struct PcInfo pc_info;
- pc_info.dex_pc = outer_dex_pc;
- pc_info.native_pc = GetAssembler()->CodeSize();
- pc_infos_.Add(pc_info);
+ uint32_t native_pc = GetAssembler()->CodeSize();
if (instruction == nullptr) {
// For stack overflow checks.
- stack_map_stream_.BeginStackMapEntry(pc_info.dex_pc, pc_info.native_pc, 0, 0, 0, 0);
+ stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
stack_map_stream_.EndStackMapEntry();
return;
}
@@ -719,8 +711,8 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
}
// The register mask must be a subset of callee-save registers.
DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
- stack_map_stream_.BeginStackMapEntry(pc_info.dex_pc,
- pc_info.native_pc,
+ stack_map_stream_.BeginStackMapEntry(outer_dex_pc,
+ native_pc,
register_mask,
locations->GetStackMask(),
outer_environment_size,
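BuildMappingTable() above delta-encodes consecutive (native pc, dex pc) pairs with unsigned and signed LEB128, now sourced from the stack map stream instead of the removed pc_infos_ array. A self-contained sketch of that delta encoding with toy entries follows; the LEB128 helpers here are local stand-ins, not ART's own encoders.

#include <cstdint>
#include <cstdio>
#include <vector>

// Appends an unsigned LEB128 value.
static void EncodeUnsignedLeb128(std::vector<uint8_t>* out, uint32_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) {
      byte |= 0x80;
    }
    out->push_back(byte);
  } while (value != 0);
}

// Appends a signed LEB128 value (relies on arithmetic right shift of negatives, which is
// what mainstream compilers do).
static void EncodeSignedLeb128(std::vector<uint8_t>* out, int32_t value) {
  bool more = true;
  while (more) {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if ((value == 0 && (byte & 0x40) == 0) || (value == -1 && (byte & 0x40) != 0)) {
      more = false;
    } else {
      byte |= 0x80;
    }
    out->push_back(byte);
  }
}

int main() {
  struct Entry { uint32_t native_pc_offset; int32_t dex_pc; };
  const Entry entries[] = {{0x10, 0}, {0x24, 3}, {0x40, 1}};  // toy values, not real stack maps

  std::vector<uint8_t> pc2dex;
  uint32_t prev_native = 0;
  int32_t prev_dex = 0;
  for (const Entry& e : entries) {
    EncodeUnsignedLeb128(&pc2dex, e.native_pc_offset - prev_native);
    EncodeSignedLeb128(&pc2dex, e.dex_pc - prev_dex);
    prev_native = e.native_pc_offset;
    prev_dex = e.dex_pc;
  }
  std::printf("encoded %zu entries into %zu bytes\n",
              sizeof(entries) / sizeof(entries[0]), pc2dex.size());
}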
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index c6ebf6dbd8..e6b1f7c6aa 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -64,11 +64,6 @@ class CodeAllocator {
DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};
-struct PcInfo {
- uint32_t dex_pc;
- uintptr_t native_pc;
-};
-
class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
public:
SlowPathCode() {
@@ -366,7 +361,6 @@ class CodeGenerator {
is_baseline_(false),
graph_(graph),
compiler_options_(compiler_options),
- pc_infos_(graph->GetArena(), 32),
slow_paths_(graph->GetArena(), 8),
block_order_(nullptr),
current_block_index_(0),
@@ -455,7 +449,6 @@ class CodeGenerator {
HGraph* const graph_;
const CompilerOptions& compiler_options_;
- GrowableArray<PcInfo> pc_infos_;
GrowableArray<SlowPathCode*> slow_paths_;
// The order to use for code generation.
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 42b9182d55..65610d54a6 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -49,7 +49,6 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
}
dex_pc_max_ = std::max(dex_pc_max_, dex_pc);
- native_pc_offset_max_ = std::max(native_pc_offset_max_, native_pc_offset);
register_mask_max_ = std::max(register_mask_max_, register_mask);
current_dex_register_ = 0;
}
@@ -128,16 +127,25 @@ void StackMapStream::EndInlineInfoEntry() {
current_inline_info_ = InlineInfoEntry();
}
+uint32_t StackMapStream::ComputeMaxNativePcOffset() const {
+ uint32_t max_native_pc_offset = 0u;
+ for (size_t i = 0, size = stack_maps_.Size(); i != size; ++i) {
+ max_native_pc_offset = std::max(max_native_pc_offset, stack_maps_.Get(i).native_pc_offset);
+ }
+ return max_native_pc_offset;
+}
+
size_t StackMapStream::PrepareForFillIn() {
int stack_mask_number_of_bits = stack_mask_max_ + 1; // Need room for max element too.
stack_mask_size_ = RoundUp(stack_mask_number_of_bits, kBitsPerByte) / kBitsPerByte;
inline_info_size_ = ComputeInlineInfoSize();
dex_register_maps_size_ = ComputeDexRegisterMapsSize();
+ uint32_t max_native_pc_offset = ComputeMaxNativePcOffset();
stack_map_encoding_ = StackMapEncoding::CreateFromSizes(stack_mask_size_,
inline_info_size_,
dex_register_maps_size_,
dex_pc_max_,
- native_pc_offset_max_,
+ max_native_pc_offset,
register_mask_max_);
stack_maps_size_ = stack_maps_.Size() * stack_map_encoding_.ComputeStackMapSize();
dex_register_location_catalog_size_ = ComputeDexRegisterLocationCatalogSize();
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 274d573350..bc3653d7ea 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -67,7 +67,6 @@ class StackMapStream : public ValueObject {
inline_infos_(allocator, 2),
stack_mask_max_(-1),
dex_pc_max_(0),
- native_pc_offset_max_(0),
register_mask_max_(0),
number_of_stack_maps_with_inline_info_(0),
dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(), allocator->Adapter()),
@@ -126,6 +125,17 @@ class StackMapStream : public ValueObject {
uint32_t num_dex_registers);
void EndInlineInfoEntry();
+ size_t GetNumberOfStackMaps() const {
+ return stack_maps_.Size();
+ }
+
+ const StackMapEntry& GetStackMap(size_t i) const {
+ DCHECK_LT(i, stack_maps_.Size());
+ return stack_maps_.GetRawStorage()[i];
+ }
+
+ uint32_t ComputeMaxNativePcOffset() const;
+
// Prepares the stream to fill in a memory region. Must be called before FillIn.
// Returns the size (in bytes) needed to store this stream.
size_t PrepareForFillIn();
@@ -163,7 +173,6 @@ class StackMapStream : public ValueObject {
GrowableArray<InlineInfoEntry> inline_infos_;
int stack_mask_max_;
uint32_t dex_pc_max_;
- uint32_t native_pc_offset_max_;
uint32_t register_mask_max_;
size_t number_of_stack_maps_with_inline_info_;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 96d5654d65..9e9dea64c6 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1610,6 +1610,8 @@ class ImageDumper {
const auto& bitmap_section = image_header_.GetImageSection(ImageHeader::kSectionImageBitmap);
const auto& field_section = image_header_.GetImageSection(ImageHeader::kSectionArtFields);
const auto& method_section = image_header_.GetMethodsSection();
+ const auto& intern_section = image_header_.GetImageSection(
+ ImageHeader::kSectionInternedStrings);
stats_.header_bytes = header_bytes;
size_t alignment_bytes = RoundUp(header_bytes, kObjectAlignment) - header_bytes;
stats_.alignment_bytes += alignment_bytes;
@@ -1617,6 +1619,7 @@ class ImageDumper {
stats_.bitmap_bytes += bitmap_section.Size();
stats_.art_field_bytes += field_section.Size();
stats_.art_method_bytes += method_section.Size();
+ stats_.interned_strings_bytes += intern_section.Size();
stats_.Dump(os);
os << "\n";
@@ -1945,6 +1948,7 @@ class ImageDumper {
size_t object_bytes;
size_t art_field_bytes;
size_t art_method_bytes;
+ size_t interned_strings_bytes;
size_t bitmap_bytes;
size_t alignment_bytes;
@@ -1974,6 +1978,7 @@ class ImageDumper {
object_bytes(0),
art_field_bytes(0),
art_method_bytes(0),
+ interned_strings_bytes(0),
bitmap_bytes(0),
alignment_bytes(0),
managed_code_bytes(0),
@@ -2131,21 +2136,24 @@ class ImageDumper {
<< "art_file_bytes = header_bytes + object_bytes + alignment_bytes\n";
Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indent_os(&indent_filter);
- indent_os << StringPrintf("header_bytes = %8zd (%2.0f%% of art file bytes)\n"
- "object_bytes = %8zd (%2.0f%% of art file bytes)\n"
- "art_field_bytes = %8zd (%2.0f%% of art file bytes)\n"
- "art_method_bytes = %8zd (%2.0f%% of art file bytes)\n"
- "bitmap_bytes = %8zd (%2.0f%% of art file bytes)\n"
- "alignment_bytes = %8zd (%2.0f%% of art file bytes)\n\n",
+ indent_os << StringPrintf("header_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "object_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "art_field_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "art_method_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "interned_string_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "bitmap_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "alignment_bytes = %8zd (%2.0f%% of art file bytes)\n\n",
header_bytes, PercentOfFileBytes(header_bytes),
object_bytes, PercentOfFileBytes(object_bytes),
art_field_bytes, PercentOfFileBytes(art_field_bytes),
art_method_bytes, PercentOfFileBytes(art_method_bytes),
+ interned_strings_bytes,
+ PercentOfFileBytes(interned_strings_bytes),
bitmap_bytes, PercentOfFileBytes(bitmap_bytes),
alignment_bytes, PercentOfFileBytes(alignment_bytes))
<< std::flush;
CHECK_EQ(file_bytes, header_bytes + object_bytes + art_field_bytes + art_method_bytes +
- bitmap_bytes + alignment_bytes);
+ interned_strings_bytes + bitmap_bytes + alignment_bytes);
}
os << "object_bytes breakdown:\n";
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 007125cfbe..04017273a8 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -437,6 +437,41 @@ void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
}
}
+class FixupRootVisitor : public RootVisitor {
+ public:
+ explicit FixupRootVisitor(const PatchOat* patch_oat) : patch_oat_(patch_oat) {
+ }
+
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ *roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
+ }
+ }
+
+ private:
+ const PatchOat* const patch_oat_;
+};
+
+void PatchOat::PatchInternedStrings(const ImageHeader* image_header) {
+ const auto& section = image_header->GetImageSection(ImageHeader::kSectionInternedStrings);
+ InternTable temp_table;
+ // Note that we require that ReadFromMemory does not make an internal copy of the elements.
+ // This also relies on visit roots not doing any verification which could fail after we update
+ // the roots to be the image addresses.
+ temp_table.ReadFromMemory(image_->Begin() + section.Offset());
+ FixupRootVisitor visitor(this);
+ temp_table.VisitRoots(&visitor, kVisitRootFlagAllRoots);
+}
+
void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots) {
auto* dex_caches = down_cast<mirror::ObjectArray<mirror::DexCache>*>(
img_roots->Get(ImageHeader::kDexCaches));
@@ -483,12 +518,9 @@ bool PatchOat::PatchImage() {
auto* img_roots = image_header->GetImageRoots();
image_header->RelocateImage(delta_);
- // Patch and update ArtFields.
PatchArtFields(image_header);
-
- // Patch and update ArtMethods.
PatchArtMethods(image_header);
-
+ PatchInternedStrings(image_header);
// Patch dex file int/long arrays which point to ArtFields.
PatchDexFileArrays(img_roots);
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 7b9c8bd508..23abca8c7e 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -116,6 +116,8 @@ class PatchOat {
bool PatchImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PatchArtFields(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PatchArtMethods(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PatchInternedStrings(const ImageHeader* image_header)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -123,7 +125,7 @@ class PatchOat {
bool WriteImage(File* out);
template <typename T>
- T* RelocatedCopyOf(T* obj) {
+ T* RelocatedCopyOf(T* obj) const {
if (obj == nullptr) {
return nullptr;
}
@@ -136,7 +138,7 @@ class PatchOat {
}
template <typename T>
- T* RelocatedAddressOfPointer(T* obj) {
+ T* RelocatedAddressOfPointer(T* obj) const {
if (obj == nullptr) {
return obj;
}
@@ -149,7 +151,7 @@ class PatchOat {
}
template <typename T>
- T RelocatedAddressOfIntPointer(T obj) {
+ T RelocatedAddressOfIntPointer(T obj) const {
if (obj == 0) {
return obj;
}
@@ -199,6 +201,7 @@ class PatchOat {
TimingLogger* timings_;
+ friend class FixupRootVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat);
};
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index ab63dddaff..8daf6d4c9e 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -22,6 +22,7 @@
#include <stdint.h>
#include <utility>
+#include "bit_utils.h"
#include "logging.h"
namespace art {
@@ -121,6 +122,7 @@ class HashSet {
typedef BaseIterator<T, HashSet> Iterator;
typedef BaseIterator<const T, const HashSet> ConstIterator;
+ // If we don't own the data, this will create a new array which owns the data.
void Clear() {
DeallocateStorage();
AllocateStorage(1);
@@ -128,19 +130,70 @@ class HashSet {
elements_until_expand_ = 0;
}
- HashSet() : num_elements_(0), num_buckets_(0), data_(nullptr),
+ HashSet() : num_elements_(0), num_buckets_(0), owns_data_(false), data_(nullptr),
min_load_factor_(kDefaultMinLoadFactor), max_load_factor_(kDefaultMaxLoadFactor) {
Clear();
}
- HashSet(const HashSet& other) : num_elements_(0), num_buckets_(0), data_(nullptr) {
+ HashSet(const HashSet& other) : num_elements_(0), num_buckets_(0), owns_data_(false),
+ data_(nullptr) {
*this = other;
}
- HashSet(HashSet&& other) : num_elements_(0), num_buckets_(0), data_(nullptr) {
+ HashSet(HashSet&& other) : num_elements_(0), num_buckets_(0), owns_data_(false),
+ data_(nullptr) {
*this = std::move(other);
}
+ // Construct from existing data.
+ // Reads from a block of memory; if make_copy_of_data is false, then data_ points into the
+ // passed-in ptr.
+ HashSet(const uint8_t* ptr, bool make_copy_of_data, size_t* read_count) {
+ uint64_t temp;
+ size_t offset = 0;
+ offset = ReadFromBytes(ptr, offset, &temp);
+ num_elements_ = static_cast<uint64_t>(temp);
+ offset = ReadFromBytes(ptr, offset, &temp);
+ num_buckets_ = static_cast<uint64_t>(temp);
+ CHECK_LE(num_elements_, num_buckets_);
+ offset = ReadFromBytes(ptr, offset, &temp);
+ elements_until_expand_ = static_cast<uint64_t>(temp);
+ offset = ReadFromBytes(ptr, offset, &min_load_factor_);
+ offset = ReadFromBytes(ptr, offset, &max_load_factor_);
+ if (!make_copy_of_data) {
+ owns_data_ = false;
+ data_ = const_cast<T*>(reinterpret_cast<const T*>(ptr + offset));
+ offset += sizeof(*data_) * num_buckets_;
+ } else {
+ AllocateStorage(num_buckets_);
+ // Read elements; note that this may not be safe for cross compilation if the elements are
+ // pointer sized.
+ for (size_t i = 0; i < num_buckets_; ++i) {
+ offset = ReadFromBytes(ptr, offset, &data_[i]);
+ }
+ }
+ // Caller responsible for aligning.
+ *read_count = offset;
+ }
+
+ // Returns how large the table is after being written. If ptr is null, then no writing happens,
+ // but the size is still returned. ptr must be 8-byte aligned.
+ size_t WriteToMemory(uint8_t* ptr) {
+ size_t offset = 0;
+ offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(num_elements_));
+ offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(num_buckets_));
+ offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(elements_until_expand_));
+ offset = WriteToBytes(ptr, offset, min_load_factor_);
+ offset = WriteToBytes(ptr, offset, max_load_factor_);
+ // Write elements; note that this may not be safe for cross compilation if the elements are
+ // pointer sized.
+ for (size_t i = 0; i < num_buckets_; ++i) {
+ offset = WriteToBytes(ptr, offset, data_[i]);
+ }
+ // Caller responsible for aligning.
+ return offset;
+ }
+
~HashSet() {
DeallocateStorage();
}
@@ -152,6 +205,7 @@ class HashSet {
std::swap(elements_until_expand_, other.elements_until_expand_);
std::swap(min_load_factor_, other.min_load_factor_);
std::swap(max_load_factor_, other.max_load_factor_);
+ std::swap(owns_data_, other.owns_data_);
return *this;
}
@@ -386,6 +440,7 @@ class HashSet {
void AllocateStorage(size_t num_buckets) {
num_buckets_ = num_buckets;
data_ = allocfn_.allocate(num_buckets_);
+ owns_data_ = true;
for (size_t i = 0; i < num_buckets_; ++i) {
allocfn_.construct(allocfn_.address(data_[i]));
emptyfn_.MakeEmpty(data_[i]);
@@ -394,10 +449,13 @@ class HashSet {
void DeallocateStorage() {
if (num_buckets_ != 0) {
- for (size_t i = 0; i < NumBuckets(); ++i) {
- allocfn_.destroy(allocfn_.address(data_[i]));
+ if (owns_data_) {
+ for (size_t i = 0; i < NumBuckets(); ++i) {
+ allocfn_.destroy(allocfn_.address(data_[i]));
+ }
+ allocfn_.deallocate(data_, NumBuckets());
+ owns_data_ = false;
}
- allocfn_.deallocate(data_, NumBuckets());
data_ = nullptr;
num_buckets_ = 0;
}
@@ -418,18 +476,23 @@ class HashSet {
// Expand / shrink the table to the new specified size.
void Resize(size_t new_size) {
DCHECK_GE(new_size, Size());
- T* old_data = data_;
+ T* const old_data = data_;
size_t old_num_buckets = num_buckets_;
// Reinsert all of the old elements.
+ const bool owned_data = owns_data_;
AllocateStorage(new_size);
for (size_t i = 0; i < old_num_buckets; ++i) {
T& element = old_data[i];
if (!emptyfn_.IsEmpty(element)) {
data_[FirstAvailableSlot(IndexForHash(hashfn_(element)))] = std::move(element);
}
- allocfn_.destroy(allocfn_.address(element));
+ if (owned_data) {
+ allocfn_.destroy(allocfn_.address(element));
+ }
+ }
+ if (owned_data) {
+ allocfn_.deallocate(old_data, old_num_buckets);
}
- allocfn_.deallocate(old_data, old_num_buckets);
}
ALWAYS_INLINE size_t FirstAvailableSlot(size_t index) const {
@@ -439,6 +502,24 @@ class HashSet {
return index;
}
+ // Return new offset.
+ template <typename Elem>
+ static size_t WriteToBytes(uint8_t* ptr, size_t offset, Elem n) {
+ DCHECK_ALIGNED(ptr + offset, sizeof(n));
+ if (ptr != nullptr) {
+ *reinterpret_cast<Elem*>(ptr + offset) = n;
+ }
+ return offset + sizeof(n);
+ }
+
+ template <typename Elem>
+ static size_t ReadFromBytes(const uint8_t* ptr, size_t offset, Elem* out) {
+ DCHECK(ptr != nullptr);
+ DCHECK_ALIGNED(ptr + offset, sizeof(*out));
+ *out = *reinterpret_cast<const Elem*>(ptr + offset);
+ return offset + sizeof(*out);
+ }
+
Alloc allocfn_; // Allocator function.
HashFn hashfn_; // Hashing function.
EmptyFn emptyfn_; // IsEmpty/SetEmpty function.
@@ -446,6 +527,7 @@ class HashSet {
size_t num_elements_; // Number of inserted elements.
size_t num_buckets_; // Number of hash table buckets.
size_t elements_until_expand_; // Maximum number of elements until we expand the table.
+ bool owns_data_; // If we own data_ and are responsible for freeing it.
T* data_; // Backing storage.
double min_load_factor_;
double max_load_factor_;
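The new constructor and WriteToMemory() above define a simple flat layout: three uint64 counts (num_elements, num_buckets, elements_until_expand), the two load factors, then the raw bucket array, which a non-owning reader can point at directly. A simplified standalone sketch of that layout, using plain uint64_t buckets instead of real table entries:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Mirrors the WriteToBytes/ReadFromBytes helpers above, using memcpy instead of a cast.
template <typename Elem>
size_t WriteToBytes(uint8_t* ptr, size_t offset, Elem n) {
  if (ptr != nullptr) {
    std::memcpy(ptr + offset, &n, sizeof(n));
  }
  return offset + sizeof(n);
}

template <typename Elem>
size_t ReadFromBytes(const uint8_t* ptr, size_t offset, Elem* out) {
  std::memcpy(out, ptr + offset, sizeof(*out));
  return offset + sizeof(*out);
}

int main() {
  // Example table state (made-up values). Buckets here are plain uint64_t slots; the real
  // intern table stores GC references to java.lang.String.
  uint64_t num_elements = 3, num_buckets = 8, elements_until_expand = 7;
  double min_load = 0.4, max_load = 0.7;
  std::vector<uint64_t> buckets(num_buckets, 0);  // 0 stands for an empty slot in this sketch
  buckets[1] = 0xAAAA; buckets[4] = 0xBBBB; buckets[6] = 0xCCCC;

  const size_t size =
      3 * sizeof(uint64_t) + 2 * sizeof(double) + num_buckets * sizeof(uint64_t);
  std::vector<uint8_t> memory(size);

  size_t offset = 0;
  offset = WriteToBytes(memory.data(), offset, num_elements);
  offset = WriteToBytes(memory.data(), offset, num_buckets);
  offset = WriteToBytes(memory.data(), offset, elements_until_expand);
  offset = WriteToBytes(memory.data(), offset, min_load);
  offset = WriteToBytes(memory.data(), offset, max_load);
  for (uint64_t bucket : buckets) {
    offset = WriteToBytes(memory.data(), offset, bucket);
  }

  // A non-copying reader (the owns_data_ == false path) can point straight at the bucket
  // area instead of allocating its own storage.
  uint64_t read_num_buckets = 0;
  ReadFromBytes(memory.data(), sizeof(uint64_t), &read_num_buckets);
  const uint64_t* bucket_view = reinterpret_cast<const uint64_t*>(
      memory.data() + 3 * sizeof(uint64_t) + 2 * sizeof(double));
  std::printf("wrote %zu of %zu bytes, %llu buckets, bucket[1]=%#llx\n", offset, size,
              static_cast<unsigned long long>(read_num_buckets),
              static_cast<unsigned long long>(bucket_view[1]));
}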
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index f2be85e277..0ab148e37e 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -94,7 +94,6 @@ enum LockLevel {
kMonitorListLock,
kJniLoadLibraryLock,
kThreadListLock,
- kBreakpointInvokeLock,
kAllocTrackerLock,
kDeoptimizationLock,
kProfilerLock,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 429fa5bfe0..dc8a3d158e 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1055,7 +1055,7 @@ static void SanityCheckArtMethodPointerArray(
static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
- CHECK(obj->GetClass() != nullptr) << "Null class " << obj;
+ CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
if (obj->IsClass()) {
auto klass = obj->AsClass();
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 3c75daf7b0..b2e40b4e92 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -33,6 +33,7 @@
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
+#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
@@ -3660,17 +3661,16 @@ static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
}
}
-JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
- JDWP::RefTypeId class_id, JDWP::MethodId method_id,
- uint32_t arg_count, uint64_t* arg_values,
- JDWP::JdwpTag* arg_types, uint32_t options,
- JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
- JDWP::ObjectId* pExceptionId) {
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
+JDWP::JdwpError Dbg::PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
+ JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
+ JDWP::MethodId method_id, uint32_t arg_count,
+ uint64_t arg_values[], JDWP::JdwpTag* arg_types,
+ uint32_t options) {
+ Thread* const self = Thread::Current();
+ CHECK_EQ(self, GetDebugThread()) << "This must be called by the JDWP thread";
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* targetThread = nullptr;
- std::unique_ptr<DebugInvokeReq> req;
- Thread* self = Thread::Current();
{
ScopedObjectAccessUnchecked soa(self);
JDWP::JdwpError error;
@@ -3780,99 +3780,82 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
}
// Allocates a DebugInvokeReq.
- req.reset(new (std::nothrow) DebugInvokeReq(receiver, c, m, options, arg_values, arg_count));
- if (req.get() == nullptr) {
+ DebugInvokeReq* req = new (std::nothrow) DebugInvokeReq(request_id, thread_id, receiver, c, m,
+ options, arg_values, arg_count);
+ if (req == nullptr) {
LOG(ERROR) << "Failed to allocate DebugInvokeReq";
return JDWP::ERR_OUT_OF_MEMORY;
}
- // Attach the DebugInvokeReq to the target thread so it executes the method when
- // it is resumed. Once the invocation completes, it will detach it and signal us
- // before suspending itself.
- targetThread->SetDebugInvokeReq(req.get());
+ // Attaches the DebugInvokeReq to the target thread so it executes the method when
+ // it is resumed. Once the invocation completes, the target thread will delete it before
+ // suspending itself (see ThreadList::SuspendSelfForDebugger).
+ targetThread->SetDebugInvokeReq(req);
}
// The fact that we've released the thread list lock is a bit risky --- if the thread goes
- // away we're sitting high and dry -- but we must release this before the ResumeAllThreads
- // call, and it's unwise to hold it during WaitForSuspend.
-
- {
- /*
- * We change our (JDWP thread) status, which should be THREAD_RUNNING,
- * so we can suspend for a GC if the invoke request causes us to
- * run out of memory. It's also a good idea to change it before locking
- * the invokeReq mutex, although that should never be held for long.
- */
- self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
-
- VLOG(jdwp) << " Transferring control to event thread";
- {
- MutexLock mu(self, req->lock);
-
- if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
- VLOG(jdwp) << " Resuming all threads";
- thread_list->UndoDebuggerSuspensions();
- } else {
- VLOG(jdwp) << " Resuming event thread only";
- thread_list->Resume(targetThread, true);
- }
-
- // The target thread is resumed but needs the JDWP token we're holding.
- // We release it now and will acquire it again when the invocation is
- // complete and the target thread suspends itself.
- gJdwpState->ReleaseJdwpTokenForCommand();
-
- // Wait for the request to finish executing.
- while (targetThread->GetInvokeReq() != nullptr) {
- req->cond.Wait(self);
- }
- }
- VLOG(jdwp) << " Control has returned from event thread";
-
- /* wait for thread to re-suspend itself */
- SuspendThread(thread_id, false /* request_suspension */);
-
- // Now the thread is suspended again, we can re-acquire the JDWP token.
- gJdwpState->AcquireJdwpTokenForCommand();
+ // away we're sitting high and dry -- but we must release this before the UndoDebuggerSuspensions
+ // call.
- self->TransitionFromSuspendedToRunnable();
- }
-
- /*
- * Suspend the threads. We waited for the target thread to suspend
- * itself, so all we need to do is suspend the others.
- *
- * The SuspendAllForDebugger() call will double-suspend the event thread,
- * so we want to resume the target thread once to keep the books straight.
- */
if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
- self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
- VLOG(jdwp) << " Suspending all threads";
- thread_list->SuspendAllForDebugger();
- self->TransitionFromSuspendedToRunnable();
- VLOG(jdwp) << " Resuming event thread to balance the count";
+ VLOG(jdwp) << " Resuming all threads";
+ thread_list->UndoDebuggerSuspensions();
+ } else {
+ VLOG(jdwp) << " Resuming event thread only";
thread_list->Resume(targetThread, true);
}
- // Copy the result.
- *pResultTag = req->result_tag;
- *pResultValue = req->result_value;
- *pExceptionId = req->exception;
- return req->error;
+ return JDWP::ERR_NONE;
}
void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
- ScopedObjectAccess soa(Thread::Current());
+ Thread* const self = Thread::Current();
+ CHECK_NE(self, GetDebugThread()) << "This must be called by the event thread";
+
+ ScopedObjectAccess soa(self);
// We can be called while an exception is pending. We need
// to preserve that across the method invocation.
- StackHandleScope<3> hs(soa.Self());
- auto old_exception = hs.NewHandle<mirror::Throwable>(soa.Self()->GetException());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Throwable> old_exception = hs.NewHandle(soa.Self()->GetException());
soa.Self()->ClearException();
+ // Execute the method, then send the reply to the debugger.
+ ExecuteMethodWithoutPendingException(soa, pReq);
+
+ // If an exception was pending before the invoke, restore it now.
+ if (old_exception.Get() != nullptr) {
+ soa.Self()->SetException(old_exception.Get());
+ }
+}
+
+// Helper function: write a variable-width value into the output buffer.
+static void WriteValue(JDWP::ExpandBuf* pReply, int width, uint64_t value) {
+ switch (width) {
+ case 1:
+ expandBufAdd1(pReply, value);
+ break;
+ case 2:
+ expandBufAdd2BE(pReply, value);
+ break;
+ case 4:
+ expandBufAdd4BE(pReply, value);
+ break;
+ case 8:
+ expandBufAdd8BE(pReply, value);
+ break;
+ default:
+ LOG(FATAL) << width;
+ UNREACHABLE();
+ }
+}
+
+void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) {
+ soa.Self()->AssertNoPendingException();
+
// Translate the method through the vtable, unless the debugger wants to suppress it.
- auto* m = pReq->method;
- auto image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ ArtMethod* m = pReq->method;
+ size_t image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
ArtMethod* actual_method =
pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
@@ -3889,39 +3872,133 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
+ // Invoke the method.
ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m),
- reinterpret_cast<jvalue*>(pReq->arg_values));
+ reinterpret_cast<jvalue*>(pReq->arg_values.get()));
- pReq->result_tag = BasicTagFromDescriptor(m->GetShorty());
- const bool is_object_result = (pReq->result_tag == JDWP::JT_OBJECT);
+ // Prepare JDWP ids for the reply.
+ JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
+ const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
+ StackHandleScope<2> hs(soa.Self());
Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
soa.Self()->ClearException();
- pReq->exception = gRegistry->Add(exception);
- if (pReq->exception != 0) {
+
+ if (!IsDebuggerActive()) {
+ // The debugger detached: we must not re-suspend threads. We also don't need to fill the reply
+ // because it won't be sent either.
+ return;
+ }
+
+ JDWP::ObjectId exceptionObjectId = gRegistry->Add(exception);
+ uint64_t result_value = 0;
+ if (exceptionObjectId != 0) {
VLOG(jdwp) << " JDWP invocation returning with exception=" << exception.Get()
<< " " << exception->Dump();
- pReq->result_value = 0;
+ result_value = 0;
} else if (is_object_result) {
- /* if no exception thrown, examine object result more closely */
+ /* if no exception was thrown, examine object result more closely */
JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get());
- if (new_tag != pReq->result_tag) {
- VLOG(jdwp) << " JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
- pReq->result_tag = new_tag;
+ if (new_tag != result_tag) {
+ VLOG(jdwp) << " JDWP promoted result from " << result_tag << " to " << new_tag;
+ result_tag = new_tag;
}
// Register the object in the registry and reference its ObjectId. This ensures
// GC safety and prevents from accessing stale reference if the object is moved.
- pReq->result_value = gRegistry->Add(object_result.Get());
+ result_value = gRegistry->Add(object_result.Get());
} else {
// Primitive result.
- DCHECK(IsPrimitiveTag(pReq->result_tag));
- pReq->result_value = result.GetJ();
+ DCHECK(IsPrimitiveTag(result_tag));
+ result_value = result.GetJ();
+ }
+ const bool is_constructor = m->IsConstructor() && !m->IsStatic();
+ if (is_constructor) {
+ // If we invoked a constructor (which actually returns void), return the receiver,
+ // unless we threw, in which case we return null.
+ result_tag = JDWP::JT_OBJECT;
+ if (exceptionObjectId == 0) {
+ // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
+ // object registry.
+ result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
+ } else {
+ result_value = 0;
+ }
}
- if (old_exception.Get() != nullptr) {
- soa.Self()->SetException(old_exception.Get());
+ // Suspend other threads if the invoke is not single-threaded.
+ if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
+ soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
+ VLOG(jdwp) << " Suspending all threads";
+ Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
+ soa.Self()->TransitionFromSuspendedToRunnable();
+ }
+
+ VLOG(jdwp) << " --> returned " << result_tag
+ << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", result_value,
+ exceptionObjectId);
+
+ // Show detailed debug output.
+ if (result_tag == JDWP::JT_STRING && exceptionObjectId == 0) {
+ if (result_value != 0) {
+ if (VLOG_IS_ON(jdwp)) {
+ std::string result_string;
+ JDWP::JdwpError error = Dbg::StringToUtf8(result_value, &result_string);
+ CHECK_EQ(error, JDWP::ERR_NONE);
+ VLOG(jdwp) << " string '" << result_string << "'";
+ }
+ } else {
+ VLOG(jdwp) << " string (null)";
+ }
+ }
+
+ // Attach the reply to DebugInvokeReq so it can be sent to the debugger when the event thread
+ // is ready to suspend.
+ BuildInvokeReply(pReq->reply, pReq->request_id, result_tag, result_value, exceptionObjectId);
+}
+
+void Dbg::BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag,
+ uint64_t result_value, JDWP::ObjectId exception) {
+ // Make room for the JDWP header since we do not know the size of the reply yet.
+ JDWP::expandBufAddSpace(pReply, kJDWPHeaderLen);
+
+ size_t width = GetTagWidth(result_tag);
+ JDWP::expandBufAdd1(pReply, result_tag);
+ if (width != 0) {
+ WriteValue(pReply, width, result_value);
+ }
+ JDWP::expandBufAdd1(pReply, JDWP::JT_OBJECT);
+ JDWP::expandBufAddObjectId(pReply, exception);
+
+  // Now that we know the size, we can complete the JDWP header.
+ uint8_t* buf = expandBufGetBuffer(pReply);
+ JDWP::Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReply));
+ JDWP::Set4BE(buf + kJDWPHeaderIdOffset, request_id);
+ JDWP::Set1(buf + kJDWPHeaderFlagsOffset, kJDWPFlagReply); // flags
+ JDWP::Set2BE(buf + kJDWPHeaderErrorCodeOffset, JDWP::ERR_NONE);
+}
+
+void Dbg::FinishInvokeMethod(DebugInvokeReq* pReq) {
+ CHECK_NE(Thread::Current(), GetDebugThread()) << "This must be called by the event thread";
+
+ JDWP::ExpandBuf* const pReply = pReq->reply;
+ CHECK(pReply != nullptr) << "No reply attached to DebugInvokeReq";
+
+  // We need to prevent other threads (including the JDWP thread) from interacting with the debugger
+ // while we send the reply but are not yet suspended. The JDWP token will be released just before
+ // we suspend ourself again (see ThreadList::SuspendSelfForDebugger).
+ gJdwpState->AcquireJdwpTokenForEvent(pReq->thread_id);
+
+ // Send the reply unless the debugger detached before the completion of the method.
+ if (IsDebuggerActive()) {
+ const size_t replyDataLength = expandBufGetLength(pReply) - kJDWPHeaderLen;
+ VLOG(jdwp) << StringPrintf("REPLY INVOKE id=0x%06x (length=%zu)",
+ pReq->request_id, replyDataLength);
+
+ gJdwpState->SendRequest(pReply);
+ } else {
+ VLOG(jdwp) << "Not sending invoke reply because debugger detached";
}
}
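For context, the reply that BuildInvokeReply() assembles above follows the standard JDWP framing: an 11-byte header (length, request id, flags, error code) followed by the tagged result value and a tagged exception object id. The following self-contained sketch spells that layout out for a hypothetical int result with no exception; it is not ART code, Append_BE() is an illustrative stand-in for the expandBufAdd*BE helpers, and it assumes ART's 8-byte object IDs. The offsets line up with the kJDWPHeader* constants introduced in jdwp_priv.h further down.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Append 'width' bytes of 'value' in big-endian order, like the expandBufAdd*BE helpers.
static void Append_BE(std::vector<uint8_t>& out, uint64_t value, int width) {
  for (int i = width - 1; i >= 0; --i) {
    out.push_back(static_cast<uint8_t>(value >> (8 * i)));
  }
}

int main() {
  std::vector<uint8_t> reply;
  Append_BE(reply, 0, 4);     // [0..3]   length, patched once the size is known
  Append_BE(reply, 0x42, 4);  // [4..7]   request id echoed back to the debugger
  Append_BE(reply, 0x80, 1);  // [8]      kJDWPFlagReply
  Append_BE(reply, 0, 2);     // [9..10]  error code (ERR_NONE)
  Append_BE(reply, 'I', 1);   // [11]     result tag: JT_INT
  Append_BE(reply, 7, 4);     // [12..15] result value (GetTagWidth(JT_INT) == 4)
  Append_BE(reply, 'L', 1);   // [16]     JT_OBJECT tag for the exception slot
  Append_BE(reply, 0, 8);     // [17..24] exception ObjectId, 0 meaning "no exception"
  // Patch the total length into the first four bytes, as BuildInvokeReply() does last.
  const uint32_t total = static_cast<uint32_t>(reply.size());
  for (int i = 0; i < 4; ++i) {
    reply[i] = static_cast<uint8_t>(total >> (8 * (3 - i)));
  }
  assert(reply.size() == 25u);
  return 0;
}
```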
diff --git a/runtime/debugger.h b/runtime/debugger.h
index c23b1b88f2..fd7d46c37e 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -23,8 +23,6 @@
#include <pthread.h>
-#include <list>
-#include <map>
#include <set>
#include <string>
#include <vector>
@@ -44,6 +42,7 @@ class Throwable;
class ArtField;
class ArtMethod;
class ObjectRegistry;
+class ScopedObjectAccess;
class ScopedObjectAccessUnchecked;
class StackVisitor;
class Thread;
@@ -52,33 +51,32 @@ class Thread;
* Invoke-during-breakpoint support.
*/
struct DebugInvokeReq {
- DebugInvokeReq(mirror::Object* invoke_receiver, mirror::Class* invoke_class,
+ DebugInvokeReq(uint32_t invoke_request_id, JDWP::ObjectId invoke_thread_id,
+ mirror::Object* invoke_receiver, mirror::Class* invoke_class,
ArtMethod* invoke_method, uint32_t invoke_options,
- uint64_t* args, uint32_t args_count)
- : receiver(invoke_receiver), klass(invoke_class), method(invoke_method),
- arg_count(args_count), arg_values(args), options(invoke_options),
- error(JDWP::ERR_NONE), result_tag(JDWP::JT_VOID), result_value(0), exception(0),
- lock("a DebugInvokeReq lock", kBreakpointInvokeLock),
- cond("a DebugInvokeReq condition variable", lock) {
+ uint64_t args[], uint32_t args_count)
+ : request_id(invoke_request_id), thread_id(invoke_thread_id), receiver(invoke_receiver),
+ klass(invoke_class), method(invoke_method), arg_count(args_count), arg_values(args),
+ options(invoke_options), reply(JDWP::expandBufAlloc()) {
}
- /* request */
- GcRoot<mirror::Object> receiver; // not used for ClassType.InvokeMethod
+ ~DebugInvokeReq() {
+ JDWP::expandBufFree(reply);
+ }
+
+ // Request
+ const uint32_t request_id;
+ const JDWP::ObjectId thread_id;
+ GcRoot<mirror::Object> receiver; // not used for ClassType.InvokeMethod.
GcRoot<mirror::Class> klass;
- ArtMethod* method;
+ ArtMethod* const method;
const uint32_t arg_count;
- uint64_t* const arg_values; // will be null if arg_count_ == 0
+ std::unique_ptr<uint64_t[]> arg_values; // will be null if arg_count_ == 0. We take ownership
+ // of this array so we must delete it upon destruction.
const uint32_t options;
- /* result */
- JDWP::JdwpError error;
- JDWP::JdwpTag result_tag;
- uint64_t result_value; // either a primitive value or an ObjectId
- JDWP::ObjectId exception;
-
- /* condition variable to wait on while the method executes */
- Mutex lock DEFAULT_MUTEX_ACQUIRED_AFTER;
- ConditionVariable cond GUARDED_BY(lock);
+ // Reply
+ JDWP::ExpandBuf* const reply;
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -607,19 +605,39 @@ class Dbg {
LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Invoke support for commands ClassType.InvokeMethod, ClassType.NewInstance and
- // ObjectReference.InvokeMethod.
- static JDWP::JdwpError InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
- JDWP::RefTypeId class_id, JDWP::MethodId method_id,
- uint32_t arg_count, uint64_t* arg_values,
- JDWP::JdwpTag* arg_types, uint32_t options,
- JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
- JDWP::ObjectId* pExceptObj)
+ /*
+ * Invoke support
+ */
+
+ // Called by the JDWP thread to prepare invocation in the event thread (suspended on an event).
+ // If the information sent by the debugger is incorrect, it will send a reply with the
+ // appropriate error code. Otherwise, it will attach a DebugInvokeReq object to the event thread
+ // and resume it (and possibly other threads depending on the invoke options).
+ // Unlike other commands, the JDWP thread will not send the reply to the debugger (see
+ // JdwpState::ProcessRequest). The reply will be sent by the event thread itself after method
+ // invocation completes (see FinishInvokeMethod). This is required to allow the JDWP thread to
+ // process incoming commands from the debugger while the invocation is still in progress in the
+ // event thread, especially if it gets suspended by a debug event occurring in another thread.
+ static JDWP::JdwpError PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
+ JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
+ JDWP::MethodId method_id, uint32_t arg_count,
+ uint64_t arg_values[], JDWP::JdwpTag* arg_types,
+ uint32_t options)
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Called by the event thread to execute a method prepared by the JDWP thread in the given
+ // DebugInvokeReq object. Once the invocation completes, the event thread attaches a reply
+ // to that DebugInvokeReq object so it can be sent to the debugger only when the event thread
+ // is ready to suspend (see FinishInvokeMethod).
static void ExecuteMethod(DebugInvokeReq* pReq);
+ // Called by the event thread to send the reply of the invoke (created in ExecuteMethod)
+ // before suspending itself. This is to ensure the thread is ready to suspend before the
+ // debugger receives the reply.
+ static void FinishInvokeMethod(DebugInvokeReq* pReq);
+
/*
* DDM support.
*/
@@ -696,6 +714,14 @@ class Dbg {
}
private:
+ static void ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id,
+ JDWP::JdwpTag result_tag, uint64_t result_value,
+ JDWP::ObjectId exception)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor,
ScopedObjectAccessUnchecked& soa, int slot,
JDWP::JdwpTag tag, uint8_t* buf, size_t width)
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index d017601565..7ac264a0c5 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -264,13 +264,18 @@ class DexFile {
// Raw code_item.
struct CodeItem {
- uint16_t registers_size_;
- uint16_t ins_size_;
- uint16_t outs_size_;
- uint16_t tries_size_;
- uint32_t debug_info_off_; // file offset to debug info stream
+ uint16_t registers_size_; // the number of registers used by this code
+ // (locals + parameters)
+ uint16_t ins_size_; // the number of words of incoming arguments to the method
+ // that this code is for
+ uint16_t outs_size_; // the number of words of outgoing argument space required
+ // by this code for method invocation
+ uint16_t tries_size_; // the number of try_items for this instance. If non-zero,
+ // then these appear as the tries array just after the
+ // insns in this instance.
+ uint32_t debug_info_off_; // file offset to debug info stream
uint32_t insns_size_in_code_units_; // size of the insns array, in 2 byte code units
- uint16_t insns_[1];
+ uint16_t insns_[1]; // actual array of bytecode.
private:
DISALLOW_COPY_AND_ASSIGN(CodeItem);
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index c16f5d35e0..006d2c7d30 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -159,6 +159,7 @@ template<size_t kAlignment> template<bool kSetBit>
inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
DCHECK_GE(addr, heap_begin_);
+ DCHECK(HasAddress(obj)) << obj;
const uintptr_t offset = addr - heap_begin_;
const size_t index = OffsetToIndex(offset);
const uintptr_t mask = OffsetToMask(offset);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index fe2b284fcb..6546eb4245 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -35,6 +35,11 @@ size_t SpaceBitmap<kAlignment>::ComputeBitmapSize(uint64_t capacity) {
}
template<size_t kAlignment>
+size_t SpaceBitmap<kAlignment>::ComputeHeapSize(uint64_t bitmap_bytes) {
+ return bitmap_bytes * kBitsPerByte * kAlignment;
+}
+
+template<size_t kAlignment>
SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
const std::string& name, MemMap* mem_map, uint8_t* heap_begin, size_t heap_capacity) {
CHECK(mem_map != nullptr);
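ComputeHeapSize() is the inverse of ComputeBitmapSize(): one bitmap bit covers one kAlignment-byte slot, so a bitmap of N bytes describes N * kBitsPerByte * kAlignment bytes of heap. Below is a standalone restatement of that arithmetic, assuming the ContinuousSpaceBitmap parameters (kBitsPerByte == 8, kAlignment == kObjectAlignment == 8); this is how ImageSpace::Init further down derives the covered heap size from the image's bitmap section instead of using the raw map size.

```cpp
#include <cassert>
#include <cstdint>

// Mirrors SpaceBitmap<kAlignment>::ComputeHeapSize() under the assumptions stated above.
constexpr uint64_t ComputeHeapSize(uint64_t bitmap_bytes,
                                   uint64_t bits_per_byte = 8,
                                   uint64_t alignment = 8) {
  // Each bitmap bit marks one 'alignment'-byte slot of the heap.
  return bitmap_bytes * bits_per_byte * alignment;
}

int main() {
  // A 64 KiB live bitmap therefore describes a 4 MiB heap range.
  assert(ComputeHeapSize(64 * 1024) == 4u * 1024 * 1024);
  return 0;
}
```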
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index d6b3ed4f26..35faff3774 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -188,15 +188,16 @@ class SpaceBitmap {
std::string Dump() const;
+  // Helper functions for converting a 64 bit heap capacity to a bitmap size and vice versa.
+ static size_t ComputeBitmapSize(uint64_t capacity);
+ static size_t ComputeHeapSize(uint64_t bitmap_bytes);
+
private:
// TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
// however, we document that this is expected on heap_end_
SpaceBitmap(const std::string& name, MemMap* mem_map, uintptr_t* bitmap_begin, size_t bitmap_size,
const void* heap_begin);
- // Helper function for computing bitmap size based on a 64 bit capacity.
- static size_t ComputeBitmapSize(uint64_t capacity);
-
template<bool kSetBit>
bool Modify(const mirror::Object* obj);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 14fa740728..22207ee21c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1001,6 +1001,27 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
BaseMutex::DumpAll(os);
}
+void Heap::ResetGcPerformanceInfo() {
+ for (auto& collector : garbage_collectors_) {
+ collector->ResetMeasurements();
+ }
+ total_allocation_time_.StoreRelaxed(0);
+ total_bytes_freed_ever_ = 0;
+ total_objects_freed_ever_ = 0;
+ total_wait_time_ = 0;
+ blocking_gc_count_ = 0;
+ blocking_gc_time_ = 0;
+ gc_count_last_window_ = 0;
+ blocking_gc_count_last_window_ = 0;
+ last_update_time_gc_count_rate_histograms_ = // Round down by the window duration.
+ (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
+ {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ gc_count_rate_histogram_.Reset();
+ blocking_gc_count_rate_histogram_.Reset();
+ }
+}
+
uint64_t Heap::GetGcCount() const {
uint64_t gc_count = 0U;
for (auto& collector : garbage_collectors_) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 09bd3701e5..18244c856b 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -598,6 +598,7 @@ class Heap {
// GC performance measuring
void DumpGcPerformanceInfo(std::ostream& os);
+ void ResetGcPerformanceInfo();
// Returns true if we currently care about pause times.
bool CareAboutPauseTimes() const {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 437fd8c5c9..f7ceb841c2 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -694,7 +694,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
const auto section_idx = static_cast<ImageHeader::ImageSections>(i);
auto& section = image_header.GetImageSection(section_idx);
LOG(INFO) << section_idx << " start="
- << reinterpret_cast<void*>(image_header.GetImageBegin() + section.Offset())
+ << reinterpret_cast<void*>(image_header.GetImageBegin() + section.Offset()) << " "
<< section;
}
}
@@ -730,9 +730,9 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_filename,
bitmap_index));
std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
- accounting::ContinuousSpaceBitmap::CreateFromMemMap(bitmap_name, image_map.release(),
- reinterpret_cast<uint8_t*>(map->Begin()),
- map->Size()));
+ accounting::ContinuousSpaceBitmap::CreateFromMemMap(
+ bitmap_name, image_map.release(), reinterpret_cast<uint8_t*>(map->Begin()),
+ accounting::ContinuousSpaceBitmap::ComputeHeapSize(bitmap_section.Size())));
if (bitmap.get() == nullptr) {
*error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
return nullptr;
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 85eefb0957..f32d5a1b81 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -75,7 +75,7 @@ static constexpr size_t kMaxObjectsPerSegment = 128;
static constexpr size_t kMaxBytesPerSegment = 4096;
// The static field-name for the synthetic object generated to account for class static overhead.
-static constexpr const char* kStaticOverheadName = "$staticOverhead";
+static constexpr const char* kClassOverheadName = "$classOverhead";
enum HprofTag {
HPROF_TAG_STRING = 0x01,
@@ -1092,17 +1092,23 @@ void Hprof::DumpHeapClass(mirror::Class* klass) {
// Class is allocated but not yet loaded: we cannot access its fields or super class.
return;
}
- size_t sFieldCount = klass->NumStaticFields();
- if (sFieldCount != 0) {
- int byteLength = sFieldCount * sizeof(JValue); // TODO bogus; fields are packed
+ const size_t num_static_fields = klass->NumStaticFields();
+ // Total class size including embedded IMT, embedded vtable, and static fields.
+ const size_t class_size = klass->GetClassSize();
+ // Class size excluding static fields (relies on reference fields being the first static fields).
+ const size_t class_size_without_overhead = sizeof(mirror::Class);
+ CHECK_LE(class_size_without_overhead, class_size);
+ const size_t overhead_size = class_size - class_size_without_overhead;
+
+ if (overhead_size != 0) {
// Create a byte array to reflect the allocation of the
// StaticField array at the end of this class.
__ AddU1(HPROF_PRIMITIVE_ARRAY_DUMP);
__ AddClassStaticsId(klass);
__ AddStackTraceSerialNumber(LookupStackTraceSerialNumber(klass));
- __ AddU4(byteLength);
+ __ AddU4(overhead_size);
__ AddU1(hprof_basic_byte);
- for (int i = 0; i < byteLength; ++i) {
+ for (size_t i = 0; i < overhead_size; ++i) {
__ AddU1(0);
}
}
@@ -1119,7 +1125,7 @@ void Hprof::DumpHeapClass(mirror::Class* klass) {
if (klass->IsClassClass()) {
// ClassObjects have their static fields appended, so aren't all the same size.
// But they're at least this size.
- __ AddU4(sizeof(mirror::Class)); // instance size
+ __ AddU4(class_size_without_overhead); // instance size
} else if (klass->IsStringClass()) {
// Strings are variable length with character data at the end like arrays.
// This outputs the size of an empty string.
@@ -1133,15 +1139,15 @@ void Hprof::DumpHeapClass(mirror::Class* klass) {
__ AddU2(0); // empty const pool
// Static fields
- if (sFieldCount == 0) {
- __ AddU2((uint16_t)0);
+ if (overhead_size == 0) {
+ __ AddU2(static_cast<uint16_t>(0));
} else {
- __ AddU2((uint16_t)(sFieldCount+1));
- __ AddStringId(LookupStringId(kStaticOverheadName));
+ __ AddU2(static_cast<uint16_t>(num_static_fields + 1));
+ __ AddStringId(LookupStringId(kClassOverheadName));
__ AddU1(hprof_basic_object);
__ AddClassStaticsId(klass);
- for (size_t i = 0; i < sFieldCount; ++i) {
+ for (size_t i = 0; i < num_static_fields; ++i) {
ArtField* f = klass->GetStaticField(i);
size_t size;
diff --git a/runtime/image.cc b/runtime/image.cc
index 947c914de6..44193da4ee 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '6', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '7', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index c6be7ef3f7..d856f218af 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -142,6 +142,7 @@ class PACKED(4) ImageHeader {
kSectionObjects,
kSectionArtFields,
kSectionArtMethods,
+ kSectionInternedStrings,
kSectionImageBitmap,
kSectionCount, // Number of elements in enum.
};
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 9abbca8460..2a962784ca 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -152,20 +152,28 @@ void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
CHECK(image_space != nullptr);
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
if (!image_added_to_intern_table_) {
- mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const DexFile* dex_file = dex_cache->GetDexFile();
- const size_t num_strings = dex_file->NumStringIds();
- for (size_t j = 0; j < num_strings; ++j) {
- mirror::String* image_string = dex_cache->GetResolvedString(j);
- if (image_string != nullptr) {
- mirror::String* found = LookupStrong(image_string);
- if (found == nullptr) {
- InsertStrong(image_string);
- } else {
- DCHECK_EQ(found, image_string);
+ const ImageHeader* const header = &image_space->GetImageHeader();
+ // Check if we have the interned strings section.
+ const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
+ if (section.Size() > 0) {
+ ReadFromMemoryLocked(image_space->Begin() + section.Offset());
+ } else {
+ // TODO: Delete this logic?
+ mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
+ mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
+ for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
+ mirror::DexCache* dex_cache = dex_caches->Get(i);
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ const size_t num_strings = dex_file->NumStringIds();
+ for (size_t j = 0; j < num_strings; ++j) {
+ mirror::String* image_string = dex_cache->GetResolvedString(j);
+ if (image_string != nullptr) {
+ mirror::String* found = LookupStrong(image_string);
+ if (found == nullptr) {
+ InsertStrong(image_string);
+ } else {
+ DCHECK_EQ(found, image_string);
+ }
}
}
}
@@ -285,6 +293,29 @@ void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
weak_interns_.SweepWeaks(callback, arg);
}
+void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
+ const ImageSection& intern_section = image_space->GetImageHeader().GetImageSection(
+ ImageHeader::kSectionInternedStrings);
+ // Read the string tables from the image.
+ const uint8_t* ptr = image_space->Begin() + intern_section.Offset();
+ const size_t offset = ReadFromMemory(ptr);
+ CHECK_LE(offset, intern_section.Size());
+}
+
+size_t InternTable::ReadFromMemory(const uint8_t* ptr) {
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ return ReadFromMemoryLocked(ptr);
+}
+
+size_t InternTable::ReadFromMemoryLocked(const uint8_t* ptr) {
+ return strong_interns_.ReadIntoPreZygoteTable(ptr);
+}
+
+size_t InternTable::WriteToMemory(uint8_t* ptr) {
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ return strong_interns_.WriteFromPostZygoteTable(ptr);
+}
+
std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
if (kIsDebugBuild) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -300,6 +331,17 @@ bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
return a.Read()->Equals(b.Read());
}
+size_t InternTable::Table::ReadIntoPreZygoteTable(const uint8_t* ptr) {
+ CHECK_EQ(pre_zygote_table_.Size(), 0u);
+ size_t read_count = 0;
+ pre_zygote_table_ = UnorderedSet(ptr, false /* make copy */, &read_count);
+ return read_count;
+}
+
+size_t InternTable::Table::WriteFromPostZygoteTable(uint8_t* ptr) {
+ return post_zygote_table_.WriteToMemory(ptr);
+}
+
void InternTable::Table::Remove(mirror::String* s) {
auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
if (it != post_zygote_table_.end()) {
@@ -325,9 +367,13 @@ mirror::String* InternTable::Table::Find(mirror::String* s) {
}
void InternTable::Table::SwapPostZygoteWithPreZygote() {
- CHECK(pre_zygote_table_.Empty());
- std::swap(pre_zygote_table_, post_zygote_table_);
- VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
+ if (pre_zygote_table_.Empty()) {
+ std::swap(pre_zygote_table_, post_zygote_table_);
+ VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
+ } else {
+    // This case happens if we read the intern table from the image.
+ VLOG(heap) << "Not swapping due to non-empty pre_zygote_table_";
+ }
}
void InternTable::Table::Insert(mirror::String* s) {
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 1e5d3c22c9..97ce73c52e 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -97,6 +97,20 @@ class InternTable {
void SwapPostZygoteWithPreZygote()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ // Add an intern table which was serialized to the image.
+ void AddImageInternTable(gc::space::ImageSpace* image_space)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+
+  // Read the intern table from memory. The elements aren't copied; the intern hash set data will
+ // point to somewhere within ptr. Only reads the strong interns.
+ size_t ReadFromMemory(const uint8_t* ptr) LOCKS_EXCLUDED(Locks::intern_table_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Write the post zygote intern table to a pointer. Only writes the strong interns since it is
+  // expected that there are no weak interns, as this is called from the image writer.
+ size_t WriteToMemory(uint8_t* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::intern_table_lock_);
+
private:
class StringHashEquals {
public:
@@ -133,6 +147,16 @@ class InternTable {
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+    // ReadIntoPreZygoteTable is called from ReadFromMemory, which happens during runtime creation
+ // when we load the image intern table. Returns how many bytes were read.
+ size_t ReadIntoPreZygoteTable(const uint8_t* ptr)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    // The image writer calls WriteFromPostZygoteTable through WriteToMemory; it writes the interns in
+ // the post zygote table. Returns how many bytes were written.
+ size_t WriteFromPostZygoteTable(uint8_t* ptr)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
@@ -192,6 +216,10 @@ class InternTable {
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
friend class Transaction;
+ size_t ReadFromMemoryLocked(const uint8_t* ptr)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
bool allow_new_interns_ GUARDED_BY(Locks::intern_table_lock_);
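The intent of the new WriteToMemory()/ReadFromMemory() pair is a zero-copy round trip: the image writer flattens the strong interns into the image's interned-strings section, and at image load time the runtime rebuilds a table whose entries point straight into the mapped section (hence "the elements aren't copied"). The sketch below shows the same pattern with a plain length-prefixed string table; it is deliberately generic and does not reproduce ART's HashSet serialization format.

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>
#include <string_view>
#include <vector>

// "Image writer" side: flatten a string table into one contiguous buffer.
static std::vector<uint8_t> WriteTable(const std::vector<std::string>& strings) {
  std::vector<uint8_t> buf;
  for (const std::string& s : strings) {
    const uint32_t len = static_cast<uint32_t>(s.size());
    const uint8_t* len_bytes = reinterpret_cast<const uint8_t*>(&len);
    buf.insert(buf.end(), len_bytes, len_bytes + sizeof(len));  // length prefix
    buf.insert(buf.end(), s.begin(), s.end());                  // payload
  }
  return buf;
}

// "Runtime" side: rebuild a read-only view whose entries alias the buffer, copying nothing.
static std::vector<std::string_view> ReadTable(const uint8_t* ptr, size_t size) {
  std::vector<std::string_view> views;
  size_t offset = 0;
  while (offset < size) {
    uint32_t len;
    std::memcpy(&len, ptr + offset, sizeof(len));
    offset += sizeof(len);
    views.emplace_back(reinterpret_cast<const char*>(ptr + offset), len);
    offset += len;
  }
  return views;
}

int main() {
  const std::vector<uint8_t> section = WriteTable({"java.lang.Object", "hello"});
  const auto views = ReadTable(section.data(), section.size());
  assert(views.size() == 2 && views[1] == "hello");
  return 0;
}
```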
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 86a79cead9..0f6f788013 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -450,10 +450,13 @@ void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame,
size_t dest_reg, size_t src_reg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // If both register locations contains the same value, the register probably holds a reference.
// Uint required, so that sign extension does not make this wrong on 64b systems
uint32_t src_value = shadow_frame.GetVReg(src_reg);
mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
+
+  // If both register locations contain the same value, the register probably holds a reference.
+ // Note: As an optimization, non-moving collectors leave a stale reference value
+ // in the references array even after the original vreg was overwritten to a non-reference.
if (src_value == reinterpret_cast<uintptr_t>(o)) {
new_shadow_frame->SetVRegReference(dest_reg, o);
} else {
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index dd7aa40368..fcf083cbe1 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -56,7 +56,7 @@ namespace interpreter {
template<bool do_access_check, bool transaction_active>
JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register) {
- bool do_assignability_check = do_access_check;
+ constexpr bool do_assignability_check = do_access_check;
if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
LOG(FATAL) << "Invalid shadow frame for interpreter use";
return JValue();
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index e18d10fa0a..7c48985dfe 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -297,7 +297,7 @@ struct JdwpState {
private:
explicit JdwpState(const JdwpOptions* options);
- size_t ProcessRequest(Request* request, ExpandBuf* pReply);
+ size_t ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply);
bool InvokeInProgress();
bool IsConnected();
void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id)
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 612af8bc99..14f097f72a 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -99,10 +99,6 @@ put ourselves to sleep. That way we don't interfere with anyone else and
don't allow anyone else to interfere with us.
*/
-
-#define kJdwpEventCommandSet 64
-#define kJdwpCompositeCommand 100
-
namespace art {
namespace JDWP {
@@ -612,13 +608,10 @@ void JdwpState::SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId
*/
DebugInvokeReq* const pReq = Dbg::GetInvokeReq();
if (pReq == nullptr) {
- /*LOGD("SuspendByPolicy: no invoke needed");*/
break;
}
- /* grab this before posting/suspending again */
- AcquireJdwpTokenForEvent(thread_self_id);
-
+ // Execute method.
Dbg::ExecuteMethod(pReq);
}
}
@@ -749,11 +742,11 @@ static ExpandBuf* eventPrep() {
void JdwpState::EventFinish(ExpandBuf* pReq) {
uint8_t* buf = expandBufGetBuffer(pReq);
- Set4BE(buf, expandBufGetLength(pReq));
- Set4BE(buf + 4, NextRequestSerial());
- Set1(buf + 8, 0); /* flags */
- Set1(buf + 9, kJdwpEventCommandSet);
- Set1(buf + 10, kJdwpCompositeCommand);
+ Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReq));
+ Set4BE(buf + kJDWPHeaderIdOffset, NextRequestSerial());
+ Set1(buf + kJDWPHeaderFlagsOffset, 0); /* flags */
+ Set1(buf + kJDWPHeaderCmdSetOffset, kJDWPEventCmdSet);
+ Set1(buf + kJDWPHeaderCmdOffset, kJDWPEventCompositeCmd);
SendRequest(pReq);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index f7f70f6ed7..d4e2656b7e 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -52,17 +52,6 @@ std::string DescribeRefTypeId(const RefTypeId& ref_type_id) {
return StringPrintf("%#" PRIx64 " (%s)", ref_type_id, signature.c_str());
}
-// Helper function: write a variable-width value into the output input buffer.
-static void WriteValue(ExpandBuf* pReply, int width, uint64_t value) {
- switch (width) {
- case 1: expandBufAdd1(pReply, value); break;
- case 2: expandBufAdd2BE(pReply, value); break;
- case 4: expandBufAdd4BE(pReply, value); break;
- case 8: expandBufAdd8BE(pReply, value); break;
- default: LOG(FATAL) << width; break;
- }
-}
-
static JdwpError WriteTaggedObject(ExpandBuf* reply, ObjectId object_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint8_t tag;
@@ -92,7 +81,7 @@ static JdwpError WriteTaggedObjectList(ExpandBuf* reply, const std::vector<Objec
* If "is_constructor" is set, this returns "object_id" rather than the
* expected-to-be-void return value of the called function.
*/
-static JdwpError RequestInvoke(JdwpState*, Request* request, ExpandBuf* pReply,
+static JdwpError RequestInvoke(JdwpState*, Request* request,
ObjectId thread_id, ObjectId object_id,
RefTypeId class_id, MethodId method_id, bool is_constructor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -122,49 +111,15 @@ static JdwpError RequestInvoke(JdwpState*, Request* request, ExpandBuf* pReply,
(options & INVOKE_SINGLE_THREADED) ? " (SINGLE_THREADED)" : "",
(options & INVOKE_NONVIRTUAL) ? " (NONVIRTUAL)" : "");
- JdwpTag resultTag;
- uint64_t resultValue;
- ObjectId exceptObjId;
- JdwpError err = Dbg::InvokeMethod(thread_id, object_id, class_id, method_id, arg_count,
- argValues.get(), argTypes.get(), options, &resultTag,
- &resultValue, &exceptObjId);
- if (err != ERR_NONE) {
- return err;
- }
-
- if (is_constructor) {
- // If we invoked a constructor (which actually returns void), return the receiver,
- // unless we threw, in which case we return null.
- resultTag = JT_OBJECT;
- resultValue = (exceptObjId == 0) ? object_id : 0;
- }
-
- size_t width = Dbg::GetTagWidth(resultTag);
- expandBufAdd1(pReply, resultTag);
- if (width != 0) {
- WriteValue(pReply, width, resultValue);
- }
- expandBufAdd1(pReply, JT_OBJECT);
- expandBufAddObjectId(pReply, exceptObjId);
-
- VLOG(jdwp) << " --> returned " << resultTag
- << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", resultValue, exceptObjId);
-
- /* show detailed debug output */
- if (resultTag == JT_STRING && exceptObjId == 0) {
- if (resultValue != 0) {
- if (VLOG_IS_ON(jdwp)) {
- std::string result_string;
- JDWP::JdwpError error = Dbg::StringToUtf8(resultValue, &result_string);
- CHECK_EQ(error, JDWP::ERR_NONE);
- VLOG(jdwp) << " string '" << result_string << "'";
- }
- } else {
- VLOG(jdwp) << " string (null)";
- }
+ JDWP::JdwpError error = Dbg::PrepareInvokeMethod(request->GetId(), thread_id, object_id,
+ class_id, method_id, arg_count,
+ argValues.get(), argTypes.get(), options);
+ if (error == JDWP::ERR_NONE) {
+ // We successfully requested the invoke. The event thread now owns the arguments array in its
+ // DebugInvokeReq mailbox.
+ argValues.release();
}
-
- return err;
+ return error;
}
static JdwpError VM_Version(JdwpState*, Request*, ExpandBuf* pReply)
@@ -684,13 +639,14 @@ static JdwpError CT_SetValues(JdwpState* , Request* request, ExpandBuf*)
* Example: Eclipse sometimes uses java/lang/Class.forName(String s) on
* values in the "variables" display.
*/
-static JdwpError CT_InvokeMethod(JdwpState* state, Request* request, ExpandBuf* pReply)
+static JdwpError CT_InvokeMethod(JdwpState* state, Request* request,
+ ExpandBuf* pReply ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
- return RequestInvoke(state, request, pReply, thread_id, 0, class_id, method_id, false);
+ return RequestInvoke(state, request, thread_id, 0, class_id, method_id, false);
}
/*
@@ -700,7 +656,8 @@ static JdwpError CT_InvokeMethod(JdwpState* state, Request* request, ExpandBuf*
* Example: in IntelliJ, create a watch on "new String(myByteArray)" to
* see the contents of a byte[] as a string.
*/
-static JdwpError CT_NewInstance(JdwpState* state, Request* request, ExpandBuf* pReply)
+static JdwpError CT_NewInstance(JdwpState* state, Request* request,
+ ExpandBuf* pReply ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
@@ -711,7 +668,7 @@ static JdwpError CT_NewInstance(JdwpState* state, Request* request, ExpandBuf* p
if (status != ERR_NONE) {
return status;
}
- return RequestInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, true);
+ return RequestInvoke(state, request, thread_id, object_id, class_id, method_id, true);
}
/*
@@ -863,14 +820,15 @@ static JdwpError OR_MonitorInfo(JdwpState*, Request* request, ExpandBuf* reply)
* object), it will try to invoke the object's toString() function. This
* feature becomes crucial when examining ArrayLists with Eclipse.
*/
-static JdwpError OR_InvokeMethod(JdwpState* state, Request* request, ExpandBuf* pReply)
+static JdwpError OR_InvokeMethod(JdwpState* state, Request* request,
+ ExpandBuf* pReply ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
ObjectId thread_id = request->ReadThreadId();
RefTypeId class_id = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
- return RequestInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, false);
+ return RequestInvoke(state, request, thread_id, object_id, class_id, method_id, false);
}
static JdwpError OR_DisableCollection(JdwpState*, Request* request, ExpandBuf*)
@@ -1602,13 +1560,27 @@ static std::string DescribeCommand(Request* request) {
return result;
}
+// Returns true if the given command_set and command identify an "invoke" command.
+static bool IsInvokeCommand(uint8_t command_set, uint8_t command) {
+ if (command_set == kJDWPClassTypeCmdSet) {
+ return command == kJDWPClassTypeInvokeMethodCmd || command == kJDWPClassTypeNewInstanceCmd;
+ } else if (command_set == kJDWPObjectReferenceCmdSet) {
+ return command == kJDWPObjectReferenceInvokeCmd;
+ } else {
+ return false;
+ }
+}
+
/*
- * Process a request from the debugger.
+ * Process a request from the debugger. The skip_reply flag is set to true to indicate to the
+ * caller that the reply must not be sent to the debugger. This is used for invoke commands where the
+ * reply is sent by the event thread after completing the invoke.
*
* On entry, the JDWP thread is in VMWAIT.
*/
-size_t JdwpState::ProcessRequest(Request* request, ExpandBuf* pReply) {
+size_t JdwpState::ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply) {
JdwpError result = ERR_NONE;
+ *skip_reply = false;
if (request->GetCommandSet() != kJDWPDdmCmdSet) {
/*
@@ -1661,24 +1633,31 @@ size_t JdwpState::ProcessRequest(Request* request, ExpandBuf* pReply) {
result = ERR_NOT_IMPLEMENTED;
}
- /*
- * Set up the reply header.
- *
- * If we encountered an error, only send the header back.
- */
- uint8_t* replyBuf = expandBufGetBuffer(pReply);
- size_t replyLength = (result == ERR_NONE) ? expandBufGetLength(pReply) : kJDWPHeaderLen;
- Set4BE(replyBuf + 0, replyLength);
- Set4BE(replyBuf + 4, request->GetId());
- Set1(replyBuf + 8, kJDWPFlagReply);
- Set2BE(replyBuf + 9, result);
-
- CHECK_GT(expandBufGetLength(pReply), 0U) << GetCommandName(request) << " " << request->GetId();
-
- size_t respLen = expandBufGetLength(pReply) - kJDWPHeaderLen;
- VLOG(jdwp) << "REPLY: " << GetCommandName(request) << " " << result << " (length=" << respLen << ")";
- if (false) {
- VLOG(jdwp) << HexDump(expandBufGetBuffer(pReply) + kJDWPHeaderLen, respLen, false, "");
+ size_t replyLength = 0U;
+ if (result == ERR_NONE && IsInvokeCommand(request->GetCommandSet(), request->GetCommand())) {
+    // We successfully requested an invoke in the event thread. It will send the reply once the
+    // invoke completes, so we must not send it now.
+ *skip_reply = true;
+ } else {
+ /*
+ * Set up the reply header.
+ *
+ * If we encountered an error, only send the header back.
+ */
+ uint8_t* replyBuf = expandBufGetBuffer(pReply);
+ replyLength = (result == ERR_NONE) ? expandBufGetLength(pReply) : kJDWPHeaderLen;
+ Set4BE(replyBuf + kJDWPHeaderSizeOffset, replyLength);
+ Set4BE(replyBuf + kJDWPHeaderIdOffset, request->GetId());
+ Set1(replyBuf + kJDWPHeaderFlagsOffset, kJDWPFlagReply);
+ Set2BE(replyBuf + kJDWPHeaderErrorCodeOffset, result);
+
+ CHECK_GT(expandBufGetLength(pReply), 0U) << GetCommandName(request) << " " << request->GetId();
+
+ size_t respLen = expandBufGetLength(pReply) - kJDWPHeaderLen;
+ VLOG(jdwp) << "REPLY: " << GetCommandName(request) << " " << result << " (length=" << respLen << ")";
+ if (false) {
+ VLOG(jdwp) << HexDump(expandBufGetBuffer(pReply) + kJDWPHeaderLen, respLen, false, "");
+ }
}
VLOG(jdwp) << "----------";
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index e6b97a2083..6bc5e27f85 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -395,8 +395,15 @@ bool JdwpState::HandlePacket() {
JDWP::Request request(netStateBase->input_buffer_, netStateBase->input_count_);
ExpandBuf* pReply = expandBufAlloc();
- size_t replyLength = ProcessRequest(&request, pReply);
- ssize_t cc = netStateBase->WritePacket(pReply, replyLength);
+ bool skip_reply = false;
+ size_t replyLength = ProcessRequest(&request, pReply, &skip_reply);
+ ssize_t cc = 0;
+ if (!skip_reply) {
+ cc = netStateBase->WritePacket(pReply, replyLength);
+ } else {
+ DCHECK_EQ(replyLength, 0U);
+ }
+ expandBufFree(pReply);
/*
* We processed this request and sent its reply so we can release the JDWP token.
@@ -405,10 +412,8 @@ bool JdwpState::HandlePacket() {
if (cc != static_cast<ssize_t>(replyLength)) {
PLOG(ERROR) << "Failed sending reply to debugger";
- expandBufFree(pReply);
return false;
}
- expandBufFree(pReply);
netStateBase->ConsumeBytes(request.GetLength());
{
MutexLock mu(self, shutdown_lock_);
diff --git a/runtime/jdwp/jdwp_priv.h b/runtime/jdwp/jdwp_priv.h
index f290be0f52..d58467d108 100644
--- a/runtime/jdwp/jdwp_priv.h
+++ b/runtime/jdwp/jdwp_priv.h
@@ -29,15 +29,32 @@
/*
* JDWP constants.
*/
-#define kJDWPHeaderLen 11
-#define kJDWPFlagReply 0x80
-
-#define kMagicHandshake "JDWP-Handshake"
-#define kMagicHandshakeLen (sizeof(kMagicHandshake)-1)
+static constexpr size_t kJDWPHeaderSizeOffset = 0U;
+static constexpr size_t kJDWPHeaderIdOffset = 4U;
+static constexpr size_t kJDWPHeaderFlagsOffset = 8U;
+static constexpr size_t kJDWPHeaderErrorCodeOffset = 9U;
+static constexpr size_t kJDWPHeaderCmdSetOffset = 9U;
+static constexpr size_t kJDWPHeaderCmdOffset = 10U;
+static constexpr size_t kJDWPHeaderLen = 11U;
+static constexpr uint8_t kJDWPFlagReply = 0x80;
+
+static constexpr const char kMagicHandshake[] = "JDWP-Handshake";
+static constexpr size_t kMagicHandshakeLen = sizeof(kMagicHandshake) - 1;
+
+/* Invoke commands */
+static constexpr uint8_t kJDWPClassTypeCmdSet = 3U;
+static constexpr uint8_t kJDWPClassTypeInvokeMethodCmd = 3U;
+static constexpr uint8_t kJDWPClassTypeNewInstanceCmd = 4U;
+static constexpr uint8_t kJDWPObjectReferenceCmdSet = 9U;
+static constexpr uint8_t kJDWPObjectReferenceInvokeCmd = 6U;
+
+/* Event command */
+static constexpr uint8_t kJDWPEventCmdSet = 64U;
+static constexpr uint8_t kJDWPEventCompositeCmd = 100U;
/* DDM support */
-#define kJDWPDdmCmdSet 199 /* 0xc7, or 'G'+128 */
-#define kJDWPDdmCmd 1
+static constexpr uint8_t kJDWPDdmCmdSet = 199U; // 0xc7, or 'G'+128
+static constexpr uint8_t kJDWPDdmCmd = 1U;
namespace art {
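The invoke-related constants above are the ones IsInvokeCommand() in jdwp_handler.cc keys off when deciding to defer a reply to the event thread. A standalone restatement of that check, handy for cross-checking the numbers against the JDWP specification (ClassType is command set 3, ObjectReference is command set 9, Event is command set 64); the constant names are repeated locally so the sketch compiles on its own.

```cpp
#include <cassert>
#include <cstdint>

// Same values as the kJDWP*Cmd* constants above, repeated so this sketch is self-contained.
constexpr uint8_t kClassTypeCmdSet = 3;
constexpr uint8_t kClassTypeInvokeMethodCmd = 3;
constexpr uint8_t kClassTypeNewInstanceCmd = 4;
constexpr uint8_t kObjectReferenceCmdSet = 9;
constexpr uint8_t kObjectReferenceInvokeCmd = 6;

constexpr bool IsInvokeCommand(uint8_t command_set, uint8_t command) {
  return (command_set == kClassTypeCmdSet &&
          (command == kClassTypeInvokeMethodCmd || command == kClassTypeNewInstanceCmd)) ||
         (command_set == kObjectReferenceCmdSet && command == kObjectReferenceInvokeCmd);
}

int main() {
  assert(IsInvokeCommand(3, 3));      // ClassType.InvokeMethod
  assert(IsInvokeCommand(3, 4));      // ClassType.NewInstance
  assert(IsInvokeCommand(9, 6));      // ObjectReference.InvokeMethod
  assert(!IsInvokeCommand(64, 100));  // Event.Composite: event replies are never deferred
  return 0;
}
```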
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 835b94ade4..8c9222f6a4 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -666,7 +666,7 @@ template <bool kVisitClass, typename Visitor>
inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
// Right after a class is allocated, but not yet loaded
- // (kStatusNotReady, see ClassLinkder::LoadClass()), GC may find it
+ // (kStatusNotReady, see ClassLinker::LoadClass()), GC may find it
// and scan it. IsTemp() may call Class::GetAccessFlags() but may
// fail in the DCHECK in Class::GetAccessFlags() because the class
// status is kStatusNotReady. To avoid it, rely on IsResolved()
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 9f6cd11c3e..8d9c08d9d5 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -176,11 +176,13 @@ inline String* String::AllocFromByteArray(Thread* self, int32_t byte_length,
}
template <bool kIsInstrumented>
-inline String* String::AllocFromCharArray(Thread* self, int32_t array_length,
+inline String* String::AllocFromCharArray(Thread* self, int32_t count,
Handle<CharArray> array, int32_t offset,
gc::AllocatorType allocator_type) {
- SetStringCountAndValueVisitorFromCharArray visitor(array_length, array, offset);
- String* new_string = Alloc<kIsInstrumented>(self, array_length, allocator_type, visitor);
+  // It is a caller error to pass a count greater than the actual array's length.
+ DCHECK_GE(array->GetLength(), count);
+ SetStringCountAndValueVisitorFromCharArray visitor(count, array, offset);
+ String* new_string = Alloc<kIsInstrumented>(self, count, allocator_type, visitor);
return new_string;
}
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index a8f16d78ff..af06385401 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -95,7 +95,7 @@ class MANAGED String FINAL : public Object {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <bool kIsInstrumented>
- ALWAYS_INLINE static String* AllocFromCharArray(Thread* self, int32_t array_length,
+ ALWAYS_INLINE static String* AllocFromCharArray(Thread* self, int32_t count,
Handle<CharArray> array, int32_t offset,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 94024ef4b2..67dcc9c6af 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -380,8 +380,8 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaThis,
jboolean publicOnly) {
ScopedFastNativeObjectAccess soa(env);
- StackHandleScope<3> hs(soa.Self());
- auto* klass = DecodeClass(soa, javaThis);
+ StackHandleScope<2> hs(soa.Self());
+ auto klass = hs.NewHandle(DecodeClass(soa, javaThis));
size_t num_methods = 0;
for (auto& m : klass->GetVirtualMethods(sizeof(void*))) {
auto modifiers = m.GetAccessFlags();
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index d341ee1017..8d84c35bd9 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -31,7 +31,7 @@ namespace art {
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
- const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
+ constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
if (with_read_barrier && kUseBakerReadBarrier) {
// The higher bits of the rb ptr, rb_ptr_high_bits (must be zero)
// is used to create artificial data dependency from the is_gray
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index cb7f5f37ce..66ec7ccf7a 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -646,6 +646,10 @@ void Runtime::DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const ch
// Create the thread pools.
heap_->CreateThreadPool();
+ // Reset the gc performance data at zygote fork so that the GCs
+ // before fork aren't attributed to an app.
+ heap_->ResetGcPerformanceInfo();
+
if (jit_.get() == nullptr && jit_options_->UseJIT()) {
// Create the JIT if the flag is set and we haven't already create it (happens for run-tests).
CreateJit();
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 922334ee99..4a307d5ed6 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -30,6 +30,8 @@
// If a default value is omitted here, T{} is used as the default value, which is
// almost-always the value of the type as if it was memset to all 0.
//
+// Please keep the columns aligned if possible when adding new rows.
+//
// Parse-able keys from the command line.
RUNTIME_OPTIONS_KEY (Unit, Zygote)
@@ -64,9 +66,9 @@ RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier))
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
-RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
-RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
-RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheCapacity, jit::JitCodeCache::kDefaultCapacity)
+RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
+RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
+RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheCapacity, jit::JitCodeCache::kDefaultCapacity)
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
HSpaceCompactForOOMMinIntervalsMs,\
MsToNs(100 * 1000)) // 100s
@@ -105,9 +107,12 @@ RUNTIME_OPTIONS_KEY (std::vector<std::string>, \
ImageCompilerOptions) // -Ximage-compiler-option ...
RUNTIME_OPTIONS_KEY (bool, Verify, true)
RUNTIME_OPTIONS_KEY (std::string, NativeBridge)
+RUNTIME_OPTIONS_KEY (unsigned int, ZygoteMaxFailedBoots, 10)
+RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
// Not parse-able from command line, but can be provided explicitly.
+// (Do not add anything here that is defined in ParsedOptions::MakeParser)
RUNTIME_OPTIONS_KEY (const std::vector<const DexFile*>*, \
BootClassPathDexList) // TODO: make unique_ptr
RUNTIME_OPTIONS_KEY (InstructionSet, ImageInstructionSet, kRuntimeISA)
@@ -120,7 +125,5 @@ RUNTIME_OPTIONS_KEY (void (*)(int32_t status), \
// We don't call abort(3) by default; see
// Runtime::Abort.
RUNTIME_OPTIONS_KEY (void (*)(), HookAbort, nullptr)
-RUNTIME_OPTIONS_KEY (unsigned int, ZygoteMaxFailedBoots, 10)
-RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
#undef RUNTIME_OPTIONS_KEY
diff --git a/runtime/stack.h b/runtime/stack.h
index 79d2f40d73..d60714f7a3 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -95,6 +95,8 @@ class ShadowFrame {
}
~ShadowFrame() {}
+ // TODO(iam): Clean references array up since they're always there,
+ // we don't need to do conditionals.
bool HasReferenceArray() const {
return true;
}
@@ -149,6 +151,9 @@ class ShadowFrame {
return *reinterpret_cast<unaligned_double*>(vreg);
}
+ // Look up the reference given its virtual register number.
+  // Note: a non-null result does not necessarily mean the vreg currently holds a reference on
+  // non-moving collectors. If in doubt, check that the raw value from GetVReg is equal to this.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
mirror::Object* GetVRegReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
@@ -283,6 +288,8 @@ class ShadowFrame {
ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
uint32_t dex_pc, bool has_reference_array)
: number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) {
+    // TODO(iam): Remove this parameter, it's an artifact of portable removal
+ DCHECK(has_reference_array);
if (has_reference_array) {
memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
} else {
@@ -306,6 +313,15 @@ class ShadowFrame {
ShadowFrame* link_;
ArtMethod* method_;
uint32_t dex_pc_;
+
+ // This is a two-part array:
+ // - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
+ // bytes.
+ // - [number_of_vregs..number_of_vregs*2) holds only reference registers. Each element here is
+ // ptr-sized.
+ // In other words when a primitive is stored in vX, the second (reference) part of the array will
+ // be null. When a reference is stored in vX, the second (reference) part of the array will be a
+ // copy of vX.
uint32_t vregs_[0];
DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
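The layout comment above is the invariant that AssignRegister() in interpreter_common.cc relies on: a vreg "probably" holds a reference exactly when its raw slot and its reference slot carry the same value. A toy, self-contained illustration of that two-part array (simplified: both halves use 4-byte slots here, whereas the real reference half holds StackReference<mirror::Object> entries):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  constexpr size_t kNumVRegs = 4;
  uint32_t vregs[kNumVRegs * 2] = {};    // [0..4): raw vregs, [4..8): reference copies
  const uint32_t fake_ref = 0x7000beef;  // stands in for a heap address
  vregs[1] = fake_ref;                   // a SetVRegReference-style store writes both halves
  vregs[kNumVRegs + 1] = fake_ref;
  vregs[2] = 42;                         // a SetVReg-style primitive store writes only the raw half
  for (size_t i = 0; i < kNumVRegs; ++i) {
    // The AssignRegister() test: equal halves => probably a reference. Note that v0 and v3
    // (both still zero) also "match"; that false positive is what the word "probably" covers.
    const bool probably_ref = (vregs[i] == vregs[kNumVRegs + i]);
    std::printf("v%zu: raw=%#x probably_ref=%d\n", i, vregs[i], static_cast<int>(probably_ref));
  }
  return 0;
}
```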
diff --git a/runtime/thread.cc b/runtime/thread.cc
index fe98b0a98a..fe8b0d8c60 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2578,12 +2578,11 @@ void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
}
void Thread::ClearDebugInvokeReq() {
- CHECK(Dbg::IsDebuggerActive());
CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
- // We do not own the DebugInvokeReq* so we must not delete it, it is the responsibility of
- // the owner (the JDWP thread).
+ DebugInvokeReq* req = tlsPtr_.debug_invoke_req;
tlsPtr_.debug_invoke_req = nullptr;
+ delete req;
}
void Thread::PushVerifier(verifier::MethodVerifier* verifier) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 9311bef48a..0e71c08b07 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -781,15 +781,14 @@ class Thread {
void DeactivateSingleStepControl();
// Sets debug invoke request for debugging. When the thread is resumed,
- // it executes the method described by this request then suspends itself.
- // The thread does not take ownership of the given DebugInvokeReq*, it is
- // owned by the JDWP thread which is waiting for the execution of the
- // method.
+ // it executes the method described by this request then sends the reply
+ // before suspending itself. The thread takes the ownership of the given
+ // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq.
void SetDebugInvokeReq(DebugInvokeReq* req);
// Clears debug invoke request for debugging. When the thread completes
- // method invocation, it clears its debug invoke request, signals the
- // JDWP thread and suspends itself.
+ // method invocation, it deletes its debug invoke request and suspends
+ // itself.
void ClearDebugInvokeReq();
// Returns the fake exception used to activate deoptimization.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index af9ba6848b..b697b43a77 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -875,31 +875,36 @@ void ThreadList::SuspendSelfForDebugger() {
// The debugger thread must not suspend itself due to debugger activity!
Thread* debug_thread = Dbg::GetDebugThread();
- CHECK(debug_thread != nullptr);
CHECK(self != debug_thread);
CHECK_NE(self->GetState(), kRunnable);
Locks::mutator_lock_->AssertNotHeld(self);
- {
+ // The debugger may have detached while we were executing an invoke request. In that case, we
+ // must not suspend ourself.
+ DebugInvokeReq* pReq = self->GetInvokeReq();
+ const bool skip_thread_suspension = (pReq != nullptr && !Dbg::IsDebuggerActive());
+ if (!skip_thread_suspension) {
// Collisions with other suspends aren't really interesting. We want
// to ensure that we're the only one fiddling with the suspend count
// though.
MutexLock mu(self, *Locks::thread_suspend_count_lock_);
self->ModifySuspendCount(self, +1, true);
CHECK_GT(self->GetSuspendCount(), 0);
- }
- VLOG(threads) << *self << " self-suspending (debugger)";
+ VLOG(threads) << *self << " self-suspending (debugger)";
+ } else {
+ // We must no longer be subject to debugger suspension.
+ MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ CHECK_EQ(self->GetDebugSuspendCount(), 0) << "Debugger detached without resuming us";
- // Tell JDWP we've completed invocation and are ready to suspend.
- DebugInvokeReq* const pReq = self->GetInvokeReq();
+ VLOG(threads) << *self << " not self-suspending because debugger detached during invoke";
+ }
+
+ // If the debugger requested an invoke, we need to send the reply and clear the request.
if (pReq != nullptr) {
- // Clear debug invoke request before signaling.
+ Dbg::FinishInvokeMethod(pReq);
self->ClearDebugInvokeReq();
-
- VLOG(jdwp) << "invoke complete, signaling";
- MutexLock mu(self, pReq->lock);
- pReq->cond.Signal(self);
+ pReq = nullptr; // object has been deleted, clear it for safety.
}
// Tell JDWP that we've completed suspension. The JDWP thread can't
diff --git a/test/098-ddmc/src/Main.java b/test/098-ddmc/src/Main.java
index f41ff2a94a..4914ba2289 100644
--- a/test/098-ddmc/src/Main.java
+++ b/test/098-ddmc/src/Main.java
@@ -43,14 +43,24 @@ public class Main {
System.out.println("Confirm when we overflow, we don't roll over to zero. b/17392248");
final int overflowAllocations = 64 * 1024; // Won't fit in unsigned 16-bit value.
+ // TODO: Temporary fix. Keep the new objects live so they are not garbage collected.
+        // This will cause an OOM exception in GC stress tests. The root cause is the changed behaviour of
+ // getRecentAllocations(). Working on restoring its old behaviour. b/20037135
+ Object[] objects = new Object[overflowAllocations];
for (int i = 0; i < overflowAllocations; i++) {
- new Object();
+ objects[i] = new Object();
}
Allocations after = new Allocations(DdmVmInternal.getRecentAllocations());
System.out.println("before < overflowAllocations=" + (before.numberOfEntries < overflowAllocations));
System.out.println("after > before=" + (after.numberOfEntries > before.numberOfEntries));
System.out.println("after.numberOfEntries=" + after.numberOfEntries);
+ // TODO: Temporary fix as above. b/20037135
+ objects = null;
+ Runtime.getRuntime().gc();
+ final int fillerStrings = 16 * 1024;
+ String[] strings = new String[fillerStrings];
+
System.out.println("Disable and confirm back to empty");
DdmVmInternal.enableRecentAllocations(false);
System.out.println("status=" + DdmVmInternal.getRecentAllocationStatus());
@@ -66,8 +76,8 @@ public class Main {
System.out.println("Confirm we can reenable twice in a row without losing allocations");
DdmVmInternal.enableRecentAllocations(true);
System.out.println("status=" + DdmVmInternal.getRecentAllocationStatus());
- for (int i = 0; i < 16 * 1024; i++) {
- new String("fnord");
+ for (int i = 0; i < fillerStrings; i++) {
+ strings[i] = new String("fnord");
}
Allocations first = new Allocations(DdmVmInternal.getRecentAllocations());
DdmVmInternal.enableRecentAllocations(true);
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 714953cff8..82a62953d5 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -229,16 +229,10 @@ endif
TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
-# Tests that are broken with GC stress.
-TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
-
-ifneq (,$(filter gcstress,$(GC_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),gcstress,$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DBEUGGABLE_TYPES), $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
+# 098-ddmc is broken until we restore the old behavior of getRecentAllocations() of DDMS. b/20037135
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), 098-ddmc, $(ALL_ADDRESS_SIZES))
# 115-native-bridge setup is complicated. Need to implement it correctly for the target.
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 77e6b1ad14..62fd67bfd7 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -60,7 +60,7 @@ while true; do
done
if [[ $mode == "host" ]]; then
- make_command="make $j_arg build-art-host-tests $common_targets"
+ make_command="make $j_arg build-art-host-tests $common_targets out/host/linux-x86/lib/libjavacoretests.so out/host/linux-x86/lib64/libjavacoretests.so"
echo "Executing $make_command"
$make_command
elif [[ $mode == "target" ]]; then
@@ -70,7 +70,7 @@ elif [[ $mode == "target" ]]; then
# Use '-e' to force the override of TARGET_GLOBAL_LDFLAGS.
# Also, we build extra tools that will be used by tests, so that
# they are compiled with our own linker.
- make_command="make -e $j_arg build-art-target-tests $common_targets libjavacrypto linker toybox toolbox sh"
+ make_command="make -e $j_arg build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh"
echo "Executing env $env $make_command"
env $env $make_command
fi
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 344d2dedb3..4e76eb4354 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -33,7 +33,8 @@ if [ ! -f $test_jar ]; then
fi
# Packages that currently work correctly with the expectation files.
-working_packages=("libcore.icu"
+working_packages=("dalvik.system"
+ "libcore.icu"
"libcore.io"
"libcore.java.lang"
"libcore.java.math"