-rw-r--r--  compiler/image_writer.cc | 112
-rw-r--r--  compiler/image_writer.h  |  80
-rw-r--r--  patchoat/patchoat.cc     |   2
-rw-r--r--  runtime/gc/heap.cc       |   2
-rw-r--r--  runtime/intern_table.cc  | 190
-rw-r--r--  runtime/intern_table.h   |  44
-rw-r--r--  runtime/runtime.cc       |   3
7 files changed, 222 insertions(+), 211 deletions(-)
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 17d0f61a34..503b75b6cc 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -637,7 +637,7 @@ bool ImageWriter::AllocMemory() {
     ImageInfo& image_info = GetImageInfo(oat_filename);
     const size_t length = RoundUp(image_objects_offset_begin_ +
                                   GetBinSizeSum(image_info) +
-                                  intern_table_bytes_ +
+                                  image_info.intern_table_bytes_ +
                                   class_table_bytes_,
                                   kPageSize);
     std::string error_msg;
@@ -909,14 +909,17 @@ void ImageWriter::CalculateObjectBinSlots(Object* obj) {
   DCHECK(obj != nullptr);
   // if it is a string, we want to intern it if its not interned.
   if (obj->GetClass()->IsStringClass()) {
+    const char* oat_filename = GetOatFilename(obj);
+    ImageInfo& image_info = GetImageInfo(oat_filename);
+
     // we must be an interned string that was forward referenced and already assigned
     if (IsImageBinSlotAssigned(obj)) {
-      DCHECK_EQ(obj, obj->AsString()->Intern());
+      DCHECK_EQ(obj, image_info.intern_table_->InternStrongImageString(obj->AsString()));
       return;
     }
     // InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
     // we are guaranteed to not have GC during image writing.
-    mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrongImageString(
+    mirror::String* const interned = image_info.intern_table_->InternStrongImageString(
         obj->AsString());
     if (obj != interned) {
       if (!IsImageBinSlotAssigned(interned)) {
@@ -1249,6 +1252,15 @@ void ImageWriter::CalculateNewObjectOffsets() {
   // Calculate size of the dex cache arrays slot and prepare offsets.
   PrepareDexCacheArraySlots();
 
+  // Calculate the sizes of the intern tables.
+  for (const char* oat_filename : oat_filenames_) {
+    ImageInfo& image_info = GetImageInfo(oat_filename);
+    // Calculate how big the intern table will be after being serialized.
+    InternTable* const intern_table = image_info.intern_table_.get();
+    CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
+    image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+  }
+
   // Calculate bin slot offsets.
   for (const char* oat_filename : oat_filenames_) {
     ImageInfo& image_info = GetImageInfo(oat_filename);
@@ -1279,7 +1291,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
         image_info.bin_slot_sizes_[kBinArtMethodDirty] +
         image_info.bin_slot_sizes_[kBinArtMethodClean] +
         image_info.bin_slot_sizes_[kBinDexCacheArray] +
-        intern_table_bytes_ +
+        image_info.intern_table_bytes_ +
         class_table_bytes_;
     size_t image_objects = RoundUp(image_info.image_end_, kPageSize);
     size_t bitmap_size =
@@ -1310,12 +1322,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
     relocation.offset += image_info.bin_slot_offsets_[bin_type];
   }
 
-  /* TODO: Reenable the intern table and class table. b/26317072
-  // Calculate how big the intern table will be after being serialized.
-  InternTable* const intern_table = runtime->GetInternTable();
-  CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
-  intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
-
+  /* TODO: Reenable the class table. b/26317072
   // Write out the class table.
   ClassLinker* class_linker = runtime->GetClassLinker();
   if (boot_image_space_ == nullptr) {
@@ -1378,7 +1385,7 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
   cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
   // Calculate the size of the interned strings.
   auto* interned_strings_section = &sections[ImageHeader::kSectionInternedStrings];
-  *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
+  *interned_strings_section = ImageSection(cur_pos, image_info.intern_table_bytes_);
   cur_pos = interned_strings_section->End();
   // Round up to the alignment the class table expects. See HashSet::WriteToMemory.
   cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
@@ -1444,7 +1451,7 @@ class FixupRootVisitor : public RootVisitor {
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
       OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
-      *roots[i] = ImageAddress(*roots[i]);
+      *roots[i] = image_writer_->GetImageAddress(*roots[i]);
     }
   }
 
@@ -1452,19 +1459,12 @@ class FixupRootVisitor : public RootVisitor {
                   const RootInfo& info ATTRIBUTE_UNUSED)
       OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
-      roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr()));
+      roots[i]->Assign(image_writer_->GetImageAddress(roots[i]->AsMirrorPtr()));
     }
   }
 
  private:
   ImageWriter* const image_writer_;
-
-  mirror::Object* ImageAddress(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
-    const size_t offset = image_writer_->GetImageOffset(obj);
-    auto* const dest = reinterpret_cast<Object*>(image_writer_->global_image_begin_ + offset);
-    VLOG(compiler) << "Update root from " << obj << " to " << dest;
-    return dest;
-  }
 };
 
 void ImageWriter::CopyAndFixupNativeData() {
@@ -1536,26 +1536,26 @@ void ImageWriter::CopyAndFixupNativeData() {
   }
   FixupRootVisitor root_visitor(this);
 
-  /* TODO: Reenable the intern table and class table
   // Write the intern table into the image.
-  const ImageSection& intern_table_section = image_header->GetImageSection(
-      ImageHeader::kSectionInternedStrings);
-  Runtime* const runtime = Runtime::Current();
-  InternTable* const intern_table = runtime->GetInternTable();
-  uint8_t* const intern_table_memory_ptr =
-      image_info.image_->Begin() + intern_table_section.Offset();
-  const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
-  CHECK_EQ(intern_table_bytes, intern_table_bytes_);
-  // Fixup the pointers in the newly written intern table to contain image addresses.
+  if (image_info.intern_table_bytes_ > 0) {
+    const ImageSection& intern_table_section = image_header->GetImageSection(
+        ImageHeader::kSectionInternedStrings);
+    InternTable* const intern_table = image_info.intern_table_.get();
+    uint8_t* const intern_table_memory_ptr =
+        image_info.image_->Begin() + intern_table_section.Offset();
+    const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
+    CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_);
+    // Fixup the pointers in the newly written intern table to contain image addresses.
+    InternTable temp_intern_table;
+    // Note that we require that ReadFromMemory does not make an internal copy of the elements so that
+    // the VisitRoots() will update the memory directly rather than the copies.
+    // This also relies on visit roots not doing any verification which could fail after we update
+    // the roots to be the image addresses.
+    temp_intern_table.AddTableFromMemory(intern_table_memory_ptr);
+    CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
+    temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
+  }
+  /* TODO: Reenable the class table writing.
   // Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
   // class loaders. Writing multiple class tables into the image is currently unsupported.
   if (class_table_bytes_ > 0u) {
@@ -2110,7 +2110,6 @@ uint32_t ImageWriter::BinSlot::GetIndex() const {
 }
 
 uint8_t* ImageWriter::GetOatFileBegin(const char* oat_filename) const {
-  // DCHECK_GT(intern_table_bytes_, 0u);  TODO: Reenable intern table and class table.
   uintptr_t last_image_end = 0;
   for (const char* oat_fn : oat_filenames_) {
     const ImageInfo& image_info = GetConstImageInfo(oat_fn);
@@ -2197,4 +2196,37 @@ void ImageWriter::UpdateOatFile(const char* oat_filename) {
   }
 }
 
+ImageWriter::ImageWriter(
+    const CompilerDriver& compiler_driver,
+    uintptr_t image_begin,
+    bool compile_pic,
+    bool compile_app_image,
+    ImageHeader::StorageMode image_storage_mode,
+    const std::vector<const char*> oat_filenames,
+    const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map)
+    : compiler_driver_(compiler_driver),
+      global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
+      image_objects_offset_begin_(0),
+      oat_file_(nullptr),
+      compile_pic_(compile_pic),
+      compile_app_image_(compile_app_image),
+      boot_image_space_(nullptr),
+      target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
+      image_method_array_(ImageHeader::kImageMethodsCount),
+      dirty_methods_(0u),
+      clean_methods_(0u),
+      class_table_bytes_(0u),
+      image_storage_mode_(image_storage_mode),
+      dex_file_oat_filename_map_(dex_file_oat_filename_map),
+      oat_filenames_(oat_filenames),
+      default_oat_filename_(oat_filenames[0]) {
+  CHECK_NE(image_begin, 0U);
+  for (const char* oat_filename : oat_filenames) {
+    image_info_map_.emplace(oat_filename, ImageInfo());
+  }
+  std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
+}
+
+ImageWriter::ImageInfo::ImageInfo() : intern_table_(new InternTable) {}
+
 }  // namespace art
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 78297ae645..e5f7dc77df 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -58,33 +58,7 @@ class ImageWriter FINAL {
               bool compile_app_image,
               ImageHeader::StorageMode image_storage_mode,
               const std::vector<const char*> oat_filenames,
-              const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map)
-      : compiler_driver_(compiler_driver),
-        global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
-        image_objects_offset_begin_(0),
-        oat_file_(nullptr),
-        compile_pic_(compile_pic),
-        compile_app_image_(compile_app_image),
-        boot_image_space_(nullptr),
-        target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
-        intern_table_bytes_(0u),
-        image_method_array_(ImageHeader::kImageMethodsCount),
-        dirty_methods_(0u),
-        clean_methods_(0u),
-        class_table_bytes_(0u),
-        image_storage_mode_(image_storage_mode),
-        dex_file_oat_filename_map_(dex_file_oat_filename_map),
-        oat_filenames_(oat_filenames),
-        default_oat_filename_(oat_filenames[0]) {
-    CHECK_NE(image_begin, 0U);
-    for (const char* oat_filename : oat_filenames) {
-      image_info_map_.emplace(oat_filename, ImageInfo());
-    }
-    std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
-  }
-
-  ~ImageWriter() {
-  }
+              const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map);
 
   bool PrepareImageAddressSpace();
 
@@ -237,41 +211,36 @@ class ImageWriter FINAL {
   };
 
   struct ImageInfo {
-    explicit ImageInfo()
-        : image_begin_(nullptr),
-          image_end_(RoundUp(sizeof(ImageHeader), kObjectAlignment)),
-          image_roots_address_(0),
-          image_offset_(0),
-          image_size_(0),
-          oat_offset_(0),
-          bin_slot_sizes_(),
-          bin_slot_offsets_(),
-          bin_slot_count_() {}
+    ImageInfo();
+    ImageInfo(ImageInfo&&) = default;
 
     std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.
 
     // Target begin of this image. Notes: It is not valid to write here, this is the address
     // of the target image, not necessarily where image_ is mapped. The address is only valid
     // after layouting (otherwise null).
-    uint8_t* image_begin_;
+    uint8_t* image_begin_ = nullptr;
 
-    size_t image_end_;  // Offset to the free space in image_, initially size of image header.
-    uint32_t image_roots_address_;  // The image roots address in the image.
-    size_t image_offset_;  // Offset of this image from the start of the first image.
+    // Offset to the free space in image_, initially size of image header.
+    size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
+    uint32_t image_roots_address_ = 0;  // The image roots address in the image.
+    size_t image_offset_ = 0;  // Offset of this image from the start of the first image.
 
     // Image size is the *address space* covered by this image. As the live bitmap is aligned
     // to the page size, the live bitmap will cover more address space than necessary. But live
     // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
     // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
     // page-aligned).
-    size_t image_size_;
+    size_t image_size_ = 0;
 
     // Oat data.
-    size_t oat_offset_;  // Offset of the oat file for this image from start of oat files. This is
-                         // valid when the previous oat file has been written.
-    uint8_t* oat_data_begin_;  // Start of oatdata in the corresponding oat file. This is
-                               // valid when the images have been layed out.
-    size_t oat_size_;  // Size of the corresponding oat data.
+    // Offset of the oat file for this image from start of oat files. This is
+    // valid when the previous oat file has been written.
+    size_t oat_offset_ = 0;
+    // Start of oatdata in the corresponding oat file. This is
+    // valid when the images have been layed out.
+    uint8_t* oat_data_begin_ = nullptr;
+    size_t oat_size_ = 0;  // Size of the corresponding oat data.
 
     // Image bitmap which lets us know where the objects inside of the image reside.
     std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
 
@@ -280,12 +249,18 @@ class ImageWriter FINAL {
     SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
 
     // Offset from oat_data_begin_ to the stubs.
-    uint32_t oat_address_offsets_[kOatAddressCount];
+    uint32_t oat_address_offsets_[kOatAddressCount] = {};
 
     // Bin slot tracking for dirty object packing.
-    size_t bin_slot_sizes_[kBinSize];  // Number of bytes in a bin.
-    size_t bin_slot_offsets_[kBinSize];  // Number of bytes in previous bins.
-    size_t bin_slot_count_[kBinSize];  // Number of objects in a bin.
+    size_t bin_slot_sizes_[kBinSize] = {};  // Number of bytes in a bin.
+    size_t bin_slot_offsets_[kBinSize] = {};  // Number of bytes in previous bins.
+    size_t bin_slot_count_[kBinSize] = {};  // Number of objects in a bin.
+
+    // Cached size of the intern table for when we allocate memory.
+    size_t intern_table_bytes_ = 0;
+
+    // Intern table associated with this image for serialization.
+    std::unique_ptr<InternTable> intern_table_;
   };
 
   // We use the lock word to store the offset of the object in the image.
@@ -492,9 +467,6 @@ class ImageWriter FINAL {
   // Mapping of oat filename to image data.
   std::unordered_map<std::string, ImageInfo> image_info_map_;
 
-  // Cached size of the intern table for when we allocate memory.
-  size_t intern_table_bytes_;
-
   // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
   // have one entry per art field for convenience. ArtFields are placed right after the end of the
   // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 3037652c4c..d836532ed2 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -540,7 +540,7 @@ void PatchOat::PatchInternedStrings(const ImageHeader* image_header) {
   // Note that we require that ReadFromMemory does not make an internal copy of the elements.
   // This also relies on visit roots not doing any verification which could fail after we update
   // the roots to be the image addresses.
-  temp_table.ReadFromMemory(image_->Begin() + section.Offset());
+  temp_table.AddTableFromMemory(image_->Begin() + section.Offset());
   FixupRootVisitor visitor(this);
   temp_table.VisitRoots(&visitor, kVisitRootFlagAllRoots);
 }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 7f67ae4f08..7b32c5b67f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2333,7 +2333,7 @@ void Heap::PreZygoteFork() {
   if (HasZygoteSpace()) {
     return;
   }
-  Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote();
+  Runtime::Current()->GetInternTable()->AddNewTable();
   Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
   VLOG(heap) << "Starting PreZygoteFork";
   // Trim the pages at the end of the non moving space.
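The `intern_table_bytes_` bookkeeping above relies on a two-pass serialization idiom: `WriteToMemory(nullptr)` performs a dry run that only measures the serialized size, and the later call with a real pointer (in `CopyAndFixupNativeData`) must produce exactly that many bytes, which the `CHECK_EQ` enforces. A minimal sketch of the pattern, with a hypothetical `ByteTable` standing in for `InternTable` (not the real class):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

// Hypothetical stand-in for InternTable: WriteToMemory(nullptr) only
// measures; WriteToMemory(ptr) actually serializes.
class ByteTable {
 public:
  explicit ByteTable(std::vector<uint32_t> values) : values_(std::move(values)) {}

  // Returns the serialized size. When ptr is null nothing is written,
  // mirroring how ImageWriter computes intern_table_bytes_ before allocating.
  size_t WriteToMemory(uint8_t* ptr) const {
    const size_t bytes = values_.size() * sizeof(uint32_t);
    if (ptr != nullptr) {
      std::memcpy(ptr, values_.data(), bytes);
    }
    return bytes;
  }

 private:
  std::vector<uint32_t> values_;
};

int main() {
  ByteTable table({1, 2, 3});
  // Pass 1: dry run to learn the section size (cf. CalculateNewObjectOffsets).
  const size_t expected = table.WriteToMemory(nullptr);
  std::vector<uint8_t> image(expected);
  // Pass 2: real write; the two sizes must agree (cf. the CHECK_EQ in
  // CopyAndFixupNativeData).
  const size_t written = table.WriteToMemory(image.data());
  assert(written == expected);
  return 0;
}
```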
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index d035f5d960..68d7ebd0d5 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -32,7 +32,8 @@ namespace art {
 
 InternTable::InternTable()
-    : image_added_to_intern_table_(false), log_new_roots_(false),
+    : images_added_to_intern_table_(false),
+      log_new_roots_(false),
       weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
       weak_root_state_(gc::kWeakRootStateNormal) {
 }
@@ -93,10 +94,10 @@ mirror::String* InternTable::LookupWeak(mirror::String* s) {
   return weak_interns_.Find(s);
 }
 
-void InternTable::SwapPostZygoteWithPreZygote() {
+void InternTable::AddNewTable() {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  weak_interns_.SwapPostZygoteWithPreZygote();
-  strong_interns_.SwapPostZygoteWithPreZygote();
+  weak_interns_.AddNewTable();
+  strong_interns_.AddNewTable();
 }
 
 mirror::String* InternTable::InsertStrong(mirror::String* s) {
@@ -153,41 +154,36 @@ void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
 void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
   CHECK(image_space != nullptr);
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  if (!image_added_to_intern_table_) {
-    const ImageHeader* const header = &image_space->GetImageHeader();
-    // Check if we have the interned strings section.
-    const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
-    if (section.Size() > 0) {
-      ReadFromMemoryLocked(image_space->Begin() + section.Offset());
-    } else {
-      // TODO: Delete this logic?
-      mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
-      mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
-      for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
-        mirror::DexCache* dex_cache = dex_caches->Get(i);
-        const size_t num_strings = dex_cache->NumStrings();
-        for (size_t j = 0; j < num_strings; ++j) {
-          mirror::String* image_string = dex_cache->GetResolvedString(j);
-          if (image_string != nullptr) {
-            mirror::String* found = LookupStrong(image_string);
-            if (found == nullptr) {
-              InsertStrong(image_string);
-            } else {
-              DCHECK_EQ(found, image_string);
-            }
+  const ImageHeader* const header = &image_space->GetImageHeader();
+  // Check if we have the interned strings section.
+  const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
+  if (section.Size() > 0) {
+    AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
+  } else {
+    // TODO: Delete this logic?
+    mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
+    mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
+    for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
+      mirror::DexCache* dex_cache = dex_caches->Get(i);
+      const size_t num_strings = dex_cache->NumStrings();
+      for (size_t j = 0; j < num_strings; ++j) {
+        mirror::String* image_string = dex_cache->GetResolvedString(j);
+        if (image_string != nullptr) {
+          mirror::String* found = LookupStrong(image_string);
+          if (found == nullptr) {
+            InsertStrong(image_string);
+          } else {
+            DCHECK_EQ(found, image_string);
+          }
         }
       }
     }
-    image_added_to_intern_table_ = true;
   }
+  images_added_to_intern_table_ = true;
 }
 
 mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
-  if (image_added_to_intern_table_) {
-    return nullptr;
-  }
-  std::vector<gc::space::ImageSpace*> image_spaces =
+  const std::vector<gc::space::ImageSpace*>& image_spaces =
       Runtime::Current()->GetHeap()->GetBootImageSpaces();
   if (image_spaces.empty()) {
     return nullptr;  // No image present.
@@ -284,9 +280,11 @@ mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool hold
     return weak;
   }
   // Check the image for a match.
-  mirror::String* image = LookupStringFromImage(s);
-  if (image != nullptr) {
-    return is_strong ? InsertStrong(image) : InsertWeak(image);
+  if (!images_added_to_intern_table_) {
+    mirror::String* const image_string = LookupStringFromImage(s);
+    if (image_string != nullptr) {
+      return is_strong ? InsertStrong(image_string) : InsertWeak(image_string);
+    }
   }
   // No match in the strong table or the weak table. Insert into the strong / weak table.
   return is_strong ? InsertStrong(s) : InsertWeak(s);
@@ -326,27 +324,18 @@ void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
   weak_interns_.SweepWeaks(visitor);
 }
 
-void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
-  const ImageSection& intern_section = image_space->GetImageHeader().GetImageSection(
-      ImageHeader::kSectionInternedStrings);
-  // Read the string tables from the image.
-  const uint8_t* ptr = image_space->Begin() + intern_section.Offset();
-  const size_t offset = ReadFromMemory(ptr);
-  CHECK_LE(offset, intern_section.Size());
-}
-
-size_t InternTable::ReadFromMemory(const uint8_t* ptr) {
+size_t InternTable::AddTableFromMemory(const uint8_t* ptr) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  return ReadFromMemoryLocked(ptr);
+  return AddTableFromMemoryLocked(ptr);
 }
 
-size_t InternTable::ReadFromMemoryLocked(const uint8_t* ptr) {
-  return strong_interns_.ReadIntoPreZygoteTable(ptr);
+size_t InternTable::AddTableFromMemoryLocked(const uint8_t* ptr) {
+  return strong_interns_.AddTableFromMemory(ptr);
 }
 
 size_t InternTable::WriteToMemory(uint8_t* ptr) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  return strong_interns_.WriteFromPostZygoteTable(ptr);
+  return strong_interns_.WriteToMemory(ptr);
 }
 
 std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
@@ -364,71 +353,87 @@ bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
   return a.Read()->Equals(b.Read());
 }
 
-size_t InternTable::Table::ReadIntoPreZygoteTable(const uint8_t* ptr) {
-  CHECK_EQ(pre_zygote_table_.Size(), 0u);
+size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
   size_t read_count = 0;
-  pre_zygote_table_ = UnorderedSet(ptr, false /* make copy */, &read_count);
+  UnorderedSet set(ptr, /*make copy*/false, &read_count);
+  // TODO: Disable this for app images if app images have intern tables.
+  static constexpr bool kCheckDuplicates = true;
+  if (kCheckDuplicates) {
+    for (GcRoot<mirror::String>& string : set) {
+      CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
+    }
+  }
+  // Insert at the front since we insert into the back.
+  tables_.insert(tables_.begin(), std::move(set));
   return read_count;
 }
 
-size_t InternTable::Table::WriteFromPostZygoteTable(uint8_t* ptr) {
-  return post_zygote_table_.WriteToMemory(ptr);
+size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
+  if (tables_.empty()) {
+    return 0;
+  }
+  UnorderedSet* table_to_write;
+  UnorderedSet combined;
+  if (tables_.size() > 1) {
+    table_to_write = &combined;
+    for (UnorderedSet& table : tables_) {
+      for (GcRoot<mirror::String>& string : table) {
+        combined.Insert(string);
+      }
+    }
+  } else {
+    table_to_write = &tables_.back();
+  }
+  return table_to_write->WriteToMemory(ptr);
 }
 
 void InternTable::Table::Remove(mirror::String* s) {
-  auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
-  if (it != post_zygote_table_.end()) {
-    post_zygote_table_.Erase(it);
-  } else {
-    it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
-    DCHECK(it != pre_zygote_table_.end());
-    pre_zygote_table_.Erase(it);
+  for (UnorderedSet& table : tables_) {
+    auto it = table.Find(GcRoot<mirror::String>(s));
+    if (it != table.end()) {
+      table.Erase(it);
+      return;
+    }
   }
+  LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
 }
 
 mirror::String* InternTable::Table::Find(mirror::String* s) {
   Locks::intern_table_lock_->AssertHeld(Thread::Current());
-  auto it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
-  if (it != pre_zygote_table_.end()) {
-    return it->Read();
-  }
-  it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
-  if (it != post_zygote_table_.end()) {
-    return it->Read();
+  for (UnorderedSet& table : tables_) {
+    auto it = table.Find(GcRoot<mirror::String>(s));
+    if (it != table.end()) {
+      return it->Read();
+    }
   }
   return nullptr;
 }
 
-void InternTable::Table::SwapPostZygoteWithPreZygote() {
-  if (pre_zygote_table_.Empty()) {
-    std::swap(pre_zygote_table_, post_zygote_table_);
-    VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
-  } else {
-    // This case happens if we read the intern table from the image.
-    VLOG(heap) << "Not swapping due to non-empty pre_zygote_table_";
-  }
+void InternTable::Table::AddNewTable() {
+  tables_.push_back(UnorderedSet());
 }
 
 void InternTable::Table::Insert(mirror::String* s) {
-  // Always insert into the post zygote table; this gets swapped when we create the zygote to be
-  // the pre zygote table.
-  post_zygote_table_.Insert(GcRoot<mirror::String>(s));
+  // Always insert into the last table; the image tables are before it and we avoid inserting
+  // into those to prevent dirty pages.
+  DCHECK(!tables_.empty());
+  tables_.back().Insert(GcRoot<mirror::String>(s));
 }
 
 void InternTable::Table::VisitRoots(RootVisitor* visitor) {
   BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
       visitor, RootInfo(kRootInternedString));
-  for (auto& intern : pre_zygote_table_) {
-    buffered_visitor.VisitRoot(intern);
-  }
-  for (auto& intern : post_zygote_table_) {
-    buffered_visitor.VisitRoot(intern);
+  for (UnorderedSet& table : tables_) {
+    for (auto& intern : table) {
+      buffered_visitor.VisitRoot(intern);
+    }
   }
 }
 
 void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
-  SweepWeaks(&pre_zygote_table_, visitor);
-  SweepWeaks(&post_zygote_table_, visitor);
+  for (UnorderedSet& table : tables_) {
+    SweepWeaks(&table, visitor);
+  }
 }
 
 void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
@@ -446,7 +451,12 @@ void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
 }
 
 size_t InternTable::Table::Size() const {
-  return pre_zygote_table_.Size() + post_zygote_table_.Size();
+  return std::accumulate(tables_.begin(),
+                         tables_.end(),
+                         size_t(0),
+                         [](size_t sum, const UnorderedSet& set) {
+                           return sum + set.Size();
+                         });
 }
 
 void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
@@ -464,10 +474,10 @@ void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
 
 InternTable::Table::Table() {
   Runtime* const runtime = Runtime::Current();
-  pre_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
-                                  runtime->GetHashTableMaxLoadFactor());
-  post_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
-                                   runtime->GetHashTableMaxLoadFactor());
+  // Initial table.
+  tables_.push_back(UnorderedSet());
+  tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+                               runtime->GetHashTableMaxLoadFactor());
 }
 
 }  // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 3a4e8d8f11..ec0f1e2329 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -99,21 +99,20 @@ class InternTable {
   void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Adds all of the resolved image strings from the image space into the intern table. The
-  // advantage of doing this is preventing expensive DexFile::FindStringId calls.
+  // advantage of doing this is preventing expensive DexFile::FindStringId calls. Sets
+  // images_added_to_intern_table_ to true; AddImageStringsToTable must be called on either all
+  // images or none.
   void AddImageStringsToTable(gc::space::ImageSpace* image_space)
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
 
-  // Copy the post zygote tables to pre zygote to save memory by preventing dirty pages.
-  void SwapPostZygoteWithPreZygote()
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
-
-  // Add an intern table which was serialized to the image.
-  void AddImageInternTable(gc::space::ImageSpace* image_space)
+  // Add a new intern table for inserting into; previous intern tables are still there but no
+  // longer inserted into and ideally unmodified. This is done to prevent dirty pages.
+  void AddNewTable()
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
 
   // Read the intern table from memory. The elements aren't copied, the intern hash set data will
   // point to somewhere within ptr. Only reads the strong interns.
-  size_t ReadFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
+  size_t AddTableFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Write the post zygote intern table to a pointer. Only writes the strong interns since it is
@@ -157,15 +156,17 @@ class InternTable {
         SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
     void SweepWeaks(IsMarkedVisitor* visitor)
         SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
-    void SwapPostZygoteWithPreZygote() REQUIRES(Locks::intern_table_lock_);
+    // Add a new intern table that will only be inserted into from now on.
+    void AddNewTable() REQUIRES(Locks::intern_table_lock_);
     size_t Size() const REQUIRES(Locks::intern_table_lock_);
-    // Read pre zygote table is called from ReadFromMemory which happens during runtime creation
-    // when we load the image intern table. Returns how many bytes were read.
-    size_t ReadIntoPreZygoteTable(const uint8_t* ptr)
+    // Read and add an intern table from ptr.
+    // Tables read are inserted at the front of the table array. Only checks for conflicts in
+    // debug builds. Returns how many bytes were read.
+    size_t AddTableFromMemory(const uint8_t* ptr)
         REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
-    // The image writer calls WritePostZygoteTable through WriteToMemory, it writes the interns in
-    // the post zygote table. Returns how many bytes were written.
-    size_t WriteFromPostZygoteTable(uint8_t* ptr)
+    // Write the intern tables to ptr; if there are multiple tables, they are combined into a
+    // single one. Returns how many bytes were written.
+    size_t WriteToMemory(uint8_t* ptr)
         REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
    private:
@@ -175,12 +176,9 @@ class InternTable {
     void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
         SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
 
-    // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages
-    // caused by modifying the zygote intern table hash table. The pre zygote table are the
-    // interned strings which were interned before we created the zygote space. Post zygote is self
-    // explanatory.
-    UnorderedSet pre_zygote_table_;
-    UnorderedSet post_zygote_table_;
+    // We call AddNewTable when we create the zygote to reduce private dirty pages caused by
+    // modifying the zygote intern table. The back table is modified when strings are interned.
+    std::vector<UnorderedSet> tables_;
   };
 
   // Insert if non null, otherwise return null. Must be called holding the mutator lock.
@@ -214,7 +212,7 @@ class InternTable {
   void RemoveWeakFromTransaction(mirror::String* s)
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
 
-  size_t ReadFromMemoryLocked(const uint8_t* ptr)
+  size_t AddTableFromMemoryLocked(const uint8_t* ptr)
       REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Change the weak root state. May broadcast to waiters.
@@ -225,7 +223,7 @@ class InternTable {
   void WaitUntilAccessible(Thread* self)
       REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
+  bool images_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
   bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
   ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
   // Since this contains (strong) roots, they need a read barrier to
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index dfb28776f5..01f3bbd170 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -543,8 +543,7 @@ bool Runtime::Start() {
   // Use !IsAotCompiler so that we get test coverage, tests are never the zygote.
   if (!IsAotCompiler()) {
     ScopedObjectAccess soa(self);
-    std::vector<gc::space::ImageSpace*> image_spaces = heap_->GetBootImageSpaces();
-    for (gc::space::ImageSpace* image_space : image_spaces) {
+    for (gc::space::ImageSpace* image_space : heap_->GetBootImageSpaces()) {
       ATRACE_BEGIN("AddImageStringsToTable");
       GetInternTable()->AddImageStringsToTable(image_space);
       ATRACE_END();
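The `Table` rework above replaces the fixed pre/post-zygote pair with a vector of sets: lookups scan every table, inserts only touch the last one, deserialized image tables are spliced in at the front, and `AddNewTable()` freezes the current back table so later interning cannot dirty its pages. A simplified sketch of that discipline, using `std::unordered_set<std::string>` in place of the `GcRoot`-based `UnorderedSet` (class and method names here are illustrative, not ART's):

```cpp
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

// Simplified model of InternTable::Table: a stack of sets where only the
// back is mutable and earlier (image or pre-fork) tables stay read-only.
class MultiTable {
 public:
  MultiTable() { tables_.emplace_back(); }  // Initial mutable table.

  // Lookup scans all tables, front to back (cf. Table::Find).
  const std::string* Find(const std::string& s) const {
    for (const auto& table : tables_) {
      auto it = table.find(s);
      if (it != table.end()) {
        return &*it;
      }
    }
    return nullptr;
  }

  // New interns only ever go into the back table (cf. Table::Insert).
  void Insert(std::string s) { tables_.back().insert(std::move(s)); }

  // A deserialized image table goes to the front, keeping the back writable
  // (cf. Table::AddTableFromMemory).
  void AddTableAtFront(std::unordered_set<std::string> set) {
    tables_.insert(tables_.begin(), std::move(set));
  }

  // Freeze the current back table, e.g. at zygote fork (cf. Table::AddNewTable).
  void AddNewTable() { tables_.emplace_back(); }

 private:
  std::vector<std::unordered_set<std::string>> tables_;
};

int main() {
  MultiTable interns;
  interns.Insert("boot");
  interns.AddNewTable();  // PreZygoteFork: "boot" is now in a frozen table.
  interns.Insert("app");  // Lands in the new back table.
  return (interns.Find("boot") != nullptr && interns.Find("app") != nullptr) ? 0 : 1;
}
```

This mirrors why the commit can drop the old swap logic: freezing is just pushing a fresh table, and any number of image tables compose without special cases.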