Do not repeatedly search frozen strong intern tables.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 181943478
Bug: 223545499
Change-Id: Idf18993a5c51e88fcc7b62a11e46332630886b44
diff --git a/runtime/intern_table-inl.h b/runtime/intern_table-inl.h
index a332eae..daab3fa 100644
--- a/runtime/intern_table-inl.h
+++ b/runtime/intern_table-inl.h
@@ -130,11 +130,11 @@
}
}
- // Insert at the front since we add new interns into the back.
- // TODO: Instead, insert before the last unfrozen table since we add new interns into the back.
- // We want to keep the order of previous frozen tables unchanged, so that we can
- // can remember the number of searched frozen tables and not search them again.
- tables_.insert(tables_.begin(), InternalTable(std::move(intern_strings), is_boot_image));
+ // Insert before the last (unfrozen) table since we add new interns into the back.
+ // Keep the order of previous frozen tables unchanged, so that we can remember
+ // the number of searched frozen tables and not search them again.
+ DCHECK(!tables_.empty());
+ tables_.insert(tables_.end() - 1, InternalTable(std::move(intern_strings), is_boot_image));
}
template <typename Visitor>
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 3c11282..7e950cf 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -198,7 +198,10 @@
Locks::intern_table_lock_->ExclusiveLock(self);
}
-ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s, uint32_t hash, bool is_strong) {
+ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s,
+ uint32_t hash,
+ bool is_strong,
+ size_t num_searched_strong_frozen_tables) {
DCHECK(s != nullptr);
DCHECK_EQ(hash, static_cast<uint32_t>(s->GetStoredHashCode()));
DCHECK_IMPLIES(hash == 0u, s->ComputeHashCode() == 0);
@@ -210,14 +213,16 @@
}
while (true) {
// Check the strong table for a match.
- ObjPtr<mirror::String> strong = strong_interns_.Find(s, hash);
+ ObjPtr<mirror::String> strong =
+ strong_interns_.Find(s, hash, num_searched_strong_frozen_tables);
if (strong != nullptr) {
return strong;
}
- if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
- (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
+ if (kUseReadBarrier ? self->GetWeakRefAccessEnabled()
+ : weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
break;
}
+ num_searched_strong_frozen_tables = strong_interns_.tables_.size() - 1u;
// weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
// cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
// cleared.
@@ -249,9 +254,12 @@
uint32_t hash = Utf8String::Hash(utf16_length, utf8_data);
Thread* self = Thread::Current();
ObjPtr<mirror::String> s;
+ size_t num_searched_strong_frozen_tables;
{
// Try to avoid allocation. If we need to allocate, release the mutex before the allocation.
MutexLock mu(self, *Locks::intern_table_lock_);
+ DCHECK(!strong_interns_.tables_.empty());
+ num_searched_strong_frozen_tables = strong_interns_.tables_.size() - 1u;
s = strong_interns_.Find(Utf8String(utf16_length, utf8_data), hash);
}
if (s != nullptr) {
@@ -266,7 +274,7 @@
return nullptr;
}
s->SetHashCode(static_cast<int32_t>(hash));
- return Insert(s, hash, /*is_strong=*/ true);
+ return Insert(s, hash, /*is_strong=*/ true, num_searched_strong_frozen_tables);
}
ObjPtr<mirror::String> InternTable::InternStrong(const char* utf8_data) {
@@ -323,12 +331,18 @@
LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
}
-ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s, uint32_t hash) {
+ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s,
+ uint32_t hash,
+ size_t num_searched_frozen_tables) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
- for (InternalTable& table : tables_) {
- auto it = table.set_.FindWithHash(GcRoot<mirror::String>(s), hash);
- if (it != table.set_.end()) {
- return it->Read();
+ auto mid = tables_.begin() + num_searched_frozen_tables;
+ for (auto it = tables_.begin(); it != mid; ++it) {
+ DCHECK(it->set_.FindWithHash(GcRoot<mirror::String>(s), hash) == it->set_.end());
+ }
+ for (auto it = mid, end = tables_.end(); it != end; ++it) {
+ auto set_it = it->set_.FindWithHash(GcRoot<mirror::String>(s), hash);
+ if (set_it != it->set_.end()) {
+ return set_it->Read();
}
}
return nullptr;
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index c0c761a..d115f51 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -236,7 +236,9 @@
};
Table();
- ObjPtr<mirror::String> Find(ObjPtr<mirror::String> s, uint32_t hash)
+ ObjPtr<mirror::String> Find(ObjPtr<mirror::String> s,
+ uint32_t hash,
+ size_t num_searched_frozen_tables = 0u)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
ObjPtr<mirror::String> Find(const Utf8String& string, uint32_t hash)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
@@ -277,7 +279,10 @@
};
// Insert if non null, otherwise return null. Must be called holding the mutator lock.
- ObjPtr<mirror::String> Insert(ObjPtr<mirror::String> s, uint32_t hash, bool is_strong)
+ ObjPtr<mirror::String> Insert(ObjPtr<mirror::String> s,
+ uint32_t hash,
+ bool is_strong,
+ size_t num_searched_strong_frozen_tables = 0u)
REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Add a table from memory to the strong interns.