Change hash set to use noexcept
Without noexcept on the move constructor, containers such as std::vector
fall back to the copy constructor instead of the move constructor when
reallocating (std::move_if_noexcept behavior).
Reduces shared dirty native memory by around 350k.
Bug: 27860101
(cherry picked from commit 5ef868c8332db11bb90284887a7f676f5dbef373)
Change-Id: I0311fa530fc2436630abebfdac2cad375eb4d691
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index fc1a52f..12d3be7 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -140,7 +140,7 @@
HashSet() : HashSet(kDefaultMinLoadFactor, kDefaultMaxLoadFactor) {}
- HashSet(double min_load_factor, double max_load_factor)
+ HashSet(double min_load_factor, double max_load_factor) noexcept
: num_elements_(0u),
num_buckets_(0u),
elements_until_expand_(0u),
@@ -152,7 +152,7 @@
DCHECK_LT(max_load_factor, 1.0);
}
- explicit HashSet(const allocator_type& alloc)
+ explicit HashSet(const allocator_type& alloc) noexcept
: allocfn_(alloc),
hashfn_(),
emptyfn_(),
@@ -166,7 +166,7 @@
max_load_factor_(kDefaultMaxLoadFactor) {
}
- HashSet(const HashSet& other)
+ HashSet(const HashSet& other) noexcept
: allocfn_(other.allocfn_),
hashfn_(other.hashfn_),
emptyfn_(other.emptyfn_),
@@ -184,7 +184,9 @@
}
}
- HashSet(HashSet&& other)
+ // noexcept required so that the move constructor is used instead of copy constructor.
+ // b/27860101
+ HashSet(HashSet&& other) noexcept
: allocfn_(std::move(other.allocfn_)),
hashfn_(std::move(other.hashfn_)),
emptyfn_(std::move(other.emptyfn_)),
@@ -206,7 +208,7 @@
// Construct from existing data.
// Read from a block of memory, if make_copy_of_data is false, then data_ points to within the
// passed in ptr_.
- HashSet(const uint8_t* ptr, bool make_copy_of_data, size_t* read_count) {
+ HashSet(const uint8_t* ptr, bool make_copy_of_data, size_t* read_count) noexcept {
uint64_t temp;
size_t offset = 0;
offset = ReadFromBytes(ptr, offset, &temp);
@@ -256,12 +258,12 @@
DeallocateStorage();
}
- HashSet& operator=(HashSet&& other) {
+ HashSet& operator=(HashSet&& other) noexcept {
HashSet(std::move(other)).swap(*this);
return *this;
}
- HashSet& operator=(const HashSet& other) {
+ HashSet& operator=(const HashSet& other) noexcept {
HashSet(other).swap(*this); // NOLINT(runtime/explicit) - a case of lint gone mad.
return *this;
}
@@ -298,6 +300,11 @@
return Size() == 0;
}
+ // Return true if the hash set has ownership of the underlying data.
+ bool OwnsData() const {
+ return owns_data_;
+ }
+
// Erase algorithm:
// Make an empty slot where the iterator is pointing.
// Scan forwards until we hit another empty slot.
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 79f24a8..eceb593 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -393,6 +393,10 @@
size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
size_t read_count = 0;
UnorderedSet set(ptr, /*make copy*/false, &read_count);
+ if (set.Empty()) {
+ // Avoid inserting empty sets.
+ return read_count;
+ }
// TODO: Disable this for app images if app images have intern tables.
static constexpr bool kCheckDuplicates = true;
if (kCheckDuplicates) {
@@ -400,7 +404,7 @@
CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
}
}
- // Insert at the front since we insert into the back.
+ // Insert at the front since we add new interns into the back.
tables_.insert(tables_.begin(), std::move(set));
return read_count;
}