Diffstat (limited to 'libartbase/base')
-rw-r--r--  libartbase/base/arena_allocator.h          29
-rw-r--r--  libartbase/base/arena_allocator_test.cc     8
-rw-r--r--  libartbase/base/arena_containers.h           8
-rw-r--r--  libartbase/base/atomic.h                    14
-rw-r--r--  libartbase/base/bit_memory_region.h         92
-rw-r--r--  libartbase/base/bit_table.h                348
-rw-r--r--  libartbase/base/bit_table_test.cc          131
-rw-r--r--  libartbase/base/common_art_test.h           20
-rw-r--r--  libartbase/base/data_hash.h                107
-rw-r--r--  libartbase/base/file_utils_test.cc           5
-rw-r--r--  libartbase/base/fuchsia_compat.h            36
-rw-r--r--  libartbase/base/globals.h                   15
-rw-r--r--  libartbase/base/hash_map.h                   9
-rw-r--r--  libartbase/base/hash_set.h                 233
-rw-r--r--  libartbase/base/hash_set_test.cc           119
-rw-r--r--  libartbase/base/indenter.h                   4
-rw-r--r--  libartbase/base/iteration_range.h            6
-rw-r--r--  libartbase/base/malloc_arena_pool.cc         6
-rw-r--r--  libartbase/base/mem_map.cc                  75
-rw-r--r--  libartbase/base/mem_map.h                   22
-rw-r--r--  libartbase/base/mem_map_fuchsia.cc         144
-rw-r--r--  libartbase/base/mem_map_test.cc             45
-rw-r--r--  libartbase/base/mem_map_unix.cc             35
-rw-r--r--  libartbase/base/memory_tool.h               56
-rw-r--r--  libartbase/base/scoped_arena_containers.h   10
-rw-r--r--  libartbase/base/stats.h                     60
-rw-r--r--  libartbase/base/utils.h                     14
-rw-r--r--  libartbase/base/variant_map_test.cc          2
28 files changed, 1146 insertions, 507 deletions
diff --git a/libartbase/base/arena_allocator.h b/libartbase/base/arena_allocator.h
index 4dccd033d6..a9ccae1b07 100644
--- a/libartbase/base/arena_allocator.h
+++ b/libartbase/base/arena_allocator.h
@@ -148,34 +148,9 @@ class ArenaAllocatorStatsImpl {
typedef ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations> ArenaAllocatorStats;
-template <bool kAvailable, bool kValgrind>
-class ArenaAllocatorMemoryToolCheckImpl {
- // This is the generic template but since there is a partial specialization
- // for kValgrind == false, this can be instantiated only for kValgrind == true.
- static_assert(kValgrind, "This template can be instantiated only for Valgrind.");
- static_assert(kAvailable, "Valgrind implies memory tool availability.");
-
- public:
- ArenaAllocatorMemoryToolCheckImpl() : is_running_on_valgrind_(RUNNING_ON_MEMORY_TOOL) { }
- bool IsRunningOnMemoryTool() { return is_running_on_valgrind_; }
-
- private:
- const bool is_running_on_valgrind_;
-};
-
-template <bool kAvailable>
-class ArenaAllocatorMemoryToolCheckImpl<kAvailable, false> {
- public:
- ArenaAllocatorMemoryToolCheckImpl() { }
- bool IsRunningOnMemoryTool() { return kAvailable; }
-};
-
-typedef ArenaAllocatorMemoryToolCheckImpl<kMemoryToolIsAvailable, kMemoryToolIsValgrind>
- ArenaAllocatorMemoryToolCheck;
-
-class ArenaAllocatorMemoryTool : private ArenaAllocatorMemoryToolCheck {
+class ArenaAllocatorMemoryTool {
public:
- using ArenaAllocatorMemoryToolCheck::IsRunningOnMemoryTool;
+ bool IsRunningOnMemoryTool() { return kMemoryToolIsAvailable; }
void MakeDefined(void* ptr, size_t size) {
if (UNLIKELY(IsRunningOnMemoryTool())) {
diff --git a/libartbase/base/arena_allocator_test.cc b/libartbase/base/arena_allocator_test.cc
index e358710ca6..6323a2b97c 100644
--- a/libartbase/base/arena_allocator_test.cc
+++ b/libartbase/base/arena_allocator_test.cc
@@ -16,6 +16,7 @@
#include "arena_allocator-inl.h"
#include "arena_bit_vector.h"
+#include "base/common_art_test.h"
#include "gtest/gtest.h"
#include "malloc_arena_pool.h"
#include "memory_tool.h"
@@ -146,11 +147,8 @@ TEST_F(ArenaAllocatorTest, AllocAlignment) {
}
TEST_F(ArenaAllocatorTest, ReallocReuse) {
- // Realloc does not reuse arenas when running under sanitization. So we cannot do those
- if (RUNNING_ON_MEMORY_TOOL != 0) {
- printf("WARNING: TEST DISABLED FOR MEMORY_TOOL\n");
- return;
- }
+ // Realloc does not reuse arenas when running under sanitization.
+ TEST_DISABLED_FOR_MEMORY_TOOL();
{
// Case 1: small aligned allocation, aligned extend inside arena.
diff --git a/libartbase/base/arena_containers.h b/libartbase/base/arena_containers.h
index bd57fb1cfc..41b3bb9f5d 100644
--- a/libartbase/base/arena_containers.h
+++ b/libartbase/base/arena_containers.h
@@ -70,15 +70,15 @@ using ArenaSafeMap =
template <typename T,
typename EmptyFn = DefaultEmptyFn<T>,
- typename HashFn = std::hash<T>,
- typename Pred = std::equal_to<T>>
+ typename HashFn = DefaultHashFn<T>,
+ typename Pred = DefaultPred<T>>
using ArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ArenaAllocatorAdapter<T>>;
template <typename Key,
typename Value,
typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>,
- typename HashFn = std::hash<Key>,
- typename Pred = std::equal_to<Key>>
+ typename HashFn = DefaultHashFn<Key>,
+ typename Pred = DefaultPred<Key>>
using ArenaHashMap = HashMap<Key,
Value,
EmptyFn,
diff --git a/libartbase/base/atomic.h b/libartbase/base/atomic.h
index b68f867bfa..9de84cdd20 100644
--- a/libartbase/base/atomic.h
+++ b/libartbase/base/atomic.h
@@ -28,6 +28,11 @@
namespace art {
+enum class CASMode {
+ kStrong,
+ kWeak,
+};
+
template<typename T>
class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
public:
@@ -100,6 +105,15 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
}
+ bool CompareAndSet(T expected_value,
+ T desired_value,
+ CASMode mode,
+ std::memory_order memory_order) {
+ return mode == CASMode::kStrong
+ ? this->compare_exchange_strong(expected_value, desired_value, memory_order)
+ : this->compare_exchange_weak(expected_value, desired_value, memory_order);
+ }
+
// Returns the address of the current atomic variable. This is only used by futex() which is
// declared to take a volatile address (see base/mutex-inl.h).
volatile T* Address() {
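
A minimal usage sketch of the new CompareAndSet helper (the counter variable and values are illustrative; behavior otherwise follows the inherited std::atomic semantics):

  Atomic<int32_t> counter(0);
  int32_t expected = 0;
  // CASMode::kStrong maps to compare_exchange_strong; CASMode::kWeak maps to
  // compare_exchange_weak and may fail spuriously, so it belongs in retry loops.
  bool swapped = counter.CompareAndSet(expected,
                                       /* desired_value */ 1,
                                       CASMode::kStrong,
                                       std::memory_order_seq_cst);
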
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index a3d3ee41d6..07c1611c60 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -57,6 +57,15 @@ class BitMemoryRegion FINAL : public ValueObject {
return result;
}
+ // Increase the size of the region and return the newly added range (starting at the old end).
+ ALWAYS_INLINE BitMemoryRegion Extend(size_t bit_length) {
+ BitMemoryRegion result = *this;
+ result.bit_start_ += result.bit_size_;
+ result.bit_size_ = bit_length;
+ bit_size_ += bit_length;
+ return result;
+ }
+
// Load a single bit in the region. The bit at offset 0 is the least
// significant bit in the first byte.
ATTRIBUTE_NO_SANITIZE_ADDRESS // We might touch extra bytes due to the alignment.
@@ -99,13 +108,6 @@ class BitMemoryRegion FINAL : public ValueObject {
return value & mask;
}
- // Load bits starting at given `bit_offset`, and advance the `bit_offset`.
- ALWAYS_INLINE uint32_t LoadBitsAndAdvance(size_t* bit_offset, size_t bit_length) const {
- uint32_t result = LoadBits(*bit_offset, bit_length);
- *bit_offset += bit_length;
- return result;
- }
-
// Store `bit_length` bits in `data` starting at given `bit_offset`.
// The least significant bit is stored in the smallest memory offset.
ALWAYS_INLINE void StoreBits(size_t bit_offset, uint32_t value, size_t bit_length) {
@@ -132,12 +134,6 @@ class BitMemoryRegion FINAL : public ValueObject {
DCHECK_EQ(value, LoadBits(bit_offset, bit_length));
}
- // Store bits starting at given `bit_offset`, and advance the `bit_offset`.
- ALWAYS_INLINE void StoreBitsAndAdvance(size_t* bit_offset, uint32_t value, size_t bit_length) {
- StoreBits(*bit_offset, value, bit_length);
- *bit_offset += bit_length;
- }
-
// Store bits from other bit region.
ALWAYS_INLINE void StoreBits(size_t bit_offset, const BitMemoryRegion& src, size_t bit_length) {
DCHECK_LE(bit_offset, bit_size_);
@@ -151,6 +147,20 @@ class BitMemoryRegion FINAL : public ValueObject {
StoreBits(bit_offset + bit, src.LoadBits(bit, num_bits), num_bits);
}
+ // Count the number of set bits within the given bit range.
+ ALWAYS_INLINE size_t PopCount(size_t bit_offset, size_t bit_length) const {
+ DCHECK_LE(bit_offset, bit_size_);
+ DCHECK_LE(bit_length, bit_size_ - bit_offset);
+ size_t count = 0;
+ size_t bit = 0;
+ constexpr size_t kNumBits = BitSizeOf<uint32_t>();
+ for (; bit + kNumBits <= bit_length; bit += kNumBits) {
+ count += POPCOUNT(LoadBits(bit_offset + bit, kNumBits));
+ }
+ count += POPCOUNT(LoadBits(bit_offset + bit, bit_length - bit));
+ return count;
+ }
+
ALWAYS_INLINE bool Equals(const BitMemoryRegion& other) const {
return data_ == other.data_ &&
bit_start_ == other.bit_start_ &&
@@ -164,6 +174,62 @@ class BitMemoryRegion FINAL : public ValueObject {
size_t bit_size_ = 0;
};
+class BitMemoryReader {
+ public:
+ explicit BitMemoryReader(const uint8_t* data, size_t bit_offset = 0) {
+ MemoryRegion region(const_cast<uint8_t*>(data), BitsToBytesRoundUp(bit_offset));
+ finished_region_ = BitMemoryRegion(region, 0, bit_offset);
+ DCHECK_EQ(GetBitOffset(), bit_offset);
+ }
+
+ size_t GetBitOffset() const { return finished_region_.size_in_bits(); }
+
+ ALWAYS_INLINE BitMemoryRegion Skip(size_t bit_length) {
+ return finished_region_.Extend(bit_length);
+ }
+
+ ALWAYS_INLINE uint32_t ReadBits(size_t bit_length) {
+ return finished_region_.Extend(bit_length).LoadBits(0, bit_length);
+ }
+
+ private:
+ // Represents all of the bits which were read so far. There is no upper bound.
+ // Therefore, by definition, the "cursor" is always at the end of the region.
+ BitMemoryRegion finished_region_;
+
+ DISALLOW_COPY_AND_ASSIGN(BitMemoryReader);
+};
+
+template<typename Vector>
+class BitMemoryWriter {
+ public:
+ explicit BitMemoryWriter(Vector* out, size_t bit_offset = 0)
+ : out_(out), bit_offset_(bit_offset) {
+ DCHECK_EQ(GetBitOffset(), bit_offset);
+ }
+
+ const uint8_t* data() const { return out_->data(); }
+
+ size_t GetBitOffset() const { return bit_offset_; }
+
+ ALWAYS_INLINE BitMemoryRegion Allocate(size_t bit_length) {
+ out_->resize(BitsToBytesRoundUp(bit_offset_ + bit_length));
+ BitMemoryRegion region(MemoryRegion(out_->data(), out_->size()), bit_offset_, bit_length);
+ bit_offset_ += bit_length;
+ return region;
+ }
+
+ ALWAYS_INLINE void WriteBits(uint32_t value, size_t bit_length) {
+ Allocate(bit_length).StoreBits(0, value, bit_length);
+ }
+
+ private:
+ Vector* out_;
+ size_t bit_offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(BitMemoryWriter);
+};
+
} // namespace art
#endif // ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_
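
A round-trip sketch of the new reader/writer pair (the values and bit widths are illustrative; assumes the relevant headers are included):

  std::vector<uint8_t> buffer;
  BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
  writer.WriteBits(/* value */ 5, /* bit_length */ 3);     // Occupies bits 0..2.
  writer.WriteBits(/* value */ 0x2A, /* bit_length */ 7);  // Occupies bits 3..9.

  BitMemoryReader reader(buffer.data());
  uint32_t a = reader.ReadBits(3);  // Reads back 5.
  uint32_t b = reader.ReadBits(7);  // Reads back 0x2A.
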
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
index 0ae60b9070..ee477215e7 100644
--- a/libartbase/base/bit_table.h
+++ b/libartbase/base/bit_table.h
@@ -18,6 +18,7 @@
#define ART_LIBARTBASE_BASE_BIT_TABLE_H_
#include <array>
+#include <initializer_list>
#include <numeric>
#include <string.h>
#include <type_traits>
@@ -25,6 +26,7 @@
#include "base/bit_memory_region.h"
#include "base/casts.h"
+#include "base/iteration_range.h"
#include "base/memory_region.h"
#include "base/scoped_arena_containers.h"
#include "base/stl_util.h"
@@ -38,100 +40,56 @@ constexpr uint32_t kVarintSmallValue = 11; // Maximum value which is stored as-
// The first four bits determine the variable length of the encoded integer:
// Values 0..11 represent the result as-is, with no further following bits.
// Values 12..15 mean the result is in the next 8/16/24/32-bits respectively.
-ALWAYS_INLINE static inline uint32_t DecodeVarintBits(BitMemoryRegion region, size_t* bit_offset) {
- uint32_t x = region.LoadBitsAndAdvance(bit_offset, kVarintHeaderBits);
+ALWAYS_INLINE static inline uint32_t DecodeVarintBits(BitMemoryReader& reader) {
+ uint32_t x = reader.ReadBits(kVarintHeaderBits);
if (x > kVarintSmallValue) {
- x = region.LoadBitsAndAdvance(bit_offset, (x - kVarintSmallValue) * kBitsPerByte);
+ x = reader.ReadBits((x - kVarintSmallValue) * kBitsPerByte);
}
return x;
}
// Store variable-length bit-packed integer from `data` starting at `bit_offset`.
template<typename Vector>
-ALWAYS_INLINE static inline void EncodeVarintBits(Vector* out, size_t* bit_offset, uint32_t value) {
+ALWAYS_INLINE static inline void EncodeVarintBits(BitMemoryWriter<Vector>& out, uint32_t value) {
if (value <= kVarintSmallValue) {
- out->resize(BitsToBytesRoundUp(*bit_offset + kVarintHeaderBits));
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
- region.StoreBitsAndAdvance(bit_offset, value, kVarintHeaderBits);
+ out.WriteBits(value, kVarintHeaderBits);
} else {
uint32_t num_bits = RoundUp(MinimumBitsToStore(value), kBitsPerByte);
- out->resize(BitsToBytesRoundUp(*bit_offset + kVarintHeaderBits + num_bits));
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
uint32_t header = kVarintSmallValue + num_bits / kBitsPerByte;
- region.StoreBitsAndAdvance(bit_offset, header, kVarintHeaderBits);
- region.StoreBitsAndAdvance(bit_offset, value, num_bits);
+ out.WriteBits(header, kVarintHeaderBits);
+ out.WriteBits(value, num_bits);
}
}
+// Generic purpose table of uint32_t values, which are tightly packed at bit level.
+// It has its own header with the number of rows and the bit-widths of all columns.
+// The values are accessible by (row, column). The value -1 is stored efficiently.
template<uint32_t kNumColumns>
-class BitTable {
+class BitTableBase {
public:
- class Accessor {
- public:
- static constexpr uint32_t kCount = kNumColumns;
- static constexpr uint32_t kNoValue = std::numeric_limits<uint32_t>::max();
-
- Accessor() {}
- Accessor(const BitTable* table, uint32_t row) : table_(table), row_(row) {}
-
- ALWAYS_INLINE uint32_t Row() const { return row_; }
-
- ALWAYS_INLINE bool IsValid() const { return table_ != nullptr && row_ < table_->NumRows(); }
-
- template<uint32_t Column>
- ALWAYS_INLINE uint32_t Get() const {
- static_assert(Column < kNumColumns, "Column out of bounds");
- return table_->Get(row_, Column);
- }
-
- ALWAYS_INLINE bool Equals(const Accessor& other) {
- return this->table_ == other.table_ && this->row_ == other.row_;
- }
-
-// Helper macro to create constructors and per-table utilities in derived class.
-#define BIT_TABLE_HEADER() \
- using BitTable<kCount>::Accessor::Accessor; /* inherit the constructors */ \
- template<int COLUMN, int UNUSED /*needed to compile*/> struct ColumnName; \
+ static constexpr uint32_t kNoValue = std::numeric_limits<uint32_t>::max(); // == -1.
+ static constexpr uint32_t kValueBias = kNoValue; // Bias so that -1 is encoded as 0.
-// Helper macro to create named column accessors in derived class.
-#define BIT_TABLE_COLUMN(COLUMN, NAME) \
- static constexpr uint32_t k##NAME = COLUMN; \
- ALWAYS_INLINE uint32_t Get##NAME() const { \
- return table_->Get(row_, COLUMN); \
- } \
- ALWAYS_INLINE bool Has##NAME() const { \
- return table_->Get(row_, COLUMN) != kNoValue; \
- } \
- template<int UNUSED> struct ColumnName<COLUMN, UNUSED> { \
- static constexpr const char* Value = #NAME; \
- }; \
-
- protected:
- const BitTable* table_ = nullptr;
- uint32_t row_ = -1;
- };
-
- static constexpr uint32_t kValueBias = -1;
-
- BitTable() {}
- BitTable(void* data, size_t size, size_t* bit_offset = 0) {
- Decode(BitMemoryRegion(MemoryRegion(data, size)), bit_offset);
+ BitTableBase() {}
+ explicit BitTableBase(BitMemoryReader& reader) {
+ Decode(reader);
}
- ALWAYS_INLINE void Decode(BitMemoryRegion region, size_t* bit_offset) {
+ ALWAYS_INLINE void Decode(BitMemoryReader& reader) {
// Decode row count and column sizes from the table header.
- num_rows_ = DecodeVarintBits(region, bit_offset);
+ size_t initial_bit_offset = reader.GetBitOffset();
+ num_rows_ = DecodeVarintBits(reader);
if (num_rows_ != 0) {
column_offset_[0] = 0;
for (uint32_t i = 0; i < kNumColumns; i++) {
- size_t column_end = column_offset_[i] + DecodeVarintBits(region, bit_offset);
+ size_t column_end = column_offset_[i] + DecodeVarintBits(reader);
column_offset_[i + 1] = dchecked_integral_cast<uint16_t>(column_end);
}
}
+ header_bit_size_ = reader.GetBitOffset() - initial_bit_offset;
// Record the region which contains the table data and skip past it.
- table_data_ = region.Subregion(*bit_offset, num_rows_ * NumRowBits());
- *bit_offset += table_data_.size_in_bits();
+ table_data_ = reader.Skip(num_rows_ * NumRowBits());
}
ALWAYS_INLINE uint32_t Get(uint32_t row, uint32_t column = 0) const {
@@ -158,55 +116,220 @@ class BitTable {
return column_offset_[column + 1] - column_offset_[column];
}
- size_t DataBitSize() const { return num_rows_ * column_offset_[kNumColumns]; }
+ size_t HeaderBitSize() const { return header_bit_size_; }
+
+ size_t BitSize() const { return header_bit_size_ + table_data_.size_in_bits(); }
protected:
BitMemoryRegion table_data_;
size_t num_rows_ = 0;
uint16_t column_offset_[kNumColumns + 1] = {};
+ uint16_t header_bit_size_ = 0;
+};
+
+// Helper class which can be used to create BitTable accessors with named getters.
+template<uint32_t NumColumns>
+class BitTableAccessor {
+ public:
+ static constexpr uint32_t kNumColumns = NumColumns;
+ static constexpr uint32_t kNoValue = BitTableBase<kNumColumns>::kNoValue;
+
+ BitTableAccessor(const BitTableBase<kNumColumns>* table, uint32_t row)
+ : table_(table), row_(row) {
+ DCHECK(table_ != nullptr);
+ }
+
+ ALWAYS_INLINE uint32_t Row() const { return row_; }
+
+ ALWAYS_INLINE bool IsValid() const { return row_ < table_->NumRows(); }
+
+ ALWAYS_INLINE bool Equals(const BitTableAccessor& other) {
+ return this->table_ == other.table_ && this->row_ == other.row_;
+ }
+
+// Helper macro to create constructors and per-table utilities in derived class.
+#define BIT_TABLE_HEADER() \
+ using BitTableAccessor<kNumColumns>::BitTableAccessor; /* inherit constructors */ \
+ template<int COLUMN, int UNUSED /*needed to compile*/> struct ColumnName; \
+
+// Helper macro to create named column accessors in derived class.
+#define BIT_TABLE_COLUMN(COLUMN, NAME) \
+ static constexpr uint32_t k##NAME = COLUMN; \
+ ALWAYS_INLINE uint32_t Get##NAME() const { return table_->Get(row_, COLUMN); } \
+ ALWAYS_INLINE bool Has##NAME() const { return Get##NAME() != kNoValue; } \
+ template<int UNUSED> struct ColumnName<COLUMN, UNUSED> { \
+ static constexpr const char* Value = #NAME; \
+ }; \
+
+ protected:
+ const BitTableBase<kNumColumns>* table_ = nullptr;
+ uint32_t row_ = -1;
};
// Template meta-programming helper.
template<typename Accessor, size_t... Columns>
-static const char** GetBitTableColumnNamesImpl(std::index_sequence<Columns...>) {
+static const char* const* GetBitTableColumnNamesImpl(std::index_sequence<Columns...>) {
static const char* names[] = { Accessor::template ColumnName<Columns, 0>::Value... };
return names;
}
+// Returns the names of all columns in the given accessor.
template<typename Accessor>
-static const char** GetBitTableColumnNames() {
- return GetBitTableColumnNamesImpl<Accessor>(std::make_index_sequence<Accessor::kCount>());
+static const char* const* GetBitTableColumnNames() {
+ return GetBitTableColumnNamesImpl<Accessor>(std::make_index_sequence<Accessor::kNumColumns>());
}
+// Wrapper which makes it easier to use named accessors for the individual rows.
+template<typename Accessor>
+class BitTable : public BitTableBase<Accessor::kNumColumns> {
+ public:
+ class const_iterator : public std::iterator<std::random_access_iterator_tag,
+ /* value_type */ Accessor,
+ /* difference_type */ int32_t,
+ /* pointer */ void,
+ /* reference */ void> {
+ public:
+ using difference_type = int32_t;
+ const_iterator() {}
+ const_iterator(const BitTable* table, uint32_t row) : table_(table), row_(row) {}
+ const_iterator operator+(difference_type n) { return const_iterator(table_, row_ + n); }
+ const_iterator operator-(difference_type n) { return const_iterator(table_, row_ - n); }
+ difference_type operator-(const const_iterator& other) { return row_ - other.row_; }
+ void operator+=(difference_type rows) { row_ += rows; }
+ void operator-=(difference_type rows) { row_ -= rows; }
+ const_iterator operator++() { return const_iterator(table_, ++row_); }
+ const_iterator operator--() { return const_iterator(table_, --row_); }
+ const_iterator operator++(int) { return const_iterator(table_, row_++); }
+ const_iterator operator--(int) { return const_iterator(table_, row_--); }
+ bool operator==(const_iterator i) const { DCHECK(table_ == i.table_); return row_ == i.row_; }
+ bool operator!=(const_iterator i) const { DCHECK(table_ == i.table_); return row_ != i.row_; }
+ bool operator<=(const_iterator i) const { DCHECK(table_ == i.table_); return row_ <= i.row_; }
+ bool operator>=(const_iterator i) const { DCHECK(table_ == i.table_); return row_ >= i.row_; }
+ bool operator<(const_iterator i) const { DCHECK(table_ == i.table_); return row_ < i.row_; }
+ bool operator>(const_iterator i) const { DCHECK(table_ == i.table_); return row_ > i.row_; }
+ Accessor operator*() {
+ DCHECK_LT(row_, table_->NumRows());
+ return Accessor(table_, row_);
+ }
+ Accessor operator->() {
+ DCHECK_LT(row_, table_->NumRows());
+ return Accessor(table_, row_);
+ }
+ Accessor operator[](size_t index) {
+ DCHECK_LT(row_ + index, table_->NumRows());
+ return Accessor(table_, row_ + index);
+ }
+ private:
+ const BitTable* table_ = nullptr;
+ uint32_t row_ = 0;
+ };
+
+ using BitTableBase<Accessor::kNumColumns>::BitTableBase; // Constructors.
+
+ ALWAYS_INLINE const_iterator begin() const { return const_iterator(this, 0); }
+ ALWAYS_INLINE const_iterator end() const { return const_iterator(this, this->NumRows()); }
+
+ ALWAYS_INLINE Accessor GetRow(uint32_t row) const {
+ return Accessor(this, row);
+ }
+
+ ALWAYS_INLINE Accessor GetInvalidRow() const {
+ return Accessor(this, static_cast<uint32_t>(-1));
+ }
+};
+
+template<typename Accessor>
+typename BitTable<Accessor>::const_iterator operator+(
+ typename BitTable<Accessor>::const_iterator::difference_type n,
+ typename BitTable<Accessor>::const_iterator a) {
+ return a + n;
+}
+
+template<typename Accessor>
+class BitTableRange : public IterationRange<typename BitTable<Accessor>::const_iterator> {
+ public:
+ typedef typename BitTable<Accessor>::const_iterator const_iterator;
+
+ using IterationRange<const_iterator>::IterationRange;
+ BitTableRange() : IterationRange<const_iterator>(const_iterator(), const_iterator()) { }
+
+ bool empty() const { return this->begin() == this->end(); }
+ size_t size() const { return this->end() - this->begin(); }
+
+ Accessor operator[](size_t index) const {
+ const_iterator it = this->begin() + index;
+ DCHECK(it < this->end());
+ return *it;
+ }
+
+ Accessor back() const {
+ DCHECK(!empty());
+ return *(this->end() - 1);
+ }
+
+ void pop_back() {
+ DCHECK(!empty());
+ --this->last_;
+ }
+};
+
// Helper class for encoding BitTable. It can optionally de-duplicate the inputs.
-// Type 'T' must be POD type consisting of uint32_t fields (one for each column).
-template<typename T>
-class BitTableBuilder {
+template<uint32_t kNumColumns>
+class BitTableBuilderBase {
public:
- static_assert(std::is_pod<T>::value, "Type 'T' must be POD");
- static constexpr size_t kNumColumns = sizeof(T) / sizeof(uint32_t);
+ static constexpr uint32_t kNoValue = BitTableBase<kNumColumns>::kNoValue;
+ static constexpr uint32_t kValueBias = BitTableBase<kNumColumns>::kValueBias;
+
+ class Entry {
+ public:
+ Entry() {
+ // The definition of kNoValue here is for host and target debug builds which complain about
+      // missing a symbol definition for BitTableBase<N>::kNoValue when optimization is off.
+ static constexpr uint32_t kNoValue = BitTableBase<kNumColumns>::kNoValue;
+ std::fill_n(data_, kNumColumns, kNoValue);
+ }
+
+ Entry(std::initializer_list<uint32_t> values) {
+ DCHECK_EQ(values.size(), kNumColumns);
+ std::copy(values.begin(), values.end(), data_);
+ }
+
+ uint32_t& operator[](size_t column) {
+ DCHECK_LT(column, kNumColumns);
+ return data_[column];
+ }
- explicit BitTableBuilder(ScopedArenaAllocator* allocator)
+ uint32_t operator[](size_t column) const {
+ DCHECK_LT(column, kNumColumns);
+ return data_[column];
+ }
+
+ private:
+ uint32_t data_[kNumColumns];
+ };
+
+ explicit BitTableBuilderBase(ScopedArenaAllocator* allocator)
: rows_(allocator->Adapter(kArenaAllocBitTableBuilder)),
dedup_(8, allocator->Adapter(kArenaAllocBitTableBuilder)) {
}
- T& operator[](size_t row) { return rows_[row]; }
- const T& operator[](size_t row) const { return rows_[row]; }
+ Entry& operator[](size_t row) { return rows_[row]; }
+ const Entry& operator[](size_t row) const { return rows_[row]; }
+ const Entry& back() const { return rows_.back(); }
size_t size() const { return rows_.size(); }
// Append given value to the vector without de-duplication.
// This will not add the element to the dedup map to avoid its associated costs.
- void Add(T value) {
+ void Add(Entry value) {
rows_.push_back(value);
}
// Append given list of values and return the index of the first value.
// If the exact same set of values was already added, return the old index.
- uint32_t Dedup(T* values, size_t count = 1) {
+ uint32_t Dedup(Entry* values, size_t count = 1) {
FNVHash<MemoryRegion> hasher;
- uint32_t hash = hasher(MemoryRegion(values, sizeof(T) * count));
+ uint32_t hash = hasher(MemoryRegion(values, sizeof(Entry) * count));
// Check if we have already added identical set of values.
auto range = dedup_.equal_range(hash);
@@ -216,8 +339,8 @@ class BitTableBuilder {
std::equal(values,
values + count,
rows_.begin() + index,
- [](const T& lhs, const T& rhs) {
- return memcmp(&lhs, &rhs, sizeof(T)) == 0;
+ [](const Entry& lhs, const Entry& rhs) {
+ return memcmp(&lhs, &rhs, sizeof(Entry)) == 0;
})) {
return index;
}
@@ -230,11 +353,8 @@ class BitTableBuilder {
return index;
}
- ALWAYS_INLINE uint32_t Get(uint32_t row, uint32_t column) const {
- DCHECK_LT(row, size());
- DCHECK_LT(column, kNumColumns);
- const uint32_t* data = reinterpret_cast<const uint32_t*>(&rows_[row]);
- return data[column];
+ uint32_t Dedup(Entry value) {
+ return Dedup(&value, /* count */ 1);
}
// Calculate the column bit widths based on the current data.
@@ -243,7 +363,7 @@ class BitTableBuilder {
std::fill_n(max_column_value, kNumColumns, 0);
for (uint32_t r = 0; r < size(); r++) {
for (uint32_t c = 0; c < kNumColumns; c++) {
- max_column_value[c] |= Get(r, c) - BitTable<kNumColumns>::kValueBias;
+ max_column_value[c] |= rows_[r][c] - kValueBias;
}
}
for (uint32_t c = 0; c < kNumColumns; c++) {
@@ -253,52 +373,54 @@ class BitTableBuilder {
// Encode the stored data into a BitTable.
template<typename Vector>
- void Encode(Vector* out, size_t* bit_offset) const {
- constexpr uint32_t bias = BitTable<kNumColumns>::kValueBias;
- size_t initial_bit_offset = *bit_offset;
+ void Encode(BitMemoryWriter<Vector>& out) const {
+ size_t initial_bit_offset = out.GetBitOffset();
std::array<uint32_t, kNumColumns> column_bits;
Measure(&column_bits);
- EncodeVarintBits(out, bit_offset, size());
+ EncodeVarintBits(out, size());
if (size() != 0) {
// Write table header.
for (uint32_t c = 0; c < kNumColumns; c++) {
- EncodeVarintBits(out, bit_offset, column_bits[c]);
+ EncodeVarintBits(out, column_bits[c]);
}
// Write table data.
- uint32_t row_bits = std::accumulate(column_bits.begin(), column_bits.end(), 0u);
- out->resize(BitsToBytesRoundUp(*bit_offset + row_bits * size()));
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
for (uint32_t r = 0; r < size(); r++) {
for (uint32_t c = 0; c < kNumColumns; c++) {
- region.StoreBitsAndAdvance(bit_offset, Get(r, c) - bias, column_bits[c]);
+ out.WriteBits(rows_[r][c] - kValueBias, column_bits[c]);
}
}
}
// Verify the written data.
if (kIsDebugBuild) {
- BitTable<kNumColumns> table;
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
- table.Decode(region, &initial_bit_offset);
+ BitTableBase<kNumColumns> table;
+ BitMemoryReader reader(out.data(), initial_bit_offset);
+ table.Decode(reader);
DCHECK_EQ(size(), table.NumRows());
for (uint32_t c = 0; c < kNumColumns; c++) {
DCHECK_EQ(column_bits[c], table.NumColumnBits(c));
}
for (uint32_t r = 0; r < size(); r++) {
for (uint32_t c = 0; c < kNumColumns; c++) {
- DCHECK_EQ(Get(r, c), table.Get(r, c)) << " (" << r << ", " << c << ")";
+ DCHECK_EQ(rows_[r][c], table.Get(r, c)) << " (" << r << ", " << c << ")";
}
}
}
}
protected:
- ScopedArenaDeque<T> rows_;
+ ScopedArenaDeque<Entry> rows_;
ScopedArenaUnorderedMultimap<uint32_t, uint32_t> dedup_; // Hash -> row index.
};
+template<typename Accessor>
+class BitTableBuilder : public BitTableBuilderBase<Accessor::kNumColumns> {
+ public:
+ using BitTableBuilderBase<Accessor::kNumColumns>::BitTableBuilderBase; // Constructors.
+};
+
// Helper class for encoding single-column BitTable of bitmaps (allows more than 32 bits).
class BitmapTableBuilder {
public:
@@ -342,28 +464,26 @@ class BitmapTableBuilder {
// Encode the stored data into a BitTable.
template<typename Vector>
- void Encode(Vector* out, size_t* bit_offset) const {
- size_t initial_bit_offset = *bit_offset;
+ void Encode(BitMemoryWriter<Vector>& out) const {
+ size_t initial_bit_offset = out.GetBitOffset();
- EncodeVarintBits(out, bit_offset, size());
+ EncodeVarintBits(out, size());
if (size() != 0) {
- EncodeVarintBits(out, bit_offset, max_num_bits_);
+ EncodeVarintBits(out, max_num_bits_);
// Write table data.
- out->resize(BitsToBytesRoundUp(*bit_offset + max_num_bits_ * size()));
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
for (MemoryRegion row : rows_) {
BitMemoryRegion src(row);
- region.StoreBits(*bit_offset, src, std::min(max_num_bits_, src.size_in_bits()));
- *bit_offset += max_num_bits_;
+ BitMemoryRegion dst = out.Allocate(max_num_bits_);
+ dst.StoreBits(/* bit_offset */ 0, src, std::min(max_num_bits_, src.size_in_bits()));
}
}
// Verify the written data.
if (kIsDebugBuild) {
- BitTable<1> table;
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
- table.Decode(region, &initial_bit_offset);
+ BitTableBase<1> table;
+ BitMemoryReader reader(out.data(), initial_bit_offset);
+ table.Decode(reader);
DCHECK_EQ(size(), table.NumRows());
DCHECK_EQ(max_num_bits_, table.NumColumnBits(0));
for (uint32_t r = 0; r < size(); r++) {
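
A sketch of the reworked builder/accessor API; the FieldInfo accessor, its Offset and Flags columns, and the allocator variable are hypothetical names used only for illustration:

  // Two-column accessor with named getters generated by the macros.
  class FieldInfo : public BitTableAccessor<2> {
   public:
    BIT_TABLE_HEADER()
    BIT_TABLE_COLUMN(0, Offset)
    BIT_TABLE_COLUMN(1, Flags)
  };

  // Encode a one-row table, then decode and iterate it.
  std::vector<uint8_t> buffer;
  BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
  BitTableBuilder<FieldInfo> builder(&allocator);  // ScopedArenaAllocator* allocator.
  builder.Add({/* Offset */ 8u, /* Flags */ 1u});
  builder.Encode(writer);

  BitMemoryReader reader(buffer.data());
  BitTable<FieldInfo> table(reader);
  for (FieldInfo row : table) {
    DCHECK_EQ(row.GetOffset(), 8u);  // Named getter from BIT_TABLE_COLUMN(0, Offset).
    DCHECK(row.HasFlags());          // Has##NAME() compares against kNoValue.
  }
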
diff --git a/libartbase/base/bit_table_test.cc b/libartbase/base/bit_table_test.cc
index 8abf0da9d9..2fd9052516 100644
--- a/libartbase/base/bit_table_test.cc
+++ b/libartbase/base/bit_table_test.cc
@@ -31,13 +31,12 @@ TEST(BitTableTest, TestVarint) {
uint32_t values[] = { 0, 1, 11, 12, 15, 16, 255, 256, ~1u, ~0u };
for (uint32_t value : values) {
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = start_bit_offset;
- EncodeVarintBits(&buffer, &encode_bit_offset, value);
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer, start_bit_offset);
+ EncodeVarintBits(writer, value);
- size_t decode_bit_offset = start_bit_offset;
- BitMemoryRegion region(MemoryRegion(buffer.data(), buffer.size()));
- uint32_t result = DecodeVarintBits(region, &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+ BitMemoryReader reader(buffer.data(), start_bit_offset);
+ uint32_t result = DecodeVarintBits(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
EXPECT_EQ(value, result);
}
}
@@ -49,13 +48,13 @@ TEST(BitTableTest, TestEmptyTable) {
ScopedArenaAllocator allocator(&arena_stack);
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = 0;
- BitTableBuilder<uint32_t> builder(&allocator);
- builder.Encode(&buffer, &encode_bit_offset);
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
+ BitTableBuilderBase<1> builder(&allocator);
+ builder.Encode(writer);
- size_t decode_bit_offset = 0;
- BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+ BitMemoryReader reader(buffer.data());
+ BitTableBase<1> table(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
EXPECT_EQ(0u, table.NumRows());
}
@@ -66,17 +65,17 @@ TEST(BitTableTest, TestSingleColumnTable) {
constexpr uint32_t kNoValue = -1;
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = 0;
- BitTableBuilder<uint32_t> builder(&allocator);
- builder.Add(42u);
- builder.Add(kNoValue);
- builder.Add(1000u);
- builder.Add(kNoValue);
- builder.Encode(&buffer, &encode_bit_offset);
-
- size_t decode_bit_offset = 0;
- BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
+ BitTableBuilderBase<1> builder(&allocator);
+ builder.Add({42u});
+ builder.Add({kNoValue});
+ builder.Add({1000u});
+ builder.Add({kNoValue});
+ builder.Encode(writer);
+
+ BitMemoryReader reader(buffer.data());
+ BitTableBase<1> table(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
EXPECT_EQ(4u, table.NumRows());
EXPECT_EQ(42u, table.Get(0));
EXPECT_EQ(kNoValue, table.Get(1));
@@ -92,14 +91,14 @@ TEST(BitTableTest, TestUnalignedTable) {
for (size_t start_bit_offset = 0; start_bit_offset <= 32; start_bit_offset++) {
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = start_bit_offset;
- BitTableBuilder<uint32_t> builder(&allocator);
- builder.Add(42u);
- builder.Encode(&buffer, &encode_bit_offset);
-
- size_t decode_bit_offset = start_bit_offset;
- BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset) << " start_bit_offset=" << start_bit_offset;
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer, start_bit_offset);
+ BitTableBuilderBase<1> builder(&allocator);
+ builder.Add({42u});
+ builder.Encode(writer);
+
+ BitMemoryReader reader(buffer.data(), start_bit_offset);
+ BitTableBase<1> table(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
EXPECT_EQ(1u, table.NumRows());
EXPECT_EQ(42u, table.Get(0));
}
@@ -112,21 +111,15 @@ TEST(BitTableTest, TestBigTable) {
constexpr uint32_t kNoValue = -1;
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = 0;
- struct RowData {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- };
- BitTableBuilder<RowData> builder(&allocator);
- builder.Add(RowData{42u, kNoValue, 0u, static_cast<uint32_t>(-2)});
- builder.Add(RowData{62u, kNoValue, 63u, static_cast<uint32_t>(-3)});
- builder.Encode(&buffer, &encode_bit_offset);
-
- size_t decode_bit_offset = 0;
- BitTable<4> table(buffer.data(), buffer.size(), &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
+ BitTableBuilderBase<4> builder(&allocator);
+ builder.Add({42u, kNoValue, 0u, static_cast<uint32_t>(-2)});
+ builder.Add({62u, kNoValue, 63u, static_cast<uint32_t>(-3)});
+ builder.Encode(writer);
+
+ BitMemoryReader reader(buffer.data());
+ BitTableBase<4> table(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
EXPECT_EQ(2u, table.NumRows());
EXPECT_EQ(42u, table.Get(0, 0));
EXPECT_EQ(kNoValue, table.Get(0, 1));
@@ -147,13 +140,9 @@ TEST(BitTableTest, TestDedup) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
- struct RowData {
- uint32_t a;
- uint32_t b;
- };
- BitTableBuilder<RowData> builder(&allocator);
- RowData value0{1, 2};
- RowData value1{3, 4};
+ BitTableBuilderBase<2> builder(&allocator);
+ BitTableBuilderBase<2>::Entry value0{1, 2};
+ BitTableBuilderBase<2>::Entry value1{3, 4};
EXPECT_EQ(0u, builder.Dedup(&value0));
EXPECT_EQ(1u, builder.Dedup(&value1));
EXPECT_EQ(0u, builder.Dedup(&value0));
@@ -167,7 +156,7 @@ TEST(BitTableTest, TestBitmapTable) {
ScopedArenaAllocator allocator(&arena_stack);
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = 0;
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
const uint64_t value = 0xDEADBEEF0BADF00Dull;
BitmapTableBuilder builder(&allocator);
std::multimap<uint64_t, size_t> indicies; // bitmap -> row.
@@ -175,12 +164,12 @@ TEST(BitTableTest, TestBitmapTable) {
uint64_t bitmap = value & MaxInt<uint64_t>(bit_length);
indicies.emplace(bitmap, builder.Dedup(&bitmap, MinimumBitsToStore(bitmap)));
}
- builder.Encode(&buffer, &encode_bit_offset);
+ builder.Encode(writer);
EXPECT_EQ(1 + static_cast<uint32_t>(POPCOUNT(value)), builder.size());
- size_t decode_bit_offset = 0;
- BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+ BitMemoryReader reader(buffer.data());
+ BitTableBase<1> table(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
for (auto it : indicies) {
uint64_t expected = it.first;
BitMemoryRegion actual = table.GetBitMemoryRegion(it.second);
@@ -197,16 +186,12 @@ TEST(BitTableTest, TestCollisions) {
ScopedArenaAllocator allocator(&arena_stack);
FNVHash<MemoryRegion> hasher;
- struct RowData {
- uint32_t a;
- uint32_t b;
- };
- RowData value0{56948505, 0};
- RowData value1{67108869, 0};
+ BitTableBuilderBase<2>::Entry value0{56948505, 0};
+ BitTableBuilderBase<2>::Entry value1{67108869, 0};
- BitTableBuilder<RowData> builder(&allocator);
- EXPECT_EQ(hasher(MemoryRegion(&value0, sizeof(RowData))),
- hasher(MemoryRegion(&value1, sizeof(RowData))));
+ BitTableBuilderBase<2> builder(&allocator);
+ EXPECT_EQ(hasher(MemoryRegion(&value0, sizeof(value0))),
+ hasher(MemoryRegion(&value1, sizeof(value1))));
EXPECT_EQ(0u, builder.Dedup(&value0));
EXPECT_EQ(1u, builder.Dedup(&value1));
EXPECT_EQ(0u, builder.Dedup(&value0));
@@ -214,12 +199,12 @@ TEST(BitTableTest, TestCollisions) {
EXPECT_EQ(2u, builder.size());
BitmapTableBuilder builder2(&allocator);
- EXPECT_EQ(hasher(MemoryRegion(&value0, BitsToBytesRoundUp(MinimumBitsToStore(value0.a)))),
- hasher(MemoryRegion(&value1, BitsToBytesRoundUp(MinimumBitsToStore(value1.a)))));
- EXPECT_EQ(0u, builder2.Dedup(&value0.a, MinimumBitsToStore(value0.a)));
- EXPECT_EQ(1u, builder2.Dedup(&value1.a, MinimumBitsToStore(value1.a)));
- EXPECT_EQ(0u, builder2.Dedup(&value0.a, MinimumBitsToStore(value0.a)));
- EXPECT_EQ(1u, builder2.Dedup(&value1.a, MinimumBitsToStore(value1.a)));
+ EXPECT_EQ(hasher(MemoryRegion(&value0, BitsToBytesRoundUp(MinimumBitsToStore(value0[0])))),
+ hasher(MemoryRegion(&value1, BitsToBytesRoundUp(MinimumBitsToStore(value1[0])))));
+ EXPECT_EQ(0u, builder2.Dedup(&value0[0], MinimumBitsToStore(value0[0])));
+ EXPECT_EQ(1u, builder2.Dedup(&value1[0], MinimumBitsToStore(value1[0])));
+ EXPECT_EQ(0u, builder2.Dedup(&value0[0], MinimumBitsToStore(value0[0])));
+ EXPECT_EQ(1u, builder2.Dedup(&value1[0], MinimumBitsToStore(value1[0])));
EXPECT_EQ(2u, builder2.size());
}
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index 3998be516d..0ace09de1a 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -210,23 +210,11 @@ using CommonArtTestWithParam = CommonArtTestBase<testing::TestWithParam<Param>>;
}
#define TEST_DISABLED_FOR_MEMORY_TOOL() \
- if (RUNNING_ON_MEMORY_TOOL > 0) { \
+ if (kRunningOnMemoryTool) { \
printf("WARNING: TEST DISABLED FOR MEMORY TOOL\n"); \
return; \
}
-#define TEST_DISABLED_FOR_MEMORY_TOOL_VALGRIND() \
- if (RUNNING_ON_MEMORY_TOOL > 0 && kMemoryToolIsValgrind) { \
- printf("WARNING: TEST DISABLED FOR MEMORY TOOL VALGRIND\n"); \
- return; \
- }
-
-#define TEST_DISABLED_FOR_MEMORY_TOOL_ASAN() \
- if (RUNNING_ON_MEMORY_TOOL > 0 && !kMemoryToolIsValgrind) { \
- printf("WARNING: TEST DISABLED FOR MEMORY TOOL ASAN\n"); \
- return; \
- }
-
#define TEST_DISABLED_FOR_HEAP_POISONING() \
if (kPoisonHeapReferences) { \
printf("WARNING: TEST DISABLED FOR HEAP POISONING\n"); \
@@ -234,4 +222,10 @@ using CommonArtTestWithParam = CommonArtTestBase<testing::TestWithParam<Param>>;
}
} // namespace art
+#define TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING() \
+ if (kRunningOnMemoryTool && kPoisonHeapReferences) { \
+ printf("WARNING: TEST DISABLED FOR MEMORY TOOL WITH HEAP POISONING\n"); \
+ return; \
+ }
+
#endif // ART_LIBARTBASE_BASE_COMMON_ART_TEST_H_
diff --git a/libartbase/base/data_hash.h b/libartbase/base/data_hash.h
new file mode 100644
index 0000000000..5ad7779b80
--- /dev/null
+++ b/libartbase/base/data_hash.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_DATA_HASH_H_
+#define ART_LIBARTBASE_BASE_DATA_HASH_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+// Hash bytes using a relatively fast hash.
+static inline size_t HashBytes(const uint8_t* data, size_t len) {
+ size_t hash = 0x811c9dc5;
+ for (uint32_t i = 0; i < len; ++i) {
+ hash = (hash * 16777619) ^ data[i];
+ }
+ hash += hash << 13;
+ hash ^= hash >> 7;
+ hash += hash << 3;
+ hash ^= hash >> 17;
+ hash += hash << 5;
+ return hash;
+}
+
+class DataHash {
+ private:
+ static constexpr bool kUseMurmur3Hash = true;
+
+ public:
+ template <class Container>
+ size_t operator()(const Container& array) const {
+ // Containers that provide the data() function use contiguous storage.
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
+ uint32_t len = sizeof(typename Container::value_type) * array.size();
+ if (kUseMurmur3Hash) {
+ static constexpr uint32_t c1 = 0xcc9e2d51;
+ static constexpr uint32_t c2 = 0x1b873593;
+ static constexpr uint32_t r1 = 15;
+ static constexpr uint32_t r2 = 13;
+ static constexpr uint32_t m = 5;
+ static constexpr uint32_t n = 0xe6546b64;
+
+ uint32_t hash = 0;
+
+ const int nblocks = len / 4;
+ typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ const unaligned_uint32_t *blocks = reinterpret_cast<const uint32_t*>(data);
+ int i;
+ for (i = 0; i < nblocks; i++) {
+ uint32_t k = blocks[i];
+ k *= c1;
+ k = (k << r1) | (k >> (32 - r1));
+ k *= c2;
+
+ hash ^= k;
+ hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
+ }
+
+ const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
+ uint32_t k1 = 0;
+
+ switch (len & 3) {
+ case 3:
+ k1 ^= tail[2] << 16;
+ FALLTHROUGH_INTENDED;
+ case 2:
+ k1 ^= tail[1] << 8;
+ FALLTHROUGH_INTENDED;
+ case 1:
+ k1 ^= tail[0];
+
+ k1 *= c1;
+ k1 = (k1 << r1) | (k1 >> (32 - r1));
+ k1 *= c2;
+ hash ^= k1;
+ }
+
+ hash ^= len;
+ hash ^= (hash >> 16);
+ hash *= 0x85ebca6b;
+ hash ^= (hash >> 13);
+ hash *= 0xc2b2ae35;
+ hash ^= (hash >> 16);
+
+ return hash;
+ } else {
+ return HashBytes(data, len);
+ }
+ }
+};
+
+} // namespace art
+
+#endif // ART_LIBARTBASE_BASE_DATA_HASH_H_
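
A short sketch of DataHash applied to contiguous containers (the byte values and string are illustrative):

  std::vector<uint8_t> bytes = {0xDE, 0xAD, 0xBE, 0xEF};
  size_t raw_hash = DataHash()(bytes);        // Murmur3 over the four bytes.
  std::string descriptor = "LFoo;->bar()V";
  size_t name_hash = DataHash()(descriptor);  // std::string also exposes data()/size().
  // HashBytes(bytes.data(), bytes.size()) is the FNV-style fallback used when
  // kUseMurmur3Hash is false.
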
diff --git a/libartbase/base/file_utils_test.cc b/libartbase/base/file_utils_test.cc
index 56d1c44fc0..2a7273b85e 100644
--- a/libartbase/base/file_utils_test.cc
+++ b/libartbase/base/file_utils_test.cc
@@ -69,12 +69,11 @@ TEST_F(FileUtilsTest, GetAndroidRootSafe) {
EXPECT_EQ(android_root, android_root_env);
// Set ANDROID_ROOT to something else (but the directory must exist). So use dirname.
- char* root_dup = strdup(android_root_env.c_str());
- char* dir = dirname(root_dup);
+ UniqueCPtr<char> root_dup(strdup(android_root_env.c_str()));
+ char* dir = dirname(root_dup.get());
ASSERT_EQ(0, setenv("ANDROID_ROOT", dir, 1 /* overwrite */));
std::string android_root2 = GetAndroidRootSafe(&error_msg);
EXPECT_STREQ(dir, android_root2.c_str());
- free(root_dup);
// Set a bogus value for ANDROID_ROOT. This should be an error.
ASSERT_EQ(0, setenv("ANDROID_ROOT", "/this/is/obviously/bogus", 1 /* overwrite */));
diff --git a/libartbase/base/fuchsia_compat.h b/libartbase/base/fuchsia_compat.h
new file mode 100644
index 0000000000..018bac0528
--- /dev/null
+++ b/libartbase/base/fuchsia_compat.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
+#define ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
+
+// stubs for features lacking in Fuchsia
+
+struct rlimit {
+ int rlim_cur;
+};
+
+#define RLIMIT_FSIZE (1)
+#define RLIM_INFINITY (-1)
+static int getrlimit(int resource, struct rlimit *rlim) {
+ LOG(FATAL) << "getrlimit not available for Fuchsia";
+}
+
+static int ashmem_create_region(const char *name, size_t size) {
+ LOG(FATAL) << "ashmem_create_region not available for Fuchsia";
+}
+
+#endif // ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index 39e0c509cd..cd0bf8fafc 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -74,7 +74,9 @@ static constexpr bool kIsPGOInstrumentation = false;
// ART_TARGET - Defined for target builds of ART.
// ART_TARGET_LINUX - Defined for target Linux builds of ART.
// ART_TARGET_ANDROID - Defined for target Android builds of ART.
-// Note: Either ART_TARGET_LINUX or ART_TARGET_ANDROID need to be set when ART_TARGET is set.
+// ART_TARGET_FUCHSIA - Defined for Fuchsia builds of ART.
+// Note: Either ART_TARGET_LINUX, ART_TARGET_ANDROID or ART_TARGET_FUCHSIA
+// need to be set when ART_TARGET is set.
// Note: When ART_TARGET_LINUX is defined mem_map.h will not be using Ashmem for memory mappings
// (usually only available on Android kernels).
#if defined(ART_TARGET)
@@ -82,10 +84,16 @@ static constexpr bool kIsPGOInstrumentation = false;
static constexpr bool kIsTargetBuild = true;
# if defined(ART_TARGET_LINUX)
static constexpr bool kIsTargetLinux = true;
+static constexpr bool kIsTargetFuchsia = false;
# elif defined(ART_TARGET_ANDROID)
static constexpr bool kIsTargetLinux = false;
+static constexpr bool kIsTargetFuchsia = false;
+# elif defined(ART_TARGET_FUCHSIA)
+static constexpr bool kIsTargetLinux = false;
+static constexpr bool kIsTargetFuchsia = true;
# else
-# error "Either ART_TARGET_LINUX or ART_TARGET_ANDROID needs to be defined for target builds."
+# error "Either ART_TARGET_LINUX, ART_TARGET_ANDROID or ART_TARGET_FUCHSIA " \
+ "needs to be defined for target builds."
# endif
#else
static constexpr bool kIsTargetBuild = false;
@@ -93,8 +101,11 @@ static constexpr bool kIsTargetBuild = false;
# error "ART_TARGET_LINUX defined for host build."
# elif defined(ART_TARGET_ANDROID)
# error "ART_TARGET_ANDROID defined for host build."
+# elif defined(ART_TARGET_FUCHSIA)
+# error "ART_TARGET_FUCHSIA defined for host build."
# else
static constexpr bool kIsTargetLinux = false;
+static constexpr bool kIsTargetFuchsia = false;
# endif
#endif
diff --git a/libartbase/base/hash_map.h b/libartbase/base/hash_map.h
index 0d7198c0f7..a3bb5b5550 100644
--- a/libartbase/base/hash_map.h
+++ b/libartbase/base/hash_map.h
@@ -48,9 +48,12 @@ class HashMapWrapper {
Fn fn_;
};
-template <class Key, class Value, class EmptyFn,
- class HashFn = std::hash<Key>, class Pred = std::equal_to<Key>,
- class Alloc = std::allocator<std::pair<Key, Value>>>
+template <class Key,
+ class Value,
+ class EmptyFn,
+ class HashFn = DefaultHashFn<Key>,
+ class Pred = DefaultPred<Key>,
+ class Alloc = std::allocator<std::pair<Key, Value>>>
class HashMap : public HashSet<std::pair<Key, Value>,
EmptyFn,
HashMapWrapper<HashFn>,
diff --git a/libartbase/base/hash_set.h b/libartbase/base/hash_set.h
index 2f810eaade..2b1a5eb947 100644
--- a/libartbase/base/hash_set.h
+++ b/libartbase/base/hash_set.h
@@ -22,16 +22,94 @@
#include <functional>
#include <iterator>
#include <memory>
+#include <string>
#include <type_traits>
#include <utility>
#include <android-base/logging.h>
+#include "base/data_hash.h"
#include "bit_utils.h"
#include "macros.h"
namespace art {
+template <class Elem, class HashSetType>
+class HashSetIterator : std::iterator<std::forward_iterator_tag, Elem> {
+ public:
+ HashSetIterator(const HashSetIterator&) = default;
+ HashSetIterator(HashSetIterator&&) = default;
+ HashSetIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {}
+
+ // Conversion from iterator to const_iterator.
+ template <class OtherElem,
+ class OtherHashSetType,
+ typename = typename std::enable_if<
+ std::is_same<Elem, const OtherElem>::value &&
+ std::is_same<HashSetType, const OtherHashSetType>::value>::type>
+ HashSetIterator(const HashSetIterator<OtherElem, OtherHashSetType>& other)
+ : index_(other.index_), hash_set_(other.hash_set_) {}
+
+ HashSetIterator& operator=(const HashSetIterator&) = default;
+ HashSetIterator& operator=(HashSetIterator&&) = default;
+
+ bool operator==(const HashSetIterator& other) const {
+ return hash_set_ == other.hash_set_ && this->index_ == other.index_;
+ }
+
+ bool operator!=(const HashSetIterator& other) const {
+ return !(*this == other);
+ }
+
+ HashSetIterator operator++() { // Value after modification.
+ this->index_ = hash_set_->NextNonEmptySlot(index_);
+ return *this;
+ }
+
+ HashSetIterator operator++(int) {
+ HashSetIterator temp = *this;
+ ++*this;
+ return temp;
+ }
+
+ Elem& operator*() const {
+ DCHECK(!hash_set_->IsFreeSlot(this->index_));
+ return hash_set_->ElementForIndex(this->index_);
+ }
+
+ Elem* operator->() const {
+ return &**this;
+ }
+
+ private:
+ size_t index_;
+ HashSetType* hash_set_;
+
+ template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+ friend bool operator==(const HashSetIterator<Elem1, HashSetType1>& lhs,
+ const HashSetIterator<Elem2, HashSetType2>& rhs);
+ template <class T, class EmptyFn, class HashFn, class Pred, class Alloc> friend class HashSet;
+ template <class OtherElem, class OtherHashSetType> friend class HashSetIterator;
+};
+
+template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+bool operator==(const HashSetIterator<Elem1, HashSetType1>& lhs,
+ const HashSetIterator<Elem2, HashSetType2>& rhs) {
+ static_assert(
+ std::is_convertible<HashSetIterator<Elem1, HashSetType1>,
+ HashSetIterator<Elem2, HashSetType2>>::value ||
+ std::is_convertible<HashSetIterator<Elem2, HashSetType2>,
+ HashSetIterator<Elem1, HashSetType1>>::value, "Bad iterator types.");
+ DCHECK_EQ(lhs.hash_set_, rhs.hash_set_);
+ return lhs.index_ == rhs.index_;
+}
+
+template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+bool operator!=(const HashSetIterator<Elem1, HashSetType1>& lhs,
+ const HashSetIterator<Elem2, HashSetType2>& rhs) {
+ return !(lhs == rhs);
+}
+
// Returns true if an item is empty.
template <class T>
class DefaultEmptyFn {
@@ -55,70 +133,35 @@ class DefaultEmptyFn<T*> {
}
};
-// Low memory version of a hash set, uses less memory than std::unordered_set since elements aren't
-// boxed. Uses linear probing to resolve collisions.
+template <class T>
+using DefaultHashFn = typename std::conditional<std::is_same<T, std::string>::value,
+ DataHash,
+ std::hash<T>>::type;
+
+struct DefaultStringEquals {
+ // Allow comparison with anything that can be compared to std::string, for example StringPiece.
+ template <typename T>
+ bool operator()(const std::string& lhs, const T& rhs) const {
+ return lhs == rhs;
+ }
+};
+
+template <class T>
+using DefaultPred = typename std::conditional<std::is_same<T, std::string>::value,
+ DefaultStringEquals,
+ std::equal_to<T>>::type;
+
+// Low memory version of a hash set, uses less memory than std::unordered_multiset since elements
+// aren't boxed. Uses linear probing to resolve collisions.
// EmptyFn needs to implement two functions MakeEmpty(T& item) and IsEmpty(const T& item).
// TODO: We could get rid of this requirement by using a bitmap, though maybe this would be slower
// and more complicated.
-template <class T, class EmptyFn = DefaultEmptyFn<T>, class HashFn = std::hash<T>,
- class Pred = std::equal_to<T>, class Alloc = std::allocator<T>>
+template <class T,
+ class EmptyFn = DefaultEmptyFn<T>,
+ class HashFn = DefaultHashFn<T>,
+ class Pred = DefaultPred<T>,
+ class Alloc = std::allocator<T>>
class HashSet {
- template <class Elem, class HashSetType>
- class BaseIterator : std::iterator<std::forward_iterator_tag, Elem> {
- public:
- BaseIterator(const BaseIterator&) = default;
- BaseIterator(BaseIterator&&) = default;
- BaseIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {
- }
- BaseIterator& operator=(const BaseIterator&) = default;
- BaseIterator& operator=(BaseIterator&&) = default;
-
- bool operator==(const BaseIterator& other) const {
- return hash_set_ == other.hash_set_ && this->index_ == other.index_;
- }
-
- bool operator!=(const BaseIterator& other) const {
- return !(*this == other);
- }
-
- BaseIterator operator++() { // Value after modification.
- this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
- return *this;
- }
-
- BaseIterator operator++(int) {
- BaseIterator temp = *this;
- this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
- return temp;
- }
-
- Elem& operator*() const {
- DCHECK(!hash_set_->IsFreeSlot(this->index_));
- return hash_set_->ElementForIndex(this->index_);
- }
-
- Elem* operator->() const {
- return &**this;
- }
-
- // TODO: Operator -- --(int) (and use std::bidirectional_iterator_tag)
-
- private:
- size_t index_;
- HashSetType* hash_set_;
-
- size_t NextNonEmptySlot(size_t index, const HashSet* hash_set) const {
- const size_t num_buckets = hash_set->NumBuckets();
- DCHECK_LT(index, num_buckets);
- do {
- ++index;
- } while (index < num_buckets && hash_set->IsFreeSlot(index));
- return index;
- }
-
- friend class HashSet;
- };
-
public:
using value_type = T;
using allocator_type = Alloc;
@@ -126,8 +169,8 @@ class HashSet {
using const_reference = const T&;
using pointer = T*;
using const_pointer = const T*;
- using iterator = BaseIterator<T, HashSet>;
- using const_iterator = BaseIterator<const T, const HashSet>;
+ using iterator = HashSetIterator<T, HashSet>;
+ using const_iterator = HashSetIterator<const T, const HashSet>;
using size_type = size_t;
using difference_type = ptrdiff_t;
@@ -136,7 +179,7 @@ class HashSet {
static constexpr size_t kMinBuckets = 1000;
// If we don't own the data, this will create a new array which owns the data.
- void Clear() {
+ void clear() {
DeallocateStorage();
num_elements_ = 0;
elements_until_expand_ = 0;
@@ -300,13 +343,12 @@ class HashSet {
return const_iterator(this, NumBuckets());
}
- bool Empty() const {
- return Size() == 0;
+ size_t size() const {
+ return num_elements_;
}
- // Return true if the hash set has ownership of the underlying data.
- bool OwnsData() const {
- return owns_data_;
+ bool empty() const {
+ return size() == 0;
}
// Erase algorithm:
@@ -317,7 +359,7 @@ class HashSet {
// and set the empty slot to be the location we just moved from.
// Relies on maintaining the invariant that there's no empty slots from the 'ideal' index of an
// element to its actual location/index.
- iterator Erase(iterator it) {
+ iterator erase(iterator it) {
// empty_index is the index that will become empty.
size_t empty_index = it.index_;
DCHECK(!IsFreeSlot(empty_index));
@@ -368,12 +410,12 @@ class HashSet {
// Set of Class* sorted by name, want to find a class with a name but can't allocate a dummy
// object in the heap for performance solution.
template <typename K>
- iterator Find(const K& key) {
+ iterator find(const K& key) {
return FindWithHash(key, hashfn_(key));
}
template <typename K>
- const_iterator Find(const K& key) const {
+ const_iterator find(const K& key) const {
return FindWithHash(key, hashfn_(key));
}
@@ -387,14 +429,26 @@ class HashSet {
return const_iterator(this, FindIndex(key, hash));
}
+ // Insert an element with hint, allows duplicates.
+ // Note: The hint is not very useful for a HashSet<> unless there are many hash conflicts
+ // and in that case the use of HashSet<> itself should be reconsidered.
+ iterator insert(const_iterator hint ATTRIBUTE_UNUSED, const T& element) {
+ return insert(element);
+ }
+ iterator insert(const_iterator hint ATTRIBUTE_UNUSED, T&& element) {
+ return insert(std::move(element));
+ }
+
// Insert an element, allows duplicates.
- template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
- void Insert(U&& element) {
- InsertWithHash(std::forward<U>(element), hashfn_(element));
+ iterator insert(const T& element) {
+ return InsertWithHash(element, hashfn_(element));
+ }
+ iterator insert(T&& element) {
+ return InsertWithHash(std::move(element), hashfn_(element));
}
template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
- void InsertWithHash(U&& element, size_t hash) {
+ iterator InsertWithHash(U&& element, size_t hash) {
DCHECK_EQ(hash, hashfn_(element));
if (num_elements_ >= elements_until_expand_) {
Expand();
@@ -403,10 +457,7 @@ class HashSet {
const size_t index = FirstAvailableSlot(IndexForHash(hash));
data_[index] = std::forward<U>(element);
++num_elements_;
- }
-
- size_t Size() const {
- return num_elements_;
+ return iterator(this, index);
}
void swap(HashSet& other) {
@@ -430,12 +481,12 @@ class HashSet {
}
void ShrinkToMaximumLoad() {
- Resize(Size() / max_load_factor_);
+ Resize(size() / max_load_factor_);
}
// Reserve enough room to insert until Size() == num_elements without requiring to grow the hash
// set. No-op if the hash set is already large enough to do this.
- void Reserve(size_t num_elements) {
+ void reserve(size_t num_elements) {
size_t num_buckets = num_elements / max_load_factor_;
// Deal with rounding errors. Add one for rounding.
while (static_cast<size_t>(num_buckets * max_load_factor_) <= num_elements + 1u) {
@@ -466,7 +517,7 @@ class HashSet {
// Calculate the current load factor and return it.
double CalculateLoadFactor() const {
- return static_cast<double>(Size()) / static_cast<double>(NumBuckets());
+ return static_cast<double>(size()) / static_cast<double>(NumBuckets());
}
// Make sure that everything reinserts in the right spot. Returns the number of errors.
@@ -510,7 +561,7 @@ class HashSet {
// maximum load factor.
const double load_factor = CalculateLoadFactor();
if (load_factor > max_load_factor_) {
- Resize(Size() / ((min_load_factor_ + max_load_factor_) * 0.5));
+ Resize(size() / ((min_load_factor_ + max_load_factor_) * 0.5));
}
}
@@ -605,7 +656,7 @@ class HashSet {
// Expand the set based on the load factors.
void Expand() {
- size_t min_index = static_cast<size_t>(Size() / min_load_factor_);
+ size_t min_index = static_cast<size_t>(size() / min_load_factor_);
// Resize based on the minimum load factor.
Resize(min_index);
}
@@ -615,7 +666,7 @@ class HashSet {
if (new_size < kMinBuckets) {
new_size = kMinBuckets;
}
- DCHECK_GE(new_size, Size());
+ DCHECK_GE(new_size, size());
T* const old_data = data_;
size_t old_num_buckets = num_buckets_;
// Reinsert all of the old elements.
@@ -649,6 +700,15 @@ class HashSet {
return index;
}
+ size_t NextNonEmptySlot(size_t index) const {
+ const size_t num_buckets = NumBuckets();
+ DCHECK_LT(index, num_buckets);
+ do {
+ ++index;
+ } while (index < num_buckets && IsFreeSlot(index));
+ return index;
+ }
+
// Return new offset.
template <typename Elem>
static size_t WriteToBytes(uint8_t* ptr, size_t offset, Elem n) {
@@ -679,6 +739,9 @@ class HashSet {
double min_load_factor_;
double max_load_factor_;
+ template <class Elem, class HashSetType>
+ friend class HashSetIterator;
+
ART_FRIEND_TEST(InternTableTest, CrossHash);
};
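
Taken together, the hash_set.h hunks above rename the accessors to the std::unordered_set-style
lowercase spelling and make insert() return an iterator to the inserted element. A rough usage
sketch of the resulting interface (not part of the patch; it only exercises calls visible in the
diff, and HashSet<std::string> with default template arguments is assumed to work as in the
IteratorConversion test below):

#include <string>

#include "base/hash_set.h"  // Include path is illustrative.

void HashSetApiSketch() {
  const std::string key = "hello";
  art::HashSet<std::string> set;
  // insert() now returns an iterator (InsertWithHash returns iterator(this, index)).
  art::HashSet<std::string>::iterator it = set.insert(key);
  size_t n = set.size();        // Was Size().
  bool is_empty = set.empty();  // Was Empty().
  it = set.find(key);           // Was Find().
  it = set.erase(it);           // Was Erase(); still returns the iterator after the removed slot.
  set.clear();                  // Was Clear().
  (void)n; (void)is_empty; (void)it;
}
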
diff --git a/libartbase/base/hash_set_test.cc b/libartbase/base/hash_set_test.cc
index ff745b4be5..782a68b5d5 100644
--- a/libartbase/base/hash_set_test.cc
+++ b/libartbase/base/hash_set_test.cc
@@ -24,6 +24,8 @@
#include <vector>
#include <gtest/gtest.h>
+
+#include "base/stringpiece.h"
#include "hash_map.h"
namespace art {
@@ -66,16 +68,16 @@ class HashSetTest : public testing::Test {
TEST_F(HashSetTest, TestSmoke) {
HashSet<std::string, IsEmptyFnString> hash_set;
const std::string test_string = "hello world 1234";
- ASSERT_TRUE(hash_set.Empty());
- ASSERT_EQ(hash_set.Size(), 0U);
- hash_set.Insert(test_string);
- auto it = hash_set.Find(test_string);
+ ASSERT_TRUE(hash_set.empty());
+ ASSERT_EQ(hash_set.size(), 0U);
+ hash_set.insert(test_string);
+ auto it = hash_set.find(test_string);
ASSERT_EQ(*it, test_string);
- auto after_it = hash_set.Erase(it);
+ auto after_it = hash_set.erase(it);
ASSERT_TRUE(after_it == hash_set.end());
- ASSERT_TRUE(hash_set.Empty());
- ASSERT_EQ(hash_set.Size(), 0U);
- it = hash_set.Find(test_string);
+ ASSERT_TRUE(hash_set.empty());
+ ASSERT_EQ(hash_set.size(), 0U);
+ it = hash_set.find(test_string);
ASSERT_TRUE(it == hash_set.end());
}
@@ -86,26 +88,26 @@ TEST_F(HashSetTest, TestInsertAndErase) {
for (size_t i = 0; i < count; ++i) {
// Insert a bunch of elements and make sure we can find them.
strings.push_back(RandomString(10));
- hash_set.Insert(strings[i]);
- auto it = hash_set.Find(strings[i]);
+ hash_set.insert(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it != hash_set.end());
ASSERT_EQ(*it, strings[i]);
}
- ASSERT_EQ(strings.size(), hash_set.Size());
+ ASSERT_EQ(strings.size(), hash_set.size());
// Try to erase the odd strings.
for (size_t i = 1; i < count; i += 2) {
- auto it = hash_set.Find(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it != hash_set.end());
ASSERT_EQ(*it, strings[i]);
- hash_set.Erase(it);
+ hash_set.erase(it);
}
// Test removed.
for (size_t i = 1; i < count; i += 2) {
- auto it = hash_set.Find(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it == hash_set.end());
}
for (size_t i = 0; i < count; i += 2) {
- auto it = hash_set.Find(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it != hash_set.end());
ASSERT_EQ(*it, strings[i]);
}
@@ -119,7 +121,7 @@ TEST_F(HashSetTest, TestIterator) {
for (size_t i = 0; i < count; ++i) {
// Insert a bunch of elements and make sure we can find them.
strings.push_back(RandomString(10));
- hash_set.Insert(strings[i]);
+ hash_set.insert(strings[i]);
}
// Make sure we visit each string exactly once.
std::map<std::string, size_t> found_count;
@@ -133,7 +135,7 @@ TEST_F(HashSetTest, TestIterator) {
// Remove all the elements with iterator erase.
for (auto it = hash_set.begin(); it != hash_set.end();) {
++found_count[*it];
- it = hash_set.Erase(it);
+ it = hash_set.erase(it);
ASSERT_EQ(hash_set.Verify(), 0U);
}
for (size_t i = 0; i < count; ++i) {
@@ -147,14 +149,14 @@ TEST_F(HashSetTest, TestSwap) {
static constexpr size_t count = 1000;
for (size_t i = 0; i < count; ++i) {
strings.push_back(RandomString(10));
- hash_seta.Insert(strings[i]);
+ hash_seta.insert(strings[i]);
}
std::swap(hash_seta, hash_setb);
- hash_seta.Insert("TEST");
- hash_setb.Insert("TEST2");
+ hash_seta.insert("TEST");
+ hash_setb.insert("TEST2");
for (size_t i = 0; i < count; ++i) {
strings.push_back(RandomString(10));
- hash_seta.Insert(strings[i]);
+ hash_seta.insert(strings[i]);
}
}
@@ -163,7 +165,7 @@ TEST_F(HashSetTest, TestShrink) {
std::vector<std::string> strings = {"a", "b", "c", "d", "e", "f", "g"};
for (size_t i = 0; i < strings.size(); ++i) {
// Insert some strings into the beginning of our hash set to establish an initial size
- hash_set.Insert(strings[i]);
+ hash_set.insert(strings[i]);
}
hash_set.ShrinkToMaximumLoad();
@@ -174,12 +176,12 @@ TEST_F(HashSetTest, TestShrink) {
static constexpr size_t count = 1000;
for (size_t i = 0; i < count; ++i) {
random_strings.push_back(RandomString(10));
- hash_set.Insert(random_strings[i]);
+ hash_set.insert(random_strings[i]);
}
// Erase all the extra strings which guarantees that our load factor will be really bad.
for (size_t i = 0; i < count; ++i) {
- hash_set.Erase(hash_set.Find(random_strings[i]));
+ hash_set.erase(hash_set.find(random_strings[i]));
}
const double bad_load = hash_set.CalculateLoadFactor();
@@ -191,7 +193,7 @@ TEST_F(HashSetTest, TestShrink) {
// Make sure all the initial elements we had are still there
for (const std::string& initial_string : strings) {
- EXPECT_NE(hash_set.end(), hash_set.Find(initial_string))
+ EXPECT_NE(hash_set.end(), hash_set.find(initial_string))
<< "expected to find " << initial_string;
}
}
@@ -201,7 +203,7 @@ TEST_F(HashSetTest, TestLoadFactor) {
static constexpr size_t kStringCount = 1000;
static constexpr double kEpsilon = 0.01;
for (size_t i = 0; i < kStringCount; ++i) {
- hash_set.Insert(RandomString(i % 10 + 1));
+ hash_set.insert(RandomString(i % 10 + 1));
}
// Check that changing the load factor resizes the table to be within the target range.
EXPECT_GE(hash_set.CalculateLoadFactor() + kEpsilon, hash_set.GetMinLoadFactor());
@@ -228,29 +230,29 @@ TEST_F(HashSetTest, TestStress) {
SetSeed(seed);
LOG(INFO) << "Starting stress test with seed " << seed;
for (size_t i = 0; i < operations; ++i) {
- ASSERT_EQ(hash_set.Size(), std_set.size());
+ ASSERT_EQ(hash_set.size(), std_set.size());
size_t delta = std::abs(static_cast<ssize_t>(target_size) -
- static_cast<ssize_t>(hash_set.Size()));
+ static_cast<ssize_t>(hash_set.size()));
size_t n = PRand();
if (n % target_size == 0) {
- hash_set.Clear();
+ hash_set.clear();
std_set.clear();
- ASSERT_TRUE(hash_set.Empty());
+ ASSERT_TRUE(hash_set.empty());
ASSERT_TRUE(std_set.empty());
} else if (n % target_size < delta) {
// Skew towards adding elements until we are at the desired size.
const std::string& s = strings[PRand() % string_count];
- hash_set.Insert(s);
+ hash_set.insert(s);
std_set.insert(s);
- ASSERT_EQ(*hash_set.Find(s), *std_set.find(s));
+ ASSERT_EQ(*hash_set.find(s), *std_set.find(s));
} else {
const std::string& s = strings[PRand() % string_count];
- auto it1 = hash_set.Find(s);
+ auto it1 = hash_set.find(s);
auto it2 = std_set.find(s);
ASSERT_EQ(it1 == hash_set.end(), it2 == std_set.end());
if (it1 != hash_set.end()) {
ASSERT_EQ(*it1, *it2);
- hash_set.Erase(it1);
+ hash_set.erase(it1);
std_set.erase(it2);
}
}
@@ -268,13 +270,13 @@ struct IsEmptyStringPair {
TEST_F(HashSetTest, TestHashMap) {
HashMap<std::string, int, IsEmptyStringPair> hash_map;
- hash_map.Insert(std::make_pair(std::string("abcd"), 123));
- hash_map.Insert(std::make_pair(std::string("abcd"), 124));
- hash_map.Insert(std::make_pair(std::string("bags"), 444));
- auto it = hash_map.Find(std::string("abcd"));
+ hash_map.insert(std::make_pair(std::string("abcd"), 123));
+ hash_map.insert(std::make_pair(std::string("abcd"), 124));
+ hash_map.insert(std::make_pair(std::string("bags"), 444));
+ auto it = hash_map.find(std::string("abcd"));
ASSERT_EQ(it->second, 123);
- hash_map.Erase(it);
- it = hash_map.Find(std::string("abcd"));
+ hash_map.erase(it);
+ it = hash_map.find(std::string("abcd"));
ASSERT_EQ(it->second, 124);
}
@@ -325,33 +327,50 @@ struct VectorIntHashEquals {
TEST_F(HashSetTest, TestLookupByAlternateKeyType) {
HashSet<std::vector<int>, IsEmptyFnVectorInt, VectorIntHashEquals, VectorIntHashEquals> hash_set;
- hash_set.Insert(std::vector<int>({1, 2, 3, 4}));
- hash_set.Insert(std::vector<int>({4, 2}));
- ASSERT_EQ(hash_set.end(), hash_set.Find(std::vector<int>({1, 1, 1, 1})));
- ASSERT_NE(hash_set.end(), hash_set.Find(std::vector<int>({1, 2, 3, 4})));
- ASSERT_EQ(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 1, 1, 1})));
- ASSERT_NE(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 2, 3, 4})));
+ hash_set.insert(std::vector<int>({1, 2, 3, 4}));
+ hash_set.insert(std::vector<int>({4, 2}));
+ ASSERT_EQ(hash_set.end(), hash_set.find(std::vector<int>({1, 1, 1, 1})));
+ ASSERT_NE(hash_set.end(), hash_set.find(std::vector<int>({1, 2, 3, 4})));
+ ASSERT_EQ(hash_set.end(), hash_set.find(std::forward_list<int>({1, 1, 1, 1})));
+ ASSERT_NE(hash_set.end(), hash_set.find(std::forward_list<int>({1, 2, 3, 4})));
}
TEST_F(HashSetTest, TestReserve) {
HashSet<std::string, IsEmptyFnString> hash_set;
std::vector<size_t> sizes = {1, 10, 25, 55, 128, 1024, 4096};
for (size_t size : sizes) {
- hash_set.Reserve(size);
+ hash_set.reserve(size);
const size_t buckets_before = hash_set.NumBuckets();
// Check that we expanded enough.
CHECK_GE(hash_set.ElementsUntilExpand(), size);
// Try inserting elements until we are at our reserve size and ensure the hash set did not
// expand.
- while (hash_set.Size() < size) {
- hash_set.Insert(std::to_string(hash_set.Size()));
+ while (hash_set.size() < size) {
+ hash_set.insert(std::to_string(hash_set.size()));
}
CHECK_EQ(hash_set.NumBuckets(), buckets_before);
}
// Check the behaviour for shrinking, it does not necessarily resize down.
constexpr size_t size = 100;
- hash_set.Reserve(size);
+ hash_set.reserve(size);
CHECK_GE(hash_set.ElementsUntilExpand(), size);
}
+TEST_F(HashSetTest, IteratorConversion) {
+ const char* test_string = "dummy";
+ HashSet<std::string> hash_set;
+ HashSet<std::string>::iterator it = hash_set.insert(test_string);
+ HashSet<std::string>::const_iterator cit = it;
+ ASSERT_TRUE(it == cit);
+ ASSERT_EQ(*it, *cit);
+}
+
+TEST_F(HashSetTest, StringSearchyStringPiece) {
+ const char* test_string = "dummy";
+ HashSet<std::string> hash_set;
+ HashSet<std::string>::iterator insert_pos = hash_set.insert(test_string);
+ HashSet<std::string>::iterator it = hash_set.find(StringPiece(test_string));
+ ASSERT_TRUE(it == insert_pos);
+}
+
} // namespace art
diff --git a/libartbase/base/indenter.h b/libartbase/base/indenter.h
index 06e7340d36..a479b7d650 100644
--- a/libartbase/base/indenter.h
+++ b/libartbase/base/indenter.h
@@ -122,6 +122,10 @@ class VariableIndentationOutputStream {
return indented_os_;
}
+ size_t GetIndentation() const {
+ return indenter_.count_;
+ }
+
void IncreaseIndentation(size_t adjustment) {
indenter_.count_ += adjustment;
}
diff --git a/libartbase/base/iteration_range.h b/libartbase/base/iteration_range.h
index 76049a7e4d..cd87d85f68 100644
--- a/libartbase/base/iteration_range.h
+++ b/libartbase/base/iteration_range.h
@@ -39,9 +39,9 @@ class IterationRange {
iterator cbegin() const { return first_; }
iterator cend() const { return last_; }
- private:
- const iterator first_;
- const iterator last_;
+ protected:
+ iterator first_;
+ iterator last_;
};
template <typename Iter>
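
The iteration_range.h change above relaxes first_/last_ from private const members to protected
mutable ones, presumably so that a subclass can adjust the range after construction. A minimal
illustrative sketch of what that permits (the IterationRange(first, last) constructor is assumed
from the existing header; no such subclass is part of this change):

#include "base/iteration_range.h"  // Include path is illustrative.

template <typename Iter>
class RebindableRangeSketch : public art::IterationRange<Iter> {
 public:
  RebindableRangeSketch(Iter first, Iter last) : art::IterationRange<Iter>(first, last) {}
  // Only possible now that first_/last_ are protected and non-const.
  void Rebind(Iter first, Iter last) {
    this->first_ = first;
    this->last_ = last;
  }
};
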
diff --git a/libartbase/base/malloc_arena_pool.cc b/libartbase/base/malloc_arena_pool.cc
index 144b06ceb9..15a5d71a6b 100644
--- a/libartbase/base/malloc_arena_pool.cc
+++ b/libartbase/base/malloc_arena_pool.cc
@@ -53,7 +53,7 @@ MallocArena::MallocArena(size_t size) {
memory_ = unaligned_memory_;
} else {
memory_ = AlignUp(unaligned_memory_, ArenaAllocator::kArenaAlignment);
- if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+ if (kRunningOnMemoryTool) {
size_t head = memory_ - unaligned_memory_;
size_t tail = overallocation - head;
MEMORY_TOOL_MAKE_NOACCESS(unaligned_memory_, head);
@@ -66,7 +66,7 @@ MallocArena::MallocArena(size_t size) {
MallocArena::~MallocArena() {
constexpr size_t overallocation = RequiredOverallocation();
- if (overallocation != 0u && UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+ if (overallocation != 0u && kRunningOnMemoryTool) {
size_t head = memory_ - unaligned_memory_;
size_t tail = overallocation - head;
MEMORY_TOOL_MAKE_UNDEFINED(unaligned_memory_, head);
@@ -132,7 +132,7 @@ size_t MallocArenaPool::GetBytesAllocated() const {
}
void MallocArenaPool::FreeArenaChain(Arena* first) {
- if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+ if (kRunningOnMemoryTool) {
for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
}
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index c455fed829..5cea869519 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -19,7 +19,7 @@
#include <inttypes.h>
#include <stdlib.h>
#include <sys/mman.h> // For the PROT_* and MAP_* constants.
-#ifndef ANDROID_OS
+#if !defined(ANDROID_OS) && !defined(__Fuchsia__)
#include <sys/resource.h>
#endif
@@ -29,7 +29,12 @@
#include "android-base/stringprintf.h"
#include "android-base/unique_fd.h"
+
+#if !defined(__Fuchsia__)
#include "cutils/ashmem.h"
+#else
+#include "fuchsia_compat.h"
+#endif
#include "allocator.h"
#include "bit_utils.h"
@@ -161,7 +166,7 @@ bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string*
// non-null, we check that pointer is the actual_ptr == expected_ptr,
// and if not, report in error_msg what the conflict mapping was if
// found, or a generic error in other cases.
-static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
+bool MemMap::CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
std::string* error_msg) {
// Handled first by caller for more specific error messages.
CHECK(actual_ptr != MAP_FAILED);
@@ -178,7 +183,7 @@ static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte
}
// We asked for an address but didn't get what we wanted, all paths below here should fail.
- int result = munmap(actual_ptr, byte_count);
+ int result = TargetMUnmap(actual_ptr, byte_count);
if (result == -1) {
PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
}
@@ -207,18 +212,18 @@ static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte
}
#if USE_ART_LOW_4G_ALLOCATOR
-static inline void* TryMemMapLow4GB(void* ptr,
+void* MemMap::TryMemMapLow4GB(void* ptr,
size_t page_aligned_byte_count,
int prot,
int flags,
int fd,
off_t offset) {
- void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
+ void* actual = TargetMMap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
if (actual != MAP_FAILED) {
// Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
// 4GB. If this is the case, unmap and retry.
if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
- munmap(actual, page_aligned_byte_count);
+ TargetMUnmap(actual, page_aligned_byte_count);
actual = MAP_FAILED;
}
}
@@ -237,7 +242,7 @@ MemMap* MemMap::MapAnonymous(const char* name,
#ifndef __LP64__
UNUSED(low_4gb);
#endif
- use_ashmem = use_ashmem && !kIsTargetLinux;
+ use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
if (byte_count == 0) {
return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
}
@@ -460,7 +465,7 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
(expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
size_t redzone_size = 0;
- if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
+ if (kRunningOnMemoryTool && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
redzone_size = kPageSize;
page_aligned_byte_count += redzone_size;
}
@@ -521,7 +526,7 @@ MemMap::~MemMap() {
if (!reuse_) {
MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
if (!already_unmapped_) {
- int result = munmap(base_begin_, base_size_);
+ int result = TargetMUnmap(base_begin_, base_size_);
if (result == -1) {
PLOG(FATAL) << "munmap failed";
}
@@ -565,7 +570,7 @@ MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_
MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
std::string* error_msg, bool use_ashmem) {
- use_ashmem = use_ashmem && !kIsTargetLinux;
+ use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
@@ -607,7 +612,7 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
// Unmap/map the tail region.
- int result = munmap(tail_base_begin, tail_base_size);
+ int result = TargetMUnmap(tail_base_begin, tail_base_size);
if (result == -1) {
PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
*error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
@@ -618,12 +623,12 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
// calls. Otherwise, libc (or something else) might take this memory
// region. Note this isn't perfect as there's no way to prevent
// other threads to try to take this memory region here.
- uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin,
- tail_base_size,
- tail_prot,
- flags,
- fd.get(),
- 0));
+ uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
+ tail_base_size,
+ tail_prot,
+ flags,
+ fd.get(),
+ 0));
if (actual == MAP_FAILED) {
PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
*error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
@@ -647,19 +652,11 @@ void MemMap::MadviseDontNeedAndZero() {
}
bool MemMap::Sync() {
- bool result;
- if (redzone_size_ != 0) {
- // To avoid valgrind errors, temporarily lift the lower-end noaccess protection before passing
- // it to msync() as it only accepts page-aligned base address, and exclude the higher-end
- // noaccess protection from the msync range. b/27552451.
- uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
- MEMORY_TOOL_MAKE_DEFINED(base_begin, begin_ - base_begin);
- result = msync(BaseBegin(), End() - base_begin, MS_SYNC) == 0;
- MEMORY_TOOL_MAKE_NOACCESS(base_begin, begin_ - base_begin);
- } else {
- result = msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
- }
- return result;
+  // Historical note: To avoid Valgrind errors, we used to temporarily lift the lower-end
+  // noaccess protection before passing the base address to msync() when `redzone_size_` was
+  // non-zero, since msync() only accepts a page-aligned base address, and we excluded the
+  // higher-end noaccess protection from the msync range. b/27552451.
+ return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
}
bool MemMap::Protect(int prot) {
@@ -798,6 +795,8 @@ void MemMap::Init() {
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
DCHECK(gMaps == nullptr);
gMaps = new Maps;
+
+ TargetMMapInit();
}
void MemMap::Shutdown() {
@@ -829,8 +828,10 @@ void MemMap::SetSize(size_t new_size) {
reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
new_base_size),
base_size_ - new_base_size);
- CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
- base_size_ - new_base_size), 0) << new_base_size << " " << base_size_;
+ CHECK_EQ(TargetMUnmap(reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
+ base_size_ - new_base_size), 0)
+ << new_base_size << " " << base_size_;
base_size_ = new_base_size;
size_ = new_size;
}
@@ -976,7 +977,7 @@ void* MemMap::MapInternal(void* addr,
if (orig_prot != prot_non_exec) {
if (mprotect(actual, length, orig_prot) != 0) {
PLOG(ERROR) << "Could not protect to requested prot: " << orig_prot;
- munmap(actual, length);
+ TargetMUnmap(actual, length);
errno = ENOMEM;
return MAP_FAILED;
}
@@ -984,14 +985,14 @@ void* MemMap::MapInternal(void* addr,
return actual;
}
- actual = mmap(addr, length, prot, flags, fd, offset);
+ actual = TargetMMap(addr, length, prot, flags, fd, offset);
#else
#if defined(__LP64__)
if (low_4gb && addr == nullptr) {
flags |= MAP_32BIT;
}
#endif
- actual = mmap(addr, length, prot, flags, fd, offset);
+ actual = TargetMMap(addr, length, prot, flags, fd, offset);
#endif
return actual;
}
@@ -1067,13 +1068,13 @@ void MemMap::AlignBy(size_t size) {
// Unmap the unaligned parts.
if (base_begin < aligned_base_begin) {
MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
- CHECK_EQ(munmap(base_begin, aligned_base_begin - base_begin), 0)
+ CHECK_EQ(TargetMUnmap(base_begin, aligned_base_begin - base_begin), 0)
<< "base_begin=" << reinterpret_cast<void*>(base_begin)
<< " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
}
if (aligned_base_end < base_end) {
MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
- CHECK_EQ(munmap(aligned_base_end, base_end - aligned_base_end), 0)
+ CHECK_EQ(TargetMUnmap(aligned_base_end, base_end - aligned_base_end), 0)
<< "base_end=" << reinterpret_cast<void*>(base_end)
<< " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
}
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 3a324b2dc5..1979357714 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -29,10 +29,11 @@
namespace art {
-#if defined(__LP64__) && (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
+#if defined(__LP64__) && !defined(__Fuchsia__) && \
+ (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
-#if defined(__LP64__) && !defined(__x86_64__)
+#if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
#error "Unrecognized 64-bit architecture."
#endif
#define USE_ART_LOW_4G_ALLOCATOR 0
@@ -264,6 +265,12 @@ class MemMap {
off_t offset)
REQUIRES(!MemMap::mem_maps_lock_);
+  // Member function rather than a free function so that it can call TargetMUnmap().
+ static bool CheckMapRequest(uint8_t* expected_ptr,
+ void* actual_ptr,
+ size_t byte_count,
+ std::string* error_msg);
+
const std::string name_;
uint8_t* begin_; // Start of data. May be changed by AlignBy.
size_t size_; // Length of data.
@@ -284,8 +291,19 @@ class MemMap {
#if USE_ART_LOW_4G_ALLOCATOR
static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent.
+
+ static void* TryMemMapLow4GB(void* ptr,
+ size_t page_aligned_byte_count,
+ int prot,
+ int flags,
+ int fd,
+ off_t offset);
#endif
+ static void TargetMMapInit();
+ static void* TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off);
+ static int TargetMUnmap(void* start, size_t len);
+
static std::mutex* mem_maps_lock_;
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
diff --git a/libartbase/base/mem_map_fuchsia.cc b/libartbase/base/mem_map_fuchsia.cc
new file mode 100644
index 0000000000..db31efb1c0
--- /dev/null
+++ b/libartbase/base/mem_map_fuchsia.cc
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mem_map.h"
+#include <sys/mman.h>
+#include "logging.h"
+
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+
+namespace art {
+
+static zx_handle_t fuchsia_lowmem_vmar = ZX_HANDLE_INVALID;
+static zx_vaddr_t fuchsia_lowmem_base = 0;
+static size_t fuchsia_lowmem_size = 0;
+
+static const char map_name[] = "mmap-android";
+static constexpr uintptr_t FUCHSIA_LOWER_MEM_START = 0x80000000;
+static constexpr uintptr_t FUCHSIA_LOWER_MEM_SIZE = 0x60000000;
+
+void MemMap::TargetMMapInit() {
+ if (fuchsia_lowmem_vmar != ZX_HANDLE_INVALID) {
+ return;
+ }
+
+ zx_info_vmar_t vmarinfo;
+ CHECK_EQ(zx_object_get_info(zx_vmar_root_self(),
+ ZX_INFO_VMAR,
+ &vmarinfo,
+ sizeof(vmarinfo),
+ NULL,
+ NULL), ZX_OK) << "could not find info from root vmar";
+
+ uintptr_t lower_mem_start = FUCHSIA_LOWER_MEM_START - vmarinfo.base;
+ fuchsia_lowmem_size = FUCHSIA_LOWER_MEM_SIZE;
+ uint32_t allocflags = ZX_VM_FLAG_CAN_MAP_READ |
+ ZX_VM_FLAG_CAN_MAP_WRITE |
+ ZX_VM_FLAG_CAN_MAP_EXECUTE |
+ ZX_VM_FLAG_SPECIFIC;
+ CHECK_EQ(zx_vmar_allocate(zx_vmar_root_self(),
+ lower_mem_start,
+ fuchsia_lowmem_size,
+ allocflags,
+ &fuchsia_lowmem_vmar,
+ &fuchsia_lowmem_base), ZX_OK) << "could not allocate lowmem vmar";
+}
+
+void* MemMap::TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off) {
+ zx_status_t status;
+ uintptr_t mem = 0;
+
+ bool mmap_lower = (flags & MAP_32BIT) != 0;
+
+  // For file-based mappings, use the system library.
+ if ((flags & MAP_ANONYMOUS) == 0) {
+ if (start != nullptr) {
+ flags |= MAP_FIXED;
+ }
+ CHECK(!mmap_lower) << "cannot map files into low memory for Fuchsia";
+ return mmap(start, len, prot, flags, fd, fd_off);
+ }
+
+ uint32_t vmarflags = 0;
+ if ((prot & PROT_READ) != 0) {
+ vmarflags |= ZX_VM_FLAG_PERM_READ;
+ }
+ if ((prot & PROT_WRITE) != 0) {
+ vmarflags |= ZX_VM_FLAG_PERM_WRITE;
+ }
+ if ((prot & PROT_EXEC) != 0) {
+ vmarflags |= ZX_VM_FLAG_PERM_EXECUTE;
+ }
+
+ if (len == 0) {
+ errno = EINVAL;
+ return MAP_FAILED;
+ }
+
+ zx_info_vmar_t vmarinfo;
+ size_t vmaroffset = 0;
+ if (start != nullptr) {
+ vmarflags |= ZX_VM_FLAG_SPECIFIC;
+ status = zx_object_get_info((mmap_lower ? fuchsia_lowmem_vmar : zx_vmar_root_self()),
+ ZX_INFO_VMAR,
+ &vmarinfo,
+ sizeof(vmarinfo),
+ NULL,
+ NULL);
+ if (status < 0 || reinterpret_cast<uintptr_t>(start) < vmarinfo.base) {
+ errno = EINVAL;
+ return MAP_FAILED;
+ }
+ vmaroffset = reinterpret_cast<uintptr_t>(start) - vmarinfo.base;
+ }
+
+ zx_handle_t vmo;
+ if (zx_vmo_create(len, 0, &vmo) < 0) {
+ errno = ENOMEM;
+ return MAP_FAILED;
+ }
+ zx_vmo_get_size(vmo, &len);
+ zx_object_set_property(vmo, ZX_PROP_NAME, map_name, strlen(map_name));
+
+ if (mmap_lower) {
+ status = zx_vmar_map(fuchsia_lowmem_vmar, vmaroffset, vmo, fd_off, len, vmarflags, &mem);
+ } else {
+ status = zx_vmar_map(zx_vmar_root_self(), vmaroffset, vmo, fd_off, len, vmarflags, &mem);
+ }
+ zx_handle_close(vmo);
+ if (status != ZX_OK) {
+ return MAP_FAILED;
+ }
+
+ return reinterpret_cast<void *>(mem);
+}
+
+int MemMap::TargetMUnmap(void* start, size_t len) {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(start);
+ zx_handle_t alloc_vmar = zx_vmar_root_self();
+ if (addr >= fuchsia_lowmem_base && addr < fuchsia_lowmem_base + fuchsia_lowmem_size) {
+ alloc_vmar = fuchsia_lowmem_vmar;
+ }
+ zx_status_t status = zx_vmar_unmap(alloc_vmar, addr, len);
+ if (status < 0) {
+ errno = EINVAL;
+ return -1;
+ }
+ return 0;
+}
+
+} // namespace art
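
For orientation, this is how an anonymous low-memory request reaches the code above:
USE_ART_LOW_4G_ALLOCATOR is disabled for __Fuchsia__ in mem_map.h, so MapInternal() takes the
MAP_32BIT path for low_4gb requests, and TargetMMap() then places the anonymous VMO inside
fuchsia_lowmem_vmar, the 1.5 GiB VMAR reserved at FUCHSIA_LOWER_MEM_START by TargetMMapInit().
A caller-side sketch (argument order copied from the MapAnonymous call in mem_map_test.cc; the
argument comments and the helper itself are illustrative assumptions, not part of the patch):

#include <memory>
#include <string>
#include <sys/mman.h>

#include "base/mem_map.h"  // Include path is illustrative.

std::unique_ptr<art::MemMap> MapLowAnonymousSketch(size_t byte_count) {
  std::string error_msg;
  // On Fuchsia this ends up in TargetMMap() with MAP_ANONYMOUS | MAP_32BIT set,
  // i.e. in the fuchsia_lowmem_vmar branch above.
  return std::unique_ptr<art::MemMap>(
      art::MemMap::MapAnonymous("low-4gb-sketch",
                                /* addr */ nullptr,
                                byte_count,
                                PROT_READ | PROT_WRITE,
                                /* low_4gb */ true,
                                /* reuse */ false,
                                &error_msg));
}
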
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index d956126df1..c575c7a31f 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -471,31 +471,32 @@ TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
// cannot allocate in the 2GB-4GB region.
TEST_DISABLED_FOR_MIPS();
+ // This test does not work under AddressSanitizer.
+ // Historical note: This test did not work under Valgrind either.
+ TEST_DISABLED_FOR_MEMORY_TOOL();
+
CommonInit();
- // This test may not work under valgrind.
- if (RUNNING_ON_MEMORY_TOOL == 0) {
- constexpr size_t size = 0x100000;
- // Try all addresses starting from 2GB to 4GB.
- size_t start_addr = 2 * GB;
- std::string error_msg;
- std::unique_ptr<MemMap> map;
- for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
- map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
- reinterpret_cast<uint8_t*>(start_addr),
- size,
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- false,
- &error_msg));
- if (map != nullptr) {
- break;
- }
+ constexpr size_t size = 0x100000;
+ // Try all addresses starting from 2GB to 4GB.
+ size_t start_addr = 2 * GB;
+ std::string error_msg;
+ std::unique_ptr<MemMap> map;
+ for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
+ map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
+ reinterpret_cast<uint8_t*>(start_addr),
+ size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ false,
+ &error_msg));
+ if (map != nullptr) {
+ break;
}
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
- ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
- ASSERT_TRUE(error_msg.empty());
- ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
}
+ ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
}
TEST_F(MemMapTest, MapAnonymousOverflow) {
diff --git a/libartbase/base/mem_map_unix.cc b/libartbase/base/mem_map_unix.cc
new file mode 100644
index 0000000000..601b049525
--- /dev/null
+++ b/libartbase/base/mem_map_unix.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mem_map.h"
+
+#include <sys/mman.h>
+
+namespace art {
+
+void MemMap::TargetMMapInit() {
+  // No-op for Unix.
+}
+
+void* MemMap::TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off) {
+ return mmap(start, len, prot, flags, fd, fd_off);
+}
+
+int MemMap::TargetMUnmap(void* start, size_t len) {
+ return munmap(start, len);
+}
+
+} // namespace art
diff --git a/libartbase/base/memory_tool.h b/libartbase/base/memory_tool.h
index e1df99fed4..d381f010f5 100644
--- a/libartbase/base/memory_tool.h
+++ b/libartbase/base/memory_tool.h
@@ -19,53 +19,53 @@
#include <stddef.h>
+namespace art {
+
#if !defined(__has_feature)
-#define __has_feature(x) 0
+# define __has_feature(x) 0
#endif
#if __has_feature(address_sanitizer)
-#include <sanitizer/asan_interface.h>
-#define ADDRESS_SANITIZER
+# include <sanitizer/asan_interface.h>
+# define ADDRESS_SANITIZER
-#ifdef ART_ENABLE_ADDRESS_SANITIZER
-#define MEMORY_TOOL_MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s)
-#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) __asan_unpoison_memory_region(p, s)
-#define MEMORY_TOOL_MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s)
+# ifdef ART_ENABLE_ADDRESS_SANITIZER
+# define MEMORY_TOOL_MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s)
+# define MEMORY_TOOL_MAKE_UNDEFINED(p, s) __asan_unpoison_memory_region(p, s)
+# define MEMORY_TOOL_MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s)
constexpr bool kMemoryToolIsAvailable = true;
-#else
-#define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
-#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0)
-#define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+# else
+# define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
+# define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+# define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
constexpr bool kMemoryToolIsAvailable = false;
-#endif
+# endif
extern "C" void __asan_handle_no_return();
-#define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
-#define MEMORY_TOOL_HANDLE_NO_RETURN __asan_handle_no_return()
-#define RUNNING_ON_MEMORY_TOOL 1U
-constexpr bool kMemoryToolIsValgrind = false;
+# define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+# define MEMORY_TOOL_HANDLE_NO_RETURN __asan_handle_no_return()
+constexpr bool kRunningOnMemoryTool = true;
constexpr bool kMemoryToolDetectsLeaks = true;
constexpr bool kMemoryToolAddsRedzones = true;
constexpr size_t kMemoryToolStackGuardSizeScale = 2;
#else
-#include <memcheck/memcheck.h>
-#include <valgrind.h>
-#define MEMORY_TOOL_MAKE_NOACCESS(p, s) VALGRIND_MAKE_MEM_NOACCESS(p, s)
-#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) VALGRIND_MAKE_MEM_UNDEFINED(p, s)
-#define MEMORY_TOOL_MAKE_DEFINED(p, s) VALGRIND_MAKE_MEM_DEFINED(p, s)
-#define ATTRIBUTE_NO_SANITIZE_ADDRESS
-#define MEMORY_TOOL_HANDLE_NO_RETURN do { } while (0)
-#define RUNNING_ON_MEMORY_TOOL RUNNING_ON_VALGRIND
-constexpr bool kMemoryToolIsAvailable = true;
-constexpr bool kMemoryToolIsValgrind = true;
-constexpr bool kMemoryToolDetectsLeaks = true;
-constexpr bool kMemoryToolAddsRedzones = true;
+# define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
+# define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+# define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+# define ATTRIBUTE_NO_SANITIZE_ADDRESS
+# define MEMORY_TOOL_HANDLE_NO_RETURN do { } while (0)
+constexpr bool kRunningOnMemoryTool = false;
+constexpr bool kMemoryToolIsAvailable = false;
+constexpr bool kMemoryToolDetectsLeaks = false;
+constexpr bool kMemoryToolAddsRedzones = false;
constexpr size_t kMemoryToolStackGuardSizeScale = 1;
#endif
+} // namespace art
+
#endif // ART_LIBARTBASE_BASE_MEMORY_TOOL_H_
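
With the Valgrind branch gone, memory_tool.h now exposes the compile-time constant
kRunningOnMemoryTool instead of the runtime RUNNING_ON_MEMORY_TOOL macro. A small sketch of the
guard pattern the rest of this change converts to (mirroring the malloc_arena_pool.cc hunks
above; the function itself is illustrative):

#include <stddef.h>
#include <stdint.h>

#include "base/memory_tool.h"  // Include path is illustrative.

void PoisonRedzoneSketch(uint8_t* redzone, size_t size) {
  // Compile-time constant: the branch folds away when ASan is not built in, and
  // MEMORY_TOOL_MAKE_NOACCESS is a no-op macro in that configuration anyway.
  if (art::kRunningOnMemoryTool) {
    MEMORY_TOOL_MAKE_NOACCESS(redzone, size);
  }
}
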
diff --git a/libartbase/base/scoped_arena_containers.h b/libartbase/base/scoped_arena_containers.h
index 44d7ebbc96..80144d2c09 100644
--- a/libartbase/base/scoped_arena_containers.h
+++ b/libartbase/base/scoped_arena_containers.h
@@ -66,15 +66,15 @@ using ScopedArenaSafeMap =
template <typename T,
typename EmptyFn = DefaultEmptyFn<T>,
- typename HashFn = std::hash<T>,
- typename Pred = std::equal_to<T>>
+ typename HashFn = DefaultHashFn<T>,
+ typename Pred = DefaultPred<T>>
using ScopedArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ScopedArenaAllocatorAdapter<T>>;
template <typename Key,
typename Value,
typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>,
- typename HashFn = std::hash<Key>,
- typename Pred = std::equal_to<Key>>
+ typename HashFn = DefaultHashFn<Key>,
+ typename Pred = DefaultPred<Key>>
using ScopedArenaHashMap = HashMap<Key,
Value,
EmptyFn,
@@ -236,7 +236,7 @@ class ArenaDelete {
protected:
// Used for variable sized objects such as RegisterLine.
ALWAYS_INLINE void ProtectMemory(T* ptr, size_t size) const {
- if (RUNNING_ON_MEMORY_TOOL > 0) {
+ if (kRunningOnMemoryTool) {
      // Writing to the memory will fail if we already destroyed the pointer with
// DestroyOnlyDelete since we make it no access.
memset(ptr, kMagicFill, size);
diff --git a/libartbase/base/stats.h b/libartbase/base/stats.h
new file mode 100644
index 0000000000..4dcbfe81c6
--- /dev/null
+++ b/libartbase/base/stats.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_STATS_H_
+#define ART_LIBARTBASE_BASE_STATS_H_
+
+#include <unordered_map>
+
+#include "globals.h"
+
+namespace art {
+
+// Simple structure to record tree of statistical values.
+class Stats {
+ public:
+ double Value() const { return value_; }
+ size_t Count() const { return count_; }
+ Stats* Child(const char* name) { return &children_[name]; }
+ const std::unordered_map<const char*, Stats>& Children() const { return children_; }
+
+ void AddBytes(double bytes, size_t count = 1) { Add(bytes, count); }
+ void AddBits(double bits, size_t count = 1) { Add(bits / kBitsPerByte, count); }
+ void AddSeconds(double s, size_t count = 1) { Add(s, count); }
+ void AddNanoSeconds(double ns, size_t count = 1) { Add(ns / 1000000000.0, count); }
+
+ double SumChildrenValues() const {
+ double sum = 0.0;
+ for (auto it : children_) {
+ sum += it.second.Value();
+ }
+ return sum;
+ }
+
+ private:
+ void Add(double value, size_t count = 1) {
+ value_ += value;
+ count_ += count;
+ }
+
+  double value_ = 0.0;  // Cumulative sum of the collected statistic in basic units.
+ size_t count_ = 0; // The number of samples for this node.
+ std::unordered_map<const char*, Stats> children_;
+};
+
+} // namespace art
+
+#endif // ART_LIBARTBASE_BASE_STATS_H_
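
A short usage sketch of the new Stats tree (illustrative only; the function and node names are
made up): Child() lazily creates a named sub-node, the Add*() helpers accumulate a value plus a
sample count, and SumChildrenValues() aggregates the direct children. Note that the children map
is keyed by the const char* pointer itself, so callers are expected to pass string literals.

#include <stddef.h>

#include "base/stats.h"  // Include path is illustrative.

void StatsUsageSketch() {
  art::Stats stats;
  art::Stats* code = stats.Child("code");                   // Lazily created sub-node.
  code->AddBytes(1024.0);                                   // value_ += 1024, count_ += 1.
  stats.Child("stack_maps")->AddBits(96.0, /* count */ 3);  // 96 bits -> 12 bytes, 3 samples.
  double child_total = stats.SumChildrenValues();           // 1024.0 + 12.0 = 1036.0.
  size_t code_samples = code->Count();                      // 1.
  (void)child_total;
  (void)code_samples;
}
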
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index 73c1c226f9..6e3b78e12c 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -244,20 +244,6 @@ static inline void CheckedCall(const Func& function, const char* what, Args... a
}
}
-// Hash bytes using a relatively fast hash.
-static inline size_t HashBytes(const uint8_t* data, size_t len) {
- size_t hash = 0x811c9dc5;
- for (uint32_t i = 0; i < len; ++i) {
- hash = (hash * 16777619) ^ data[i];
- }
- hash += hash << 13;
- hash ^= hash >> 7;
- hash += hash << 3;
- hash ^= hash >> 17;
- hash += hash << 5;
- return hash;
-}
-
} // namespace art
#endif // ART_LIBARTBASE_BASE_UTILS_H_
diff --git a/libartbase/base/variant_map_test.cc b/libartbase/base/variant_map_test.cc
index 4677b6d3b3..f2da3389b1 100644
--- a/libartbase/base/variant_map_test.cc
+++ b/libartbase/base/variant_map_test.cc
@@ -108,7 +108,7 @@ TEST(VariantMaps, RuleOfFive) {
EXPECT_EQ(size_t(2), fmFilled.Size());
// Test copy constructor
- FruitMap fmEmptyCopy(fmEmpty);
+ FruitMap fmEmptyCopy(fmEmpty); // NOLINT
EXPECT_EQ(size_t(0), fmEmptyCopy.Size());
// Test copy constructor