Diffstat (limited to 'runtime/base')
-rw-r--r--  runtime/base/arena_allocator-inl.h      | 34
-rw-r--r--  runtime/base/arena_allocator.cc         | 25
-rw-r--r--  runtime/base/arena_allocator.h          |  2
-rw-r--r--  runtime/base/arena_allocator_test.cc    | 51
-rw-r--r--  runtime/base/casts.h                    |  4
-rw-r--r--  runtime/base/dumpable-inl.h             |  2
-rw-r--r--  runtime/base/histogram-inl.h            |  2
-rw-r--r--  runtime/base/logging.cc                 |  4
-rw-r--r--  runtime/base/mutex-inl.h                | 33
-rw-r--r--  runtime/base/mutex.h                    | 38
-rw-r--r--  runtime/base/mutex_test.cc              |  4
-rw-r--r--  runtime/base/safe_copy_test.cc          | 60
-rw-r--r--  runtime/base/scoped_arena_allocator.cc  |  4
-rw-r--r--  runtime/base/timing_logger.cc           |  4
-rw-r--r--  runtime/base/unix_file/fd_file.cc       |  1
-rw-r--r--  runtime/base/unix_file/fd_file_test.cc  | 14
16 files changed, 190 insertions, 92 deletions
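
The headline change below replaces Arena::kDefaultSize with arena_allocator::kArenaDefaultSize, defined in the new arena_allocator-inl.h: when allocation counting is compiled in, the default arena shrinks from 128 KB to 32 bytes and, per the FreeArenaChain() change, arenas are no longer pooled for reuse, presumably so that individual allocations stay visible to the per-arena accounting. A minimal standalone sketch of the sizing rule follows (not ART code; KB and the plain constexpr flag are simplifications of ART's build-time configuration):

// Standalone sketch (not ART code) of the arena sizing rule in this change.
// Assumptions: KB is 1024 and the build-time counting switch is reduced to a
// plain constexpr flag.
#include <algorithm>
#include <cstddef>

constexpr size_t KB = 1024;
constexpr bool kArenaAllocatorCountAllocations = false;  // flipped on for allocation investigations

constexpr bool kArenaAllocatorPreciseTracking = kArenaAllocatorCountAllocations;

// 32-byte arenas under precise tracking, the usual 128 KB chunks otherwise.
constexpr size_t kArenaDefaultSize = kArenaAllocatorPreciseTracking ? 32 : 128 * KB;

// Mirrors ArenaAllocator::AllocFromNewArena() in the diff below: a new arena is
// never smaller than the default, but grows to fit an oversized request.
size_t NewArenaSize(size_t requested_bytes) {
  return std::max(kArenaDefaultSize, requested_bytes);
}
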
diff --git a/runtime/base/arena_allocator-inl.h b/runtime/base/arena_allocator-inl.h
new file mode 100644
index 0000000000..0e4383741e
--- /dev/null
+++ b/runtime/base/arena_allocator-inl.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_ARENA_ALLOCATOR_INL_H_
+#define ART_RUNTIME_BASE_ARENA_ALLOCATOR_INL_H_
+
+#include "arena_allocator.h"
+
+namespace art {
+namespace arena_allocator {
+
+static constexpr bool kArenaAllocatorPreciseTracking = kArenaAllocatorCountAllocations;
+
+static constexpr size_t kArenaDefaultSize = kArenaAllocatorPreciseTracking
+                                                ? 32
+                                                : 128 * KB;
+
+}  // namespace arena_allocator
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_ARENA_ALLOCATOR_INL_H_
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index b455441f79..54b40f28cf 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -14,25 +14,28 @@
  * limitations under the License.
  */
 
+#include "arena_allocator-inl.h"
+
+#include <sys/mman.h>
+
 #include <algorithm>
 #include <cstddef>
 #include <iomanip>
 #include <numeric>
 
-#include "arena_allocator.h"
 #include "logging.h"
 #include "mem_map.h"
 #include "mutex.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
 #include "systrace.h"
 
 namespace art {
 
 constexpr size_t kMemoryToolRedZoneBytes = 8;
-constexpr size_t Arena::kDefaultSize;
 
 template <bool kCount>
 const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
+  // Every name should have the same width and end with a space. Abbreviate if necessary:
   "Misc         ",
   "SwitchTbl    ",
   "SlowPaths    ",
@@ -49,6 +52,7 @@ const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
   "Successors   ",
   "Dominated    ",
   "Instruction  ",
+  "CtorFenceIns ",
   "InvokeInputs ",
   "PhiInputs    ",
   "LoopInfo     ",
@@ -180,7 +184,7 @@ Arena::Arena() : bytes_allocated_(0), memory_(nullptr), size_(0), next_(nullptr)
 class MallocArena FINAL : public Arena {
  public:
-  explicit MallocArena(size_t size = Arena::kDefaultSize);
+  explicit MallocArena(size_t size = arena_allocator::kArenaDefaultSize);
   virtual ~MallocArena();
  private:
   static constexpr size_t RequiredOverallocation() {
@@ -343,6 +347,17 @@ void ArenaPool::FreeArenaChain(Arena* first) {
       MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
     }
   }
+
+  if (arena_allocator::kArenaAllocatorPreciseTracking) {
+    // Do not reuse arenas when tracking.
+    while (first != nullptr) {
+      Arena* next = first->next_;
+      delete first;
+      first = next;
+    }
+    return;
+  }
+
   if (first != nullptr) {
     Arena* last = first;
     while (last->next_ != nullptr) {
@@ -436,7 +451,7 @@ ArenaAllocator::~ArenaAllocator() {
 }
 
 uint8_t* ArenaAllocator::AllocFromNewArena(size_t bytes) {
-  Arena* new_arena = pool_->AllocArena(std::max(Arena::kDefaultSize, bytes));
+  Arena* new_arena = pool_->AllocArena(std::max(arena_allocator::kArenaDefaultSize, bytes));
   DCHECK(new_arena != nullptr);
   DCHECK_LE(bytes, new_arena->Size());
   if (static_cast<size_t>(end_ - ptr_) > new_arena->Size() - bytes) {
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 2a4777fecc..ebde82db55 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -59,6 +59,7 @@ enum ArenaAllocKind {
   kArenaAllocSuccessors,
   kArenaAllocDominated,
   kArenaAllocInstruction,
+  kArenaAllocConstructorFenceInputs,
   kArenaAllocInvokeInputs,
   kArenaAllocPhiInputs,
   kArenaAllocLoopInfo,
@@ -195,7 +196,6 @@ class ArenaAllocatorMemoryTool : private ArenaAllocatorMemoryToolCheck {
 
 class Arena {
  public:
-  static constexpr size_t kDefaultSize = 128 * KB;
   Arena();
   virtual ~Arena() { }
   // Reset is for pre-use and uses memset for performance.
diff --git a/runtime/base/arena_allocator_test.cc b/runtime/base/arena_allocator_test.cc
index fd48a3fd78..e2c2e2fc6d 100644
--- a/runtime/base/arena_allocator_test.cc
+++ b/runtime/base/arena_allocator_test.cc
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "base/arena_allocator.h"
+#include "base/arena_allocator-inl.h"
 #include "base/arena_bit_vector.h"
 #include "base/memory_tool.h"
 #include "gtest/gtest.h"
@@ -65,23 +65,28 @@ TEST_F(ArenaAllocatorTest, MakeDefined) {
 }
 
 TEST_F(ArenaAllocatorTest, LargeAllocations) {
+  if (arena_allocator::kArenaAllocatorPreciseTracking) {
+    printf("WARNING: TEST DISABLED FOR precise arena tracking\n");
+    return;
+  }
+
   {
     ArenaPool pool;
     ArenaAllocator arena(&pool);
     // Note: Leaving some space for memory tool red zones.
-    void* alloc1 = arena.Alloc(Arena::kDefaultSize * 5 / 8);
-    void* alloc2 = arena.Alloc(Arena::kDefaultSize * 2 / 8);
+    void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 8);
+    void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 2 / 8);
     ASSERT_NE(alloc1, alloc2);
     ASSERT_EQ(1u, NumberOfArenas(&arena));
   }
   {
     ArenaPool pool;
     ArenaAllocator arena(&pool);
-    void* alloc1 = arena.Alloc(Arena::kDefaultSize * 13 / 16);
-    void* alloc2 = arena.Alloc(Arena::kDefaultSize * 11 / 16);
+    void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
+    void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 11 / 16);
     ASSERT_NE(alloc1, alloc2);
     ASSERT_EQ(2u, NumberOfArenas(&arena));
-    void* alloc3 = arena.Alloc(Arena::kDefaultSize * 7 / 16);
+    void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 7 / 16);
     ASSERT_NE(alloc1, alloc3);
     ASSERT_NE(alloc2, alloc3);
     ASSERT_EQ(3u, NumberOfArenas(&arena));
@@ -89,12 +94,12 @@ TEST_F(ArenaAllocatorTest, LargeAllocations) {
   {
     ArenaPool pool;
     ArenaAllocator arena(&pool);
-    void* alloc1 = arena.Alloc(Arena::kDefaultSize * 13 / 16);
-    void* alloc2 = arena.Alloc(Arena::kDefaultSize * 9 / 16);
+    void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
+    void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
     ASSERT_NE(alloc1, alloc2);
     ASSERT_EQ(2u, NumberOfArenas(&arena));
     // Note: Leaving some space for memory tool red zones.
-    void* alloc3 = arena.Alloc(Arena::kDefaultSize * 5 / 16);
+    void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
     ASSERT_NE(alloc1, alloc3);
     ASSERT_NE(alloc2, alloc3);
     ASSERT_EQ(2u, NumberOfArenas(&arena));
@@ -102,12 +107,12 @@ TEST_F(ArenaAllocatorTest, LargeAllocations) {
   {
     ArenaPool pool;
     ArenaAllocator arena(&pool);
-    void* alloc1 = arena.Alloc(Arena::kDefaultSize * 9 / 16);
-    void* alloc2 = arena.Alloc(Arena::kDefaultSize * 13 / 16);
+    void* alloc1 = arena.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
+    void* alloc2 = arena.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
     ASSERT_NE(alloc1, alloc2);
     ASSERT_EQ(2u, NumberOfArenas(&arena));
     // Note: Leaving some space for memory tool red zones.
-    void* alloc3 = arena.Alloc(Arena::kDefaultSize * 5 / 16);
+    void* alloc3 = arena.Alloc(arena_allocator::kArenaDefaultSize * 5 / 16);
     ASSERT_NE(alloc1, alloc3);
     ASSERT_NE(alloc2, alloc3);
     ASSERT_EQ(2u, NumberOfArenas(&arena));
@@ -117,9 +122,9 @@ TEST_F(ArenaAllocatorTest, LargeAllocations) {
     ArenaAllocator arena(&pool);
     // Note: Leaving some space for memory tool red zones.
     for (size_t i = 0; i != 15; ++i) {
-      arena.Alloc(Arena::kDefaultSize * 1 / 16);  // Allocate 15 times from the same arena.
+      arena.Alloc(arena_allocator::kArenaDefaultSize * 1 / 16);  // Allocate 15 times from the same arena.
       ASSERT_EQ(i + 1u, NumberOfArenas(&arena));
-      arena.Alloc(Arena::kDefaultSize * 17 / 16);  // Allocate a separate arena.
+      arena.Alloc(arena_allocator::kArenaDefaultSize * 17 / 16);  // Allocate a separate arena.
       ASSERT_EQ(i + 2u, NumberOfArenas(&arena));
     }
   }
@@ -204,10 +209,11 @@ TEST_F(ArenaAllocatorTest, ReallocReuse) {
     ArenaPool pool;
     ArenaAllocator arena(&pool);
 
-    const size_t original_size = Arena::kDefaultSize - ArenaAllocator::kAlignment * 5;
+    const size_t original_size = arena_allocator::kArenaDefaultSize -
+        ArenaAllocator::kAlignment * 5;
     void* original_allocation = arena.Alloc(original_size);
 
-    const size_t new_size = Arena::kDefaultSize + ArenaAllocator::kAlignment * 2;
+    const size_t new_size = arena_allocator::kArenaDefaultSize + ArenaAllocator::kAlignment * 2;
     void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
     EXPECT_NE(original_allocation, realloc_allocation);
   }
@@ -217,12 +223,12 @@ TEST_F(ArenaAllocatorTest, ReallocReuse) {
     ArenaPool pool;
     ArenaAllocator arena(&pool);
 
-    const size_t original_size = Arena::kDefaultSize -
+    const size_t original_size = arena_allocator::kArenaDefaultSize -
         ArenaAllocator::kAlignment * 4 -
         ArenaAllocator::kAlignment / 2;
     void* original_allocation = arena.Alloc(original_size);
 
-    const size_t new_size = Arena::kDefaultSize +
+    const size_t new_size = arena_allocator::kArenaDefaultSize +
         ArenaAllocator::kAlignment * 2 +
         ArenaAllocator::kAlignment / 2;
     void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
@@ -307,11 +313,12 @@ TEST_F(ArenaAllocatorTest, ReallocAlignment) {
     ArenaPool pool;
     ArenaAllocator arena(&pool);
 
-    const size_t original_size = Arena::kDefaultSize - ArenaAllocator::kAlignment * 5;
+    const size_t original_size = arena_allocator::kArenaDefaultSize -
+        ArenaAllocator::kAlignment * 5;
     void* original_allocation = arena.Alloc(original_size);
     ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
 
-    const size_t new_size = Arena::kDefaultSize + ArenaAllocator::kAlignment * 2;
+    const size_t new_size = arena_allocator::kArenaDefaultSize + ArenaAllocator::kAlignment * 2;
     void* realloc_allocation = arena.Realloc(original_allocation,
                                             original_size, new_size);
     EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
@@ -324,13 +331,13 @@ TEST_F(ArenaAllocatorTest, ReallocAlignment) {
     ArenaPool pool;
     ArenaAllocator arena(&pool);
 
-    const size_t original_size = Arena::kDefaultSize -
+    const size_t original_size = arena_allocator::kArenaDefaultSize -
         ArenaAllocator::kAlignment * 4 -
         ArenaAllocator::kAlignment / 2;
     void* original_allocation = arena.Alloc(original_size);
     ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
 
-    const size_t new_size = Arena::kDefaultSize +
+    const size_t new_size = arena_allocator::kArenaDefaultSize +
         ArenaAllocator::kAlignment * 2 +
         ArenaAllocator::kAlignment / 2;
     void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
diff --git a/runtime/base/casts.h b/runtime/base/casts.h
index 6b67864b08..c5b0af665b 100644
--- a/runtime/base/casts.h
+++ b/runtime/base/casts.h
@@ -98,7 +98,9 @@ inline Dest dchecked_integral_cast(const Source source) {
          // Check that the value is within the upper limit of Dest.
          (static_cast<uintmax_t>(std::numeric_limits<Dest>::max()) >=
               static_cast<uintmax_t>(std::numeric_limits<Source>::max()) ||
-          source <= static_cast<Source>(std::numeric_limits<Dest>::max())));
+          source <= static_cast<Source>(std::numeric_limits<Dest>::max())))
+      << "dchecked_integral_cast failed for " << source
+      << " (would be " << static_cast<Dest>(source) << ")";
   return static_cast<Dest>(source);
 }
diff --git a/runtime/base/dumpable-inl.h b/runtime/base/dumpable-inl.h
index 2cdf083f01..9d7fc39093 100644
--- a/runtime/base/dumpable-inl.h
+++ b/runtime/base/dumpable-inl.h
@@ -19,7 +19,7 @@
 
 #include "base/dumpable.h"
 #include "base/mutex.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
 
 namespace art {
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index b28eb729d8..be2092040d 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -198,7 +198,7 @@ inline void Histogram<Value>::PrintConfidenceIntervals(std::ostream &os, double
                        kFractionalDigits)
      << "-" << FormatDuration(Percentile(per_1, data) * kAdjust, unit, kFractionalDigits) << " "
      << "Avg: " << FormatDuration(Mean() * kAdjust, unit, kFractionalDigits) << " Max: "
-     << FormatDuration(Max() * kAdjust, unit, kFractionalDigits) << "\n";
+     << FormatDuration(Max() * kAdjust, unit, kFractionalDigits) << std::endl;
 }
 
 template <class Value>
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 55b4306427..adfd7d323c 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -21,7 +21,7 @@
 #include <sstream>
 
 #include "base/mutex.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
 #include "utils.h"
 
 // Headers for LogMessage::LogLine.
@@ -112,7 +112,7 @@ void LogHelper::LogLineLowStack(const char* file,
   if (priority == ANDROID_LOG_FATAL) {
     // Allocate buffer for snprintf(buf, buf_size, "%s:%u] %s", file, line, message) below.
     // If allocation fails, fall back to printing only the message.
-    buf_size = strlen(file) + 1 /* ':' */ + std::numeric_limits<typeof(line)>::max_digits10 +
+    buf_size = strlen(file) + 1 /* ':' */ + std::numeric_limits<decltype(line)>::max_digits10 +
         2 /* "] " */ + strlen(message) + 1 /* terminating 0 */;
     buf = reinterpret_cast<char*>(malloc(buf_size));
   }
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 08b370ec4e..0ac2399a5d 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -194,6 +194,16 @@ inline uint64_t Mutex::GetExclusiveOwnerTid() const {
   return exclusive_owner_;
 }
 
+inline void Mutex::AssertExclusiveHeld(const Thread* self) const {
+  if (kDebugLocking && (gAborting == 0)) {
+    CHECK(IsExclusiveHeld(self)) << *this;
+  }
+}
+
+inline void Mutex::AssertHeld(const Thread* self) const {
+  AssertExclusiveHeld(self);
+}
+
 inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
   DCHECK(self == nullptr || self == Thread::Current());
   bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
@@ -221,6 +231,16 @@ inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
 #endif
 }
 
+inline void ReaderWriterMutex::AssertExclusiveHeld(const Thread* self) const {
+  if (kDebugLocking && (gAborting == 0)) {
+    CHECK(IsExclusiveHeld(self)) << *this;
+  }
+}
+
+inline void ReaderWriterMutex::AssertWriterHeld(const Thread* self) const {
+  AssertExclusiveHeld(self);
+}
+
 inline void MutatorMutex::TransitionFromRunnableToSuspended(Thread* self) {
   AssertSharedHeld(self);
   RegisterAsUnlocked(self);
@@ -231,6 +251,19 @@ inline void MutatorMutex::TransitionFromSuspendedToRunnable(Thread* self) {
   AssertSharedHeld(self);
 }
 
+inline ReaderMutexLock::ReaderMutexLock(Thread* self, ReaderWriterMutex& mu)
+    : self_(self), mu_(mu) {
+  mu_.SharedLock(self_);
+}
+
+inline ReaderMutexLock::~ReaderMutexLock() {
+  mu_.SharedUnlock(self_);
+}
+
+// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
+// "ReaderMutexLock mu(lock)".
+#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_BASE_MUTEX_INL_H_
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 2414b5f937..e77d8d749d 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -244,15 +244,11 @@ class LOCKABLE Mutex : public BaseMutex {
   void Unlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }
 
   // Is the current thread the exclusive holder of the Mutex.
-  bool IsExclusiveHeld(const Thread* self) const;
+  ALWAYS_INLINE bool IsExclusiveHeld(const Thread* self) const;
 
   // Assert that the Mutex is exclusively held by the current thread.
-  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
-    if (kDebugLocking && (gAborting == 0)) {
-      CHECK(IsExclusiveHeld(self)) << *this;
-    }
-  }
-  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
+  ALWAYS_INLINE void AssertExclusiveHeld(const Thread* self) const ASSERT_CAPABILITY(this);
+  ALWAYS_INLINE void AssertHeld(const Thread* self) const ASSERT_CAPABILITY(this);
 
   // Assert that the Mutex is not held by the current thread.
   void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
@@ -349,15 +345,11 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
   void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }
 
   // Is the current thread the exclusive holder of the ReaderWriterMutex.
-  bool IsExclusiveHeld(const Thread* self) const;
+  ALWAYS_INLINE bool IsExclusiveHeld(const Thread* self) const;
 
   // Assert the current thread has exclusive access to the ReaderWriterMutex.
-  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
-    if (kDebugLocking && (gAborting == 0)) {
-      CHECK(IsExclusiveHeld(self)) << *this;
-    }
-  }
-  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
+  ALWAYS_INLINE void AssertExclusiveHeld(const Thread* self) const ASSERT_CAPABILITY(this);
+  ALWAYS_INLINE void AssertWriterHeld(const Thread* self) const ASSERT_CAPABILITY(this);
 
   // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
   void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
@@ -373,19 +365,19 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
   bool IsSharedHeld(const Thread* self) const;
 
   // Assert the current thread has shared access to the ReaderWriterMutex.
-  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
+  ALWAYS_INLINE void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
     if (kDebugLocking && (gAborting == 0)) {
       // TODO: we can only assert this well when self != null.
       CHECK(IsSharedHeld(self) || self == nullptr) << *this;
     }
   }
-  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
+  ALWAYS_INLINE void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
     AssertSharedHeld(self);
   }
 
   // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
   // mode.
-  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
+  ALWAYS_INLINE void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
     if (kDebugLocking && (gAborting == 0)) {
       CHECK(!IsSharedHeld(self)) << *this;
     }
@@ -517,23 +509,15 @@ class SCOPED_CAPABILITY MutexLock {
 // construction and releases it upon destruction.
 class SCOPED_CAPABILITY ReaderMutexLock {
  public:
-  ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) ALWAYS_INLINE :
-      self_(self), mu_(mu) {
-    mu_.SharedLock(self_);
-  }
+  ALWAYS_INLINE ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu);
 
-  ~ReaderMutexLock() RELEASE() ALWAYS_INLINE {
-    mu_.SharedUnlock(self_);
-  }
+  ALWAYS_INLINE ~ReaderMutexLock() RELEASE();
 
  private:
   Thread* const self_;
   ReaderWriterMutex& mu_;
   DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
 };
-// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
-// "ReaderMutexLock mu(lock)".
-#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
 
 // Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
 // construction and releases it upon destruction.
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 340550f02e..752e77a7c0 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -14,10 +14,10 @@
  * limitations under the License.
  */
 
-#include "mutex.h"
+#include "mutex-inl.h"
 
 #include "common_runtime_test.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
 
 namespace art {
diff --git a/runtime/base/safe_copy_test.cc b/runtime/base/safe_copy_test.cc
index 987895e6b7..a9ec9528a1 100644
--- a/runtime/base/safe_copy_test.cc
+++ b/runtime/base/safe_copy_test.cc
@@ -23,80 +23,86 @@
 #include <sys/mman.h>
 #include <sys/user.h>
 
+#include "globals.h"
+
 namespace art {
 
 #if defined(__linux__)
 
 TEST(SafeCopyTest, smoke) {
+  DCHECK_EQ(kPageSize, static_cast<decltype(kPageSize)>(PAGE_SIZE));
+
   // Map four pages, mark the second one as PROT_NONE, unmap the last one.
-  void* map = mmap(nullptr, PAGE_SIZE * 4, PROT_READ | PROT_WRITE,
+  void* map = mmap(nullptr, kPageSize * 4, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_NE(MAP_FAILED, map);
   char* page1 = static_cast<char*>(map);
-  char* page2 = page1 + PAGE_SIZE;
-  char* page3 = page2 + PAGE_SIZE;
-  char* page4 = page3 + PAGE_SIZE;
-  ASSERT_EQ(0, mprotect(page1 + PAGE_SIZE, PAGE_SIZE, PROT_NONE));
-  ASSERT_EQ(0, munmap(page4, PAGE_SIZE));
+  char* page2 = page1 + kPageSize;
+  char* page3 = page2 + kPageSize;
+  char* page4 = page3 + kPageSize;
+  ASSERT_EQ(0, mprotect(page1 + kPageSize, kPageSize, PROT_NONE));
+  ASSERT_EQ(0, munmap(page4, kPageSize));
 
   page1[0] = 'a';
-  page1[PAGE_SIZE - 1] = 'z';
+  page1[kPageSize - 1] = 'z';
 
   page3[0] = 'b';
-  page3[PAGE_SIZE - 1] = 'y';
+  page3[kPageSize - 1] = 'y';
 
-  char buf[PAGE_SIZE];
+  char buf[kPageSize];
 
   // Completely valid read.
   memset(buf, 0xCC, sizeof(buf));
-  EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE), SafeCopy(buf, page1, PAGE_SIZE)) << strerror(errno);
-  EXPECT_EQ(0, memcmp(buf, page1, PAGE_SIZE));
+  EXPECT_EQ(static_cast<ssize_t>(kPageSize), SafeCopy(buf, page1, kPageSize)) << strerror(errno);
+  EXPECT_EQ(0, memcmp(buf, page1, kPageSize));
 
   // Reading into a guard page.
   memset(buf, 0xCC, sizeof(buf));
-  EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE - 1), SafeCopy(buf, page1 + 1, PAGE_SIZE));
-  EXPECT_EQ(0, memcmp(buf, page1 + 1, PAGE_SIZE - 1));
+  EXPECT_EQ(static_cast<ssize_t>(kPageSize - 1), SafeCopy(buf, page1 + 1, kPageSize));
+  EXPECT_EQ(0, memcmp(buf, page1 + 1, kPageSize - 1));
 
   // Reading from a guard page into a real page.
   memset(buf, 0xCC, sizeof(buf));
-  EXPECT_EQ(0, SafeCopy(buf, page2 + PAGE_SIZE - 1, PAGE_SIZE));
+  EXPECT_EQ(0, SafeCopy(buf, page2 + kPageSize - 1, kPageSize));
 
   // Reading off of the end of a mapping.
   memset(buf, 0xCC, sizeof(buf));
-  EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE), SafeCopy(buf, page3, PAGE_SIZE * 2));
-  EXPECT_EQ(0, memcmp(buf, page3, PAGE_SIZE));
+  EXPECT_EQ(static_cast<ssize_t>(kPageSize), SafeCopy(buf, page3, kPageSize * 2));
+  EXPECT_EQ(0, memcmp(buf, page3, kPageSize));
 
   // Completely invalid.
-  EXPECT_EQ(0, SafeCopy(buf, page1 + PAGE_SIZE, PAGE_SIZE));
+  EXPECT_EQ(0, SafeCopy(buf, page1 + kPageSize, kPageSize));
 
   // Clean up.
-  ASSERT_EQ(0, munmap(map, PAGE_SIZE * 3));
+  ASSERT_EQ(0, munmap(map, kPageSize * 3));
 }
 
 TEST(SafeCopyTest, alignment) {
+  DCHECK_EQ(kPageSize, static_cast<decltype(kPageSize)>(PAGE_SIZE));
+
   // Copy the middle of a mapping to the end of another one.
-  void* src_map = mmap(nullptr, PAGE_SIZE * 3, PROT_READ | PROT_WRITE,
+  void* src_map = mmap(nullptr, kPageSize * 3, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_NE(MAP_FAILED, src_map);
 
   // Add a guard page to make sure we don't write past the end of the mapping.
-  void* dst_map = mmap(nullptr, PAGE_SIZE * 4, PROT_READ | PROT_WRITE,
+  void* dst_map = mmap(nullptr, kPageSize * 4, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   ASSERT_NE(MAP_FAILED, dst_map);
 
   char* src = static_cast<char*>(src_map);
   char* dst = static_cast<char*>(dst_map);
-  ASSERT_EQ(0, mprotect(dst + 3 * PAGE_SIZE, PAGE_SIZE, PROT_NONE));
+  ASSERT_EQ(0, mprotect(dst + 3 * kPageSize, kPageSize, PROT_NONE));
 
   src[512] = 'a';
-  src[PAGE_SIZE * 3 - 512 - 1] = 'z';
+  src[kPageSize * 3 - 512 - 1] = 'z';
 
-  EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE * 3 - 1024),
-            SafeCopy(dst + 1024, src + 512, PAGE_SIZE * 3 - 1024));
-  EXPECT_EQ(0, memcmp(dst + 1024, src + 512, PAGE_SIZE * 3 - 1024));
+  EXPECT_EQ(static_cast<ssize_t>(kPageSize * 3 - 1024),
+            SafeCopy(dst + 1024, src + 512, kPageSize * 3 - 1024));
+  EXPECT_EQ(0, memcmp(dst + 1024, src + 512, kPageSize * 3 - 1024));
 
-  ASSERT_EQ(0, munmap(src_map, PAGE_SIZE * 3));
-  ASSERT_EQ(0, munmap(dst_map, PAGE_SIZE * 4));
+  ASSERT_EQ(0, munmap(src_map, kPageSize * 3));
+  ASSERT_EQ(0, munmap(dst_map, kPageSize * 4));
 }
 
 #endif  // defined(__linux__)
diff --git a/runtime/base/scoped_arena_allocator.cc b/runtime/base/scoped_arena_allocator.cc
index 7d04fa0223..973f9b93ed 100644
--- a/runtime/base/scoped_arena_allocator.cc
+++ b/runtime/base/scoped_arena_allocator.cc
@@ -16,7 +16,7 @@
 
 #include "scoped_arena_allocator.h"
 
-#include "arena_allocator.h"
+#include "arena_allocator-inl.h"
 #include "base/memory_tool.h"
 
 namespace art {
@@ -54,7 +54,7 @@ MemStats ArenaStack::GetPeakStats() const {
 
 uint8_t* ArenaStack::AllocateFromNextArena(size_t rounded_bytes) {
   UpdateBytesAllocated();
-  size_t allocation_size = std::max(Arena::kDefaultSize, rounded_bytes);
+  size_t allocation_size = std::max(arena_allocator::kArenaDefaultSize, rounded_bytes);
   if (UNLIKELY(top_arena_ == nullptr)) {
     top_arena_ = bottom_arena_ = stats_and_pool_.pool->AllocArena(allocation_size);
     top_arena_->next_ = nullptr;
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index 9a0e0d02a6..aaa24317bb 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -24,7 +24,9 @@
 #include "base/histogram-inl.h"
 #include "base/systrace.h"
 #include "base/time_utils.h"
-#include "thread-inl.h"
+#include "gc/heap.h"
+#include "runtime.h"
+#include "thread-current-inl.h"
 
 #include <cmath>
 #include <iomanip>
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 03fc959f6b..00b5567012 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -91,6 +91,7 @@ FdFile& FdFile::operator=(FdFile&& other) {
   fd_ = other.fd_;
   file_path_ = std::move(other.file_path_);
   auto_close_ = other.auto_close_;
+  read_only_mode_ = other.read_only_mode_;
   other.Release();  // Release other.
 
   return *this;
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index 7657a38cec..6aef348433 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -186,6 +186,20 @@ TEST_F(FdFileTest, MoveConstructor) {
   ASSERT_EQ(file2.Close(), 0);
 }
 
+TEST_F(FdFileTest, OperatorMoveEquals) {
+  // Make sure the read_only_ flag is correctly copied
+  // over.
+  art::ScratchFile tmp;
+  FdFile file(tmp.GetFilename(), O_RDONLY, false);
+  ASSERT_TRUE(file.ReadOnlyMode());
+
+  FdFile file2(tmp.GetFilename(), O_RDWR, false);
+  ASSERT_FALSE(file2.ReadOnlyMode());
+
+  file2 = std::move(file);
+  ASSERT_TRUE(file2.ReadOnlyMode());
+}
+
 TEST_F(FdFileTest, EraseWithPathUnlinks) {
   // New scratch file, zero-length.
   art::ScratchFile tmp;
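
A closing note on the ReaderMutexLock(x) static_assert macro that this change moves from mutex.h into mutex-inl.h: it guards against a classic C++ parsing trap in which "ReaderMutexLock (lock);" is a declaration rather than a lock acquisition. A minimal standalone sketch of the trap, using a hypothetical ScopedLock class and gLock mutex rather than ART's real types:

// Standalone sketch (not ART code) of the bug the ReaderMutexLock(x) macro catches.
struct FakeMutex {
  void Lock() {}
  void Unlock() {}
};

struct ScopedLock {
  ScopedLock() = default;  // Default-constructible, so the buggy form compiles silently.
  explicit ScopedLock(FakeMutex& mu) : mu_(&mu) { mu_->Lock(); }
  ~ScopedLock() { if (mu_ != nullptr) mu_->Unlock(); }
  FakeMutex* mu_ = nullptr;
};

FakeMutex gLock;

void Buggy() {
  ScopedLock(gLock);  // Parsed as "ScopedLock gLock;": declares a new default-constructed
                      // local that shadows the global; nothing is ever locked.
}

void Correct() {
  ScopedLock mu(gLock);  // Holds gLock until the end of the scope.
}

With the function-like macro in place, the buggy spelling ReaderMutexLock(lock); expands to the static_assert and fails to compile, while the correct ReaderMutexLock mu(lock); is unaffected, because the macro only fires when the next token after the name is an opening parenthesis.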