-rw-r--r-- | libartbase/base/globals.h | 10
-rw-r--r-- | libartbase/base/mem_map.cc | 9
-rw-r--r-- | libartbase/base/mem_map_test.cc | 66
-rw-r--r-- | libartbase/base/unix_file/fd_file.cc | 4
-rw-r--r-- | runtime/gc/collector/concurrent_copying.cc | 2
-rw-r--r-- | runtime/gc/collector/concurrent_copying.h | 5
-rw-r--r-- | runtime/gc/heap.cc | 6
-rw-r--r-- | runtime/gc/heap.h | 7
-rw-r--r-- | runtime/gc/space/large_object_space.cc | 18
-rw-r--r-- | runtime/gc/space/rosalloc_space.cc | 2
-rw-r--r-- | runtime/jit/jit_code_cache.cc | 2
-rw-r--r-- | runtime/jit/jit_code_cache.h | 20
-rw-r--r-- | runtime/jni/local_reference_table.cc | 25
-rw-r--r-- | runtime/jni/local_reference_table.h | 4
-rw-r--r-- | runtime/jni/local_reference_table_test.cc | 30
-rw-r--r-- | runtime/runtime.cc | 6
-rw-r--r-- | runtime/runtime_options.def | 2
-rw-r--r-- | runtime/thread.cc | 6
18 files changed, 124 insertions, 100 deletions
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index 8dcf1e13ae..3fb923ccf5 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -57,15 +57,15 @@ static constexpr size_t kMaxPageSize = kMinPageSize;
 // this is the value to be used in images files for aligning contents to page size.
 static constexpr size_t kElfSegmentAlignment = kMaxPageSize;
-// TODO: Kernels for arm and x86 in both, 32-bit and 64-bit modes use 512 entries per page-table
-// page. Find a way to confirm that in userspace.
 // Address range covered by 1 Page Middle Directory (PMD) entry in the page table
-static constexpr size_t kPMDSize = (kPageSize / sizeof(uint64_t)) * kPageSize;
+extern const size_t kPMDSize;
+
 // Address range covered by 1 Page Upper Directory (PUD) entry in the page table
-static constexpr size_t kPUDSize = (kPageSize / sizeof(uint64_t)) * kPMDSize;
+extern const size_t kPUDSize;
+
 // Returns the ideal alignment corresponding to page-table levels for the
 // given size.
-static constexpr size_t BestPageTableAlignment(size_t size) {
+static inline size_t BestPageTableAlignment(size_t size) {
   return size < kPUDSize ? kPMDSize : kPUDSize;
 }
 // Clion, clang analyzer, etc can falsely believe that "if (kIsDebugBuild)" always
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index aba20c6df9..884d712c22 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -56,6 +56,13 @@ using AllocationTrackingMultiMap =
 using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;
+// TODO: Kernels for arm and x86 in both, 32-bit and 64-bit modes use 512 entries per page-table
+// page. Find a way to confirm that in userspace.
+// Address range covered by 1 Page Middle Directory (PMD) entry in the page table
+const size_t kPMDSize = (kPageSize / sizeof(uint64_t)) * kPageSize;
+// Address range covered by 1 Page Upper Directory (PUD) entry in the page table
+const size_t kPUDSize = (kPageSize / sizeof(uint64_t)) * kPMDSize;
+
 // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
 static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
@@ -126,7 +133,7 @@ uintptr_t CreateStartPos(uint64_t input) {
   constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;
   // Lowest (usually 12) bits are not used, as aligned by page size.
-  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);
+  const uintptr_t mask = mask_ones & ~(kPageSize - 1);
   // Mask input data.
   return (input & mask) + LOW_MEM_START;
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index d6b869d19a..56dd35d9dc 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -382,10 +382,10 @@ TEST_F(MemMapTest, MapFile32Bit) {
   CommonInit();
   std::string error_msg;
   ScratchFile scratch_file;
-  constexpr size_t kMapSize = kPageSize;
-  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
-  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
-  MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
+  const size_t map_size = kPageSize;
+  std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
+  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));
+  MemMap map = MemMap::MapFile(/*byte_count=*/map_size,
                                PROT_READ,
                                MAP_PRIVATE,
                                scratch_file.GetFd(),
@@ -395,7 +395,7 @@ TEST_F(MemMapTest, MapFile32Bit) {
                               &error_msg);
   ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_EQ(map.Size(), kMapSize);
+  ASSERT_EQ(map.Size(), map_size);
   ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
 }
 #endif
@@ -461,15 +461,15 @@ TEST_F(MemMapTest, RemapFileViewAtEnd) {
   ScratchFile scratch_file;
   // Create a scratch file 3 pages large.
-  constexpr size_t kMapSize = 3 * kPageSize;
-  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
+  const size_t map_size = 3 * kPageSize;
+  std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
   memset(data.get(), 1, kPageSize);
   memset(&data[0], 0x55, kPageSize);
   memset(&data[kPageSize], 0x5a, kPageSize);
   memset(&data[2 * kPageSize], 0xaa, kPageSize);
-  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
+  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));
-  MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
+  MemMap map = MemMap::MapFile(/*byte_count=*/map_size,
                                PROT_READ,
                                MAP_PRIVATE,
                                scratch_file.GetFd(),
@@ -479,7 +479,7 @@ TEST_F(MemMapTest, RemapFileViewAtEnd) {
                               &error_msg);
   ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_EQ(map.Size(), kMapSize);
+  ASSERT_EQ(map.Size(), map_size);
   ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
   ASSERT_EQ(data[0], *map.Begin());
   ASSERT_EQ(data[kPageSize], *(map.Begin() + kPageSize));
@@ -773,12 +773,12 @@ TEST_F(MemMapTest, Reservation) {
   CommonInit();
   std::string error_msg;
   ScratchFile scratch_file;
-  constexpr size_t kMapSize = 5 * kPageSize;
-  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
-  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
+  const size_t map_size = 5 * kPageSize;
+  std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
+  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));
   MemMap reservation = MemMap::MapAnonymous("Test reservation",
-                                            kMapSize,
+                                            map_size,
                                             PROT_NONE,
                                             /*low_4gb=*/ false,
                                             &error_msg);
@@ -786,11 +786,11 @@ TEST_F(MemMapTest, Reservation) {
   ASSERT_TRUE(reservation.IsValid());
   ASSERT_TRUE(error_msg.empty());
   // Map first part of the reservation.
-  constexpr size_t kChunk1Size = kPageSize - 1u;
-  ASSERT_LT(kChunk1Size, kMapSize) << "We want to split the reservation.";
+  const size_t chunk1_size = kPageSize - 1u;
+  ASSERT_LT(chunk1_size, map_size) << "We want to split the reservation.";
   uint8_t* addr1 = reservation.Begin();
   MemMap map1 = MemMap::MapFileAtAddress(addr1,
-                                         /*byte_count=*/ kChunk1Size,
+                                         /*byte_count=*/ chunk1_size,
                                          PROT_READ,
                                          MAP_PRIVATE,
                                          scratch_file.GetFd(),
@@ -802,7 +802,7 @@ TEST_F(MemMapTest, Reservation) {
                                          &error_msg);
   ASSERT_TRUE(map1.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_EQ(map1.Size(), kChunk1Size);
+  ASSERT_EQ(map1.Size(), chunk1_size);
   ASSERT_EQ(addr1, map1.Begin());
   ASSERT_TRUE(reservation.IsValid());
   // Entire pages are taken from the `reservation`.
@@ -810,12 +810,12 @@ TEST_F(MemMapTest, Reservation) {
   ASSERT_EQ(map1.BaseEnd(), reservation.Begin());
   // Map second part as an anonymous mapping.
-  constexpr size_t kChunk2Size = 2 * kPageSize;
-  DCHECK_LT(kChunk2Size, reservation.Size());  // We want to split the reservation.
+  const size_t chunk2_size = 2 * kPageSize;
+  DCHECK_LT(chunk2_size, reservation.Size());  // We want to split the reservation.
   uint8_t* addr2 = reservation.Begin();
   MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
                                      addr2,
-                                     /*byte_count=*/ kChunk2Size,
+                                     /*byte_count=*/ chunk2_size,
                                      PROT_READ,
                                      /*low_4gb=*/ false,
                                      /*reuse=*/ false,
@@ -823,16 +823,16 @@ TEST_F(MemMapTest, Reservation) {
                                      &error_msg);
   ASSERT_TRUE(map2.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_EQ(map2.Size(), kChunk2Size);
+  ASSERT_EQ(map2.Size(), chunk2_size);
   ASSERT_EQ(addr2, map2.Begin());
-  ASSERT_EQ(map2.End(), map2.BaseEnd());  // kChunk2Size is page aligned.
+  ASSERT_EQ(map2.End(), map2.BaseEnd());  // chunk2_size is page aligned.
   ASSERT_EQ(map2.BaseEnd(), reservation.Begin());
   // Map the rest of the reservation except the last byte.
-  const size_t kChunk3Size = reservation.Size() - 1u;
+  const size_t chunk3_size = reservation.Size() - 1u;
   uint8_t* addr3 = reservation.Begin();
   MemMap map3 = MemMap::MapFileAtAddress(addr3,
-                                         /*byte_count=*/ kChunk3Size,
+                                         /*byte_count=*/ chunk3_size,
                                          PROT_READ,
                                          MAP_PRIVATE,
                                          scratch_file.GetFd(),
@@ -844,30 +844,30 @@ TEST_F(MemMapTest, Reservation) {
                                          &error_msg);
   ASSERT_TRUE(map3.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_EQ(map3.Size(), kChunk3Size);
+  ASSERT_EQ(map3.Size(), chunk3_size);
   ASSERT_EQ(addr3, map3.Begin());
   // Entire pages are taken from the `reservation`, so it's now exhausted.
   ASSERT_FALSE(reservation.IsValid());
   // Now split the MiddleReservation.
-  constexpr size_t kChunk2ASize = kPageSize - 1u;
-  DCHECK_LT(kChunk2ASize, map2.Size());  // We want to split the reservation.
-  MemMap map2a = map2.TakeReservedMemory(kChunk2ASize);
+  const size_t chunk2a_size = kPageSize - 1u;
+  DCHECK_LT(chunk2a_size, map2.Size());  // We want to split the reservation.
+  MemMap map2a = map2.TakeReservedMemory(chunk2a_size);
   ASSERT_TRUE(map2a.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_EQ(map2a.Size(), kChunk2ASize);
+  ASSERT_EQ(map2a.Size(), chunk2a_size);
   ASSERT_EQ(addr2, map2a.Begin());
   ASSERT_TRUE(map2.IsValid());
   ASSERT_LT(map2a.End(), map2a.BaseEnd());
   ASSERT_EQ(map2a.BaseEnd(), map2.Begin());
   // And take the rest of the middle reservation.
-  const size_t kChunk2BSize = map2.Size() - 1u;
+  const size_t chunk2b_size = map2.Size() - 1u;
   uint8_t* addr2b = map2.Begin();
-  MemMap map2b = map2.TakeReservedMemory(kChunk2BSize);
+  MemMap map2b = map2.TakeReservedMemory(chunk2b_size);
   ASSERT_TRUE(map2b.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_EQ(map2b.Size(), kChunk2ASize);
+  ASSERT_EQ(map2b.Size(), chunk2a_size);
   ASSERT_EQ(addr2b, map2b.Begin());
   ASSERT_FALSE(map2.IsValid());
 }
diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc
index a955b7d402..eef3045e0b 100644
--- a/libartbase/base/unix_file/fd_file.cc
+++ b/libartbase/base/unix_file/fd_file.cc
@@ -498,8 +498,8 @@ bool FdFile::Copy(FdFile* input_file, int64_t offset, int64_t size) {
   if (lseek(input_file->Fd(), off, SEEK_SET) != off) {
     return false;
   }
-  constexpr size_t kMaxBufferSize = 4 * ::art::kPageSize;
-  const size_t buffer_size = std::min<uint64_t>(size, kMaxBufferSize);
+  const size_t max_buffer_size = 4 * ::art::kPageSize;
+  const size_t buffer_size = std::min<uint64_t>(size, max_buffer_size);
   art::UniqueCPtr<void> buffer(malloc(buffer_size));
   if (buffer == nullptr) {
     errno = ENOMEM;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 4e006b5ed9..18fa7b5970 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -135,7 +135,7 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
     for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
       accounting::AtomicStack<mirror::Object>* mark_stack =
           accounting::AtomicStack<mirror::Object>::Create(
-              "thread local mark stack", kMarkStackSize, kMarkStackSize);
+              "thread local mark stack", GetMarkStackSize(), GetMarkStackSize());
       pooled_mark_stacks_.push_back(mark_stack);
     }
   }
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index c488faa425..4887bd91b1 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -375,7 +375,10 @@ class ConcurrentCopying : public GarbageCollector {
   Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::vector<accounting::ObjectStack*> revoked_mark_stacks_ GUARDED_BY(mark_stack_lock_);
-  static constexpr size_t kMarkStackSize = kPageSize;
+  // Size of thread local mark stack.
+  static size_t GetMarkStackSize() {
+    return kPageSize;
+  }
   static constexpr size_t kMarkStackPoolSize = 256;
   std::vector<accounting::ObjectStack*> pooled_mark_stacks_ GUARDED_BY(mark_stack_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ee77283fb2..6e08a8380b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -624,7 +624,7 @@ Heap::Heap(size_t initial_size,
     const void* non_moving_space_mem_map_begin = non_moving_space_mem_map.Begin();
     non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
                                                                "zygote / non moving space",
-                                                               kDefaultStartingSize,
+                                                               GetDefaultStartingSize(),
                                                                initial_size,
                                                                size,
                                                                size,
@@ -895,7 +895,7 @@ space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
     // Create rosalloc space.
     malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map),
                                                           name,
-                                                          kDefaultStartingSize,
+                                                          GetDefaultStartingSize(),
                                                           initial_size,
                                                           growth_limit,
                                                           capacity,
@@ -904,7 +904,7 @@ space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
   } else {
     malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map),
                                                           name,
-                                                          kDefaultStartingSize,
+                                                          GetDefaultStartingSize(),
                                                           initial_size,
                                                           growth_limit,
                                                           capacity,
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 0f8e65e57c..866e95de5b 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -137,7 +137,6 @@ class Heap {
   static constexpr size_t kPartialTlabSize = 16 * KB;
   static constexpr bool kUsePartialTlabs = true;
-  static constexpr size_t kDefaultStartingSize = kPageSize;
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 256 * MB;
   static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
@@ -187,6 +186,12 @@ class Heap {
   // How often we allow heap trimming to happen (nanoseconds).
   static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
+
+  // Starting size of DlMalloc/RosAlloc spaces.
+  static size_t GetDefaultStartingSize() {
+    return kPageSize;
+  }
+
   // Whether the transition-GC heap threshold condition applies or not for non-low memory devices.
   // Stressing GC will bypass the heap threshold condition.
   DECLARE_RUNTIME_DEBUG_FLAG(kStressCollectorTransition);
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 7eb1dab537..ea567f5d29 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -55,14 +55,14 @@ class MemoryToolLargeObjectMapSpace final : public LargeObjectMapSpace {
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated) override {
     mirror::Object* obj =
-        LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
+        LargeObjectMapSpace::Alloc(self, num_bytes + MemoryToolRedZoneBytes() * 2, bytes_allocated,
                                    usable_size, bytes_tl_bulk_allocated);
     mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
-        reinterpret_cast<uintptr_t>(obj) + kMemoryToolRedZoneBytes);
-    MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), kMemoryToolRedZoneBytes);
+        reinterpret_cast<uintptr_t>(obj) + MemoryToolRedZoneBytes());
+    MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), MemoryToolRedZoneBytes());
     MEMORY_TOOL_MAKE_NOACCESS(
         reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
-        kMemoryToolRedZoneBytes);
+        MemoryToolRedZoneBytes());
     if (usable_size != nullptr) {
       *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
     }
@@ -88,17 +88,19 @@ class MemoryToolLargeObjectMapSpace final : public LargeObjectMapSpace {
   }
  private:
+  static size_t MemoryToolRedZoneBytes() {
+    return kPageSize;
+  }
+
   static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
     return reinterpret_cast<const mirror::Object*>(
-        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
+        reinterpret_cast<uintptr_t>(obj) - MemoryToolRedZoneBytes());
   }
   static mirror::Object* ObjectWithRedzone(mirror::Object* obj) {
     return reinterpret_cast<mirror::Object*>(
-        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
+        reinterpret_cast<uintptr_t>(obj) - MemoryToolRedZoneBytes());
   }
-
-  static constexpr size_t kMemoryToolRedZoneBytes = kPageSize;
 };
 void LargeObjectSpace::SwapBitmaps() {
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 80430bd26b..86a5d3a794 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -152,7 +152,7 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name,
   // Note: making this value large means that large allocations are unlikely to succeed as rosalloc
   // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
   // size of the large allocation) will be greater than the footprint limit.
-  size_t starting_size = Heap::kDefaultStartingSize;
+  size_t starting_size = Heap::GetDefaultStartingSize();
   MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 697ea94a85..2474399f59 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1101,7 +1101,7 @@ bool JitCodeCache::ShouldDoFullCollection() {
   if (IsAtMaxCapacity()) {
     // Always do a full collection when the code cache is full.
     return true;
-  } else if (private_region_.GetCurrentCapacity() < kReservedCapacity) {
+  } else if (private_region_.GetCurrentCapacity() < GetReservedCapacity()) {
     // Always do partial collection when the code cache size is below the reserved
     // capacity.
     return false;
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 8f408b8d91..96db8aeba8 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -181,15 +181,21 @@ class ZygoteMap {
 class JitCodeCache {
  public:
   static constexpr size_t kMaxCapacity = 64 * MB;
-  // Put the default to a very low amount for debug builds to stress the code cache
-  // collection. It should be at least two pages, however, as the storage is split
-  // into data and code sections with sizes that should be aligned to page size each
-  // as that's the unit mspaces use. See also: JitMemoryRegion::Initialize.
-  static constexpr size_t kInitialCapacity = std::max(kIsDebugBuild ? 8 * KB : 64 * KB,
-                                                      2 * kPageSize);
+  // Default initial capacity of the JIT code cache.
+  static size_t GetInitialCapacity() {
+    // Put the default to a very low amount for debug builds to stress the code cache
+    // collection. It should be at least two pages, however, as the storage is split
+    // into data and code sections with sizes that should be aligned to page size each
+    // as that's the unit mspaces use. See also: JitMemoryRegion::Initialize.
+    return std::max(kIsDebugBuild ? 8 * KB : 64 * KB, 2 * kPageSize);
+  }
+
+  // Reserved capacity of the JIT code cache.
   // By default, do not GC until reaching four times the initial capacity.
-  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;
+  static size_t GetReservedCapacity() {
+    return GetInitialCapacity() * 4;
+  }
   // Create the code cache with a code + data capacity equal to "capacity", error message is passed
   // in the out arg error_msg.
diff --git a/runtime/jni/local_reference_table.cc b/runtime/jni/local_reference_table.cc
index 68359f22ce..f701c71347 100644
--- a/runtime/jni/local_reference_table.cc
+++ b/runtime/jni/local_reference_table.cc
@@ -40,6 +40,9 @@ namespace jni {
 static constexpr bool kDumpStackOnNonLocalReference = false;
 static constexpr bool kDebugLRT = false;
+// Number of free lists in the allocator.
+static const size_t kNumLrtSlots = WhichPowerOf2(kPageSize / kInitialLrtBytes);
+
 // Mmap an "indirect ref table region. Table_bytes is a multiple of a page size.
 static inline MemMap NewLRTMap(size_t table_bytes, std::string* error_msg) {
   return MemMap::MapAnonymous("local ref table",
@@ -50,7 +53,7 @@ static inline MemMap NewLRTMap(size_t table_bytes, std::string* error_msg) {
 }
 SmallLrtAllocator::SmallLrtAllocator()
-    : free_lists_(kNumSlots, nullptr),
+    : free_lists_(kNumLrtSlots, nullptr),
       shared_lrt_maps_(),
       lock_("Small LRT allocator lock", LockLevel::kGenericBottomLock) {
 }
@@ -60,7 +63,7 @@
 inline size_t SmallLrtAllocator::GetIndex(size_t size) {
   DCHECK_LT(size, kPageSize / sizeof(LrtEntry));
   DCHECK(IsPowerOfTwo(size));
   size_t index = WhichPowerOf2(size / kSmallLrtEntries);
-  DCHECK_LT(index, kNumSlots);
+  DCHECK_LT(index, kNumLrtSlots);
   return index;
 }
@@ -68,11 +71,11 @@ LrtEntry* SmallLrtAllocator::Allocate(size_t size, std::string* error_msg) {
   size_t index = GetIndex(size);
   MutexLock lock(Thread::Current(), lock_);
   size_t fill_from = index;
-  while (fill_from != kNumSlots && free_lists_[fill_from] == nullptr) {
+  while (fill_from != kNumLrtSlots && free_lists_[fill_from] == nullptr) {
     ++fill_from;
   }
   void* result = nullptr;
-  if (fill_from != kNumSlots) {
+  if (fill_from != kNumLrtSlots) {
     // We found a slot with enough memory.
     result = free_lists_[fill_from];
     free_lists_[fill_from] = *reinterpret_cast<void**>(result);
@@ -101,13 +104,13 @@ LrtEntry* SmallLrtAllocator::Allocate(size_t size, std::string* error_msg) {
 void SmallLrtAllocator::Deallocate(LrtEntry* unneeded, size_t size) {
   size_t index = GetIndex(size);
   MutexLock lock(Thread::Current(), lock_);
-  while (index < kNumSlots) {
+  while (index < kNumLrtSlots) {
     // Check if we can merge this free block with another block with the same size.
     void** other = reinterpret_cast<void**>(
         reinterpret_cast<uintptr_t>(unneeded) ^ (kInitialLrtBytes << index));
     void** before = &free_lists_[index];
-    if (index + 1u == kNumSlots && *before == other && *other == nullptr) {
-      // Do not unmap the page if we do not have other free blocks with index `kNumSlots - 1`.
+    if (index + 1u == kNumLrtSlots && *before == other && *other == nullptr) {
+      // Do not unmap the page if we do not have other free blocks with index `kNumLrtSlots - 1`.
       // (Keep at least one free block to avoid a situation where creating and destroying a single
       // thread with no local references would map and unmap a page in the `SmallLrtAllocator`.)
       break;
@@ -125,9 +128,9 @@ void SmallLrtAllocator::Deallocate(LrtEntry* unneeded, size_t size) {
     unneeded = reinterpret_cast<LrtEntry*>(
         reinterpret_cast<uintptr_t>(unneeded) & reinterpret_cast<uintptr_t>(other));
   }
-  if (index == kNumSlots) {
+  if (index == kNumLrtSlots) {
     // Free the entire page.
-    DCHECK(free_lists_[kNumSlots - 1u] != nullptr);
+    DCHECK(free_lists_[kNumLrtSlots - 1u] != nullptr);
     auto match = [=](MemMap& map) { return unneeded == reinterpret_cast<LrtEntry*>(map.Begin()); };
     auto it = std::find_if(shared_lrt_maps_.begin(), shared_lrt_maps_.end(), match);
     DCHECK(it != shared_lrt_maps_.end());
@@ -614,9 +617,9 @@ void LocalReferenceTable::Trim() {
     PrunePoppedFreeEntries([&](size_t index) { return GetEntry(index); });
   }
   // Small tables can hold as many entries as the next table.
-  constexpr size_t kSmallTablesCapacity = GetTableSize(MaxSmallTables());
+  const size_t small_tables_capacity = GetTableSize(MaxSmallTables());
   size_t mem_map_index = 0u;
-  if (top_index > kSmallTablesCapacity) {
+  if (top_index > small_tables_capacity) {
     const size_t table_size = TruncToPowerOfTwo(top_index);
     const size_t table_index = NumTablesForSize(table_size);
     const size_t start_index = top_index - table_size;
diff --git a/runtime/jni/local_reference_table.h b/runtime/jni/local_reference_table.h
index 8fdc317f6a..8dce271ff2 100644
--- a/runtime/jni/local_reference_table.h
+++ b/runtime/jni/local_reference_table.h
@@ -227,8 +227,6 @@ class SmallLrtAllocator {
   void Deallocate(LrtEntry* unneeded, size_t size) REQUIRES(!lock_);
  private:
-  static constexpr size_t kNumSlots = WhichPowerOf2(kPageSize / kInitialLrtBytes);
-
   static size_t GetIndex(size_t size);
   // Free lists of small chunks linked through the first word.
@@ -401,7 +399,7 @@ class LocalReferenceTable {
     return 1u + WhichPowerOf2(size / kSmallLrtEntries);
   }
-  static constexpr size_t MaxSmallTables() {
+  static size_t MaxSmallTables() {
     return NumTablesForSize(kPageSize / sizeof(LrtEntry));
   }
diff --git a/runtime/jni/local_reference_table_test.cc b/runtime/jni/local_reference_table_test.cc
index 32c6d48115..abf87158af 100644
--- a/runtime/jni/local_reference_table_test.cc
+++ b/runtime/jni/local_reference_table_test.cc
@@ -830,9 +830,9 @@ TEST_F(LocalReferenceTableTest, RegressionTestB276864369) {
   // Add refs to fill all small tables and one bigger table.
   const LRTSegmentState cookie0 = kLRTFirstSegment;
-  constexpr size_t kRefsPerPage = kPageSize / sizeof(LrtEntry);
+  const size_t refs_per_page = kPageSize / sizeof(LrtEntry);
   std::vector<IndirectRef> refs;
-  for (size_t i = 0; i != 2 * kRefsPerPage; ++i) {
+  for (size_t i = 0; i != 2 * refs_per_page; ++i) {
     refs.push_back(lrt.Add(cookie0, c, &error_msg));
     ASSERT_TRUE(refs.back() != nullptr);
   }
@@ -854,9 +854,9 @@ TEST_F(LocalReferenceTableTest, Trim) {
   // Add refs to fill all small tables.
   const LRTSegmentState cookie0 = kLRTFirstSegment;
-  constexpr size_t kRefsPerPage = kPageSize / sizeof(LrtEntry);
+  const size_t refs_per_page = kPageSize / sizeof(LrtEntry);
   std::vector<IndirectRef> refs0;
-  for (size_t i = 0; i != kRefsPerPage; ++i) {
+  for (size_t i = 0; i != refs_per_page; ++i) {
     refs0.push_back(lrt.Add(cookie0, c, &error_msg));
     ASSERT_TRUE(refs0.back() != nullptr);
   }
@@ -868,7 +868,7 @@ TEST_F(LocalReferenceTableTest, Trim) {
   // Add refs to fill the next, page-sized table.
   std::vector<IndirectRef> refs1;
   LRTSegmentState cookie1 = lrt.GetSegmentState();
-  for (size_t i = 0; i != kRefsPerPage; ++i) {
+  for (size_t i = 0; i != refs_per_page; ++i) {
     refs1.push_back(lrt.Add(cookie1, c, &error_msg));
     ASSERT_TRUE(refs1.back() != nullptr);
   }
@@ -893,7 +893,7 @@ TEST_F(LocalReferenceTableTest, Trim) {
   // Add refs to fill the page-sized table and half of the next one.
   cookie1 = lrt.GetSegmentState();  // Push a new segment.
-  for (size_t i = 0; i != 2 * kRefsPerPage; ++i) {
+  for (size_t i = 0; i != 2 * refs_per_page; ++i) {
     refs1.push_back(lrt.Add(cookie1, c, &error_msg));
     ASSERT_TRUE(refs1.back() != nullptr);
   }
@@ -901,7 +901,7 @@ TEST_F(LocalReferenceTableTest, Trim) {
   // Add refs to fill the other half of the table with two pages.
   std::vector<IndirectRef> refs2;
   const LRTSegmentState cookie2 = lrt.GetSegmentState();
-  for (size_t i = 0; i != kRefsPerPage; ++i) {
+  for (size_t i = 0; i != refs_per_page; ++i) {
     refs2.push_back(lrt.Add(cookie2, c, &error_msg));
     ASSERT_TRUE(refs2.back() != nullptr);
   }
@@ -938,12 +938,12 @@ TEST_F(LocalReferenceTableTest, Trim) {
   refs0.clear();
   // Fill small tables and one more reference, then another segment up to 4 pages.
-  for (size_t i = 0; i != kRefsPerPage + 1u; ++i) {
+  for (size_t i = 0; i != refs_per_page + 1u; ++i) {
     refs0.push_back(lrt.Add(cookie0, c, &error_msg));
     ASSERT_TRUE(refs0.back() != nullptr);
   }
   cookie1 = lrt.GetSegmentState();  // Push a new segment.
-  for (size_t i = 0; i != 3u * kRefsPerPage - 1u; ++i) {
+  for (size_t i = 0; i != 3u * refs_per_page - 1u; ++i) {
     refs1.push_back(lrt.Add(cookie1, c, &error_msg));
     ASSERT_TRUE(refs1.back() != nullptr);
   }
@@ -959,11 +959,11 @@ TEST_F(LocalReferenceTableTest, Trim) {
     ASSERT_FALSE(IndirectReferenceTable::ClearIndirectRefKind<LrtEntry*>(ref)->IsNull());
   }
   ASSERT_EQ(refs0.size(), lrt.Capacity());
-  for (IndirectRef ref : ArrayRef<IndirectRef>(refs1).SubArray(0u, kRefsPerPage - 1u)) {
+  for (IndirectRef ref : ArrayRef<IndirectRef>(refs1).SubArray(0u, refs_per_page - 1u)) {
     // Popped but not trimmed as these are at the same page as the last entry in `refs0`.
     ASSERT_FALSE(IndirectReferenceTable::ClearIndirectRefKind<LrtEntry*>(ref)->IsNull());
   }
-  for (IndirectRef ref : ArrayRef<IndirectRef>(refs1).SubArray(kRefsPerPage - 1u)) {
+  for (IndirectRef ref : ArrayRef<IndirectRef>(refs1).SubArray(refs_per_page - 1u)) {
     ASSERT_TRUE(IndirectReferenceTable::ClearIndirectRefKind<LrtEntry*>(ref)->IsNull());
   }
 }
@@ -978,9 +978,9 @@ TEST_F(LocalReferenceTableTest, PruneBeforeTrim) {
   // Add refs to fill all small tables and one bigger table.
   const LRTSegmentState cookie0 = kLRTFirstSegment;
-  constexpr size_t kRefsPerPage = kPageSize / sizeof(LrtEntry);
+  const size_t refs_per_page = kPageSize / sizeof(LrtEntry);
   std::vector<IndirectRef> refs;
-  for (size_t i = 0; i != 2 * kRefsPerPage; ++i) {
+  for (size_t i = 0; i != 2 * refs_per_page; ++i) {
     refs.push_back(lrt.Add(cookie0, c, &error_msg));
     ASSERT_TRUE(refs.back() != nullptr);
   }
@@ -996,10 +996,10 @@ TEST_F(LocalReferenceTableTest, PruneBeforeTrim) {
   // Pop the entire segment and trim. Small tables are not pruned.
   lrt.SetSegmentState(cookie0);
   lrt.Trim();
-  for (IndirectRef ref : ArrayRef<IndirectRef>(refs).SubArray(0u, kRefsPerPage)) {
+  for (IndirectRef ref : ArrayRef<IndirectRef>(refs).SubArray(0u, refs_per_page)) {
     ASSERT_FALSE(IndirectReferenceTable::ClearIndirectRefKind<LrtEntry*>(ref)->IsNull());
   }
-  for (IndirectRef ref : ArrayRef<IndirectRef>(refs).SubArray(kRefsPerPage)) {
+  for (IndirectRef ref : ArrayRef<IndirectRef>(refs).SubArray(refs_per_page)) {
     ASSERT_TRUE(IndirectReferenceTable::ClearIndirectRefKind<LrtEntry*>(ref)->IsNull());
   }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 54fe1c4176..06bf34234f 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1525,10 +1525,10 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
   // Note: Don't request an error message. That will lead to a maps dump in the case of failure,
   // leading to logspam.
   {
-    constexpr uintptr_t kSentinelAddr =
+    const uintptr_t sentinel_addr =
         RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
     protected_fault_page_ = MemMap::MapAnonymous("Sentinel fault page",
-                                                 reinterpret_cast<uint8_t*>(kSentinelAddr),
+                                                 reinterpret_cast<uint8_t*>(sentinel_addr),
                                                  kPageSize,
                                                  PROT_NONE,
                                                  /*low_4gb=*/ true,
@@ -1537,7 +1537,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
                                                  /*error_msg=*/ nullptr);
     if (!protected_fault_page_.IsValid()) {
      LOG(WARNING) << "Could not reserve sentinel fault page";
-    } else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
+    } else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != sentinel_addr) {
      LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
       protected_fault_page_.Reset();
     }
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index d4e074273f..8dcba19113 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -98,7 +98,7 @@ RUNTIME_OPTIONS_KEY (unsigned int, JITPriorityThreadWeight)
 RUNTIME_OPTIONS_KEY (unsigned int, JITInvokeTransitionWeight)
 RUNTIME_OPTIONS_KEY (int, JITPoolThreadPthreadPriority, jit::kJitPoolThreadPthreadDefaultPriority)
 RUNTIME_OPTIONS_KEY (int, JITZygotePoolThreadPthreadPriority, jit::kJitZygotePoolThreadPthreadDefaultPriority)
-RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::kInitialCapacity)
+RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::GetInitialCapacity())
 RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheMaxCapacity, jit::JitCodeCache::kMaxCapacity)
 RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
                      HSpaceCompactForOOMMinIntervalsMs,\
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 59d3da9395..1e34986814 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -153,7 +153,7 @@ static constexpr size_t kSuspendTimeDuringFlip = 5'000;
 // of the stack (lowest memory). The higher portion of the memory
 // is protected against reads and the lower is available for use while
 // throwing the StackOverflow exception.
-constexpr size_t kStackOverflowProtectedSize = kMemoryToolStackGuardSizeScale * kPageSize;
+static const size_t kStackOverflowProtectedSize = kMemoryToolStackGuardSizeScale * kPageSize;
 static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
@@ -1363,8 +1363,8 @@ bool Thread::InitStackHwm() {
   //
   // On systems with 4K page size, typically the minimum stack size will be 4+8+4 = 16K.
   // The thread won't be able to do much with this stack: even the GC takes between 8K and 12K.
-  DCHECK_ALIGNED(kStackOverflowProtectedSize, kPageSize);
-  uint32_t min_stack = kStackOverflowProtectedSize +
+  DCHECK_ALIGNED_PARAM(kStackOverflowProtectedSize, kPageSize);
+  size_t min_stack = kStackOverflowProtectedSize +
       RoundUp(GetStackOverflowReservedBytes(kRuntimeISA) + 4 * KB, kPageSize);
   if (read_stack_size <= min_stack) {
     // Note, as we know the stack is small, avoid operations that could use a lot of stack.
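Every hunk above applies the same pattern: values derived from kPageSize stop being `static constexpr` and become either `extern const` globals defined in one translation unit or small getter functions, so they still work if the page size is only determined at runtime. The following is a minimal, illustrative sketch of that pattern and is not part of the change above; GetPageSize, GetFooAlignment, and kFooAlignment are hypothetical names used only for this example, with sysconf(_SC_PAGESIZE) standing in for ART's page-size query.

// Illustrative sketch only -- not code from the patch above.
#include <unistd.h>   // sysconf(_SC_PAGESIZE)

#include <cstddef>
#include <cstdio>

// Runtime page-size query (stand-in for a kPageSize that is no longer constexpr).
inline size_t GetPageSize() {
  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
}

// Before: static constexpr size_t kFooAlignment = 4 * kPageSize;  // requires constexpr kPageSize
// After: computed on demand, mirroring getters such as GetMarkStackSize() above.
inline size_t GetFooAlignment() {
  return 4 * GetPageSize();
}

int main() {
  std::printf("page size: %zu, foo alignment: %zu\n", GetPageSize(), GetFooAlignment());
  return 0;
}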