author Ruben Ayrapetyan <ruben.ayrapetyan@arm.com> 2023-09-28 18:01:33 +0100
committer Treehugger Robot <android-test-infra-autosubmit@system.gserviceaccount.com> 2023-11-30 00:02:56 +0000
commit 54417d2c7e5254f8941119f8f16476c1a45e028a (patch)
tree 1f4b82cbaa3df28f62624cefdf7cdf57b6370959
parent 565b3b67de4b3b781b1c97aa86164c1bd604cd3d (diff)
Use globals naming scheme for kPageSize etc.
This patch is part of the chain preparing for making kPageSize
non-constexpr in a future patch.

The following values aren't always constexpr anymore - in some
configurations they are global, dynamically initialized constants.
Rename them accordingly:
 - kPageSize to gPageSize;
 - kPMDSize to gPMDSize;
 - kPUDSize to gPUDSize;
 - kNumLrtSlots to gNumLrtSlots;
 - kStackOverflowProtectedSize to gStackOverflowProtectedSize.

Also fix the typo in the kMininumMapSize identifier.

Test: Same as for I5430741a8494b340ed7fd2d8692c41a59ad9c530.
      The whole patch chain was tested as a whole.
Change-Id: Ic8502aa66d75d2bbba698282a1eaf1a029b02d3a
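For context, a minimal sketch of what this naming change anticipates - illustrative only, not the actual ART definition. In a page-size-agnostic build the page size can no longer be a compile-time constant and instead becomes a global constant that is dynamically initialized from the kernel; the ART_PAGE_SIZE_AGNOSTIC define and the sysconf(_SC_PAGE_SIZE) check are referenced in the hunks below, but the exact initializer here is an assumption:

    #include <unistd.h>   // sysconf, _SC_PAGE_SIZE
    #include <cstddef>    // size_t

    #ifdef ART_PAGE_SIZE_AGNOSTIC
    // Global, dynamically initialized constant - hence the 'g' prefix.
    const size_t gPageSize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
    #else
    // Fixed-page-size configuration: still a plain compile-time constant.
    constexpr size_t gPageSize = 4096;
    #endif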
-rw-r--r--  compiler/common_compiler_test.cc | 4
-rw-r--r--  compiler/jit/jit_logger.cc | 4
-rw-r--r--  dex2oat/common_compiler_driver_test.cc | 4
-rw-r--r--  dex2oat/linker/elf_writer_test.cc | 2
-rw-r--r--  dex2oat/utils/swap_space.cc | 6
-rw-r--r--  dexlayout/dex_visualize.cc | 8
-rw-r--r--  dexlayout/dexdiag.cc | 12
-rw-r--r--  imgdiag/imgdiag.cc | 34
-rw-r--r--  imgdiag/page_info.cc | 10
-rw-r--r--  libartbase/arch/instruction_set.cc | 2
-rw-r--r--  libartbase/base/bit_memory_region.h | 2
-rw-r--r--  libartbase/base/globals.h | 8
-rw-r--r--  libartbase/base/mem_map.cc | 106
-rw-r--r--  libartbase/base/mem_map_test.cc | 122
-rw-r--r--  libartbase/base/unix_file/fd_file.cc | 2
-rw-r--r--  libartbase/base/utils.cc | 10
-rw-r--r--  libdexfile/dex/code_item_accessors_test.cc | 2
-rw-r--r--  libelffile/elf/xz_utils.cc | 2
-rw-r--r--  runtime/base/gc_visited_arena_pool.cc | 20
-rw-r--r--  runtime/base/gc_visited_arena_pool.h | 30
-rw-r--r--  runtime/base/mem_map_arena_pool.cc | 2
-rw-r--r--  runtime/debugger.cc | 2
-rw-r--r--  runtime/gc/accounting/bitmap.cc | 2
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc | 6
-rw-r--r--  runtime/gc/allocator/art-dlmalloc.cc | 4
-rw-r--r--  runtime/gc/allocator/rosalloc.cc | 190
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 18
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 10
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 2
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 6
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc | 4
-rw-r--r--  runtime/gc/collector/mark_compact.cc | 256
-rw-r--r--  runtime/gc/collector/mark_compact.h | 6
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 2
-rw-r--r--  runtime/gc/collector/semi_space.cc | 16
-rw-r--r--  runtime/gc/heap.cc | 10
-rw-r--r--  runtime/gc/heap.h | 2
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 2
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 2
-rw-r--r--  runtime/gc/space/image_space.cc | 4
-rw-r--r--  runtime/gc/space/large_object_space.cc | 2
-rw-r--r--  runtime/gc/space/malloc_space.cc | 16
-rw-r--r--  runtime/gc/space/region_space.cc | 4
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/indirect_reference_table.cc | 8
-rw-r--r--  runtime/jit/jit.cc | 24
-rw-r--r--  runtime/jit/jit_code_cache.h | 2
-rw-r--r--  runtime/jit/jit_memory_region.cc | 8
-rw-r--r--  runtime/jit/jit_memory_region_test.cc | 82
-rw-r--r--  runtime/jni/jni_internal_test.cc | 60
-rw-r--r--  runtime/jni/local_reference_table.cc | 30
-rw-r--r--  runtime/jni/local_reference_table.h | 2
-rw-r--r--  runtime/jni/local_reference_table_test.cc | 10
-rw-r--r--  runtime/oat_file.cc | 2
-rw-r--r--  runtime/runtime.cc | 10
-rw-r--r--  runtime/runtime_callbacks_test.cc | 2
-rw-r--r--  runtime/runtime_globals.h | 2
-rw-r--r--  runtime/thread.cc | 42
-rw-r--r--  runtime/thread_android.cc | 4
-rw-r--r--  runtime/thread_pool.cc | 6
-rw-r--r--  test/305-other-fault-handler/fault_handler.cc | 2
61 files changed, 628 insertions, 628 deletions
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 676f591c28..206020fa82 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -57,9 +57,9 @@ class CommonCompilerTestImpl::CodeAndMetadata {
: sizeof(OatQuickMethodHeader) + vmap_table.size();
OatQuickMethodHeader method_header(vmap_table_offset);
const size_t code_alignment = GetInstructionSetCodeAlignment(instruction_set);
- DCHECK_ALIGNED_PARAM(kPageSize, code_alignment);
+ DCHECK_ALIGNED_PARAM(gPageSize, code_alignment);
const uint32_t code_offset = RoundUp(vmap_table.size() + sizeof(method_header), code_alignment);
- const uint32_t capacity = RoundUp(code_offset + code_size, kPageSize);
+ const uint32_t capacity = RoundUp(code_offset + code_size, gPageSize);
// Create a memfd handle with sufficient capacity.
android::base::unique_fd mem_fd(art::memfd_create_compat("test code", /*flags=*/ 0));
diff --git a/compiler/jit/jit_logger.cc b/compiler/jit/jit_logger.cc
index 32845260f3..c50103df9c 100644
--- a/compiler/jit/jit_logger.cc
+++ b/compiler/jit/jit_logger.cc
@@ -211,7 +211,7 @@ void JitLogger::OpenMarkerFile() {
int fd = jit_dump_file_->Fd();
// The 'perf inject' tool requires that the jit-PID.dump file
// must have a mmap(PROT_READ|PROT_EXEC) record in perf.data.
- marker_address_ = mmap(nullptr, kPageSize, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
+ marker_address_ = mmap(nullptr, gPageSize, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
if (marker_address_ == MAP_FAILED) {
LOG(WARNING) << "Failed to create record in perf.data. JITed code profiling will not work.";
return;
@@ -220,7 +220,7 @@ void JitLogger::OpenMarkerFile() {
void JitLogger::CloseMarkerFile() {
if (marker_address_ != nullptr) {
- munmap(marker_address_, kPageSize);
+ munmap(marker_address_, gPageSize);
}
}
diff --git a/dex2oat/common_compiler_driver_test.cc b/dex2oat/common_compiler_driver_test.cc
index 7a1d11c298..d47982f54a 100644
--- a/dex2oat/common_compiler_driver_test.cc
+++ b/dex2oat/common_compiler_driver_test.cc
@@ -111,13 +111,13 @@ void CommonCompilerDriverTest::SetUp() {
// Note: We cannot use MemMap because some tests tear down the Runtime and destroy
// the gMaps, so when destroying the MemMap, the test would crash.
- inaccessible_page_ = mmap(nullptr, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ inaccessible_page_ = mmap(nullptr, gPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
CHECK(inaccessible_page_ != MAP_FAILED) << strerror(errno);
}
void CommonCompilerDriverTest::TearDown() {
if (inaccessible_page_ != nullptr) {
- munmap(inaccessible_page_, kPageSize);
+ munmap(inaccessible_page_, gPageSize);
inaccessible_page_ = nullptr;
}
image_reservation_.Reset();
diff --git a/dex2oat/linker/elf_writer_test.cc b/dex2oat/linker/elf_writer_test.cc
index 6fa5f6602d..b01ebf59e3 100644
--- a/dex2oat/linker/elf_writer_test.cc
+++ b/dex2oat/linker/elf_writer_test.cc
@@ -99,7 +99,7 @@ TEST_F(ElfWriterTest, dlsym) {
bool success = ef->GetLoadedSize(&size, &error_msg);
CHECK(success) << error_msg;
MemMap reservation = MemMap::MapAnonymous("ElfWriterTest#dlsym reservation",
- RoundUp(size, kPageSize),
+ RoundUp(size, gPageSize),
PROT_NONE,
/*low_4gb=*/ true,
&error_msg);
diff --git a/dex2oat/utils/swap_space.cc b/dex2oat/utils/swap_space.cc
index 6e0773bba4..336aad8de5 100644
--- a/dex2oat/utils/swap_space.cc
+++ b/dex2oat/utils/swap_space.cc
@@ -29,7 +29,7 @@
namespace art {
// The chunk size by which the swap file is increased and mapped.
-static constexpr size_t kMininumMapSize = 16 * MB;
+static constexpr size_t kMinimumMapSize = 16 * MB;
static constexpr bool kCheckFreeMaps = false;
@@ -146,7 +146,7 @@ void* SwapSpace::Alloc(size_t size) {
SwapSpace::SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
#if !defined(__APPLE__)
- size_t next_part = std::max(RoundUp(min_size, kPageSize), RoundUp(kMininumMapSize, kPageSize));
+ size_t next_part = std::max(RoundUp(min_size, gPageSize), RoundUp(kMinimumMapSize, gPageSize));
int result = TEMP_FAILURE_RETRY(ftruncate64(fd_, size_ + next_part));
if (result != 0) {
PLOG(FATAL) << "Unable to increase swap file.";
@@ -165,7 +165,7 @@ SwapSpace::SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
SpaceChunk new_chunk = {ptr, next_part};
return new_chunk;
#else
- UNUSED(min_size, kMininumMapSize);
+ UNUSED(min_size, kMinimumMapSize);
LOG(FATAL) << "No swap file support on the Mac.";
UNREACHABLE();
#endif
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
index 382e294d12..f47f27c848 100644
--- a/dexlayout/dex_visualize.cc
+++ b/dexlayout/dex_visualize.cc
@@ -70,7 +70,7 @@ class Dumper {
if (printed_one) {
fprintf(out_file_, ", ");
}
- fprintf(out_file_, "\"%s\" %" PRIuPTR, s.name.c_str(), s.offset / kPageSize);
+ fprintf(out_file_, "\"%s\" %" PRIuPTR, s.name.c_str(), s.offset / gPageSize);
printed_one = true;
}
}
@@ -98,8 +98,8 @@ class Dumper {
}
void DumpAddressRange(uint32_t from, uint32_t size, int class_index) {
- const uint32_t low_page = from / kPageSize;
- const uint32_t high_page = (size > 0) ? (from + size - 1) / kPageSize : low_page;
+ const uint32_t low_page = from / gPageSize;
+ const uint32_t high_page = (size > 0) ? (from + size - 1) / gPageSize : low_page;
const uint32_t size_delta = high_page - low_page;
fprintf(out_file_, "%d %d %d 0 %d\n", low_page, class_index, size_delta, GetColor(from));
}
@@ -336,7 +336,7 @@ void ShowDexSectionStatistics(dex_ir::Header* header, size_t dex_file_index) {
file_section.offset,
file_section.size,
bytes,
- RoundUp(bytes, kPageSize) / kPageSize,
+ RoundUp(bytes, gPageSize) / gPageSize,
100 * bytes / header->FileSize());
}
fprintf(stdout, "\n");
diff --git a/dexlayout/dexdiag.cc b/dexlayout/dexdiag.cc
index 9aa0353345..21bb1c47ee 100644
--- a/dexlayout/dexdiag.cc
+++ b/dexlayout/dexdiag.cc
@@ -186,7 +186,7 @@ static char PageTypeChar(uint16_t type) {
static uint16_t FindSectionTypeForPage(size_t page,
const std::vector<dex_ir::DexFileSection>& sections) {
for (const auto& section : sections) {
- size_t first_page_of_section = section.offset / kPageSize;
+ size_t first_page_of_section = section.offset / gPageSize;
// Only consider non-empty sections.
if (section.size == 0) {
continue;
@@ -287,14 +287,14 @@ static void ProcessOneDexMapping(const std::vector<uint64_t>& pagemap,
<< std::endl;
return;
}
- uint64_t start_page = (dex_file_start - vdex_start) / kPageSize;
- uint64_t start_address = start_page * kPageSize;
- uint64_t end_page = RoundUp(start_address + dex_file_size, kPageSize) / kPageSize;
+ uint64_t start_page = (dex_file_start - vdex_start) / gPageSize;
+ uint64_t start_address = start_page * gPageSize;
+ uint64_t end_page = RoundUp(start_address + dex_file_size, gPageSize) / gPageSize;
std::cout << "DEX "
<< dex_file->GetLocation().c_str()
<< StringPrintf(": %" PRIx64 "-%" PRIx64,
- map_start + start_page * kPageSize,
- map_start + end_page * kPageSize)
+ map_start + start_page * gPageSize,
+ map_start + end_page * gPageSize)
<< std::endl;
// Build a list of the dex file section types, sorted from highest offset to lowest.
std::vector<dex_ir::DexFileSection> sections;
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 1fba35493c..1510a04e6f 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -229,9 +229,9 @@ size_t EntrySize(ArtMethod* art_method) REQUIRES_SHARED(Locks::mutator_lock_) {
// Print all pages the entry belongs to
void PrintEntryPages(uintptr_t entry_address, size_t entry_size, std::ostream& os) {
const char* tabs = " ";
- const uintptr_t first_page_idx = entry_address / kPageSize;
+ const uintptr_t first_page_idx = entry_address / gPageSize;
const uintptr_t last_page_idx = RoundUp(entry_address + entry_size,
- kObjectAlignment) / kPageSize;
+ kObjectAlignment) / gPageSize;
for (uintptr_t page_idx = first_page_idx; page_idx <= last_page_idx; ++page_idx) {
os << tabs << "page_idx=" << page_idx << "\n";
}
@@ -298,13 +298,13 @@ struct RegionCommon {
uintptr_t entry_address = reinterpret_cast<uintptr_t>(entry);
// Iterate every page this entry belongs to
do {
- current_page_idx = entry_address / kPageSize + page_off;
+ current_page_idx = entry_address / gPageSize + page_off;
if (dirty_pages.find(current_page_idx) != dirty_pages.end()) {
// This entry is on a dirty page
return true;
}
page_off++;
- } while ((current_page_idx * kPageSize) < RoundUp(entry_address + size, kObjectAlignment));
+ } while ((current_page_idx * gPageSize) < RoundUp(entry_address + size, kObjectAlignment));
return false;
}
@@ -1155,7 +1155,7 @@ class RegionData : public RegionSpecializedBase<T> {
// Looking at only dirty pages, figure out how many of those bytes belong to dirty entries.
// TODO: fix this now that there are multiple regions in a mapping.
float true_dirtied_percent =
- RegionCommon<T>::GetDirtyEntryBytes() * 1.0f / (mapping_data.dirty_pages * kPageSize);
+ RegionCommon<T>::GetDirtyEntryBytes() * 1.0f / (mapping_data.dirty_pages * gPageSize);
// Entry specific statistics.
os_ << RegionCommon<T>::GetDifferentEntryCount() << " different entries, \n "
@@ -1413,7 +1413,7 @@ class ImgDiagDumper {
MappingData* mapping_data /*out*/,
std::string* error_msg /*out*/) {
// Iterate through one page at a time. Boot map begin/end already implicitly aligned.
- for (uintptr_t begin = boot_map.start; begin != boot_map.end; begin += kPageSize) {
+ for (uintptr_t begin = boot_map.start; begin != boot_map.end; begin += gPageSize) {
const ptrdiff_t offset = begin - boot_map.start;
// We treat the image header as part of the memory map for now
@@ -1422,11 +1422,11 @@ class ImgDiagDumper {
const uint8_t* zygote_ptr = &zygote_contents[offset];
const uint8_t* remote_ptr = &remote_contents[offset];
- if (memcmp(zygote_ptr, remote_ptr, kPageSize) != 0) {
+ if (memcmp(zygote_ptr, remote_ptr, gPageSize) != 0) {
mapping_data->different_pages++;
// Count the number of 32-bit integers that are different.
- for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {
+ for (size_t i = 0; i < gPageSize / sizeof(uint32_t); ++i) {
const uint32_t* remote_ptr_int32 = reinterpret_cast<const uint32_t*>(remote_ptr);
const uint32_t* zygote_ptr_int32 = reinterpret_cast<const uint32_t*>(zygote_ptr);
@@ -1435,7 +1435,7 @@ class ImgDiagDumper {
}
}
// Count the number of bytes that are different.
- for (size_t i = 0; i < kPageSize; ++i) {
+ for (size_t i = 0; i < gPageSize; ++i) {
if (remote_ptr[i] != zygote_ptr[i]) {
mapping_data->different_bytes++;
}
@@ -1443,11 +1443,11 @@ class ImgDiagDumper {
}
}
- for (uintptr_t begin = boot_map.start; begin != boot_map.end; begin += kPageSize) {
+ for (uintptr_t begin = boot_map.start; begin != boot_map.end; begin += gPageSize) {
ptrdiff_t offset = begin - boot_map.start;
// Virtual page number (for an absolute memory address)
- size_t virtual_page_idx = begin / kPageSize;
+ size_t virtual_page_idx = begin / gPageSize;
uint64_t page_count = 0xC0FFEE;
// TODO: virtual_page_idx needs to be from the same process
@@ -1555,7 +1555,7 @@ class ImgDiagDumper {
// Adjust the `end` of the mapping. Some other mappings may have been
// inserted within the image.
- boot_map.end = RoundUp(boot_map.start + image_header.GetImageSize(), kPageSize);
+ boot_map.end = RoundUp(boot_map.start + image_header.GetImageSize(), gPageSize);
// The size of the boot image mapping.
size_t boot_map_size = boot_map.end - boot_map.start;
@@ -1569,7 +1569,7 @@ class ImgDiagDumper {
android::procinfo::MapInfo& zygote_boot_map = *maybe_zygote_boot_map;
// Adjust the `end` of the mapping. Some other mappings may have been
// inserted within the image.
- zygote_boot_map.end = RoundUp(zygote_boot_map.start + image_header.GetImageSize(), kPageSize);
+ zygote_boot_map.end = RoundUp(zygote_boot_map.start + image_header.GetImageSize(), gPageSize);
if (zygote_boot_map.start != boot_map.start) {
os << "Zygote boot map does not match image boot map: "
<< "zygote begin " << reinterpret_cast<const void*>(zygote_boot_map.start)
@@ -1589,8 +1589,8 @@ class ImgDiagDumper {
const uint8_t* image_end_unaligned = image_begin_unaligned + image_header.GetImageSize();
// Adjust range to nearest page
- const uint8_t* image_begin = AlignDown(image_begin_unaligned, kPageSize);
- const uint8_t* image_end = AlignUp(image_end_unaligned, kPageSize);
+ const uint8_t* image_begin = AlignDown(image_begin_unaligned, gPageSize);
+ const uint8_t* image_end = AlignUp(image_end_unaligned, gPageSize);
size_t image_size = image_end - image_begin;
if (image_size != boot_map_size) {
@@ -1603,8 +1603,8 @@ class ImgDiagDumper {
auto read_contents = [&](File* mem_file,
/*out*/ MemMap* map,
/*out*/ ArrayRef<uint8_t>* contents) {
- DCHECK_ALIGNED_PARAM(boot_map.start, kPageSize);
- DCHECK_ALIGNED_PARAM(boot_map_size, kPageSize);
+ DCHECK_ALIGNED_PARAM(boot_map.start, gPageSize);
+ DCHECK_ALIGNED_PARAM(boot_map_size, gPageSize);
std::string name = "Contents of " + mem_file->GetPath();
std::string local_error_msg;
// We need to use low 4 GiB memory so that we can walk the objects using standard
diff --git a/imgdiag/page_info.cc b/imgdiag/page_info.cc
index 8f4be090d5..628cc60b26 100644
--- a/imgdiag/page_info.cc
+++ b/imgdiag/page_info.cc
@@ -83,7 +83,7 @@ bool OpenProcFiles(pid_t pid, /*out*/ ProcFiles& files, /*out*/ std::string& err
}
void DumpPageInfo(uint64_t virtual_page_index, ProcFiles& proc_files, std::ostream& os) {
- const uint64_t virtual_page_addr = virtual_page_index * kPageSize;
+ const uint64_t virtual_page_addr = virtual_page_index * gPageSize;
os << "Virtual page index: " << virtual_page_index << "\n";
os << "Virtual page addr: " << virtual_page_addr << "\n";
@@ -117,7 +117,7 @@ void DumpPageInfo(uint64_t virtual_page_index, ProcFiles& proc_files, std::ostre
os << "kpageflags: " << page_flags << "\n";
if (page_count != 0) {
- std::vector<uint8_t> page_contents(kPageSize);
+ std::vector<uint8_t> page_contents(gPageSize);
if (!proc_files.mem.PreadFully(page_contents.data(), page_contents.size(), virtual_page_addr)) {
os << "Failed to read page contents\n";
return;
@@ -154,9 +154,9 @@ bool GetMapPageCounts(ProcFiles& proc_files,
map_page_counts.name = map_info.name;
map_page_counts.start = map_info.start;
map_page_counts.end = map_info.end;
- std::vector<uint8_t> page_contents(kPageSize);
- for (uint64_t begin = map_info.start; begin < map_info.end; begin += kPageSize) {
- const size_t virtual_page_index = begin / kPageSize;
+ std::vector<uint8_t> page_contents(gPageSize);
+ for (uint64_t begin = map_info.start; begin < map_info.end; begin += gPageSize) {
+ const size_t virtual_page_index = begin / gPageSize;
uint64_t page_frame_number = -1;
if (!GetPageFrameNumber(proc_files.pagemap, virtual_page_index, page_frame_number, error_msg)) {
return false;
diff --git a/libartbase/arch/instruction_set.cc b/libartbase/arch/instruction_set.cc
index e0de4e8091..7d969e776f 100644
--- a/libartbase/arch/instruction_set.cc
+++ b/libartbase/arch/instruction_set.cc
@@ -133,7 +133,7 @@ static_assert(IsAligned<kPageSize>(kX86_64StackOverflowReservedBytes),
#error "ART frame size limit missing"
#endif
-// TODO: Should we require an extra page (RoundUp(SIZE) + kPageSize)?
+// TODO: Should we require an extra page (RoundUp(SIZE) + gPageSize)?
static_assert(ART_FRAME_SIZE_LIMIT < kArmStackOverflowReservedBytes, "Frame size limit too large");
static_assert(ART_FRAME_SIZE_LIMIT < kArm64StackOverflowReservedBytes,
"Frame size limit too large");
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index f3dbd63d17..e4f19c15ec 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -33,7 +33,7 @@ class BitMemoryRegion final : public ValueObject {
BitMemoryRegion() = default;
ALWAYS_INLINE BitMemoryRegion(uint8_t* data, ssize_t bit_start, size_t bit_size) {
// Normalize the data pointer. Note that bit_start may be negative.
- data_ = AlignDown(data + (bit_start >> kBitsPerByteLog2), kPageSize);
+ data_ = AlignDown(data + (bit_start >> kBitsPerByteLog2), gPageSize);
bit_start_ = bit_start + kBitsPerByte * (data - data_);
bit_size_ = bit_size;
}
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index 3fb923ccf5..e2323f4747 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -36,7 +36,7 @@ static constexpr size_t kStackAlignment = 16;
// System page size. We check this against sysconf(_SC_PAGE_SIZE) at runtime, but use a simple
// compile-time constant so the compiler can generate better code.
-static constexpr size_t kPageSize = 4096;
+static constexpr size_t gPageSize = 4096;
// Minimum supported page size.
static constexpr size_t kMinPageSize = 4096;
@@ -58,15 +58,15 @@ static constexpr size_t kMaxPageSize = kMinPageSize;
static constexpr size_t kElfSegmentAlignment = kMaxPageSize;
// Address range covered by 1 Page Middle Directory (PMD) entry in the page table
-extern const size_t kPMDSize;
+extern const size_t gPMDSize;
// Address range covered by 1 Page Upper Directory (PUD) entry in the page table
-extern const size_t kPUDSize;
+extern const size_t gPUDSize;
// Returns the ideal alignment corresponding to page-table levels for the
// given size.
static inline size_t BestPageTableAlignment(size_t size) {
- return size < kPUDSize ? kPMDSize : kPUDSize;
+ return size < gPUDSize ? gPMDSize : gPUDSize;
}
// Clion, clang analyzer, etc can falsely believe that "if (kIsDebugBuild)" always
// returns the same value. By wrapping into a call to another constexpr function, we force it
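As an aside, to make the BestPageTableAlignment() helper above concrete: using the formulas from libartbase/base/mem_map.cc in the next hunk and assuming a conventional 4 KiB page with 8-byte page-table entries (an assumption for illustration, not part of this patch), the two constants work out as follows:

    #include <cstddef>  // size_t
    #include <cstdint>  // uint64_t

    // Illustration only, assuming a 4 KiB page and 8-byte page-table entries.
    constexpr size_t kIllustrativePageSize = 4096;
    // gPMDSize = (gPageSize / sizeof(uint64_t)) * gPageSize = 512 * 4 KiB = 2 MiB
    constexpr size_t kIllustrativePMDSize =
        (kIllustrativePageSize / sizeof(uint64_t)) * kIllustrativePageSize;
    // gPUDSize = (gPageSize / sizeof(uint64_t)) * gPMDSize = 512 * 2 MiB = 1 GiB
    constexpr size_t kIllustrativePUDSize =
        (kIllustrativePageSize / sizeof(uint64_t)) * kIllustrativePMDSize;
    static_assert(kIllustrativePMDSize == 2u * 1024 * 1024, "PMD covers 2 MiB");
    static_assert(kIllustrativePUDSize == 1024u * 1024 * 1024, "PUD covers 1 GiB");

So, under that assumption, BestPageTableAlignment(size) requests 2 MiB alignment for mappings smaller than 1 GiB and 1 GiB alignment otherwise.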
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 884d712c22..9bfcea7592 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -59,9 +59,9 @@ using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;
// TODO: Kernels for arm and x86 in both, 32-bit and 64-bit modes use 512 entries per page-table
// page. Find a way to confirm that in userspace.
// Address range covered by 1 Page Middle Directory (PMD) entry in the page table
-const size_t kPMDSize = (kPageSize / sizeof(uint64_t)) * kPageSize;
+const size_t gPMDSize = (gPageSize / sizeof(uint64_t)) * gPageSize;
// Address range covered by 1 Page Upper Directory (PUD) entry in the page table
-const size_t kPUDSize = (kPageSize / sizeof(uint64_t)) * kPMDSize;
+const size_t gPUDSize = (gPageSize / sizeof(uint64_t)) * gPMDSize;
// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
@@ -110,7 +110,7 @@ static constexpr uintptr_t LOW_MEM_START = 64 * KB;
// ART_BASE_ADDR = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
// = 0000111111111111111
-// & ~(kPageSize - 1) =~0000000000000001111
+// & ~(gPageSize - 1) =~0000000000000001111
// ----------------------------------------
// mask = 0000111111111110000
// & random data = YYYYYYYYYYYYYYYYYYY
@@ -133,7 +133,7 @@ uintptr_t CreateStartPos(uint64_t input) {
constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;
// Lowest (usually 12) bits are not used, as aligned by page size.
- const uintptr_t mask = mask_ones & ~(kPageSize - 1);
+ const uintptr_t mask = mask_ones & ~(gPageSize - 1);
// Mask input data.
return (input & mask) + LOW_MEM_START;
@@ -243,7 +243,7 @@ bool MemMap::CheckReservation(uint8_t* expected_ptr,
*error_msg = StringPrintf("Invalid reservation for %s", name);
return false;
}
- DCHECK_ALIGNED_PARAM(reservation.Begin(), kPageSize);
+ DCHECK_ALIGNED_PARAM(reservation.Begin(), gPageSize);
if (reservation.Begin() != expected_ptr) {
*error_msg = StringPrintf("Bad image reservation start for %s: %p instead of %p",
name,
@@ -324,7 +324,7 @@ MemMap MemMap::MapAnonymous(const char* name,
*error_msg = "Empty MemMap requested.";
return Invalid();
}
- size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
+ size_t page_aligned_byte_count = RoundUp(byte_count, gPageSize);
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
if (reuse) {
@@ -402,25 +402,25 @@ MemMap MemMap::MapAnonymousAligned(const char* name,
DCHECK(IsPowerOfTwo(alignment));
#ifdef ART_PAGE_SIZE_AGNOSTIC
- // In page size agnostic configuration, the kPageSize is not known
+ // In page size agnostic configuration, the gPageSize is not known
// statically, so this interface has to support the case when alignment
// requested is greater than minimum page size however lower or equal to
// the actual page size.
DCHECK_GT(alignment, kMinPageSize);
- if (alignment <= kPageSize) {
+ if (alignment <= gPageSize) {
return MapAnonymous(name, byte_count, prot, low_4gb, error_msg);
}
#else
- DCHECK_GT(alignment, kPageSize);
+ DCHECK_GT(alignment, gPageSize);
#endif
- // Allocate extra 'alignment - kPageSize' bytes so that the mapping can be aligned.
+ // Allocate extra 'alignment - gPageSize' bytes so that the mapping can be aligned.
MemMap ret = MapAnonymous(name,
/*addr=*/nullptr,
// AlignBy requires the size to be page-aligned, so
// rounding it here. It is corrected afterwards with
// SetSize after AlignBy.
- RoundUp(byte_count, kPageSize) + alignment - kPageSize,
+ RoundUp(byte_count, gPageSize) + alignment - gPageSize,
prot,
low_4gb,
/*reuse=*/false,
@@ -439,7 +439,7 @@ MemMap MemMap::MapPlaceholder(const char* name, uint8_t* addr, size_t byte_count
if (byte_count == 0) {
return Invalid();
}
- const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
+ const size_t page_aligned_byte_count = RoundUp(byte_count, gPageSize);
return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, /* reuse= */ true);
}
@@ -566,10 +566,10 @@ MemMap MemMap::MapFileAtAddress(uint8_t* expected_ptr,
return Invalid();
}
// Adjust 'offset' to be page-aligned as required by mmap.
- int page_offset = start % kPageSize;
+ int page_offset = start % gPageSize;
off_t page_aligned_offset = start - page_offset;
// Adjust 'byte_count' to be page-aligned as we will map this anyway.
- size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
+ size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, gPageSize);
// The 'expected_ptr' is modified (if specified, ie non-null) to be page aligned to the file but
// not necessarily to virtual memory. mmap will page align 'expected' for us.
uint8_t* page_aligned_expected =
@@ -577,7 +577,7 @@ MemMap MemMap::MapFileAtAddress(uint8_t* expected_ptr,
size_t redzone_size = 0;
if (kRunningOnMemoryTool && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
- redzone_size = kPageSize;
+ redzone_size = gPageSize;
page_aligned_byte_count += redzone_size;
}
@@ -772,10 +772,10 @@ MemMap MemMap::RemapAtEnd(uint8_t* new_end,
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
- DCHECK_ALIGNED_PARAM(begin_, kPageSize);
- DCHECK_ALIGNED_PARAM(base_begin_, kPageSize);
- DCHECK_ALIGNED_PARAM(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
- DCHECK_ALIGNED_PARAM(new_end, kPageSize);
+ DCHECK_ALIGNED_PARAM(begin_, gPageSize);
+ DCHECK_ALIGNED_PARAM(base_begin_, gPageSize);
+ DCHECK_ALIGNED_PARAM(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, gPageSize);
+ DCHECK_ALIGNED_PARAM(new_end, gPageSize);
uint8_t* old_end = begin_ + size_;
uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
uint8_t* new_base_end = new_end;
@@ -790,7 +790,7 @@ MemMap MemMap::RemapAtEnd(uint8_t* new_end,
uint8_t* tail_base_begin = new_base_end;
size_t tail_base_size = old_base_end - new_base_end;
DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
- DCHECK_ALIGNED_PARAM(tail_base_size, kPageSize);
+ DCHECK_ALIGNED_PARAM(tail_base_size, gPageSize);
MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
// Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
@@ -829,7 +829,7 @@ MemMap MemMap::RemapAtEnd(uint8_t* new_end,
MemMap MemMap::TakeReservedMemory(size_t byte_count, bool reuse) {
uint8_t* begin = Begin();
ReleaseReservedMemory(byte_count); // Performs necessary DCHECK()s on this reservation.
- size_t base_size = RoundUp(byte_count, kPageSize);
+ size_t base_size = RoundUp(byte_count, gPageSize);
return MemMap(name_, begin, byte_count, begin, base_size, prot_, reuse);
}
@@ -841,13 +841,13 @@ void MemMap::ReleaseReservedMemory(size_t byte_count) {
DCHECK_EQ(redzone_size_, 0u);
DCHECK_EQ(begin_, base_begin_);
DCHECK_EQ(size_, base_size_);
- DCHECK_ALIGNED_PARAM(begin_, kPageSize);
- DCHECK_ALIGNED_PARAM(size_, kPageSize);
+ DCHECK_ALIGNED_PARAM(begin_, gPageSize);
+ DCHECK_ALIGNED_PARAM(size_, gPageSize);
// Check and round up the `byte_count`.
DCHECK_NE(byte_count, 0u);
DCHECK_LE(byte_count, size_);
- byte_count = RoundUp(byte_count, kPageSize);
+ byte_count = RoundUp(byte_count, gPageSize);
if (byte_count == size_) {
Invalidate();
@@ -962,7 +962,7 @@ void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
size_t num_gaps = 0;
size_t num = 1u;
size_t size = map->BaseSize();
- CHECK_ALIGNED_PARAM(size, kPageSize);
+ CHECK_ALIGNED_PARAM(size, gPageSize);
void* end = map->BaseEnd();
while (it != maps_end &&
it->second->GetProtect() == map->GetProtect() &&
@@ -970,24 +970,24 @@ void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
(it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
if (it->second->BaseBegin() != end) {
++num_gaps;
- os << "+0x" << std::hex << (size / kPageSize) << "P";
+ os << "+0x" << std::hex << (size / gPageSize) << "P";
if (num != 1u) {
os << "(" << std::dec << num << ")";
}
size_t gap =
reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
- CHECK_ALIGNED_PARAM(gap, kPageSize);
- os << "~0x" << std::hex << (gap / kPageSize) << "P";
+ CHECK_ALIGNED_PARAM(gap, gPageSize);
+ os << "~0x" << std::hex << (gap / gPageSize) << "P";
num = 0u;
size = 0u;
}
- CHECK_ALIGNED_PARAM(it->second->BaseSize(), kPageSize);
+ CHECK_ALIGNED_PARAM(it->second->BaseSize(), gPageSize);
++num;
size += it->second->BaseSize();
end = it->second->BaseEnd();
++it;
}
- os << "+0x" << std::hex << (size / kPageSize) << "P";
+ os << "+0x" << std::hex << (size / gPageSize) << "P";
if (num != 1u) {
os << "(" << std::dec << num << ")";
}
@@ -1028,8 +1028,8 @@ void MemMap::Init() {
return;
}
- CHECK_GE(kPageSize, kMinPageSize);
- CHECK_LE(kPageSize, kMaxPageSize);
+ CHECK_GE(gPageSize, kMinPageSize);
+ CHECK_LE(gPageSize, kMaxPageSize);
mem_maps_lock_ = new std::mutex();
// Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
@@ -1061,7 +1061,7 @@ void MemMap::Shutdown() {
void MemMap::SetSize(size_t new_size) {
CHECK_LE(new_size, size_);
size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
- kPageSize);
+ gPageSize);
if (new_base_size == base_size_) {
size_ = new_size;
return;
@@ -1090,7 +1090,7 @@ void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
bool first_run = true;
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
- for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
+ for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += gPageSize) {
// Use gMaps as an optimization to skip over large maps.
// Find the first map which is address > ptr.
auto it = gMaps->upper_bound(reinterpret_cast<void*>(ptr));
@@ -1099,7 +1099,7 @@ void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
--before_it;
// Start at the end of the map before the upper bound.
ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
- CHECK_ALIGNED_PARAM(ptr, kPageSize);
+ CHECK_ALIGNED_PARAM(ptr, gPageSize);
}
while (it != gMaps->end()) {
// How much space do we have until the next map?
@@ -1110,7 +1110,7 @@ void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
}
// Otherwise, skip to the end of the map.
ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
- CHECK_ALIGNED_PARAM(ptr, kPageSize);
+ CHECK_ALIGNED_PARAM(ptr, gPageSize);
++it;
}
@@ -1125,7 +1125,7 @@ void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
// Not enough memory until 4GB.
if (first_run) {
// Try another time from the bottom;
- ptr = LOW_MEM_START - kPageSize;
+ ptr = LOW_MEM_START - gPageSize;
first_run = false;
continue;
} else {
@@ -1138,8 +1138,8 @@ void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
// Check pages are free.
bool safe = true;
- for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
- if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
+ for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += gPageSize) {
+ if (msync(reinterpret_cast<void*>(tail_ptr), gPageSize, 0) == 0) {
safe = false;
break;
} else {
@@ -1195,7 +1195,7 @@ void* MemMap::MapInternal(void* addr,
#else
UNUSED(low_4gb);
#endif
- DCHECK_ALIGNED_PARAM(length, kPageSize);
+ DCHECK_ALIGNED_PARAM(length, gPageSize);
// TODO:
// A page allocator would be a useful abstraction here, as
// 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
@@ -1254,11 +1254,11 @@ void MemMap::TryReadable() {
CHECK_NE(prot_ & PROT_READ, 0);
volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
volatile uint8_t* end = begin + base_size_;
- DCHECK(IsAlignedParam(begin, kPageSize));
- DCHECK(IsAlignedParam(end, kPageSize));
+ DCHECK(IsAlignedParam(begin, gPageSize));
+ DCHECK(IsAlignedParam(end, gPageSize));
// Read the first byte of each page. Use volatile to prevent the compiler from optimizing away the
// reads.
- for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
+ for (volatile uint8_t* ptr = begin; ptr < end; ptr += gPageSize) {
// This read could fault if protection wasn't set correctly.
uint8_t value = *ptr;
UNUSED(value);
@@ -1271,8 +1271,8 @@ static void inline RawClearMemory(uint8_t* begin, uint8_t* end) {
#if defined(__linux__)
static inline void ClearMemory(uint8_t* page_begin, size_t size, bool resident) {
- DCHECK(IsAlignedParam(page_begin, kPageSize));
- DCHECK(IsAlignedParam(page_begin + size, kPageSize));
+ DCHECK(IsAlignedParam(page_begin, gPageSize));
+ DCHECK(IsAlignedParam(page_begin + size, gPageSize));
if (resident) {
RawClearMemory(page_begin, page_begin + size);
// Note we check madvise return value against -1, as it seems old kernels
@@ -1294,8 +1294,8 @@ void ZeroMemory(void* address, size_t length, bool release_eagerly) {
}
uint8_t* const mem_begin = reinterpret_cast<uint8_t*>(address);
uint8_t* const mem_end = mem_begin + length;
- uint8_t* const page_begin = AlignUp(mem_begin, kPageSize);
- uint8_t* const page_end = AlignDown(mem_end, kPageSize);
+ uint8_t* const page_begin = AlignUp(mem_begin, gPageSize);
+ uint8_t* const page_end = AlignDown(mem_end, gPageSize);
if (!kMadviseZeroes || page_begin >= page_end) {
// No possible area to madvise.
RawClearMemory(mem_begin, mem_end);
@@ -1315,20 +1315,20 @@ void ZeroMemory(void* address, size_t length, bool release_eagerly) {
// mincore() is linux-specific syscall.
#if defined(__linux__)
if (!release_eagerly) {
- size_t vec_len = (page_end - page_begin) / kPageSize;
+ size_t vec_len = (page_end - page_begin) / gPageSize;
std::unique_ptr<unsigned char[]> vec(new unsigned char[vec_len]);
if (mincore(page_begin, page_end - page_begin, vec.get()) == 0) {
uint8_t* current_page = page_begin;
- size_t current_size = kPageSize;
+ size_t current_size = gPageSize;
uint32_t old_state = vec[0] & 0x1;
for (size_t i = 1; i < vec_len; ++i) {
uint32_t new_state = vec[i] & 0x1;
if (old_state == new_state) {
- current_size += kPageSize;
+ current_size += gPageSize;
} else {
ClearMemory(current_page, current_size, old_state);
current_page = current_page + current_size;
- current_size = kPageSize;
+ current_size = gPageSize;
old_state = new_state;
}
}
@@ -1353,8 +1353,8 @@ void ZeroMemory(void* address, size_t length, bool release_eagerly) {
void MemMap::AlignBy(size_t alignment, bool align_both_ends) {
CHECK_EQ(begin_, base_begin_) << "Unsupported";
CHECK_EQ(size_, base_size_) << "Unsupported";
- CHECK_GT(alignment, static_cast<size_t>(kPageSize));
- CHECK_ALIGNED_PARAM(alignment, kPageSize);
+ CHECK_GT(alignment, static_cast<size_t>(gPageSize));
+ CHECK_ALIGNED_PARAM(alignment, gPageSize);
CHECK(!reuse_);
if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), alignment) &&
(!align_both_ends || IsAlignedParam(base_size_, alignment))) {
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index 56dd35d9dc..742836d517 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -64,7 +64,7 @@ class MemMapTest : public CommonArtTest {
static void RemapAtEndTest(bool low_4gb) {
std::string error_msg;
// Cast the page size to size_t.
- const size_t page_size = static_cast<size_t>(kPageSize);
+ const size_t page_size = static_cast<size_t>(gPageSize);
// Map a two-page memory region.
MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
2 * page_size,
@@ -146,7 +146,7 @@ TEST_F(MemMapTest, Start) {
// Test a couple of values. Make sure they are different.
uintptr_t last = 0;
for (size_t i = 0; i < 100; ++i) {
- uintptr_t random_start = CreateStartPos(i * kPageSize);
+ uintptr_t random_start = CreateStartPos(i * gPageSize);
EXPECT_NE(last, random_start);
last = random_start;
}
@@ -163,13 +163,13 @@ TEST_F(MemMapTest, Start) {
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- kPageSize,
+ gPageSize,
PROT_READ,
/*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- kPageSize,
+ gPageSize,
PROT_WRITE | PROT_READ,
/*low_4gb=*/ false,
&error_msg);
@@ -179,7 +179,7 @@ TEST_F(MemMapTest, ReplaceMapping_SameSize) {
ASSERT_TRUE(IsAddressMapped(source_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr));
- std::vector<uint8_t> data = RandomData(kPageSize);
+ std::vector<uint8_t> data = RandomData(gPageSize);
memcpy(source.Begin(), data.data(), data.size());
ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
@@ -188,7 +188,7 @@ TEST_F(MemMapTest, ReplaceMapping_SameSize) {
ASSERT_TRUE(IsAddressMapped(dest_addr));
ASSERT_FALSE(source.IsValid());
- ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(gPageSize));
ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
@@ -196,7 +196,7 @@ TEST_F(MemMapTest, ReplaceMapping_SameSize) {
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- 5 * kPageSize, // Need to make it larger
+ 5 * gPageSize, // Need to make it larger
// initially so we know
// there won't be mappings
// in the way when we move
@@ -206,7 +206,7 @@ TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- 3 * kPageSize,
+ 3 * gPageSize,
PROT_WRITE | PROT_READ,
/*low_4gb=*/ false,
&error_msg);
@@ -216,22 +216,22 @@ TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
ASSERT_TRUE(IsAddressMapped(source_addr));
// Fill the source with random data.
- std::vector<uint8_t> data = RandomData(3 * kPageSize);
+ std::vector<uint8_t> data = RandomData(3 * gPageSize);
memcpy(source.Begin(), data.data(), data.size());
// Make the dest smaller so that we know we'll have space.
- dest.SetSize(kPageSize);
+ dest.SetSize(gPageSize);
ASSERT_TRUE(IsAddressMapped(dest_addr));
- ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
- ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * gPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(gPageSize));
ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
ASSERT_FALSE(IsAddressMapped(source_addr));
- ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * gPageSize));
ASSERT_TRUE(IsAddressMapped(dest_addr));
- ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * gPageSize));
ASSERT_FALSE(source.IsValid());
ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
@@ -240,13 +240,13 @@ TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- 3 * kPageSize,
+ 3 * gPageSize,
PROT_READ,
/*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- kPageSize,
+ gPageSize,
PROT_WRITE | PROT_READ,
/*low_4gb=*/ false,
&error_msg);
@@ -255,18 +255,18 @@ TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
uint8_t* dest_addr = dest.Begin();
ASSERT_TRUE(IsAddressMapped(source_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr));
- ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
- ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * gPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * gPageSize));
- std::vector<uint8_t> data = RandomData(kPageSize);
- memcpy(source.Begin(), data.data(), kPageSize);
+ std::vector<uint8_t> data = RandomData(gPageSize);
+ memcpy(source.Begin(), data.data(), gPageSize);
ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
ASSERT_FALSE(IsAddressMapped(source_addr));
- ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(gPageSize));
ASSERT_TRUE(IsAddressMapped(dest_addr));
- ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * gPageSize));
ASSERT_FALSE(source.IsValid());
ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
@@ -277,37 +277,37 @@ TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
MemMap dest =
MemMap::MapAnonymous(
"MapAnonymousEmpty-atomic-replace-dest",
- 3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
+ 3 * gPageSize, // Need to make it larger initially so we know there won't be mappings in
// the way when we move source.
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
// Resize down to 1 page so we can remap the rest.
- dest.SetSize(kPageSize);
+ dest.SetSize(gPageSize);
// Create source from the last 2 pages
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- dest.Begin() + kPageSize,
- 2 * kPageSize,
+ dest.Begin() + gPageSize,
+ 2 * gPageSize,
PROT_WRITE | PROT_READ,
/*low_4gb=*/ false,
/*reuse=*/ false,
/*reservation=*/ nullptr,
&error_msg);
ASSERT_TRUE(source.IsValid());
- ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
+ ASSERT_EQ(dest.Begin() + gPageSize, source.Begin());
uint8_t* source_addr = source.Begin();
uint8_t* dest_addr = dest.Begin();
ASSERT_TRUE(IsAddressMapped(source_addr));
// Fill the source and dest with random data.
- std::vector<uint8_t> data = RandomData(2 * kPageSize);
+ std::vector<uint8_t> data = RandomData(2 * gPageSize);
memcpy(source.Begin(), data.data(), data.size());
- std::vector<uint8_t> dest_data = RandomData(kPageSize);
+ std::vector<uint8_t> dest_data = RandomData(gPageSize);
memcpy(dest.Begin(), dest_data.data(), dest_data.size());
ASSERT_TRUE(IsAddressMapped(dest_addr));
- ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(gPageSize));
ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
@@ -334,7 +334,7 @@ TEST_F(MemMapTest, MapAnonymousEmpty) {
error_msg.clear();
map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
- kPageSize,
+ gPageSize,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&error_msg);
@@ -346,7 +346,7 @@ TEST_F(MemMapTest, MapAnonymousFailNullError) {
CommonInit();
// Test that we don't crash with a null error_str when mapping at an invalid location.
MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
- reinterpret_cast<uint8_t*>(kPageSize),
+ reinterpret_cast<uint8_t*>(gPageSize),
0x20000,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
@@ -370,7 +370,7 @@ TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
error_msg.clear();
map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
- kPageSize,
+ gPageSize,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ true,
&error_msg);
@@ -382,7 +382,7 @@ TEST_F(MemMapTest, MapFile32Bit) {
CommonInit();
std::string error_msg;
ScratchFile scratch_file;
- const size_t map_size = kPageSize;
+ const size_t map_size = gPageSize;
std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));
MemMap map = MemMap::MapFile(/*byte_count=*/map_size,
@@ -410,11 +410,11 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
CommonInit();
std::string error_msg;
// Find a valid address.
- uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb=*/false);
+ uint8_t* valid_address = GetValidMapAddress(gPageSize, /*low_4gb=*/false);
// Map at an address that should work, which should succeed.
MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
valid_address,
- kPageSize,
+ gPageSize,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
/*reuse=*/ false,
@@ -425,7 +425,7 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
ASSERT_TRUE(map0.BaseBegin() == valid_address);
// Map at an unspecified address, which should succeed.
MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
- kPageSize,
+ gPageSize,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&error_msg);
@@ -435,7 +435,7 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
// Attempt to map at the same address, which should fail.
MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
reinterpret_cast<uint8_t*>(map1.BaseBegin()),
- kPageSize,
+ gPageSize,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
/*reuse=*/ false,
@@ -461,12 +461,12 @@ TEST_F(MemMapTest, RemapFileViewAtEnd) {
ScratchFile scratch_file;
// Create a scratch file 3 pages large.
- const size_t map_size = 3 * kPageSize;
+ const size_t map_size = 3 * gPageSize;
std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
- memset(data.get(), 1, kPageSize);
- memset(&data[0], 0x55, kPageSize);
- memset(&data[kPageSize], 0x5a, kPageSize);
- memset(&data[2 * kPageSize], 0xaa, kPageSize);
+ memset(data.get(), 1, gPageSize);
+ memset(&data[0], 0x55, gPageSize);
+ memset(&data[gPageSize], 0x5a, gPageSize);
+ memset(&data[2 * gPageSize], 0xaa, gPageSize);
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));
MemMap map = MemMap::MapFile(/*byte_count=*/map_size,
@@ -482,10 +482,10 @@ TEST_F(MemMapTest, RemapFileViewAtEnd) {
ASSERT_EQ(map.Size(), map_size);
ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
ASSERT_EQ(data[0], *map.Begin());
- ASSERT_EQ(data[kPageSize], *(map.Begin() + kPageSize));
- ASSERT_EQ(data[2 * kPageSize], *(map.Begin() + 2 * kPageSize));
+ ASSERT_EQ(data[gPageSize], *(map.Begin() + gPageSize));
+ ASSERT_EQ(data[2 * gPageSize], *(map.Begin() + 2 * gPageSize));
- for (size_t offset = 2 * kPageSize; offset > 0; offset -= kPageSize) {
+ for (size_t offset = 2 * gPageSize; offset > 0; offset -= gPageSize) {
MemMap tail = map.RemapAtEnd(map.Begin() + offset,
"bad_offset_map",
PROT_READ,
@@ -496,7 +496,7 @@ TEST_F(MemMapTest, RemapFileViewAtEnd) {
ASSERT_TRUE(tail.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
ASSERT_EQ(offset, map.Size());
- ASSERT_EQ(static_cast<size_t>(kPageSize), tail.Size());
+ ASSERT_EQ(static_cast<size_t>(gPageSize), tail.Size());
ASSERT_EQ(tail.Begin(), map.Begin() + map.Size());
ASSERT_EQ(data[offset], *tail.Begin());
}
@@ -536,10 +536,10 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
CommonInit();
std::string error_msg;
uintptr_t ptr = 0;
- ptr -= kPageSize; // Now it's close to the top.
+ ptr -= gPageSize; // Now it's close to the top.
MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
reinterpret_cast<uint8_t*>(ptr),
- 2 * kPageSize, // brings it over the top.
+ 2 * gPageSize, // brings it over the top.
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
/*reuse=*/ false,
@@ -556,7 +556,7 @@ TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
MemMap map =
MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
- kPageSize,
+ gPageSize,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ true,
/*reuse=*/ false,
@@ -610,7 +610,7 @@ TEST_F(MemMapTest, CheckNoGaps) {
constexpr size_t kNumPages = 3;
// Map a 3-page mem map.
MemMap reservation = MemMap::MapAnonymous("MapAnonymous0",
- kPageSize * kNumPages,
+ gPageSize * kNumPages,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&error_msg);
@@ -621,7 +621,7 @@ TEST_F(MemMapTest, CheckNoGaps) {
// Map at the same address, taking from the `map` reservation.
MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
- kPageSize,
+ gPageSize,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&reservation,
@@ -630,23 +630,23 @@ TEST_F(MemMapTest, CheckNoGaps) {
ASSERT_TRUE(error_msg.empty());
ASSERT_EQ(map_base, map0.Begin());
MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
- kPageSize,
+ gPageSize,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&reservation,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- ASSERT_EQ(map_base + kPageSize, map1.Begin());
+ ASSERT_EQ(map_base + gPageSize, map1.Begin());
MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
- kPageSize,
+ gPageSize,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&reservation,
&error_msg);
ASSERT_TRUE(map2.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- ASSERT_EQ(map_base + 2 * kPageSize, map2.Begin());
+ ASSERT_EQ(map_base + 2 * gPageSize, map2.Begin());
ASSERT_FALSE(reservation.IsValid()); // The entire reservation was used.
// One-map cases.
@@ -670,7 +670,7 @@ TEST_F(MemMapTest, AlignBy) {
CommonInit();
std::string error_msg;
// Cast the page size to size_t.
- const size_t page_size = static_cast<size_t>(kPageSize);
+ const size_t page_size = static_cast<size_t>(gPageSize);
// Map a region.
MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
14 * page_size,
@@ -773,7 +773,7 @@ TEST_F(MemMapTest, Reservation) {
CommonInit();
std::string error_msg;
ScratchFile scratch_file;
- const size_t map_size = 5 * kPageSize;
+ const size_t map_size = 5 * gPageSize;
std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));
@@ -786,7 +786,7 @@ TEST_F(MemMapTest, Reservation) {
ASSERT_TRUE(error_msg.empty());
// Map first part of the reservation.
- const size_t chunk1_size = kPageSize - 1u;
+ const size_t chunk1_size = gPageSize - 1u;
ASSERT_LT(chunk1_size, map_size) << "We want to split the reservation.";
uint8_t* addr1 = reservation.Begin();
MemMap map1 = MemMap::MapFileAtAddress(addr1,
@@ -810,7 +810,7 @@ TEST_F(MemMapTest, Reservation) {
ASSERT_EQ(map1.BaseEnd(), reservation.Begin());
// Map second part as an anonymous mapping.
- const size_t chunk2_size = 2 * kPageSize;
+ const size_t chunk2_size = 2 * gPageSize;
DCHECK_LT(chunk2_size, reservation.Size()); // We want to split the reservation.
uint8_t* addr2 = reservation.Begin();
MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
@@ -850,7 +850,7 @@ TEST_F(MemMapTest, Reservation) {
ASSERT_FALSE(reservation.IsValid());
// Now split the MiddleReservation.
- const size_t chunk2a_size = kPageSize - 1u;
+ const size_t chunk2a_size = gPageSize - 1u;
DCHECK_LT(chunk2a_size, map2.Size()); // We want to split the reservation.
MemMap map2a = map2.TakeReservedMemory(chunk2a_size);
ASSERT_TRUE(map2a.IsValid()) << error_msg;
diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc
index eef3045e0b..5dc491138b 100644
--- a/libartbase/base/unix_file/fd_file.cc
+++ b/libartbase/base/unix_file/fd_file.cc
@@ -498,7 +498,7 @@ bool FdFile::Copy(FdFile* input_file, int64_t offset, int64_t size) {
if (lseek(input_file->Fd(), off, SEEK_SET) != off) {
return false;
}
- const size_t max_buffer_size = 4 * ::art::kPageSize;
+ const size_t max_buffer_size = 4 * ::art::gPageSize;
const size_t buffer_size = std::min<uint64_t>(size, max_buffer_size);
art::UniqueCPtr<void> buffer(malloc(buffer_size));
if (buffer == nullptr) {
diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc
index e114e4dbac..75320005fd 100644
--- a/libartbase/base/utils.cc
+++ b/libartbase/base/utils.cc
@@ -91,7 +91,7 @@ int CacheFlush(uintptr_t start, uintptr_t limit) {
bool TouchAndFlushCacheLinesWithinPage(uintptr_t start, uintptr_t limit, size_t attempts) {
CHECK_LT(start, limit);
- CHECK_EQ(RoundDown(start, kPageSize), RoundDown(limit - 1, kPageSize)) << "range spans pages";
+ CHECK_EQ(RoundDown(start, gPageSize), RoundDown(limit - 1, gPageSize)) << "range spans pages";
// Declare a volatile variable so the compiler does not elide reads from the page being touched.
[[maybe_unused]] volatile uint8_t v = 0;
for (size_t i = 0; i < attempts; ++i) {
@@ -139,14 +139,14 @@ bool FlushCpuCaches(void* begin, void* end) {
// A rare failure has occurred implying that part of the range (begin, end] has been swapped
// out. Retry flushing but this time grouping cache-line flushes on individual pages and
// touching each page before flushing.
- uintptr_t next_page = RoundUp(start + 1, kPageSize);
+ uintptr_t next_page = RoundUp(start + 1, gPageSize);
while (start < limit) {
uintptr_t boundary = std::min(next_page, limit);
if (!TouchAndFlushCacheLinesWithinPage(start, boundary, kMaxFlushAttempts)) {
return false;
}
start = boundary;
- next_page += kPageSize;
+ next_page += gPageSize;
}
return true;
}
@@ -366,8 +366,8 @@ bool IsAddressKnownBackedByFileOrShared(const void* addr) {
// We use the Linux pagemap interface for knowing if an address is backed
// by a file or is shared. See:
// https://www.kernel.org/doc/Documentation/vm/pagemap.txt
- uintptr_t vmstart = reinterpret_cast<uintptr_t>(AlignDown(addr, kPageSize));
- off_t index = (vmstart / kPageSize) * sizeof(uint64_t);
+ uintptr_t vmstart = reinterpret_cast<uintptr_t>(AlignDown(addr, gPageSize));
+ off_t index = (vmstart / gPageSize) * sizeof(uint64_t);
android::base::unique_fd pagemap(open("/proc/self/pagemap", O_RDONLY | O_CLOEXEC));
if (pagemap == -1) {
return false;
diff --git a/libdexfile/dex/code_item_accessors_test.cc b/libdexfile/dex/code_item_accessors_test.cc
index 036036141b..c545efb2a9 100644
--- a/libdexfile/dex/code_item_accessors_test.cc
+++ b/libdexfile/dex/code_item_accessors_test.cc
@@ -28,7 +28,7 @@ namespace art {
class CodeItemAccessorsTest : public testing::Test {};
std::unique_ptr<const DexFile> CreateFakeDex(bool compact_dex, std::vector<uint8_t>* data) {
- data->resize(kPageSize);
+ data->resize(gPageSize);
if (compact_dex) {
CompactDexFile::Header* header =
const_cast<CompactDexFile::Header*>(CompactDexFile::Header::At(data->data()));
diff --git a/libelffile/elf/xz_utils.cc b/libelffile/elf/xz_utils.cc
index f064cb0e10..0ea216274d 100644
--- a/libelffile/elf/xz_utils.cc
+++ b/libelffile/elf/xz_utils.cc
@@ -109,7 +109,7 @@ void XzDecompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst) {
size_t dst_offset = 0;
ECoderStatus status;
do {
- dst->resize(RoundUp(dst_offset + kPageSize / 4, kPageSize));
+ dst->resize(RoundUp(dst_offset + gPageSize / 4, gPageSize));
size_t src_remaining = src.size() - src_offset;
size_t dst_remaining = dst->size() - dst_offset;
int return_val = XzUnpacker_Code(state.get(),
diff --git a/runtime/base/gc_visited_arena_pool.cc b/runtime/base/gc_visited_arena_pool.cc
index 88180dcc02..82131189dd 100644
--- a/runtime/base/gc_visited_arena_pool.cc
+++ b/runtime/base/gc_visited_arena_pool.cc
@@ -41,16 +41,16 @@ TrackedArena::TrackedArena(uint8_t* start, size_t size, bool pre_zygote_fork, bo
// entire arena.
bytes_allocated_ = size;
} else {
- DCHECK_ALIGNED_PARAM(size, kPageSize);
- DCHECK_ALIGNED_PARAM(start, kPageSize);
- size_t arr_size = size / kPageSize;
+ DCHECK_ALIGNED_PARAM(size, gPageSize);
+ DCHECK_ALIGNED_PARAM(start, gPageSize);
+ size_t arr_size = size / gPageSize;
first_obj_array_.reset(new uint8_t*[arr_size]);
std::fill_n(first_obj_array_.get(), arr_size, nullptr);
}
}
void TrackedArena::ReleasePages(uint8_t* begin, size_t size, bool pre_zygote_fork) {
- DCHECK_ALIGNED_PARAM(begin, kPageSize);
+ DCHECK_ALIGNED_PARAM(begin, gPageSize);
// Userfaultfd GC uses MAP_SHARED mappings for linear-alloc and therefore
// MADV_DONTNEED will not free the pages from page cache. Therefore use
// MADV_REMOVE instead, which is meant for this purpose.
@@ -69,7 +69,7 @@ void TrackedArena::Release() {
if (bytes_allocated_ > 0) {
ReleasePages(Begin(), Size(), pre_zygote_fork_);
if (first_obj_array_.get() != nullptr) {
- std::fill_n(first_obj_array_.get(), Size() / kPageSize, nullptr);
+ std::fill_n(first_obj_array_.get(), Size() / gPageSize, nullptr);
}
bytes_allocated_ = 0;
}
@@ -81,15 +81,15 @@ void TrackedArena::SetFirstObject(uint8_t* obj_begin, uint8_t* obj_end) {
DCHECK_LT(static_cast<void*>(obj_begin), static_cast<void*>(obj_end));
GcVisitedArenaPool* arena_pool =
static_cast<GcVisitedArenaPool*>(Runtime::Current()->GetLinearAllocArenaPool());
- size_t idx = static_cast<size_t>(obj_begin - Begin()) / kPageSize;
- size_t last_byte_idx = static_cast<size_t>(obj_end - 1 - Begin()) / kPageSize;
+ size_t idx = static_cast<size_t>(obj_begin - Begin()) / gPageSize;
+ size_t last_byte_idx = static_cast<size_t>(obj_end - 1 - Begin()) / gPageSize;
// Do the update below with arena-pool's lock in shared-mode to serialize with
// the compaction-pause wherein we acquire it exclusively. This is to ensure
// that last-byte read there doesn't change after reading it and before
// userfaultfd registration.
ReaderMutexLock rmu(Thread::Current(), arena_pool->GetLock());
// If the addr is at the beginning of a page, then we set it for that page too.
- if (IsAlignedParam(obj_begin, kPageSize)) {
+ if (IsAlignedParam(obj_begin, gPageSize)) {
first_obj_array_[idx] = obj_begin;
}
while (idx < last_byte_idx) {
@@ -106,7 +106,7 @@ uint8_t* GcVisitedArenaPool::AddMap(size_t min_size) {
}
#endif
size_t alignment = BestPageTableAlignment(size);
- DCHECK_GE(size, kPMDSize);
+ DCHECK_GE(size, gPMDSize);
std::string err_msg;
maps_.emplace_back(MemMap::MapAnonymousAligned(
name_, size, PROT_READ | PROT_WRITE, low_4gb_, alignment, &err_msg));
@@ -218,7 +218,7 @@ void GcVisitedArenaPool::FreeSingleObjArena(uint8_t* addr) {
Arena* GcVisitedArenaPool::AllocArena(size_t size, bool single_obj_arena) {
// Return only page aligned sizes so that madvise can be leveraged.
- size = RoundUp(size, kPageSize);
+ size = RoundUp(size, gPageSize);
if (pre_zygote_fork_) {
// The first fork out of zygote hasn't happened yet. Allocate arena in a
// private-anonymous mapping to retain clean pages across fork.
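Note: AllocArena rounds every request to the (now runtime) page size so later madvise calls operate on whole pages. A hedged sketch of the power-of-two RoundUp such call sites rely on (helper name is an assumption):

#include <cassert>
#include <cstddef>

// Round 'value' up to a multiple of 'page_size'; valid only for power-of-two
// sizes, which holds for all Linux page sizes (4K/16K/64K).
inline size_t RoundUpToPage(size_t value, size_t page_size) {
  assert((page_size & (page_size - 1)) == 0);
  return (value + page_size - 1) & ~(page_size - 1);
}
// e.g. RoundUpToPage(5000, 4096) == 8192, RoundUpToPage(5000, 16384) == 16384.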
diff --git a/runtime/base/gc_visited_arena_pool.h b/runtime/base/gc_visited_arena_pool.h
index d4fe4fb8f0..eaf2b6b413 100644
--- a/runtime/base/gc_visited_arena_pool.h
+++ b/runtime/base/gc_visited_arena_pool.h
@@ -44,22 +44,22 @@ class TrackedArena final : public Arena {
void VisitRoots(PageVisitor& visitor) const REQUIRES_SHARED(Locks::mutator_lock_) {
uint8_t* page_begin = Begin();
if (first_obj_array_.get() != nullptr) {
- DCHECK_ALIGNED_PARAM(Size(), kPageSize);
- DCHECK_ALIGNED_PARAM(Begin(), kPageSize);
- for (int i = 0, nr_pages = Size() / kPageSize; i < nr_pages; i++, page_begin += kPageSize) {
+ DCHECK_ALIGNED_PARAM(Size(), gPageSize);
+ DCHECK_ALIGNED_PARAM(Begin(), gPageSize);
+ for (int i = 0, nr_pages = Size() / gPageSize; i < nr_pages; i++, page_begin += gPageSize) {
uint8_t* first = first_obj_array_[i];
if (first != nullptr) {
- visitor(page_begin, first, kPageSize);
+ visitor(page_begin, first, gPageSize);
} else {
break;
}
}
} else {
size_t page_size = Size();
- while (page_size > kPageSize) {
- visitor(page_begin, nullptr, kPageSize);
- page_begin += kPageSize;
- page_size -= kPageSize;
+ while (page_size > gPageSize) {
+ visitor(page_begin, nullptr, gPageSize);
+ page_begin += gPageSize;
+ page_size -= gPageSize;
}
visitor(page_begin, nullptr, page_size);
}
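Note: the VisitRoots loop above walks a tracked arena one runtime-sized page at a time, consulting the per-page first-object array and stopping at the first empty slot. A standalone sketch of that pattern (visitor signature and names are assumptions):

#include <cstddef>
#include <cstdint>
#include <functional>

// Visit [begin, begin + size) page by page; first_obj[i] points at the first
// object on page i, or is nullptr once the arena tail holds no objects.
void VisitArenaPages(uint8_t* begin, size_t size, size_t page_size,
                     uint8_t* const* first_obj,
                     const std::function<void(uint8_t*, uint8_t*, size_t)>& visitor) {
  const size_t nr_pages = size / page_size;
  for (size_t i = 0; i < nr_pages; ++i, begin += page_size) {
    if (first_obj[i] == nullptr) {
      break;  // remaining pages are unused
    }
    visitor(begin, first_obj[i], page_size);
  }
}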
@@ -69,17 +69,17 @@ class TrackedArena final : public Arena {
uint8_t* GetLastUsedByte() const REQUIRES_SHARED(Locks::mutator_lock_) {
// Jump past bytes-allocated for arenas which are not currently being used
// by arena-allocator. This helps in reducing loop iterations below.
- uint8_t* last_byte = AlignUp(Begin() + GetBytesAllocated(), kPageSize);
+ uint8_t* last_byte = AlignUp(Begin() + GetBytesAllocated(), gPageSize);
if (first_obj_array_.get() != nullptr) {
- DCHECK_ALIGNED_PARAM(Begin(), kPageSize);
- DCHECK_ALIGNED_PARAM(End(), kPageSize);
+ DCHECK_ALIGNED_PARAM(Begin(), gPageSize);
+ DCHECK_ALIGNED_PARAM(End(), gPageSize);
DCHECK_LE(last_byte, End());
} else {
DCHECK_EQ(last_byte, End());
}
- for (size_t i = (last_byte - Begin()) / kPageSize;
+ for (size_t i = (last_byte - Begin()) / gPageSize;
last_byte < End() && first_obj_array_[i] != nullptr;
- last_byte += kPageSize, i++) {
+ last_byte += gPageSize, i++) {
// No body.
}
return last_byte;
@@ -89,7 +89,7 @@ class TrackedArena final : public Arena {
DCHECK_LE(Begin(), addr);
DCHECK_GT(End(), addr);
if (first_obj_array_.get() != nullptr) {
- return first_obj_array_[(addr - Begin()) / kPageSize];
+ return first_obj_array_[(addr - Begin()) / gPageSize];
} else {
// The pages of this arena contain array of GC-roots. So we don't need
// first-object of any given page of the arena.
@@ -252,7 +252,7 @@ class GcVisitedArenaPool final : public ArenaPool {
class TrackedArenaHash {
public:
size_t operator()(const TrackedArena* arena) const {
- return std::hash<size_t>{}(reinterpret_cast<uintptr_t>(arena->Begin()) / kPageSize);
+ return std::hash<size_t>{}(reinterpret_cast<uintptr_t>(arena->Begin()) / gPageSize);
}
};
using AllocatedArenaSet =
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index fa56dd7d1c..647115fbf8 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -55,7 +55,7 @@ MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name)
MemMap MemMapArena::Allocate(size_t size, bool low_4gb, const char* name) {
// Round up to a full page as that's the smallest unit of allocation for mmap()
// and we want to be able to use all memory that we actually allocate.
- size = RoundUp(size, kPageSize);
+ size = RoundUp(size, gPageSize);
std::string error_msg;
// TODO(b/278665389): remove this retry logic if the root cause is found.
constexpr int MAX_RETRY_CNT = 3;
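Note: MemMapArena rounds each request to the page size before handing it to mmap(); with the page size no longer a compile-time constant, such call sites read a dynamically initialized global instead. A minimal sketch of obtaining that value (hypothetical helper, not ART's globals):

#include <unistd.h>
#include <cstddef>

// Query the kernel's page size once and cache it; this is roughly what a
// dynamically initialized gPageSize-style global boils down to.
inline size_t RuntimePageSize() {
  static const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  return page_size;
}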
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index a7b818ed99..9b0f49fb9e 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -568,7 +568,7 @@ class HeapChunkContext {
// of the use of mmaps, so don't report. If not free memory then start a new segment.
bool flush = true;
if (start > startOfNextMemoryChunk_) {
- const size_t kMaxFreeLen = 2 * kPageSize;
+ const size_t kMaxFreeLen = 2 * gPageSize;
void* free_start = startOfNextMemoryChunk_;
void* free_end = start;
const size_t free_len =
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index 4e4109d1ac..5a52a82ab6 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -47,7 +47,7 @@ Bitmap::~Bitmap() {
MemMap Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
const size_t bitmap_size = RoundUp(
- RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
+ RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), gPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
bitmap_size,
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 7b55600eee..277f95fb37 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -118,7 +118,7 @@ TEST_F(SpaceBitmapTest, ClearRange) {
for (uintptr_t i = 0; i < range.first; i += kObjectAlignment) {
EXPECT_TRUE(bitmap.Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
}
- for (uintptr_t i = range.second; i < range.second + kPageSize; i += kObjectAlignment) {
+ for (uintptr_t i = range.second; i < range.second + gPageSize; i += kObjectAlignment) {
EXPECT_TRUE(bitmap.Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
}
// Everything inside should be cleared.
@@ -212,7 +212,7 @@ TEST_F(SpaceBitmapTest, VisitorObjectAlignment) {
}
TEST_F(SpaceBitmapTest, VisitorPageAlignment) {
- RunTestCount(kPageSize);
+ RunTestCount(gPageSize);
}
void RunTestOrder(size_t alignment) {
@@ -248,7 +248,7 @@ TEST_F(SpaceBitmapTest, OrderObjectAlignment) {
}
TEST_F(SpaceBitmapTest, OrderPageAlignment) {
- RunTestOrder(kPageSize);
+ RunTestOrder(gPageSize);
}
} // namespace accounting
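Note: the AllocateMemMap change in bitmap.cc above pads the bitmap's word storage to whole pages, which is also what the page-alignment tests exercise. A worked sketch of that sizing (constants and helper name assumed for illustration):

#include <cstddef>
#include <cstdint>

// Bytes needed to back a bitmap of num_bits bits, padded to whole pages.
inline size_t BitmapBackingSize(size_t num_bits, size_t page_size) {
  constexpr size_t kBitsPerWord = sizeof(uintptr_t) * 8;              // 64 on LP64
  const size_t words = (num_bits + kBitsPerWord - 1) / kBitsPerWord;  // round up to words
  const size_t bytes = words * sizeof(uintptr_t);
  return (bytes + page_size - 1) & ~(page_size - 1);                  // round up to pages
}
// e.g. BitmapBackingSize(1'000'000, 4096) == 126976 (125000 bytes -> 31 pages).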
diff --git a/runtime/gc/allocator/art-dlmalloc.cc b/runtime/gc/allocator/art-dlmalloc.cc
index 6296acd3d6..62a768db39 100644
--- a/runtime/gc/allocator/art-dlmalloc.cc
+++ b/runtime/gc/allocator/art-dlmalloc.cc
@@ -69,8 +69,8 @@ extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_byte
return;
}
// Do we have any whole pages to give back?
- start = reinterpret_cast<void*>(art::RoundUp(reinterpret_cast<uintptr_t>(start), art::kPageSize));
- end = reinterpret_cast<void*>(art::RoundDown(reinterpret_cast<uintptr_t>(end), art::kPageSize));
+ start = reinterpret_cast<void*>(art::RoundUp(reinterpret_cast<uintptr_t>(start), art::gPageSize));
+ end = reinterpret_cast<void*>(art::RoundDown(reinterpret_cast<uintptr_t>(end), art::gPageSize));
if (end > start) {
size_t length = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
int rc = madvise(start, length, MADV_DONTNEED);
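Note: DlmallocMadviseCallback only returns whole pages, rounding the start up and the end down before calling madvise. The same round-inward pattern as a hedged standalone sketch (not the dlmalloc hook itself):

#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>

// Advise the kernel that the whole pages fully contained in [start, end) are unused.
void MadviseWholePages(void* start, void* end) {
  const uintptr_t page_size = static_cast<uintptr_t>(sysconf(_SC_PAGESIZE));
  const uintptr_t first =
      (reinterpret_cast<uintptr_t>(start) + page_size - 1) & ~(page_size - 1);
  const uintptr_t last = reinterpret_cast<uintptr_t>(end) & ~(page_size - 1);
  if (last > first) {
    madvise(reinterpret_cast<void*>(first), last - first, MADV_DONTNEED);
  }
}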
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 93b1498a18..befc3f89c9 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -63,11 +63,11 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
page_release_mode_(page_release_mode),
page_release_size_threshold_(page_release_size_threshold),
is_running_on_memory_tool_(running_on_memory_tool) {
- DCHECK_ALIGNED_PARAM(base, kPageSize);
- DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
- DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
+ DCHECK_ALIGNED_PARAM(base, gPageSize);
+ DCHECK_EQ(RoundUp(capacity, gPageSize), capacity);
+ DCHECK_EQ(RoundUp(max_capacity, gPageSize), max_capacity);
CHECK_LE(capacity, max_capacity);
- CHECK_ALIGNED_PARAM(page_release_size_threshold_, kPageSize);
+ CHECK_ALIGNED_PARAM(page_release_size_threshold_, gPageSize);
// Zero the memory explicitly (don't rely on that the mem map is zero-initialized).
if (!kMadviseZeroes) {
memset(base_, 0, max_capacity);
@@ -88,11 +88,11 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
current_runs_[i] = dedicated_full_run_;
}
DCHECK_EQ(footprint_, capacity_);
- size_t num_of_pages = footprint_ / kPageSize;
- size_t max_num_of_pages = max_capacity_ / kPageSize;
+ size_t num_of_pages = footprint_ / gPageSize;
+ size_t max_num_of_pages = max_capacity_ / gPageSize;
std::string error_msg;
page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
- RoundUp(max_num_of_pages, kPageSize),
+ RoundUp(max_num_of_pages, gPageSize),
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&error_msg);
@@ -106,7 +106,7 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
free_pages->magic_num_ = kMagicNumFree;
}
free_pages->SetByteSize(this, capacity_);
- DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(capacity_ % gPageSize, static_cast<size_t>(0));
DCHECK(free_pages->IsFree());
free_pages->ReleasePages(this);
DCHECK(free_pages->IsFree());
@@ -131,13 +131,13 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
lock_.AssertHeld(self);
DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
FreePageRun* res = nullptr;
- const size_t req_byte_size = num_pages * kPageSize;
+ const size_t req_byte_size = num_pages * gPageSize;
// Find the lowest address free page run that's large enough.
for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
FreePageRun* fpr = *it;
DCHECK(fpr->IsFree());
size_t fpr_byte_size = fpr->ByteSize(this);
- DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(fpr_byte_size % gPageSize, static_cast<size_t>(0));
if (req_byte_size <= fpr_byte_size) {
// Found one.
it = free_page_runs_.erase(it);
@@ -154,7 +154,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
remainder->magic_num_ = kMagicNumFree;
}
remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
- DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(remainder->ByteSize(this) % gPageSize, static_cast<size_t>(0));
// Don't need to call madvise on remainder here.
free_page_runs_.insert(remainder);
if (kTraceRosAlloc) {
@@ -163,7 +163,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
<< " into free_page_runs_";
}
fpr->SetByteSize(this, req_byte_size);
- DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(fpr->ByteSize(this) % gPageSize, static_cast<size_t>(0));
}
res = fpr;
break;
@@ -191,9 +191,9 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
// If we grow the heap, we can allocate it.
size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
capacity_ - footprint_);
- DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(increment % gPageSize, static_cast<size_t>(0));
size_t new_footprint = footprint_ + increment;
- size_t new_num_of_pages = new_footprint / kPageSize;
+ size_t new_num_of_pages = new_footprint / gPageSize;
DCHECK_LT(page_map_size_, new_num_of_pages);
DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
page_map_size_ = new_num_of_pages;
@@ -204,7 +204,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
// There was a free page run at the end. Expand its size.
DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
- DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(last_free_page_run->ByteSize(this) % gPageSize, static_cast<size_t>(0));
DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
} else {
// Otherwise, insert a new free page run at the end.
@@ -213,7 +213,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
new_free_page_run->magic_num_ = kMagicNumFree;
}
new_free_page_run->SetByteSize(this, increment);
- DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(new_free_page_run->ByteSize(this) % gPageSize, static_cast<size_t>(0));
free_page_runs_.insert(new_free_page_run);
DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
if (kTraceRosAlloc) {
@@ -238,7 +238,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
DCHECK_EQ(last_free_page_run, fpr);
}
size_t fpr_byte_size = fpr->ByteSize(this);
- DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(fpr_byte_size % gPageSize, static_cast<size_t>(0));
DCHECK_LE(req_byte_size, fpr_byte_size);
free_page_runs_.erase(fpr);
if (kTraceRosAlloc) {
@@ -252,7 +252,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
remainder->magic_num_ = kMagicNumFree;
}
remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
- DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(remainder->ByteSize(this) % gPageSize, static_cast<size_t>(0));
free_page_runs_.insert(remainder);
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
@@ -260,7 +260,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
<< " into free_page_runs_";
}
fpr->SetByteSize(this, req_byte_size);
- DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(fpr->ByteSize(this) % gPageSize, static_cast<size_t>(0));
}
res = fpr;
}
@@ -290,12 +290,12 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
}
if (kIsDebugBuild) {
// Clear the first page since it is not madvised due to the magic number.
- memset(res, 0, kPageSize);
+ memset(res, 0, gPageSize);
}
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
- << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
- << "(" << std::dec << (num_pages * kPageSize) << ")";
+ << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * gPageSize)
+ << "(" << std::dec << (num_pages * gPageSize) << ")";
}
return res;
}
@@ -337,7 +337,7 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
num_pages++;
idx++;
}
- const size_t byte_size = num_pages * kPageSize;
+ const size_t byte_size = num_pages * gPageSize;
if (already_zero) {
if (ShouldCheckZeroMemory()) {
const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
@@ -352,7 +352,7 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
if (kTraceRosAlloc) {
LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
<< "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
- << "(" << std::dec << (num_pages * kPageSize) << ")";
+ << "(" << std::dec << (num_pages * gPageSize) << ")";
}
// Turn it into a free run.
@@ -361,7 +361,7 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
fpr->magic_num_ = kMagicNumFree;
}
fpr->SetByteSize(this, byte_size);
- DCHECK_ALIGNED_PARAM(fpr->ByteSize(this), kPageSize);
+ DCHECK_ALIGNED_PARAM(fpr->ByteSize(this), gPageSize);
DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
if (!free_page_runs_.empty()) {
@@ -374,7 +374,7 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
}
for (auto it = free_page_runs_.upper_bound(fpr); it != free_page_runs_.end(); ) {
FreePageRun* h = *it;
- DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(h->ByteSize(this) % gPageSize, static_cast<size_t>(0));
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
<< std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
@@ -396,7 +396,7 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
<< " from free_page_runs_";
}
fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
- DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(fpr->ByteSize(this) % gPageSize, static_cast<size_t>(0));
} else {
// Not adjacent. Stop.
if (kTraceRosAlloc) {
@@ -410,7 +410,7 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
--it;
FreePageRun* l = *it;
- DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(l->ByteSize(this) % gPageSize, static_cast<size_t>(0));
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
<< std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
@@ -428,7 +428,7 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
<< " from free_page_runs_";
}
l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
- DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(l->ByteSize(this) % gPageSize, static_cast<size_t>(0));
// Clear magic num since this is no longer the start of a free page run.
if (kIsDebugBuild) {
fpr->magic_num_ = 0;
@@ -445,7 +445,7 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
}
// Insert it.
- DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(fpr->ByteSize(this) % gPageSize, static_cast<size_t>(0));
DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
DCHECK(fpr->IsFree());
fpr->ReleasePages(this);
@@ -464,7 +464,7 @@ void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_alloca
DCHECK(bytes_allocated != nullptr);
DCHECK(usable_size != nullptr);
DCHECK_GT(size, kLargeSizeThreshold);
- size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
+ size_t num_pages = RoundUp(size, gPageSize) / gPageSize;
void* r;
{
MutexLock mu(self, lock_);
@@ -476,14 +476,14 @@ void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_alloca
}
return nullptr;
}
- const size_t total_bytes = num_pages * kPageSize;
+ const size_t total_bytes = num_pages * gPageSize;
*bytes_allocated = total_bytes;
*usable_size = total_bytes;
*bytes_tl_bulk_allocated = total_bytes;
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
- << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
- << "(" << std::dec << (num_pages * kPageSize) << ")";
+ << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * gPageSize)
+ << "(" << std::dec << (num_pages * gPageSize) << ")";
}
// Check if the returned memory is really all zero.
if (ShouldCheckZeroMemory()) {
@@ -519,11 +519,11 @@ size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
// Find the beginning of the run.
do {
--pm_idx;
- DCHECK_LT(pm_idx, capacity_ / kPageSize);
+ DCHECK_LT(pm_idx, capacity_ / gPageSize);
} while (page_map_[pm_idx] != kPageMapRun);
FALLTHROUGH_INTENDED;
case kPageMapRun:
- run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
+ run = reinterpret_cast<Run*>(base_ + pm_idx * gPageSize);
DCHECK_EQ(run->magic_num_, kMagicNum);
break;
case kPageMapReleased:
@@ -950,7 +950,7 @@ inline void RosAlloc::Run::ZeroHeaderAndSlotHeaders() {
memset(this, 0, headerSizes[idx]);
// Check that the entire run is all zero.
if (kIsDebugBuild) {
- const size_t size = numOfPages[idx] * kPageSize;
+ const size_t size = numOfPages[idx] * gPageSize;
const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(this);
for (size_t i = 0; i < size / sizeof(uintptr_t); ++i) {
CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
@@ -971,7 +971,7 @@ void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size
size_t num_slots = numOfSlots[idx];
size_t bracket_size = IndexToBracketSize(idx);
DCHECK_EQ(slot_base + num_slots * bracket_size,
- reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
+ reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * gPageSize);
// Free slots are on the free list and the allocated/used slots are not. We traverse the free list
// to find out and record which slots are free in the is_free array.
std::unique_ptr<bool[]> is_free(new bool[num_slots]()); // zero initialized
@@ -1037,15 +1037,15 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
<< ", page_map_entry=" << static_cast<int>(page_map_entry);
}
if (LIKELY(page_map_entry == kPageMapRun)) {
- run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
+ run = reinterpret_cast<Run*>(base_ + pm_idx * gPageSize);
} else if (LIKELY(page_map_entry == kPageMapRunPart)) {
size_t pi = pm_idx;
// Find the beginning of the run.
do {
--pi;
- DCHECK_LT(pi, capacity_ / kPageSize);
+ DCHECK_LT(pi, capacity_ / gPageSize);
} while (page_map_[pi] != kPageMapRun);
- run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
+ run = reinterpret_cast<Run*>(base_ + pi * gPageSize);
} else if (page_map_entry == kPageMapLargeObject) {
MutexLock mu(self, lock_);
freed_bytes += FreePages(self, ptr, false);
@@ -1064,15 +1064,15 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
<< ", page_map_entry=" << static_cast<int>(page_map_entry);
}
if (LIKELY(page_map_entry == kPageMapRun)) {
- run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
+ run = reinterpret_cast<Run*>(base_ + pm_idx * gPageSize);
} else if (LIKELY(page_map_entry == kPageMapRunPart)) {
size_t pi = pm_idx;
// Find the beginning of the run.
do {
--pi;
- DCHECK_LT(pi, capacity_ / kPageSize);
+ DCHECK_LT(pi, capacity_ / gPageSize);
} while (page_map_[pi] != kPageMapRun);
- run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
+ run = reinterpret_cast<Run*>(base_ + pi * gPageSize);
} else if (page_map_entry == kPageMapLargeObject) {
freed_bytes += FreePages(self, ptr, false);
continue;
@@ -1220,7 +1220,7 @@ std::string RosAlloc::DumpPageMap() {
case kPageMapReleased:
// Fall-through.
case kPageMapEmpty: {
- FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
+ FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * gPageSize);
if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
// Encountered a fresh free page run.
DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
@@ -1229,8 +1229,8 @@ std::string RosAlloc::DumpPageMap() {
DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
curr_fpr = fpr;
curr_fpr_size = fpr->ByteSize(this);
- DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
- remaining_curr_fpr_size = curr_fpr_size - kPageSize;
+ DCHECK_EQ(curr_fpr_size % gPageSize, static_cast<size_t>(0));
+ remaining_curr_fpr_size = curr_fpr_size - gPageSize;
stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
<< " (FPR start) fpr_size=" << curr_fpr_size
<< " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
@@ -1245,9 +1245,9 @@ std::string RosAlloc::DumpPageMap() {
// Still part of the current free page run.
DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
- DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
- DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
- remaining_curr_fpr_size -= kPageSize;
+ DCHECK_EQ(remaining_curr_fpr_size % gPageSize, static_cast<size_t>(0));
+ DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(gPageSize));
+ remaining_curr_fpr_size -= gPageSize;
stream << "[" << i << "]=Empty (FPR part)"
<< " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
if (remaining_curr_fpr_size == 0) {
@@ -1273,7 +1273,7 @@ std::string RosAlloc::DumpPageMap() {
case kPageMapRun: {
DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
num_running_empty_pages = 0;
- Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
+ Run* run = reinterpret_cast<Run*>(base_ + i * gPageSize);
size_t idx = run->size_bracket_idx_;
stream << "[" << i << "]=Run (start)"
<< " idx=" << idx
@@ -1316,7 +1316,7 @@ size_t RosAlloc::UsableSize(const void* ptr) {
num_pages++;
idx++;
}
- return num_pages * kPageSize;
+ return num_pages * gPageSize;
}
case kPageMapLargeObjectPart:
LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
@@ -1327,10 +1327,10 @@ size_t RosAlloc::UsableSize(const void* ptr) {
// Find the beginning of the run.
while (page_map_[pm_idx] != kPageMapRun) {
pm_idx--;
- DCHECK_LT(pm_idx, capacity_ / kPageSize);
+ DCHECK_LT(pm_idx, capacity_ / gPageSize);
}
DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
- Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
+ Run* run = reinterpret_cast<Run*>(base_ + pm_idx * gPageSize);
DCHECK_EQ(run->magic_num_, kMagicNum);
size_t idx = run->size_bracket_idx_;
size_t offset_from_slot_base = reinterpret_cast<const uint8_t*>(ptr)
@@ -1348,28 +1348,28 @@ size_t RosAlloc::UsableSize(const void* ptr) {
bool RosAlloc::Trim() {
MutexLock mu(Thread::Current(), lock_);
FreePageRun* last_free_page_run;
- DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(footprint_ % gPageSize, static_cast<size_t>(0));
auto it = free_page_runs_.rbegin();
if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
// Remove the last free page run, if any.
DCHECK(last_free_page_run->IsFree());
DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
- DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(last_free_page_run->ByteSize(this) % gPageSize, static_cast<size_t>(0));
DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
free_page_runs_.erase(last_free_page_run);
size_t decrement = last_free_page_run->ByteSize(this);
size_t new_footprint = footprint_ - decrement;
- DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
- size_t new_num_of_pages = new_footprint / kPageSize;
+ DCHECK_EQ(new_footprint % gPageSize, static_cast<size_t>(0));
+ size_t new_num_of_pages = new_footprint / gPageSize;
DCHECK_GE(page_map_size_, new_num_of_pages);
// Zero out the tail of the page map.
uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
- uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
+ uint8_t* madvise_begin = AlignUp(zero_begin, gPageSize);
DCHECK_LE(madvise_begin, page_map_mem_map_.End());
size_t madvise_size = page_map_mem_map_.End() - madvise_begin;
if (madvise_size > 0) {
- DCHECK_ALIGNED_PARAM(madvise_begin, kPageSize);
- DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
+ DCHECK_ALIGNED_PARAM(madvise_begin, gPageSize);
+ DCHECK_EQ(RoundUp(madvise_size, gPageSize), madvise_size);
if (!kMadviseZeroes) {
memset(madvise_begin, 0, madvise_size);
}
@@ -1410,25 +1410,25 @@ void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_by
// Fall-through.
case kPageMapEmpty: {
// The start of a free page run.
- FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
+ FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * gPageSize);
DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
size_t fpr_size = fpr->ByteSize(this);
- DCHECK_ALIGNED_PARAM(fpr_size, kPageSize);
+ DCHECK_ALIGNED_PARAM(fpr_size, gPageSize);
void* start = fpr;
if (kIsDebugBuild) {
// In the debug build, the first page of a free page run
// contains a magic number for debugging. Exclude it.
- start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
+ start = reinterpret_cast<uint8_t*>(fpr) + gPageSize;
}
void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
handler(start, end, 0, arg);
- size_t num_pages = fpr_size / kPageSize;
+ size_t num_pages = fpr_size / gPageSize;
if (kIsDebugBuild) {
for (size_t j = i + 1; j < i + num_pages; ++j) {
DCHECK(IsFreePage(j));
}
}
- i += fpr_size / kPageSize;
+ i += fpr_size / gPageSize;
DCHECK_LE(i, pm_end);
break;
}
@@ -1440,9 +1440,9 @@ void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_by
num_pages++;
idx++;
}
- void* start = base_ + i * kPageSize;
- void* end = base_ + (i + num_pages) * kPageSize;
- size_t used_bytes = num_pages * kPageSize;
+ void* start = base_ + i * gPageSize;
+ void* end = base_ + (i + num_pages) * gPageSize;
+ size_t used_bytes = num_pages * gPageSize;
handler(start, end, used_bytes, arg);
if (kIsDebugBuild) {
for (size_t j = i + 1; j < i + num_pages; ++j) {
@@ -1458,7 +1458,7 @@ void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_by
UNREACHABLE();
case kPageMapRun: {
// The start of a run.
- Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
+ Run* run = reinterpret_cast<Run*>(base_ + i * gPageSize);
DCHECK_EQ(run->magic_num_, kMagicNum);
// The dedicated full run doesn't contain any real allocations, don't visit the slots in
// there.
@@ -1495,7 +1495,7 @@ size_t RosAlloc::FootprintLimit() {
void RosAlloc::SetFootprintLimit(size_t new_capacity) {
MutexLock mu(Thread::Current(), lock_);
- DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
+ DCHECK_EQ(RoundUp(new_capacity, gPageSize), new_capacity);
// Only growing is supported here. But Trim() is supported.
if (capacity_ < new_capacity) {
CHECK_LE(new_capacity, max_capacity_);
@@ -1664,7 +1664,7 @@ void RosAlloc::Initialize() {
// Compute numOfSlots and slotOffsets.
for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
size_t bracket_size = bracketSizes[i];
- size_t run_size = kPageSize * numOfPages[i];
+ size_t run_size = gPageSize * numOfPages[i];
size_t max_num_of_slots = run_size / bracket_size;
// Compute the actual number of slots by taking the header and
// alignment into account.
@@ -1763,14 +1763,14 @@ void RosAlloc::Verify() {
// Fall-through.
case kPageMapEmpty: {
// The start of a free page run.
- FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
+ FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * gPageSize);
DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
<< "An empty page must belong to the free page run set";
size_t fpr_size = fpr->ByteSize(this);
- CHECK_ALIGNED_PARAM(fpr_size, kPageSize)
+ CHECK_ALIGNED_PARAM(fpr_size, gPageSize)
<< "A free page run size isn't page-aligned : " << fpr_size;
- size_t num_pages = fpr_size / kPageSize;
+ size_t num_pages = fpr_size / gPageSize;
CHECK_GT(num_pages, static_cast<uintptr_t>(0))
<< "A free page run size must be > 0 : " << fpr_size;
for (size_t j = i + 1; j < i + num_pages; ++j) {
@@ -1793,7 +1793,7 @@ void RosAlloc::Verify() {
num_pages++;
idx++;
}
- uint8_t* start = base_ + i * kPageSize;
+ uint8_t* start = base_ + i * gPageSize;
if (is_running_on_memory_tool_) {
start += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
}
@@ -1801,9 +1801,9 @@ void RosAlloc::Verify() {
size_t obj_size = obj->SizeOf();
CHECK_GT(obj_size + memory_tool_modifier, kLargeSizeThreshold)
<< "A rosalloc large object size must be > " << kLargeSizeThreshold;
- CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, kPageSize) / kPageSize)
+ CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, gPageSize) / gPageSize)
<< "A rosalloc large object size " << obj_size + memory_tool_modifier
- << " does not match the page map table " << (num_pages * kPageSize)
+ << " does not match the page map table " << (num_pages * gPageSize)
<< std::endl << DumpPageMap();
i += num_pages;
CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
@@ -1815,7 +1815,7 @@ void RosAlloc::Verify() {
UNREACHABLE();
case kPageMapRun: {
// The start of a run.
- Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
+ Run* run = reinterpret_cast<Run*>(base_ + i * gPageSize);
DCHECK_EQ(run->magic_num_, kMagicNum);
size_t idx = run->size_bracket_idx_;
CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
@@ -1879,7 +1879,7 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_mem
const size_t num_slots = numOfSlots[idx];
size_t bracket_size = IndexToBracketSize(idx);
CHECK_EQ(slot_base + num_slots * bracket_size,
- reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
+ reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * gPageSize)
<< "Mismatch in the end address of the run " << Dump();
// Check that the bulk free list is empty. It's only used during BulkFree().
CHECK(IsBulkFreeListEmpty()) << "The bulk free isn't empty " << Dump();
@@ -2006,17 +2006,17 @@ size_t RosAlloc::ReleasePages() {
if (IsFreePage(i)) {
// Free page runs can start with a released page if we coalesced a released page free
// page run with an empty page run.
- FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
+ FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * gPageSize);
// There is a race condition where FreePage can coalesce fpr with the previous
// free page run before we acquire lock_. In that case free_page_runs_.find will not find
// a run starting at fpr. To handle this race, we skip reclaiming the page range and go
// to the next page.
if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
size_t fpr_size = fpr->ByteSize(this);
- DCHECK_ALIGNED_PARAM(fpr_size, kPageSize);
+ DCHECK_ALIGNED_PARAM(fpr_size, gPageSize);
uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
- size_t pages = fpr_size / kPageSize;
+ size_t pages = fpr_size / gPageSize;
CHECK_GT(pages, 0U) << "Infinite loop probable";
i += pages;
DCHECK_LE(i, page_map_size_);
@@ -2040,13 +2040,13 @@ size_t RosAlloc::ReleasePages() {
}
size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
- DCHECK_ALIGNED_PARAM(start, kPageSize);
- DCHECK_ALIGNED_PARAM(end, kPageSize);
+ DCHECK_ALIGNED_PARAM(start, gPageSize);
+ DCHECK_ALIGNED_PARAM(end, gPageSize);
DCHECK_LT(start, end);
if (kIsDebugBuild) {
// In the debug build, the first page of a free page run
// contains a magic number for debugging. Exclude it.
- start += kPageSize;
+ start += gPageSize;
// Single pages won't be released.
if (start == end) {
@@ -2061,12 +2061,12 @@ size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
size_t pm_idx = ToPageMapIndex(start);
size_t reclaimed_bytes = 0;
// Calculate reclaimed bytes and update page map.
- const size_t max_idx = pm_idx + (end - start) / kPageSize;
+ const size_t max_idx = pm_idx + (end - start) / gPageSize;
for (; pm_idx < max_idx; ++pm_idx) {
DCHECK(IsFreePage(pm_idx));
if (page_map_[pm_idx] == kPageMapEmpty) {
// Mark the page as released and update how many bytes we released.
- reclaimed_bytes += kPageSize;
+ reclaimed_bytes += gPageSize;
page_map_[pm_idx] = kPageMapReleased;
}
}
@@ -2088,10 +2088,10 @@ bool RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_allo
const char* new_buffer_msg = "";
if (failed_alloc_bytes > kLargeSizeThreshold) {
// Large allocation.
- required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
+ required_bytes = RoundUp(failed_alloc_bytes, gPageSize);
} else {
// Non-large allocation.
- required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
+ required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * gPageSize;
new_buffer_msg = " for a new buffer";
}
if (required_bytes > largest_continuous_free_pages) {
@@ -2145,7 +2145,7 @@ void RosAlloc::DumpStats(std::ostream& os) {
<< DumpPageMap();
UNREACHABLE();
case kPageMapRun: {
- Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
+ Run* run = reinterpret_cast<Run*>(base_ + i * gPageSize);
size_t idx = run->size_bracket_idx_;
size_t num_pages = numOfPages[idx];
num_runs[idx]++;
@@ -2170,7 +2170,7 @@ void RosAlloc::DumpStats(std::ostream& os) {
os << "Bracket " << i << " (" << bracketSizes[i] << "):"
<< " #runs=" << num_runs[i]
<< " #pages=" << num_pages_runs[i]
- << " (" << PrettySize(num_pages_runs[i] * kPageSize) << ")"
+ << " (" << PrettySize(num_pages_runs[i] * gPageSize) << ")"
<< " #metadata_bytes=" << PrettySize(num_metadata_bytes[i])
<< " #slots=" << num_slots[i] << " (" << PrettySize(num_slots[i] * bracketSizes[i]) << ")"
<< " #used_slots=" << num_used_slots[i]
@@ -2178,7 +2178,7 @@ void RosAlloc::DumpStats(std::ostream& os) {
}
os << "Large #allocations=" << num_large_objects
<< " #pages=" << num_pages_large_objects
- << " (" << PrettySize(num_pages_large_objects * kPageSize) << ")\n";
+ << " (" << PrettySize(num_pages_large_objects * gPageSize) << ")\n";
size_t total_num_pages = 0;
size_t total_metadata_bytes = 0;
size_t total_allocated_bytes = 0;
@@ -2188,8 +2188,8 @@ void RosAlloc::DumpStats(std::ostream& os) {
total_allocated_bytes += num_used_slots[i] * bracketSizes[i];
}
total_num_pages += num_pages_large_objects;
- total_allocated_bytes += num_pages_large_objects * kPageSize;
- os << "Total #total_bytes=" << PrettySize(total_num_pages * kPageSize)
+ total_allocated_bytes += num_pages_large_objects * gPageSize;
+ os << "Total #total_bytes=" << PrettySize(total_num_pages * gPageSize)
<< " #metadata_bytes=" << PrettySize(total_metadata_bytes)
<< " #used_bytes=" << PrettySize(total_allocated_bytes) << "\n";
os << "\n";
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index a9007f226f..35cdd5e9ab 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -56,12 +56,12 @@ class RosAlloc {
size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
DCHECK_GE(byte_size, static_cast<size_t>(0));
- DCHECK_ALIGNED_PARAM(byte_size, kPageSize);
+ DCHECK_ALIGNED_PARAM(byte_size, gPageSize);
return byte_size;
}
void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
REQUIRES(rosalloc->lock_) {
- DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(byte_size % gPageSize, static_cast<size_t>(0));
uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
@@ -102,7 +102,7 @@ class RosAlloc {
void ReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
uint8_t* start = reinterpret_cast<uint8_t*>(this);
size_t byte_size = ByteSize(rosalloc);
- DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
+ DCHECK_EQ(byte_size % gPageSize, static_cast<size_t>(0));
if (ShouldReleasePages(rosalloc)) {
rosalloc->ReleasePageRange(start, start + byte_size);
}
@@ -390,7 +390,7 @@ class RosAlloc {
return &thread_local_free_list_;
}
void* End() {
- return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];
+ return reinterpret_cast<uint8_t*>(this) + gPageSize * numOfPages[size_bracket_idx_];
}
void SetIsThreadLocal(bool is_thread_local) {
is_thread_local_ = is_thread_local ? 1 : 0;
@@ -610,13 +610,13 @@ class RosAlloc {
DCHECK_LE(base_, addr);
DCHECK_LT(addr, base_ + capacity_);
size_t byte_offset = reinterpret_cast<const uint8_t*>(addr) - base_;
- DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
- return byte_offset / kPageSize;
+ DCHECK_EQ(byte_offset % static_cast<size_t>(gPageSize), static_cast<size_t>(0));
+ return byte_offset / gPageSize;
}
// Returns the page map index from an address with rounding.
size_t RoundDownToPageMapIndex(const void* addr) const {
DCHECK(base_ <= addr && addr < reinterpret_cast<uint8_t*>(base_) + capacity_);
- return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
+ return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / gPageSize;
}
// A memory allocation request larger than this size is treated as a large object and allocated
@@ -872,7 +872,7 @@ class RosAlloc {
// Returns the size of the allocated slot for a given size.
size_t UsableSize(size_t bytes) {
if (UNLIKELY(bytes > kLargeSizeThreshold)) {
- return RoundUp(bytes, kPageSize);
+ return RoundUp(bytes, gPageSize);
} else {
return RoundToBracketSize(bytes);
}
@@ -911,7 +911,7 @@ class RosAlloc {
return dedicated_full_run_;
}
bool IsFreePage(size_t idx) const {
- DCHECK_LT(idx, capacity_ / kPageSize);
+ DCHECK_LT(idx, capacity_ / gPageSize);
uint8_t pm_type = page_map_[idx];
return pm_type == kPageMapReleased || pm_type == kPageMapEmpty;
}
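Note: RosAlloc's page map stores one byte of state per page, so translating an address to its slot is a base-relative division by the runtime page size, as in ToPageMapIndex above. A stripped-down sketch (flat array and names are assumptions):

#include <cstddef>
#include <cstdint>

// Map an address inside [base, base + capacity) to its page-map slot.
inline size_t PageMapIndex(const uint8_t* base, const void* addr, size_t page_size) {
  const size_t byte_offset =
      reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base);
  return byte_offset / page_size;
}
// e.g. with a 16 KiB page size, base + 40960 lands in page-map slot 2.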
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 18fa7b5970..370e01b61e 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -144,7 +144,7 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"concurrent copying sweep array free buffer",
- RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
+ RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), gPageSize),
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&error_msg);
@@ -2657,19 +2657,19 @@ void ConcurrentCopying::CaptureRssAtPeak() {
if (Runtime::Current()->GetDumpGCPerformanceOnShutdown()) {
std::list<range_t> gc_ranges;
auto add_gc_range = [&gc_ranges](void* start, size_t size) {
- void* end = static_cast<char*>(start) + RoundUp(size, kPageSize);
+ void* end = static_cast<char*>(start) + RoundUp(size, gPageSize);
gc_ranges.emplace_back(range_t(start, end));
};
// region space
- DCHECK(IsAlignedParam(region_space_->Limit(), kPageSize));
+ DCHECK(IsAlignedParam(region_space_->Limit(), gPageSize));
gc_ranges.emplace_back(range_t(region_space_->Begin(), region_space_->Limit()));
// mark bitmap
add_gc_range(region_space_bitmap_->Begin(), region_space_bitmap_->Size());
// non-moving space
{
- DCHECK(IsAlignedParam(heap_->non_moving_space_->Limit(), kPageSize));
+ DCHECK(IsAlignedParam(heap_->non_moving_space_->Limit(), gPageSize));
gc_ranges.emplace_back(range_t(heap_->non_moving_space_->Begin(),
heap_->non_moving_space_->Limit()));
// mark bitmap
@@ -2689,7 +2689,7 @@ void ConcurrentCopying::CaptureRssAtPeak() {
// large-object space
if (heap_->GetLargeObjectsSpace()) {
heap_->GetLargeObjectsSpace()->ForEachMemMap([&add_gc_range](const MemMap& map) {
- DCHECK(IsAlignedParam(map.BaseSize(), kPageSize));
+ DCHECK(IsAlignedParam(map.BaseSize(), gPageSize));
add_gc_range(map.BaseBegin(), map.BaseSize());
});
// mark bitmap
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 4887bd91b1..47d297bdbb 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -377,7 +377,7 @@ class ConcurrentCopying : public GarbageCollector {
GUARDED_BY(mark_stack_lock_);
// Size of thread local mark stack.
static size_t GetMarkStackSize() {
- return kPageSize;
+ return gPageSize;
}
static constexpr size_t kMarkStackPoolSize = 256;
std::vector<accounting::ObjectStack*> pooled_mark_stacks_
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 252ea982b9..b0680d4e80 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -124,13 +124,13 @@ uint64_t GarbageCollector::ExtractRssFromMincore(
}
size_t length = static_cast<uint8_t*>(it->second) - static_cast<uint8_t*>(it->first);
// Compute max length for vector allocation later.
- vec_len = std::max(vec_len, length / kPageSize);
+ vec_len = std::max(vec_len, length / gPageSize);
}
std::unique_ptr<unsigned char[]> vec(new unsigned char[vec_len]);
for (const auto it : *gc_ranges) {
size_t length = static_cast<uint8_t*>(it.second) - static_cast<uint8_t*>(it.first);
if (mincore(it.first, length, vec.get()) == 0) {
- for (size_t i = 0; i < length / kPageSize; i++) {
+ for (size_t i = 0; i < length / gPageSize; i++) {
// Least significant bit represents residency of a page. Other bits are
// reserved.
rss += vec[i] & 0x1;
@@ -140,7 +140,7 @@ uint64_t GarbageCollector::ExtractRssFromMincore(
<< ", 0x" << it.second << std::dec << ") failed: " << strerror(errno);
}
}
- rss *= kPageSize;
+ rss *= gPageSize;
rss_histogram_.AddValue(rss / KB);
#endif
return rss;
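Note: ExtractRssFromMincore above counts resident pages via mincore(), which reports one status byte per page with bit 0 set for resident pages. A small standalone sketch of the same accounting (helper name is an assumption; addr must be page-aligned, as mincore requires):

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <memory>

// Return the number of resident bytes in [addr, addr + length), or 0 on error.
size_t ResidentBytes(void* addr, size_t length) {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t pages = (length + page_size - 1) / page_size;
  std::unique_ptr<unsigned char[]> vec(new unsigned char[pages]);
  if (mincore(addr, length, vec.get()) != 0) {
    return 0;
  }
  size_t resident_pages = 0;
  for (size_t i = 0; i < pages; ++i) {
    resident_pages += vec[i] & 0x1;  // least significant bit: page is in core
  }
  return resident_pages * page_size;
}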
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 93ad95d10e..33a1d6c639 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -72,8 +72,8 @@ class ImmuneSpacesTest : public CommonArtTest {
for (size_t i = 0; i < kMaxBitmaps; ++i) {
accounting::ContinuousSpaceBitmap bitmap(
accounting::ContinuousSpaceBitmap::Create("bitmap",
- reinterpret_cast<uint8_t*>(kPageSize),
- kPageSize));
+ reinterpret_cast<uint8_t*>(gPageSize),
+ gPageSize));
CHECK(bitmap.IsValid());
live_bitmaps_.push_back(std::move(bitmap));
}
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 22089f9351..69f52386c5 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -98,12 +98,12 @@ using ::android::modules::sdklevel::IsAtLeastT;
namespace art {
static bool HaveMremapDontunmap() {
- void* old = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
+ void* old = mmap(nullptr, gPageSize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
CHECK_NE(old, MAP_FAILED);
- void* addr = mremap(old, kPageSize, kPageSize, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, nullptr);
- CHECK_EQ(munmap(old, kPageSize), 0);
+ void* addr = mremap(old, gPageSize, gPageSize, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, nullptr);
+ CHECK_EQ(munmap(old, gPageSize), 0);
if (addr != MAP_FAILED) {
- CHECK_EQ(munmap(addr, kPageSize), 0);
+ CHECK_EQ(munmap(addr, gPageSize), 0);
return true;
} else {
return false;
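Note: HaveMremapDontunmap above detects MREMAP_DONTUNMAP support (added in Linux 5.7; the probe also checks it works on the shared anonymous mappings the GC uses) by trying the flag on a throwaway page. The same probe as a hedged sketch that caches its answer:

#ifndef _GNU_SOURCE
#define _GNU_SOURCE  // expose MREMAP_* flags in <sys/mman.h>
#endif
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

// Probe once whether mremap() accepts MREMAP_DONTUNMAP on a shared anonymous page.
bool KernelSupportsMremapDontunmap() {
  static const bool supported = [] {
    const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    void* old_map = mmap(nullptr, page_size, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_SHARED, -1, 0);
    if (old_map == MAP_FAILED) {
      return false;
    }
    void* moved = mremap(old_map, page_size, page_size,
                         MREMAP_MAYMOVE | MREMAP_DONTUNMAP, nullptr);
    munmap(old_map, page_size);  // with DONTUNMAP the old VMA stays mapped either way
    if (moved == MAP_FAILED) {
      return false;
    }
    munmap(moved, page_size);
    return true;
  }();
  return supported;
}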
@@ -394,14 +394,14 @@ static bool IsSigbusFeatureAvailable() {
}
size_t MarkCompact::InitializeInfoMap(uint8_t* p, size_t moving_space_sz) {
- size_t nr_moving_pages = moving_space_sz / kPageSize;
+ size_t nr_moving_pages = moving_space_sz / gPageSize;
chunk_info_vec_ = reinterpret_cast<uint32_t*>(p);
vector_length_ = moving_space_sz / kOffsetChunkSize;
size_t total = vector_length_ * sizeof(uint32_t);
first_objs_non_moving_space_ = reinterpret_cast<ObjReference*>(p + total);
- total += heap_->GetNonMovingSpace()->Capacity() / kPageSize * sizeof(ObjReference);
+ total += heap_->GetNonMovingSpace()->Capacity() / gPageSize * sizeof(ObjReference);
first_objs_moving_space_ = reinterpret_cast<ObjReference*>(p + total);
total += nr_moving_pages * sizeof(ObjReference);
@@ -454,8 +454,8 @@ MarkCompact::MarkCompact(Heap* heap)
// Create one MemMap for all the data structures
size_t moving_space_size = bump_pointer_space_->Capacity();
size_t chunk_info_vec_size = moving_space_size / kOffsetChunkSize;
- size_t nr_moving_pages = moving_space_size / kPageSize;
- size_t nr_non_moving_pages = heap->GetNonMovingSpace()->Capacity() / kPageSize;
+ size_t nr_moving_pages = moving_space_size / gPageSize;
+ size_t nr_non_moving_pages = heap->GetNonMovingSpace()->Capacity() / gPageSize;
std::string err_msg;
info_map_ = MemMap::MapAnonymous("Concurrent mark-compact chunk-info vector",
@@ -518,7 +518,7 @@ MarkCompact::MarkCompact(Heap* heap)
1 + (use_uffd_sigbus_ ? kMutatorCompactionBufferCount :
std::min(heap_->GetParallelGCThreadCount(), kMaxNumUffdWorkers));
compaction_buffers_map_ = MemMap::MapAnonymous("Concurrent mark-compact compaction buffers",
- kPageSize * num_pages,
+ gPageSize * num_pages,
PROT_READ | PROT_WRITE,
/*low_4gb=*/kObjPtrPoisoning,
&err_msg);
@@ -554,9 +554,9 @@ MarkCompact::MarkCompact(Heap* heap)
}
void MarkCompact::AddLinearAllocSpaceData(uint8_t* begin, size_t len) {
- DCHECK_ALIGNED_PARAM(begin, kPageSize);
- DCHECK_ALIGNED_PARAM(len, kPageSize);
- DCHECK_GE(len, kPMDSize);
+ DCHECK_ALIGNED_PARAM(begin, gPageSize);
+ DCHECK_ALIGNED_PARAM(len, gPageSize);
+ DCHECK_GE(len, gPMDSize);
size_t alignment = BestPageTableAlignment(len);
bool is_shared = false;
// We use MAP_SHARED on non-zygote processes for leveraging userfaultfd's minor-fault feature.
@@ -583,7 +583,7 @@ void MarkCompact::AddLinearAllocSpaceData(uint8_t* begin, size_t len) {
}
MemMap page_status_map(MemMap::MapAnonymous("linear-alloc page-status map",
- len / kPageSize,
+ len / gPageSize,
PROT_READ | PROT_WRITE,
/*low_4gb=*/false,
&err_msg));
@@ -863,7 +863,7 @@ void MarkCompact::InitMovingSpaceFirstObjects(const size_t vec_len) {
uint32_t page_live_bytes = 0;
while (true) {
- for (; page_live_bytes <= kPageSize; chunk_idx++) {
+ for (; page_live_bytes <= gPageSize; chunk_idx++) {
if (chunk_idx > vec_len) {
moving_first_objs_count_ = to_space_page_idx;
return;
@@ -871,7 +871,7 @@ void MarkCompact::InitMovingSpaceFirstObjects(const size_t vec_len) {
page_live_bytes += chunk_info_vec_[chunk_idx];
}
chunk_idx--;
- page_live_bytes -= kPageSize;
+ page_live_bytes -= gPageSize;
DCHECK_LE(page_live_bytes, kOffsetChunkSize);
DCHECK_LE(page_live_bytes, chunk_info_vec_[chunk_idx])
<< " chunk_idx=" << chunk_idx
@@ -921,7 +921,7 @@ void MarkCompact::InitNonMovingSpaceFirstObjects() {
// There are no live objects in the non-moving space
return;
}
- page_idx = (reinterpret_cast<uintptr_t>(obj) - begin) / kPageSize;
+ page_idx = (reinterpret_cast<uintptr_t>(obj) - begin) / gPageSize;
first_objs_non_moving_space_[page_idx++].Assign(obj);
prev_obj = obj;
}
@@ -931,7 +931,7 @@ void MarkCompact::InitNonMovingSpaceFirstObjects() {
// For every page find the object starting from which we need to call
// VisitReferences. It could either be an object that started on some
// preceding page, or some object starting within this page.
- begin = RoundDown(reinterpret_cast<uintptr_t>(prev_obj) + kPageSize, kPageSize);
+ begin = RoundDown(reinterpret_cast<uintptr_t>(prev_obj) + gPageSize, gPageSize);
while (begin < end) {
// Utilize, if any, large object that started in some preceding page, but
// overlaps with this page as well.
@@ -950,7 +950,7 @@ void MarkCompact::InitNonMovingSpaceFirstObjects() {
// If no live object started in that page and some object had started in
// the page preceding to that page, which was big enough to overlap with
// the current page, then we wouldn't be in the else part.
- prev_obj = bitmap->FindPrecedingObject(begin, begin - kPageSize);
+ prev_obj = bitmap->FindPrecedingObject(begin, begin - gPageSize);
if (prev_obj != nullptr) {
prev_obj_end = reinterpret_cast<uintptr_t>(prev_obj)
+ RoundUp(prev_obj->SizeOf<kDefaultVerifyFlags>(), kAlignment);
@@ -967,7 +967,7 @@ void MarkCompact::InitNonMovingSpaceFirstObjects() {
// Find the first live object in this page
bitmap->VisitMarkedRange</*kVisitOnce*/ true>(
begin,
- begin + kPageSize,
+ begin + gPageSize,
[this, page_idx] (mirror::Object* obj) {
first_objs_non_moving_space_[page_idx].Assign(obj);
});
@@ -975,14 +975,14 @@ void MarkCompact::InitNonMovingSpaceFirstObjects() {
// An empty entry indicates that the page has no live objects and hence
// can be skipped.
}
- begin += kPageSize;
+ begin += gPageSize;
page_idx++;
}
non_moving_first_objs_count_ = page_idx;
}
bool MarkCompact::CanCompactMovingSpaceWithMinorFault() {
- size_t min_size = (moving_first_objs_count_ + black_page_count_) * kPageSize;
+ size_t min_size = (moving_first_objs_count_ + black_page_count_) * gPageSize;
return minor_fault_initialized_ && shadow_to_space_map_.IsValid() &&
shadow_to_space_map_.Size() >= min_size;
}
@@ -997,9 +997,9 @@ class MarkCompact::ConcurrentCompactionGcTask : public SelfDeletingTask {
collector_->ConcurrentCompaction<MarkCompact::kMinorFaultMode>(/*buf=*/nullptr);
} else {
// The passed page/buf to ConcurrentCompaction is used by the thread as a
- // kPageSize buffer for compacting and updating objects into and then
+ // gPageSize buffer for compacting and updating objects into and then
// passing the buf to uffd ioctls.
- uint8_t* buf = collector_->compaction_buffers_map_.Begin() + index_ * kPageSize;
+ uint8_t* buf = collector_->compaction_buffers_map_.Begin() + index_ * gPageSize;
collector_->ConcurrentCompaction<MarkCompact::kCopyMode>(buf);
}
}
@@ -1060,8 +1060,8 @@ void MarkCompact::PrepareForCompaction() {
for (size_t i = vector_len; i < vector_length_; i++) {
DCHECK_EQ(chunk_info_vec_[i], 0u);
}
- post_compact_end_ = AlignUp(space_begin + total, kPageSize);
- CHECK_EQ(post_compact_end_, space_begin + moving_first_objs_count_ * kPageSize);
+ post_compact_end_ = AlignUp(space_begin + total, gPageSize);
+ CHECK_EQ(post_compact_end_, space_begin + moving_first_objs_count_ * gPageSize);
black_objs_slide_diff_ = black_allocations_begin_ - post_compact_end_;
// We shouldn't be consuming more space after compaction than pre-compaction.
CHECK_GE(black_objs_slide_diff_, 0);
@@ -1082,7 +1082,7 @@ void MarkCompact::PrepareForCompaction() {
// Register the buffer that we use for terminating concurrent compaction
struct uffdio_register uffd_register;
uffd_register.range.start = reinterpret_cast<uintptr_t>(conc_compaction_termination_page_);
- uffd_register.range.len = kPageSize;
+ uffd_register.range.len = gPageSize;
uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
CHECK_EQ(ioctl(uffd_, UFFDIO_REGISTER, &uffd_register), 0)
<< "ioctl_userfaultfd: register compaction termination page: " << strerror(errno);
@@ -1196,7 +1196,7 @@ void MarkCompact::PrepareForCompaction() {
DCHECK_GE(moving_to_space_fd_, 0);
// Take extra 4MB to reduce the likelihood of requiring resizing this
// map in the pause due to black allocations.
- size_t reqd_size = std::min(moving_first_objs_count_ * kPageSize + 4 * MB,
+ size_t reqd_size = std::min(moving_first_objs_count_ * gPageSize + 4 * MB,
bump_pointer_space_->Capacity());
// We cannot support memory-tool with shadow-map (as it requires
// appending a redzone) in this case because the mapping may have to be expanded
@@ -1351,8 +1351,8 @@ void MarkCompact::MarkingPause() {
// Align-up to page boundary so that black allocations happen from next page
// onwards. Also, it ensures that 'end' is aligned for card-table's
// ClearCardRange().
- black_allocations_begin_ = bump_pointer_space_->AlignEnd(thread_running_gc_, kPageSize, heap_);
- DCHECK_ALIGNED_PARAM(black_allocations_begin_, kPageSize);
+ black_allocations_begin_ = bump_pointer_space_->AlignEnd(thread_running_gc_, gPageSize, heap_);
+ DCHECK_ALIGNED_PARAM(black_allocations_begin_, gPageSize);
// Re-mark root set. Doesn't include thread-roots as they are already marked
// above.
@@ -1607,12 +1607,12 @@ void MarkCompact::CompactPage(mirror::Object* obj,
live_words_bitmap_->VisitLiveStrides(
offset,
black_allocations_begin_,
- kPageSize,
+ gPageSize,
[&addr, &last_stride, &stride_count, &last_stride_begin, verify_obj_callback, this](
uint32_t stride_begin, size_t stride_size, [[maybe_unused]] bool is_last)
REQUIRES_SHARED(Locks::mutator_lock_) {
const size_t stride_in_bytes = stride_size * kAlignment;
- DCHECK_LE(stride_in_bytes, kPageSize);
+ DCHECK_LE(stride_in_bytes, gPageSize);
last_stride_begin = stride_begin;
DCHECK(IsAligned<kAlignment>(addr));
memcpy(addr, from_space_begin_ + stride_begin * kAlignment, stride_in_bytes);
@@ -1623,7 +1623,7 @@ void MarkCompact::CompactPage(mirror::Object* obj,
// stride's first-object may have started on previous
// page. The only exception is the first page of the
// moving space.
- if (stride_count > 0 || stride_begin * kAlignment < kPageSize) {
+ if (stride_count > 0 || stride_begin * kAlignment < gPageSize) {
mirror::Object* o =
reinterpret_cast<mirror::Object*>(space_begin + stride_begin * kAlignment);
CHECK(live_words_bitmap_->Test(o)) << "ref=" << o;
@@ -1636,7 +1636,7 @@ void MarkCompact::CompactPage(mirror::Object* obj,
addr += stride_in_bytes;
stride_count++;
});
- DCHECK_LT(last_stride, start_addr + kPageSize);
+ DCHECK_LT(last_stride, start_addr + gPageSize);
DCHECK_GT(stride_count, 0u);
size_t obj_size = 0;
uint32_t offset_within_obj = offset * kAlignment
@@ -1655,10 +1655,10 @@ void MarkCompact::CompactPage(mirror::Object* obj,
RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/true> visitor(this,
to_ref,
start_addr,
- start_addr + kPageSize);
+ start_addr + gPageSize);
obj_size = obj->VisitRefsForCompaction</*kFetchObjSize*/true, /*kVisitNativeRoots*/false>(
visitor, MemberOffset(offset_within_obj), MemberOffset(offset_within_obj
- + kPageSize));
+ + gPageSize));
}
obj_size = RoundUp(obj_size, kAlignment);
DCHECK_GT(obj_size, offset_within_obj)
@@ -1688,7 +1688,7 @@ void MarkCompact::CompactPage(mirror::Object* obj,
}
// Except for the last page being compacted, the pages will have addr ==
- // start_addr + kPageSize.
+ // start_addr + gPageSize.
uint8_t* const end_addr = addr;
addr = start_addr;
size_t bytes_done = obj_size;
@@ -1696,7 +1696,7 @@ void MarkCompact::CompactPage(mirror::Object* obj,
// checks.
DCHECK_LE(addr, last_stride);
size_t bytes_to_visit = last_stride - addr;
- DCHECK_LE(bytes_to_visit, kPageSize);
+ DCHECK_LE(bytes_to_visit, gPageSize);
while (bytes_to_visit > bytes_done) {
mirror::Object* ref = reinterpret_cast<mirror::Object*>(addr + bytes_done);
VerifyObject(ref, verify_obj_callback);
@@ -1713,13 +1713,13 @@ void MarkCompact::CompactPage(mirror::Object* obj,
// which in case of klass requires 'class_size_'.
uint8_t* from_addr = from_space_begin_ + last_stride_begin * kAlignment;
bytes_to_visit = end_addr - addr;
- DCHECK_LE(bytes_to_visit, kPageSize);
+ DCHECK_LE(bytes_to_visit, gPageSize);
while (bytes_to_visit > bytes_done) {
mirror::Object* ref = reinterpret_cast<mirror::Object*>(addr + bytes_done);
obj = reinterpret_cast<mirror::Object*>(from_addr);
VerifyObject(ref, verify_obj_callback);
RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/true>
- visitor(this, ref, nullptr, start_addr + kPageSize);
+ visitor(this, ref, nullptr, start_addr + gPageSize);
obj_size = obj->VisitRefsForCompaction(visitor,
MemberOffset(0),
MemberOffset(end_addr - (addr + bytes_done)));
@@ -1748,8 +1748,8 @@ void MarkCompact::CompactPage(mirror::Object* obj,
}
// The last page that we compact may have some bytes left untouched in the
// end, we should zero them as the kernel copies at page granularity.
- if (needs_memset_zero && UNLIKELY(bytes_done < kPageSize)) {
- std::memset(addr + bytes_done, 0x0, kPageSize - bytes_done);
+ if (needs_memset_zero && UNLIKELY(bytes_done < gPageSize)) {
+ std::memset(addr + bytes_done, 0x0, gPageSize - bytes_done);
}
}
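The compaction paths above rely throughout on page-granularity arithmetic. A small sketch of the AlignUp/AlignDown-style helpers this assumes, written for a page size that is only known at runtime and is a power of two (hypothetical stand-ins, not ART's actual definitions):

#include <cstdint>
#include <cstddef>

// Round an address down to the start of its page.
inline uintptr_t RoundDownToPage(uintptr_t addr, size_t page_size) {
  return addr & ~(static_cast<uintptr_t>(page_size) - 1);
}

// Round an address up to the next page boundary (no-op if already aligned).
inline uintptr_t RoundUpToPage(uintptr_t addr, size_t page_size) {
  return RoundDownToPage(addr + page_size - 1, page_size);
}

inline bool IsPageAligned(uintptr_t addr, size_t page_size) {
  return (addr & (page_size - 1)) == 0;
}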
@@ -1764,12 +1764,12 @@ void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
uint8_t* const pre_compact_page,
uint8_t* dest,
bool needs_memset_zero) {
- DCHECK(IsAlignedParam(pre_compact_page, kPageSize));
+ DCHECK(IsAlignedParam(pre_compact_page, gPageSize));
size_t bytes_copied;
uint8_t* src_addr = reinterpret_cast<uint8_t*>(GetFromSpaceAddr(first_obj));
uint8_t* pre_compact_addr = reinterpret_cast<uint8_t*>(first_obj);
- uint8_t* const pre_compact_page_end = pre_compact_page + kPageSize;
- uint8_t* const dest_page_end = dest + kPageSize;
+ uint8_t* const pre_compact_page_end = pre_compact_page + gPageSize;
+ uint8_t* const dest_page_end = dest + gPageSize;
auto verify_obj_callback = [&] (std::ostream& os) {
os << " first_obj=" << first_obj
@@ -1782,7 +1782,7 @@ void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
// We have empty portion at the beginning of the page. Zero it.
if (pre_compact_addr > pre_compact_page) {
bytes_copied = pre_compact_addr - pre_compact_page;
- DCHECK_LT(bytes_copied, kPageSize);
+ DCHECK_LT(bytes_copied, gPageSize);
if (needs_memset_zero) {
std::memset(dest, 0x0, bytes_copied);
}
@@ -1792,7 +1792,7 @@ void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
size_t offset = pre_compact_page - pre_compact_addr;
pre_compact_addr = pre_compact_page;
src_addr += offset;
- DCHECK(IsAlignedParam(src_addr, kPageSize));
+ DCHECK(IsAlignedParam(src_addr, gPageSize));
}
// Copy the first chunk of live words
std::memcpy(dest, src_addr, first_chunk_size);
@@ -1829,7 +1829,7 @@ void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
/*kFetchObjSize*/true, /*kVisitNativeRoots*/false>(visitor,
MemberOffset(offset),
MemberOffset(offset
- + kPageSize));
+ + gPageSize));
if (first_obj == next_page_first_obj) {
// First object is the only object on this page. So there's nothing else left to do.
return;
@@ -1845,9 +1845,9 @@ void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
bool check_last_obj = false;
if (next_page_first_obj != nullptr
&& reinterpret_cast<uint8_t*>(next_page_first_obj) < pre_compact_page_end
- && bytes_copied == kPageSize) {
+ && bytes_copied == gPageSize) {
size_t diff = pre_compact_page_end - reinterpret_cast<uint8_t*>(next_page_first_obj);
- DCHECK_LE(diff, kPageSize);
+ DCHECK_LE(diff, gPageSize);
DCHECK_LE(diff, bytes_to_visit);
bytes_to_visit -= diff;
check_last_obj = true;
@@ -1881,7 +1881,7 @@ void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
}
// Probably a TLAB finished on this page and/or a new TLAB started as well.
- if (bytes_copied < kPageSize) {
+ if (bytes_copied < gPageSize) {
src_addr += first_chunk_size;
pre_compact_addr += first_chunk_size;
// Use mark-bitmap to identify where objects are. First call
@@ -1897,7 +1897,7 @@ void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
[&found_obj](mirror::Object* obj) {
found_obj = obj;
});
- size_t remaining_bytes = kPageSize - bytes_copied;
+ size_t remaining_bytes = gPageSize - bytes_copied;
if (found_obj == nullptr) {
if (needs_memset_zero) {
// No more black objects in this page. Zero the remaining bytes and return.
@@ -1950,19 +1950,19 @@ void MarkCompact::MapProcessedPages(uint8_t* to_space_start,
size_t arr_len) {
DCHECK(minor_fault_initialized_);
DCHECK_LT(arr_idx, arr_len);
- DCHECK_ALIGNED_PARAM(to_space_start, kPageSize);
+ DCHECK_ALIGNED_PARAM(to_space_start, gPageSize);
// Claim all the contiguous pages, which are ready to be mapped, and then do
// so in a single ioctl. This helps avoid the overhead of invoking syscall
// several times and also maps the already-processed pages, avoiding
// unnecessary faults on them.
- size_t length = kFirstPageMapping ? kPageSize : 0;
+ size_t length = kFirstPageMapping ? gPageSize : 0;
if (kFirstPageMapping) {
arr_idx++;
}
  // We need to guarantee that we don't end up successfully marking a later
// page 'mapping' and then fail to mark an earlier page. To guarantee that
// we use acq_rel order.
- for (; arr_idx < arr_len; arr_idx++, length += kPageSize) {
+ for (; arr_idx < arr_len; arr_idx++, length += gPageSize) {
PageState expected_state = PageState::kProcessed;
if (!state_arr[arr_idx].compare_exchange_strong(
expected_state, PageState::kProcessedAndMapping, std::memory_order_acq_rel)) {
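For clarity, the claiming pattern used here, sketched in isolation: walk the per-page state array and claim consecutive kProcessed entries with acq_rel compare-exchanges, accumulating the byte length to map in one ioctl (illustrative only; names mirror the diff but the snippet itself is hypothetical):

#include <atomic>
#include <cstdint>
#include <cstddef>

enum class PageState : uint8_t { kUnprocessed, kProcessed, kProcessedAndMapping };

// Returns the number of contiguous bytes (starting at begin_idx) that this
// thread has claimed for mapping.
size_t ClaimContiguousPages(std::atomic<PageState>* states,
                            size_t begin_idx,
                            size_t end_idx,
                            size_t page_size) {
  size_t length = 0;
  for (size_t i = begin_idx; i < end_idx; ++i, length += page_size) {
    PageState expected = PageState::kProcessed;
    if (!states[i].compare_exchange_strong(
            expected, PageState::kProcessedAndMapping, std::memory_order_acq_rel)) {
      break;  // Stop at the first page another thread is already handling.
    }
  }
  return length;  // Bytes ready to be mapped in a single ioctl.
}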
@@ -1997,17 +1997,17 @@ void MarkCompact::MapProcessedPages(uint8_t* to_space_start,
// Bail out by setting the remaining pages' state back to kProcessed and
// then waking up any waiting threads.
DCHECK_GE(uffd_continue.mapped, 0);
- DCHECK_ALIGNED_PARAM(uffd_continue.mapped, kPageSize);
+ DCHECK_ALIGNED_PARAM(uffd_continue.mapped, gPageSize);
DCHECK_LT(uffd_continue.mapped, static_cast<ssize_t>(length));
if (kFirstPageMapping) {
// In this case the first page must be mapped.
- DCHECK_GE(uffd_continue.mapped, static_cast<ssize_t>(kPageSize));
+ DCHECK_GE(uffd_continue.mapped, static_cast<ssize_t>(gPageSize));
}
// Nobody would modify these pages' state simultaneously so only atomic
// store is sufficient. Use 'release' order to ensure that all states are
// modified sequentially.
for (size_t remaining_len = length - uffd_continue.mapped; remaining_len > 0;
- remaining_len -= kPageSize) {
+ remaining_len -= gPageSize) {
arr_idx--;
DCHECK_EQ(state_arr[arr_idx].load(std::memory_order_relaxed),
PageState::kProcessedAndMapping);
@@ -2031,7 +2031,7 @@ void MarkCompact::MapProcessedPages(uint8_t* to_space_start,
if (use_uffd_sigbus_) {
// Nobody else would modify these pages' state simultaneously so atomic
// store is sufficient.
- for (; uffd_continue.mapped > 0; uffd_continue.mapped -= kPageSize) {
+ for (; uffd_continue.mapped > 0; uffd_continue.mapped -= gPageSize) {
arr_idx--;
DCHECK_EQ(state_arr[arr_idx].load(std::memory_order_relaxed),
PageState::kProcessedAndMapping);
@@ -2043,13 +2043,13 @@ void MarkCompact::MapProcessedPages(uint8_t* to_space_start,
void MarkCompact::ZeropageIoctl(void* addr, bool tolerate_eexist, bool tolerate_enoent) {
struct uffdio_zeropage uffd_zeropage;
- DCHECK(IsAlignedParam(addr, kPageSize));
+ DCHECK(IsAlignedParam(addr, gPageSize));
uffd_zeropage.range.start = reinterpret_cast<uintptr_t>(addr);
- uffd_zeropage.range.len = kPageSize;
+ uffd_zeropage.range.len = gPageSize;
uffd_zeropage.mode = 0;
int ret = ioctl(uffd_, UFFDIO_ZEROPAGE, &uffd_zeropage);
if (LIKELY(ret == 0)) {
- DCHECK_EQ(uffd_zeropage.zeropage, static_cast<ssize_t>(kPageSize));
+ DCHECK_EQ(uffd_zeropage.zeropage, static_cast<ssize_t>(gPageSize));
} else {
CHECK((tolerate_enoent && errno == ENOENT) || (tolerate_eexist && errno == EEXIST))
<< "ioctl_userfaultfd: zeropage failed: " << strerror(errno) << ". addr:" << addr;
@@ -2060,12 +2060,12 @@ void MarkCompact::CopyIoctl(void* dst, void* buffer) {
struct uffdio_copy uffd_copy;
uffd_copy.src = reinterpret_cast<uintptr_t>(buffer);
uffd_copy.dst = reinterpret_cast<uintptr_t>(dst);
- uffd_copy.len = kPageSize;
+ uffd_copy.len = gPageSize;
uffd_copy.mode = 0;
CHECK_EQ(ioctl(uffd_, UFFDIO_COPY, &uffd_copy), 0)
<< "ioctl_userfaultfd: copy failed: " << strerror(errno) << ". src:" << buffer
<< " dst:" << dst;
- DCHECK_EQ(uffd_copy.copy, static_cast<ssize_t>(kPageSize));
+ DCHECK_EQ(uffd_copy.copy, static_cast<ssize_t>(gPageSize));
}
template <int kMode, typename CompactionFn>
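For reference, a hedged sketch of the two ioctls used by ZeropageIoctl and CopyIoctl, with the length taken from a runtime page size; error handling is reduced to a bool, whereas the real code tolerates EEXIST/ENOENT in specific situations:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <cstdint>
#include <cstring>

// Installs a zero page at 'dst' in the registered range.
bool InstallZeroPage(int uffd, void* dst, size_t page_size) {
  struct uffdio_zeropage zp;
  std::memset(&zp, 0, sizeof(zp));
  zp.range.start = reinterpret_cast<uintptr_t>(dst);
  zp.range.len = page_size;
  return ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == 0 &&
         zp.zeropage == static_cast<ssize_t>(page_size);
}

// Copies one already-prepared page from 'src' into the faulting range at 'dst'.
bool InstallCopiedPage(int uffd, void* dst, void* src, size_t page_size) {
  struct uffdio_copy cp;
  std::memset(&cp, 0, sizeof(cp));
  cp.dst = reinterpret_cast<uintptr_t>(dst);
  cp.src = reinterpret_cast<uintptr_t>(src);
  cp.len = page_size;
  return ioctl(uffd, UFFDIO_COPY, &cp) == 0 &&
         cp.copy == static_cast<ssize_t>(page_size);
}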
@@ -2150,7 +2150,7 @@ void MarkCompact::FreeFromSpacePages(size_t cur_page_idx, int mode) {
// normal cases as objects are smaller than page size.
if (idx >= moving_first_objs_count_) {
// black-allocated portion of the moving-space
- idx_addr = black_allocations_begin_ + (idx - moving_first_objs_count_) * kPageSize;
+ idx_addr = black_allocations_begin_ + (idx - moving_first_objs_count_) * gPageSize;
reclaim_begin = idx_addr;
mirror::Object* first_obj = first_objs_moving_space_[idx].AsMirrorPtr();
if (first_obj != nullptr && reinterpret_cast<uint8_t*>(first_obj) < reclaim_begin) {
@@ -2161,8 +2161,8 @@ void MarkCompact::FreeFromSpacePages(size_t cur_page_idx, int mode) {
// not used yet. So we can compute its from-space page and use that.
if (obj != first_obj) {
reclaim_begin = obj != nullptr
- ? AlignUp(reinterpret_cast<uint8_t*>(obj), kPageSize)
- : (black_allocations_begin_ + (i - moving_first_objs_count_) * kPageSize);
+ ? AlignUp(reinterpret_cast<uint8_t*>(obj), gPageSize)
+ : (black_allocations_begin_ + (i - moving_first_objs_count_) * gPageSize);
break;
}
}
@@ -2189,12 +2189,12 @@ void MarkCompact::FreeFromSpacePages(size_t cur_page_idx, int mode) {
reclaim_begin = black_allocations_begin_;
}
}
- reclaim_begin = AlignUp(reclaim_begin, kPageSize);
+ reclaim_begin = AlignUp(reclaim_begin, gPageSize);
}
DCHECK_NE(reclaim_begin, nullptr);
- DCHECK_ALIGNED_PARAM(reclaim_begin, kPageSize);
- DCHECK_ALIGNED_PARAM(last_reclaimed_page_, kPageSize);
+ DCHECK_ALIGNED_PARAM(reclaim_begin, gPageSize);
+ DCHECK_ALIGNED_PARAM(last_reclaimed_page_, gPageSize);
// Check if the 'class_after_obj_map_' map allows pages to be freed.
for (; class_after_obj_iter_ != class_after_obj_ordered_map_.rend(); class_after_obj_iter_++) {
mirror::Object* klass = class_after_obj_iter_->first.AsMirrorPtr();
@@ -2210,7 +2210,7 @@ void MarkCompact::FreeFromSpacePages(size_t cur_page_idx, int mode) {
if (obj_addr < idx_addr) {
// Its lowest-address object is not compacted yet. Reclaim starting from
// the end of this class.
- reclaim_begin = AlignUp(klass_end, kPageSize);
+ reclaim_begin = AlignUp(klass_end, gPageSize);
} else {
// Continue consuming pairs wherein the lowest address object has already
// been compacted.
@@ -2270,14 +2270,14 @@ void MarkCompact::CompactMovingSpace(uint8_t* page) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
size_t page_status_arr_len = moving_first_objs_count_ + black_page_count_;
size_t idx = page_status_arr_len;
- uint8_t* to_space_end = bump_pointer_space_->Begin() + page_status_arr_len * kPageSize;
+ uint8_t* to_space_end = bump_pointer_space_->Begin() + page_status_arr_len * gPageSize;
uint8_t* shadow_space_end = nullptr;
if (kMode == kMinorFaultMode) {
- shadow_space_end = shadow_to_space_map_.Begin() + page_status_arr_len * kPageSize;
+ shadow_space_end = shadow_to_space_map_.Begin() + page_status_arr_len * gPageSize;
}
- uint8_t* pre_compact_page = black_allocations_begin_ + (black_page_count_ * kPageSize);
+ uint8_t* pre_compact_page = black_allocations_begin_ + (black_page_count_ * gPageSize);
- DCHECK(IsAlignedParam(pre_compact_page, kPageSize));
+ DCHECK(IsAlignedParam(pre_compact_page, gPageSize));
UpdateClassAfterObjMap();
// These variables are maintained by FreeFromSpacePages().
@@ -2288,10 +2288,10 @@ void MarkCompact::CompactMovingSpace(uint8_t* page) {
mirror::Object* next_page_first_obj = nullptr;
while (idx > moving_first_objs_count_) {
idx--;
- pre_compact_page -= kPageSize;
- to_space_end -= kPageSize;
+ pre_compact_page -= gPageSize;
+ to_space_end -= gPageSize;
if (kMode == kMinorFaultMode) {
- shadow_space_end -= kPageSize;
+ shadow_space_end -= gPageSize;
page = shadow_space_end;
} else if (kMode == kFallbackMode) {
page = to_space_end;
@@ -2313,7 +2313,7 @@ void MarkCompact::CompactMovingSpace(uint8_t* page) {
});
// We are sliding here, so no point attempting to madvise for every
// page. Wait for enough pages to be done.
- if (idx % (kMinFromSpaceMadviseSize / kPageSize) == 0) {
+ if (idx % (kMinFromSpaceMadviseSize / gPageSize) == 0) {
FreeFromSpacePages(idx, kMode);
}
}
@@ -2323,9 +2323,9 @@ void MarkCompact::CompactMovingSpace(uint8_t* page) {
while (idx > 0) {
idx--;
- to_space_end -= kPageSize;
+ to_space_end -= gPageSize;
if (kMode == kMinorFaultMode) {
- shadow_space_end -= kPageSize;
+ shadow_space_end -= gPageSize;
page = shadow_space_end;
} else if (kMode == kFallbackMode) {
page = to_space_end;
@@ -2341,7 +2341,7 @@ void MarkCompact::CompactMovingSpace(uint8_t* page) {
}
void MarkCompact::UpdateNonMovingPage(mirror::Object* first, uint8_t* page) {
- DCHECK_LT(reinterpret_cast<uint8_t*>(first), page + kPageSize);
+ DCHECK_LT(reinterpret_cast<uint8_t*>(first), page + gPageSize);
// For every object found in the page, visit the previous object. This ensures
// that we can visit without checking page-end boundary.
// Call VisitRefsForCompaction with from-space read-barrier as the klass object and
@@ -2351,14 +2351,14 @@ void MarkCompact::UpdateNonMovingPage(mirror::Object* first, uint8_t* page) {
mirror::Object* curr_obj = first;
non_moving_space_bitmap_->VisitMarkedRange(
reinterpret_cast<uintptr_t>(first) + mirror::kObjectHeaderSize,
- reinterpret_cast<uintptr_t>(page + kPageSize),
+ reinterpret_cast<uintptr_t>(page + gPageSize),
[&](mirror::Object* next_obj) {
// TODO: Once non-moving space update becomes concurrent, we'll
// require fetching the from-space address of 'curr_obj' and then call
// visitor on that.
if (reinterpret_cast<uint8_t*>(curr_obj) < page) {
RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/false>
- visitor(this, curr_obj, page, page + kPageSize);
+ visitor(this, curr_obj, page, page + gPageSize);
MemberOffset begin_offset(page - reinterpret_cast<uint8_t*>(curr_obj));
// Native roots shouldn't be visited as they are done when this
// object's beginning was visited in the preceding page.
@@ -2366,7 +2366,7 @@ void MarkCompact::UpdateNonMovingPage(mirror::Object* first, uint8_t* page) {
visitor, begin_offset, MemberOffset(-1));
} else {
RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false>
- visitor(this, curr_obj, page, page + kPageSize);
+ visitor(this, curr_obj, page, page + gPageSize);
curr_obj->VisitRefsForCompaction</*kFetchObjSize*/false>(visitor,
MemberOffset(0),
MemberOffset(-1));
@@ -2374,15 +2374,15 @@ void MarkCompact::UpdateNonMovingPage(mirror::Object* first, uint8_t* page) {
curr_obj = next_obj;
});
- MemberOffset end_offset(page + kPageSize - reinterpret_cast<uint8_t*>(curr_obj));
+ MemberOffset end_offset(page + gPageSize - reinterpret_cast<uint8_t*>(curr_obj));
if (reinterpret_cast<uint8_t*>(curr_obj) < page) {
RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/true>
- visitor(this, curr_obj, page, page + kPageSize);
+ visitor(this, curr_obj, page, page + gPageSize);
curr_obj->VisitRefsForCompaction</*kFetchObjSize*/false, /*kVisitNativeRoots*/false>(
visitor, MemberOffset(page - reinterpret_cast<uint8_t*>(curr_obj)), end_offset);
} else {
RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/true>
- visitor(this, curr_obj, page, page + kPageSize);
+ visitor(this, curr_obj, page, page + gPageSize);
curr_obj->VisitRefsForCompaction</*kFetchObjSize*/false>(visitor, MemberOffset(0), end_offset);
}
}
@@ -2395,10 +2395,10 @@ void MarkCompact::UpdateNonMovingSpace() {
// TODO: If and when we make non-moving space update concurrent, implement a
// mechanism to remember class pointers for such objects off-heap and pass it
// to VisitRefsForCompaction().
- uint8_t* page = non_moving_space_->Begin() + non_moving_first_objs_count_ * kPageSize;
+ uint8_t* page = non_moving_space_->Begin() + non_moving_first_objs_count_ * gPageSize;
for (ssize_t i = non_moving_first_objs_count_ - 1; i >= 0; i--) {
mirror::Object* obj = first_objs_non_moving_space_[i].AsMirrorPtr();
- page -= kPageSize;
+ page -= gPageSize;
// null means there are no objects on the page to update references.
if (obj != nullptr) {
UpdateNonMovingPage(obj, page);
@@ -2416,7 +2416,7 @@ void MarkCompact::UpdateMovingSpaceBlackAllocations() {
// size in black_alloc_pages_first_chunk_size_ array.
// For the pages which may have holes after the first chunk, which could happen
// if a new TLAB starts in the middle of the page, we mark the objects in
- // the mark-bitmap. So, if the first-chunk size is smaller than kPageSize,
+ // the mark-bitmap. So, if the first-chunk size is smaller than gPageSize,
// then we use the mark-bitmap for the remainder of the page.
uint8_t* const begin = bump_pointer_space_->Begin();
uint8_t* black_allocs = black_allocations_begin_;
@@ -2469,9 +2469,9 @@ void MarkCompact::UpdateMovingSpaceBlackAllocations() {
}
// Handle objects which cross page boundary, including objects larger
// than page size.
- if (remaining_chunk_size + obj_size >= kPageSize) {
+ if (remaining_chunk_size + obj_size >= gPageSize) {
set_mark_bit = false;
- first_chunk_size += kPageSize - remaining_chunk_size;
+ first_chunk_size += gPageSize - remaining_chunk_size;
remaining_chunk_size += obj_size;
// We should not store first-object and remaining_chunk_size if there were
// unused bytes before this TLAB, in which case we must have already
@@ -2481,13 +2481,13 @@ void MarkCompact::UpdateMovingSpaceBlackAllocations() {
first_objs_moving_space_[black_page_idx].Assign(first_obj);
}
black_page_idx++;
- remaining_chunk_size -= kPageSize;
+ remaining_chunk_size -= gPageSize;
// Consume an object larger than page size.
- while (remaining_chunk_size >= kPageSize) {
- black_alloc_pages_first_chunk_size_[black_page_idx] = kPageSize;
+ while (remaining_chunk_size >= gPageSize) {
+ black_alloc_pages_first_chunk_size_[black_page_idx] = gPageSize;
first_objs_moving_space_[black_page_idx].Assign(obj);
black_page_idx++;
- remaining_chunk_size -= kPageSize;
+ remaining_chunk_size -= gPageSize;
}
first_obj = remaining_chunk_size > 0 ? obj : nullptr;
first_chunk_size = remaining_chunk_size;
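A hypothetical sketch of the accounting in the loop above for an object larger than a page: every fully covered page records a whole-page first chunk and the same first object, and the leftover spills into the next page:

#include <cstddef>

struct PageInfo {
  size_t first_chunk_size;
  const void* first_obj;
};

// Spreads a large object's bytes across page slots; returns the bytes left
// over for the page that follows (mirrors the while-loop in the diff).
size_t SpreadLargeObject(PageInfo* pages,
                         size_t page_idx,
                         const void* obj,
                         size_t remaining_chunk_size,
                         size_t page_size) {
  while (remaining_chunk_size >= page_size) {
    pages[page_idx].first_chunk_size = page_size;
    pages[page_idx].first_obj = obj;
    ++page_idx;
    remaining_chunk_size -= page_size;
  }
  return remaining_chunk_size;
}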
@@ -2500,7 +2500,7 @@ void MarkCompact::UpdateMovingSpaceBlackAllocations() {
obj = reinterpret_cast<mirror::Object*>(black_allocs);
}
DCHECK_LE(black_allocs, block_end);
- DCHECK_LT(remaining_chunk_size, kPageSize);
+ DCHECK_LT(remaining_chunk_size, gPageSize);
// consume the unallocated portion of the block
if (black_allocs < block_end) {
// first-chunk of the current page ends here. Store it.
@@ -2510,20 +2510,20 @@ void MarkCompact::UpdateMovingSpaceBlackAllocations() {
}
first_chunk_size = 0;
first_obj = nullptr;
- size_t page_remaining = kPageSize - remaining_chunk_size;
+ size_t page_remaining = gPageSize - remaining_chunk_size;
size_t block_remaining = block_end - black_allocs;
if (page_remaining <= block_remaining) {
block_remaining -= page_remaining;
// current page and the subsequent empty pages in the block
- black_page_idx += 1 + block_remaining / kPageSize;
- remaining_chunk_size = block_remaining % kPageSize;
+ black_page_idx += 1 + block_remaining / gPageSize;
+ remaining_chunk_size = block_remaining % gPageSize;
} else {
remaining_chunk_size += block_remaining;
}
black_allocs = block_end;
}
}
- if (black_page_idx < bump_pointer_space_->Size() / kPageSize) {
+ if (black_page_idx < bump_pointer_space_->Size() / gPageSize) {
// Store the leftover first-chunk, if any, and update page index.
if (black_alloc_pages_first_chunk_size_[black_page_idx] > 0) {
black_page_idx++;
@@ -2571,22 +2571,22 @@ void MarkCompact::UpdateNonMovingSpaceBlackAllocations() {
non_moving_space_bitmap_->Set(obj);
// Clear so that we don't try to set the bit again in the next GC-cycle.
it->Clear();
- size_t idx = (reinterpret_cast<uint8_t*>(obj) - space_begin) / kPageSize;
- uint8_t* page_begin = AlignDown(reinterpret_cast<uint8_t*>(obj), kPageSize);
+ size_t idx = (reinterpret_cast<uint8_t*>(obj) - space_begin) / gPageSize;
+ uint8_t* page_begin = AlignDown(reinterpret_cast<uint8_t*>(obj), gPageSize);
mirror::Object* first_obj = first_objs_non_moving_space_[idx].AsMirrorPtr();
if (first_obj == nullptr
|| (obj < first_obj && reinterpret_cast<uint8_t*>(first_obj) > page_begin)) {
first_objs_non_moving_space_[idx].Assign(obj);
}
mirror::Object* next_page_first_obj = first_objs_non_moving_space_[++idx].AsMirrorPtr();
- uint8_t* next_page_begin = page_begin + kPageSize;
+ uint8_t* next_page_begin = page_begin + gPageSize;
if (next_page_first_obj == nullptr
|| reinterpret_cast<uint8_t*>(next_page_first_obj) > next_page_begin) {
size_t obj_size = RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kAlignment);
uint8_t* obj_end = reinterpret_cast<uint8_t*>(obj) + obj_size;
while (next_page_begin < obj_end) {
first_objs_non_moving_space_[idx++].Assign(obj);
- next_page_begin += kPageSize;
+ next_page_begin += gPageSize;
}
}
// update first_objs count in case we went past non_moving_first_objs_count_
@@ -2663,8 +2663,8 @@ class MarkCompact::LinearAllocPageUpdater {
void MultiObjectArena(uint8_t* page_begin, uint8_t* first_obj)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(first_obj != nullptr);
- DCHECK_ALIGNED_PARAM(page_begin, kPageSize);
- uint8_t* page_end = page_begin + kPageSize;
+ DCHECK_ALIGNED_PARAM(page_begin, gPageSize);
+ uint8_t* page_end = page_begin + gPageSize;
uint32_t obj_size;
for (uint8_t* byte = first_obj; byte < page_end;) {
TrackingHeader* header = reinterpret_cast<TrackingHeader*>(byte);
@@ -3076,7 +3076,7 @@ void MarkCompact::KernelPreparation() {
int mode = kCopyMode;
size_t moving_space_register_sz;
if (minor_fault_initialized_) {
- moving_space_register_sz = (moving_first_objs_count_ + black_page_count_) * kPageSize;
+ moving_space_register_sz = (moving_first_objs_count_ + black_page_count_) * gPageSize;
if (shadow_to_space_map_.IsValid()) {
size_t shadow_size = shadow_to_space_map_.Size();
void* addr = shadow_to_space_map_.Begin();
@@ -3190,14 +3190,14 @@ void MarkCompact::ConcurrentCompaction(uint8_t* buf) {
} else {
struct uffdio_range uffd_range;
uffd_range.start = msg.arg.pagefault.address;
- uffd_range.len = kPageSize;
+ uffd_range.len = gPageSize;
CHECK_EQ(ioctl(uffd_, UFFDIO_WAKE, &uffd_range), 0)
<< "ioctl_userfaultfd: wake failed for concurrent-compaction termination page: "
<< strerror(errno);
}
break;
}
- uint8_t* fault_page = AlignDown(fault_addr, kPageSize);
+ uint8_t* fault_page = AlignDown(fault_addr, gPageSize);
if (HasAddress(reinterpret_cast<mirror::Object*>(fault_addr))) {
ConcurrentlyProcessMovingPage<kMode>(fault_page, buf, nr_moving_space_used_pages);
} else if (minor_fault_initialized_) {
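A minimal sketch of the fault-handling entry point this hunk exercises: read one uffd_msg and align the faulting address down to its page using a runtime page size (illustrative; the real loop also handles non-pagefault events and the termination page):

#include <linux/userfaultfd.h>
#include <unistd.h>
#include <cstdint>
#include <cstddef>

// Returns the page-aligned fault address, or nullptr if no pagefault message
// was read.
uint8_t* ReadFaultPage(int uffd, size_t page_size) {
  struct uffd_msg msg;
  if (read(uffd, &msg, sizeof(msg)) != static_cast<ssize_t>(sizeof(msg)) ||
      msg.event != UFFD_EVENT_PAGEFAULT) {
    return nullptr;
  }
  uintptr_t fault_addr = static_cast<uintptr_t>(msg.arg.pagefault.address);
  return reinterpret_cast<uint8_t*>(
      fault_addr & ~(static_cast<uintptr_t>(page_size) - 1));
}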
@@ -3251,7 +3251,7 @@ bool MarkCompact::SigbusHandler(siginfo_t* info) {
}
ScopedInProgressCount spc(this);
- uint8_t* fault_page = AlignDown(reinterpret_cast<uint8_t*>(info->si_addr), kPageSize);
+ uint8_t* fault_page = AlignDown(reinterpret_cast<uint8_t*>(info->si_addr), gPageSize);
if (!spc.IsCompactionDone()) {
if (HasAddress(reinterpret_cast<mirror::Object*>(fault_page))) {
Thread* self = Thread::Current();
@@ -3325,8 +3325,8 @@ void MarkCompact::ConcurrentlyProcessMovingPage(uint8_t* fault_page,
};
uint8_t* unused_space_begin =
- bump_pointer_space_->Begin() + nr_moving_space_used_pages * kPageSize;
- DCHECK(IsAlignedParam(unused_space_begin, kPageSize));
+ bump_pointer_space_->Begin() + nr_moving_space_used_pages * gPageSize;
+ DCHECK(IsAlignedParam(unused_space_begin, gPageSize));
DCHECK(kMode == kCopyMode || fault_page < unused_space_begin);
if (kMode == kCopyMode && fault_page >= unused_space_begin) {
// There is a race which allows more than one thread to install a
@@ -3335,7 +3335,7 @@ void MarkCompact::ConcurrentlyProcessMovingPage(uint8_t* fault_page,
ZeropageIoctl(fault_page, /*tolerate_eexist=*/true, /*tolerate_enoent=*/true);
return;
}
- size_t page_idx = (fault_page - bump_pointer_space_->Begin()) / kPageSize;
+ size_t page_idx = (fault_page - bump_pointer_space_->Begin()) / gPageSize;
DCHECK_LT(page_idx, moving_first_objs_count_ + black_page_count_);
mirror::Object* first_obj = first_objs_moving_space_[page_idx].AsMirrorPtr();
if (first_obj == nullptr) {
@@ -3371,13 +3371,13 @@ void MarkCompact::ConcurrentlyProcessMovingPage(uint8_t* fault_page,
state, PageState::kMutatorProcessing, std::memory_order_acq_rel)) {
if (kMode == kMinorFaultMode) {
DCHECK_EQ(buf, nullptr);
- buf = shadow_to_space_map_.Begin() + page_idx * kPageSize;
+ buf = shadow_to_space_map_.Begin() + page_idx * gPageSize;
} else if (UNLIKELY(buf == nullptr)) {
DCHECK_EQ(kMode, kCopyMode);
uint16_t idx = compaction_buffer_counter_.fetch_add(1, std::memory_order_relaxed);
// The buffer-map is one page bigger as the first buffer is used by GC-thread.
CHECK_LE(idx, kMutatorCompactionBufferCount);
- buf = compaction_buffers_map_.Begin() + idx * kPageSize;
+ buf = compaction_buffers_map_.Begin() + idx * gPageSize;
DCHECK(compaction_buffers_map_.HasAddress(buf));
Thread::Current()->SetThreadLocalGcBuffer(buf);
}
@@ -3395,7 +3395,7 @@ void MarkCompact::ConcurrentlyProcessMovingPage(uint8_t* fault_page,
if (page_idx + 1 < moving_first_objs_count_ + black_page_count_) {
next_page_first_obj = first_objs_moving_space_[page_idx + 1].AsMirrorPtr();
}
- DCHECK(IsAlignedParam(pre_compact_page, kPageSize));
+ DCHECK(IsAlignedParam(pre_compact_page, gPageSize));
SlideBlackPage(first_obj,
next_page_first_obj,
first_chunk_size,
@@ -3523,7 +3523,7 @@ void MarkCompact::ConcurrentlyProcessLinearAllocPage(uint8_t* fault_page, bool i
}
DCHECK_NE(space_data, nullptr);
ptrdiff_t diff = space_data->shadow_.Begin() - space_data->begin_;
- size_t page_idx = (fault_page - space_data->begin_) / kPageSize;
+ size_t page_idx = (fault_page - space_data->begin_) / gPageSize;
Atomic<PageState>* state_arr =
reinterpret_cast<Atomic<PageState>*>(space_data->page_status_map_.Begin());
PageState state = state_arr[page_idx].load(use_uffd_sigbus_ ? std::memory_order_acquire :
@@ -3544,7 +3544,7 @@ void MarkCompact::ConcurrentlyProcessLinearAllocPage(uint8_t* fault_page, bool i
if (first_obj != nullptr) {
updater.MultiObjectArena(fault_page + diff, first_obj + diff);
} else {
- updater.SingleObjectArena(fault_page + diff, kPageSize);
+ updater.SingleObjectArena(fault_page + diff, gPageSize);
}
if (kMode == kCopyMode) {
MapUpdatedLinearAllocPage(fault_page,
@@ -3634,7 +3634,7 @@ void MarkCompact::ProcessLinearAlloc() {
continue;
}
uint8_t* last_byte = pair.second;
- DCHECK_ALIGNED_PARAM(last_byte, kPageSize);
+ DCHECK_ALIGNED_PARAM(last_byte, gPageSize);
others_processing = false;
arena_begin = arena->Begin();
arena_size = arena->Size();
@@ -3658,7 +3658,7 @@ void MarkCompact::ProcessLinearAlloc() {
return;
}
LinearAllocPageUpdater updater(this);
- size_t page_idx = (page_begin - space_data->begin_) / kPageSize;
+ size_t page_idx = (page_begin - space_data->begin_) / gPageSize;
DCHECK_LT(page_idx, space_data->page_status_map_.Size());
Atomic<PageState>* state_arr =
reinterpret_cast<Atomic<PageState>*>(space_data->page_status_map_.Begin());
@@ -3675,7 +3675,7 @@ void MarkCompact::ProcessLinearAlloc() {
if (first_obj != nullptr) {
updater.MultiObjectArena(page_begin + diff, first_obj + diff);
} else {
- DCHECK_EQ(page_size, kPageSize);
+ DCHECK_EQ(page_size, gPageSize);
updater.SingleObjectArena(page_begin + diff, page_size);
}
expected_state = PageState::kProcessing;
@@ -3751,7 +3751,7 @@ void MarkCompact::CompactionPhase() {
}
size_t moving_space_size = bump_pointer_space_->Capacity();
- size_t used_size = (moving_first_objs_count_ + black_page_count_) * kPageSize;
+ size_t used_size = (moving_first_objs_count_ + black_page_count_) * gPageSize;
if (CanCompactMovingSpaceWithMinorFault()) {
CompactMovingSpace<kMinorFaultMode>(/*page=*/nullptr);
} else {
@@ -3842,11 +3842,11 @@ void MarkCompact::CompactionPhase() {
count &= ~kSigbusCounterCompactionDoneMask;
}
} else {
- DCHECK(IsAlignedParam(conc_compaction_termination_page_, kPageSize));
+ DCHECK(IsAlignedParam(conc_compaction_termination_page_, gPageSize));
// We will only iterate once if gKernelHasFaultRetry is true.
do {
// madvise the page so that we can get userfaults on it.
- ZeroAndReleaseMemory(conc_compaction_termination_page_, kPageSize);
+ ZeroAndReleaseMemory(conc_compaction_termination_page_, gPageSize);
// The following load triggers 'special' userfaults. When received by the
// thread-pool workers, they will exit out of the compaction task. This fault
// happens because we madvised the page.
@@ -4432,14 +4432,14 @@ void MarkCompact::FinishPhase() {
// physical memory because we already madvised it above and then we triggered a read
// userfault, which maps a special zero-page.
if (use_uffd_sigbus_ || !minor_fault_initialized_ || !shadow_to_space_map_.IsValid() ||
- shadow_to_space_map_.Size() < (moving_first_objs_count_ + black_page_count_) * kPageSize) {
- size_t adjustment = use_uffd_sigbus_ ? 0 : kPageSize;
+ shadow_to_space_map_.Size() < (moving_first_objs_count_ + black_page_count_) * gPageSize) {
+ size_t adjustment = use_uffd_sigbus_ ? 0 : gPageSize;
ZeroAndReleaseMemory(compaction_buffers_map_.Begin() + adjustment,
compaction_buffers_map_.Size() - adjustment);
} else if (shadow_to_space_map_.Size() == bump_pointer_space_->Capacity()) {
// Now that we are going to use minor-faults from next GC cycle, we can
// unmap the buffers used by worker threads.
- compaction_buffers_map_.SetSize(kPageSize);
+ compaction_buffers_map_.SetSize(gPageSize);
}
info_map_.MadviseDontNeedAndZero();
live_words_bitmap_->ClearBitmap();
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 8835a0cb54..4f0e6e04fa 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -333,8 +333,8 @@ class MarkCompact final : public GarbageCollector {
// during concurrent compaction.
void PrepareForCompaction() REQUIRES_SHARED(Locks::mutator_lock_);
- // Copy kPageSize live bytes starting from 'offset' (within the moving space),
- // which must be within 'obj', into the kPageSize sized memory pointed by 'addr'.
+ // Copy gPageSize live bytes starting from 'offset' (within the moving space),
+ // which must be within 'obj', into the gPageSize sized memory pointed by 'addr'.
// Then update the references within the copied objects. The boundary objects are
// partially updated such that only the references that lie in the page are updated.
// This is necessary to avoid cascading userfaults.
@@ -382,7 +382,7 @@ class MarkCompact final : public GarbageCollector {
  // Slides the black page in the moving space (retaining the empty holes, which
  // are usually part of some in-use TLAB). 'first_obj' is the object that overlaps with
// the first byte of the page being slid. pre_compact_page is the pre-compact
- // address of the page being slid. 'dest' is the kPageSize sized memory where
+ // address of the page being slid. 'dest' is the gPageSize sized memory where
// the contents would be copied.
void SlideBlackPage(mirror::Object* first_obj,
mirror::Object* next_page_first_obj,
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 99de1054e4..aadc9e43a5 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -105,7 +105,7 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"mark sweep sweep array free buffer",
- RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
+ RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), gPageSize),
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&error_msg);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 419b8421d1..397271b03d 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -375,7 +375,7 @@ inline void SemiSpace::MarkStackPush(Object* obj) {
}
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
- if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
+ if (LIKELY(size <= static_cast<size_t>(gPageSize))) {
// We will dirty the current page and somewhere in the middle of the next page. This means
// that the next object copied will also dirty that page.
// TODO: Worth considering the last object copied? We may end up dirtying one page which is
@@ -393,19 +393,19 @@ static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size
// Process the start of the page. The page must already be dirty, don't bother with checking.
const uint8_t* byte_src = reinterpret_cast<const uint8_t*>(src);
const uint8_t* limit = byte_src + size;
- size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
+ size_t page_remain = AlignUp(byte_dest, gPageSize) - byte_dest;
// Copy the bytes until the start of the next page.
memcpy(dest, src, page_remain);
byte_src += page_remain;
byte_dest += page_remain;
- DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
+ DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(byte_dest), gPageSize);
DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
- while (byte_src + kPageSize < limit) {
+ while (byte_src + gPageSize < limit) {
bool all_zero = true;
uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
- for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
+ for (size_t i = 0; i < gPageSize / sizeof(*word_src); ++i) {
// Assumes the destination of the copy is all zeros.
if (word_src[i] != 0) {
all_zero = false;
@@ -414,10 +414,10 @@ static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size
}
if (all_zero) {
// Avoided copying into the page since it was all zeros.
- saved_bytes += kPageSize;
+ saved_bytes += gPageSize;
}
- byte_src += kPageSize;
- byte_dest += kPageSize;
+ byte_src += gPageSize;
+ byte_dest += gPageSize;
}
// Handle the part of the page at the end.
memcpy(byte_dest, byte_src, limit - byte_src);
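A sketch of the skip-all-zero-pages idea behind CopyAvoidingDirtyingPages, written against a runtime page size; it assumes the destination pages start out zeroed (fresh anonymous mappings), so skipping the memcpy leaves them correct:

#include <cstdint>
#include <cstring>
#include <cstddef>

// Copies num_pages pages from src to dest, skipping pages that are all zero.
// Returns the number of bytes whose copy was avoided.
size_t CopySkippingZeroPages(uint8_t* dest, const uint8_t* src,
                             size_t num_pages, size_t page_size) {
  size_t saved_bytes = 0;
  for (size_t i = 0; i < num_pages; ++i) {
    const uint8_t* page = src + i * page_size;
    bool all_zero = true;
    for (size_t off = 0; off < page_size; off += sizeof(uintptr_t)) {
      uintptr_t word;
      std::memcpy(&word, page + off, sizeof(word));  // memcpy avoids alignment UB.
      if (word != 0) {
        all_zero = false;
        break;
      }
    }
    if (all_zero) {
      saved_bytes += page_size;  // Destination page stays untouched (and clean).
    } else {
      std::memcpy(dest + i * page_size, page, page_size);
    }
  }
  return saved_bytes;
}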
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6e08a8380b..b4f703ea07 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -492,7 +492,7 @@ Heap::Heap(size_t initial_size,
} else if (foreground_collector_type_ != kCollectorTypeCC && is_zygote) {
heap_reservation_size = capacity_;
}
- heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
+ heap_reservation_size = RoundUp(heap_reservation_size, gPageSize);
// Load image space(s).
std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
MemMap heap_reservation;
@@ -1028,12 +1028,12 @@ void Heap::EnsureObjectUserfaulted(ObjPtr<mirror::Object> obj) {
if (gUseUserfaultfd) {
// Use volatile to ensure that compiler loads from memory to trigger userfaults, if required.
const uint8_t* start = reinterpret_cast<uint8_t*>(obj.Ptr());
- const uint8_t* end = AlignUp(start + obj->SizeOf(), kPageSize);
+ const uint8_t* end = AlignUp(start + obj->SizeOf(), gPageSize);
// The first page is already touched by SizeOf().
- start += kPageSize;
+ start += gPageSize;
while (start < end) {
ForceRead(start);
- start += kPageSize;
+ start += gPageSize;
}
}
}
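A minimal sketch of the page-touching idea used here, assuming a ForceRead-style helper that performs a volatile load so the compiler cannot elide it:

#include <cstdint>
#include <cstddef>

// Volatile load: forces an actual memory access on the given address.
inline void ForceRead(const uint8_t* addr) {
  const volatile uint8_t* vp = addr;
  (void)*vp;
}

// Touches one byte per page so each page is faulted in (triggering userfaults
// where the pages are not yet mapped).
void TouchEveryPage(const uint8_t* begin, size_t num_bytes, size_t page_size) {
  const uint8_t* end = begin + num_bytes;
  for (const uint8_t* p = begin; p < end; p += page_size) {
    ForceRead(p);
  }
}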
@@ -4516,7 +4516,7 @@ mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
// TODO: for large allocations, which are rare, maybe we should allocate
// that object and return. There is no need to revoke the current TLAB,
// particularly if it's mostly unutilized.
- size_t next_tlab_size = RoundDown(alloc_size + kDefaultTLABSize, kPageSize) - alloc_size;
+ size_t next_tlab_size = RoundDown(alloc_size + kDefaultTLABSize, gPageSize) - alloc_size;
if (jhp_enabled) {
next_tlab_size = JHPCalculateNextTlabSize(
self, next_tlab_size, alloc_size, &take_sample, &bytes_until_sample);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 866e95de5b..b823e6564a 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -189,7 +189,7 @@ class Heap {
// Starting size of DlMalloc/RosAlloc spaces.
static size_t GetDefaultStartingSize() {
- return kPageSize;
+ return gPageSize;
}
// Whether the transition-GC heap threshold condition applies or not for non-low memory devices.
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index c63559a555..aa85ba1247 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -25,7 +25,7 @@ namespace gc {
namespace space {
BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity) {
- capacity = RoundUp(capacity, kPageSize);
+ capacity = RoundUp(capacity, gPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
capacity,
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 1edcdbdf91..e5253cd697 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -126,7 +126,7 @@ DlMallocSpace* DlMallocSpace::Create(const std::string& name,
// Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
- size_t starting_size = kPageSize;
+ size_t starting_size = gPageSize;
MemMap mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 0c96e2b8cf..3f79a04a27 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -3773,8 +3773,8 @@ void ImageSpace::ReleaseMetadata() {
const ImageSection& metadata = GetImageHeader().GetMetadataSection();
VLOG(image) << "Releasing " << metadata.Size() << " image metadata bytes";
// Avoid using ZeroAndReleasePages since the zero fill might not be word atomic.
- uint8_t* const page_begin = AlignUp(Begin() + metadata.Offset(), kPageSize);
- uint8_t* const page_end = AlignDown(Begin() + metadata.End(), kPageSize);
+ uint8_t* const page_begin = AlignUp(Begin() + metadata.Offset(), gPageSize);
+ uint8_t* const page_end = AlignDown(Begin() + metadata.End(), gPageSize);
if (page_begin < page_end) {
CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
}
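A sketch of the pattern used here: release only the pages fully contained in [begin, end) by aligning the start up and the end down to a runtime page size:

#include <sys/mman.h>
#include <cstdint>
#include <cstddef>

void ReleaseInteriorPages(uint8_t* begin, uint8_t* end, size_t page_size) {
  uintptr_t b = reinterpret_cast<uintptr_t>(begin);
  uintptr_t e = reinterpret_cast<uintptr_t>(end);
  uintptr_t page_begin = (b + page_size - 1) & ~(static_cast<uintptr_t>(page_size) - 1);
  uintptr_t page_end = e & ~(static_cast<uintptr_t>(page_size) - 1);
  if (page_begin < page_end) {
    // Only whole pages inside the range are returned to the kernel.
    madvise(reinterpret_cast<void*>(page_begin), page_end - page_begin, MADV_DONTNEED);
  }
}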
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index ea567f5d29..e0d747ff37 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -89,7 +89,7 @@ class MemoryToolLargeObjectMapSpace final : public LargeObjectMapSpace {
private:
static size_t MemoryToolRedZoneBytes() {
- return kPageSize;
+ return gPageSize;
}
static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 2a32b9b5cd..b25c4fab38 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -102,8 +102,8 @@ MemMap MallocSpace::CreateMemMap(const std::string& name,
}
// Page align growth limit and capacity which will be used to manage mmapped storage
- *growth_limit = RoundUp(*growth_limit, kPageSize);
- *capacity = RoundUp(*capacity, kPageSize);
+ *growth_limit = RoundUp(*growth_limit, gPageSize);
+ *capacity = RoundUp(*capacity, gPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
@@ -140,7 +140,7 @@ void MallocSpace::RegisterRecentFree(mirror::Object* ptr) {
}
void MallocSpace::SetGrowthLimit(size_t growth_limit) {
- growth_limit = RoundUp(growth_limit, kPageSize);
+ growth_limit = RoundUp(growth_limit, gPageSize);
growth_limit_ = growth_limit;
if (Size() > growth_limit_) {
SetEnd(begin_ + growth_limit);
@@ -183,12 +183,12 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
// alloc space so that we won't mix thread local runs from different
// alloc spaces.
RevokeAllThreadLocalBuffers();
- SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
+ SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), gPageSize)));
DCHECK_ALIGNED(begin_, accounting::CardTable::kCardSize);
DCHECK_ALIGNED(End(), accounting::CardTable::kCardSize);
- DCHECK_ALIGNED_PARAM(begin_, kPageSize);
- DCHECK_ALIGNED_PARAM(End(), kPageSize);
- size_t size = RoundUp(Size(), kPageSize);
+ DCHECK_ALIGNED_PARAM(begin_, gPageSize);
+ DCHECK_ALIGNED_PARAM(End(), gPageSize);
+ size_t size = RoundUp(Size(), gPageSize);
// Trimming the heap should be done by the caller since we may have invalidated the accounting
// stored in between objects.
// Remaining size is for the new alloc space.
@@ -200,7 +200,7 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
<< "Size " << size << "\n"
<< "GrowthLimit " << growth_limit_ << "\n"
<< "Capacity " << Capacity();
- SetGrowthLimit(RoundUp(size, kPageSize));
+ SetGrowthLimit(RoundUp(size, gPageSize));
// FIXME: Do we need reference counted pointers here?
// Make the two spaces share the same mark bitmaps since the bitmaps span both of the spaces.
VLOG(heap) << "Creating new AllocSpace: ";
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index c25770cb70..49ea2d64e6 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -405,8 +405,8 @@ void RegionSpace::ReleaseFreeRegions() {
for (size_t i = 0u; i < num_regions_; ++i) {
if (regions_[i].IsFree()) {
uint8_t* begin = regions_[i].Begin();
- DCHECK_ALIGNED_PARAM(begin, kPageSize);
- DCHECK_ALIGNED_PARAM(regions_[i].End(), kPageSize);
+ DCHECK_ALIGNED_PARAM(begin, gPageSize);
+ DCHECK_ALIGNED_PARAM(regions_[i].End(), gPageSize);
bool res = madvise(begin, regions_[i].End() - begin, MADV_DONTNEED);
CHECK_NE(res, -1) << "madvise failed";
}
diff --git a/runtime/image.cc b/runtime/image.cc
index 170a5760a4..add01fb8e7 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -105,7 +105,7 @@ void ImageHeader::RelocateImageReferences(int64_t delta) {
// to be done in alignment with the dynamic linker's ELF loader as
// otherwise inconsistency would still be possible e.g. when using
// `dlopen`-like calls to load OAT files.
- CHECK_ALIGNED_PARAM(delta, kPageSize) << "relocation delta must be page aligned";
+ CHECK_ALIGNED_PARAM(delta, gPageSize) << "relocation delta must be page aligned";
oat_file_begin_ += delta;
oat_data_begin_ += delta;
oat_data_end_ += delta;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 96e51ce87c..b5d75207f0 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -96,7 +96,7 @@ bool IndirectReferenceTable::Initialize(size_t max_count, std::string* error_msg
// Overflow and maximum check.
CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));
- const size_t table_bytes = RoundUp(max_count * sizeof(IrtEntry), kPageSize);
+ const size_t table_bytes = RoundUp(max_count * sizeof(IrtEntry), gPageSize);
table_mem_map_ = NewIRTMap(table_bytes, error_msg);
if (!table_mem_map_.IsValid()) {
DCHECK(!error_msg->empty());
@@ -314,11 +314,11 @@ void IndirectReferenceTable::Trim() {
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK(table_mem_map_.IsValid());
const size_t top_index = Capacity();
- uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
+ uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), gPageSize);
uint8_t* release_end = static_cast<uint8_t*>(table_mem_map_.BaseEnd());
DCHECK_GE(reinterpret_cast<uintptr_t>(release_end), reinterpret_cast<uintptr_t>(release_start));
- DCHECK_ALIGNED_PARAM(release_end, kPageSize);
- DCHECK_ALIGNED_PARAM(release_end - release_start, kPageSize);
+ DCHECK_ALIGNED_PARAM(release_end, gPageSize);
+ DCHECK_ALIGNED_PARAM(release_end - release_start, gPageSize);
if (release_start != release_end) {
madvise(release_start, release_end - release_start, MADV_DONTNEED);
}
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index c4877f5ca6..8030459f01 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -612,9 +612,9 @@ void Jit::NotifyZygoteCompilationDone() {
  // within a page range. For methods that fall above or below the range,
// the child processes will copy their contents to their private mapping
// in `child_mapping_methods`. See `MapBootImageMethods`.
- uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
+ uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
uint8_t* page_end =
- AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
+ AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
if (page_end > page_start) {
uint64_t capacity = page_end - page_start;
memcpy(zygote_mapping_methods_.Begin() + offset, page_start, capacity);
@@ -671,9 +671,9 @@ void Jit::NotifyZygoteCompilationDone() {
  // within a page range. For methods that fall above or below the range,
// the child processes will copy their contents to their private mapping
// in `child_mapping_methods`. See `MapBootImageMethods`.
- uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
+ uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
uint8_t* page_end =
- AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
+ AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
if (page_end > page_start) {
uint64_t capacity = page_end - page_start;
if (memcmp(child_mapping_methods.Begin() + offset, page_start, capacity) != 0) {
@@ -699,9 +699,9 @@ void Jit::NotifyZygoteCompilationDone() {
  // within a page range. For methods that fall above or below the range,
// the child processes will copy their contents to their private mapping
// in `child_mapping_methods`. See `MapBootImageMethods`.
- uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
+ uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
uint8_t* page_end =
- AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
+ AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
if (page_end > page_start) {
uint64_t capacity = page_end - page_start;
if (mremap(child_mapping_methods.Begin() + offset,
@@ -850,8 +850,8 @@ class JitDoneCompilingProfileTask final : public SelfDeletingTask {
// Madvise DONTNEED dex files now that we're done compiling methods.
for (const DexFile* dex_file : dex_files_) {
if (IsAddressKnownBackedByFileOrShared(dex_file->Begin())) {
- int result = madvise(const_cast<uint8_t*>(AlignDown(dex_file->Begin(), kPageSize)),
- RoundUp(dex_file->Size(), kPageSize),
+ int result = madvise(const_cast<uint8_t*>(AlignDown(dex_file->Begin(), gPageSize)),
+ RoundUp(dex_file->Size(), gPageSize),
MADV_DONTNEED);
if (result == -1) {
PLOG(WARNING) << "Madvise failed";
@@ -1102,9 +1102,9 @@ void Jit::MapBootImageMethods() {
for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
const ImageHeader& header = space->GetImageHeader();
const ImageSection& section = header.GetMethodsSection();
- uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
+ uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
uint8_t* page_end =
- AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
+ AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
if (page_end <= page_start) {
// Section doesn't contain one aligned entire page.
continue;
@@ -1223,9 +1223,9 @@ void Jit::CreateThreadPool() {
const ImageHeader& header = space->GetImageHeader();
const ImageSection& section = header.GetMethodsSection();
// Mappings need to be at the page level.
- uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
+ uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), gPageSize);
uint8_t* page_end =
- AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
+ AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), gPageSize);
if (page_end > page_start) {
total_capacity += (page_end - page_start);
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 96db8aeba8..88b54d5a5b 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -188,7 +188,7 @@ class JitCodeCache {
// collection. It should be at least two pages, however, as the storage is split
// into data and code sections with sizes that should be aligned to page size each
// as that's the unit mspaces use. See also: JitMemoryRegion::Initialize.
- return std::max(kIsDebugBuild ? 8 * KB : 64 * KB, 2 * kPageSize);
+ return std::max(kIsDebugBuild ? 8 * KB : 64 * KB, 2 * gPageSize);
}
// Reserved capacity of the JIT code cache.
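In other words, the initial capacity becomes a computed bound rather than a compile-time constant. A minimal sketch of the rule (hypothetical helper, not the actual accessor):

#include <algorithm>
#include <cstddef>

constexpr size_t KB = 1024;

// The initial JIT cache must cover at least two pages (data + code sections),
// so the minimum scales with the runtime page size.
size_t InitialJitCapacity(bool is_debug_build, size_t page_size) {
  const size_t base = is_debug_build ? 8 * KB : 64 * KB;
  return std::max(base, 2 * page_size);
}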
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index 961ecbb9be..911b7d7aca 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -52,8 +52,8 @@ bool JitMemoryRegion::Initialize(size_t initial_capacity,
CHECK_GE(max_capacity, initial_capacity);
CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
// Align both capacities to page size, as that's the unit mspaces use.
- initial_capacity_ = RoundDown(initial_capacity, 2 * kPageSize);
- max_capacity_ = RoundDown(max_capacity, 2 * kPageSize);
+ initial_capacity_ = RoundDown(initial_capacity, 2 * gPageSize);
+ max_capacity_ = RoundDown(max_capacity, 2 * gPageSize);
current_capacity_ = initial_capacity,
data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
exec_end_ = initial_capacity - data_end_;
@@ -276,7 +276,7 @@ bool JitMemoryRegion::Initialize(size_t initial_capacity,
// Allow mspace to use the full data capacity.
  // It will still only use as little memory as possible and ask for MoreCore as needed.
- CHECK(IsAlignedParam(data_capacity, kPageSize));
+ CHECK(IsAlignedParam(data_capacity, gPageSize));
mspace_set_footprint_limit(data_mspace_, data_capacity);
// Initialize the code heap.
@@ -304,7 +304,7 @@ bool JitMemoryRegion::Initialize(size_t initial_capacity,
void JitMemoryRegion::SetFootprintLimit(size_t new_footprint) {
size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
- DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
+ DCHECK(IsAlignedParam(data_space_footprint, gPageSize));
DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
if (HasCodeMapping()) {
ScopedCodeCacheWrite scc(*this);
diff --git a/runtime/jit/jit_memory_region_test.cc b/runtime/jit/jit_memory_region_test.cc
index a77ea8128e..2f8ae2ca0f 100644
--- a/runtime/jit/jit_memory_region_test.cc
+++ b/runtime/jit/jit_memory_region_test.cc
@@ -58,13 +58,13 @@ class TestZygoteMemory : public testing::Test {
// Zygote JIT memory only works on kernels that don't segfault on flush.
TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
std::string error_msg;
- size_t size = kPageSize;
+ size_t size = gPageSize;
android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
CHECK_NE(fd.get(), -1);
// Create a writable mapping.
int32_t* addr = reinterpret_cast<int32_t*>(
- mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
CHECK(addr != nullptr);
CHECK_NE(addr, MAP_FAILED);
@@ -82,7 +82,7 @@ class TestZygoteMemory : public testing::Test {
// Test that we cannot create another writable mapping.
int32_t* addr2 = reinterpret_cast<int32_t*>(
- mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
CHECK_EQ(addr2, MAP_FAILED);
// With the existing mapping, we can toggle read/write.
@@ -92,14 +92,14 @@ class TestZygoteMemory : public testing::Test {
// Test mremap with old_size = 0. From the man pages:
// If the value of old_size is zero, and old_address refers to a shareable mapping
// (see mmap(2) MAP_SHARED), then mremap() will create a new mapping of the same pages.
- addr2 = reinterpret_cast<int32_t*>(mremap(addr, 0, kPageSize, MREMAP_MAYMOVE));
+ addr2 = reinterpret_cast<int32_t*>(mremap(addr, 0, gPageSize, MREMAP_MAYMOVE));
CHECK_NE(addr2, MAP_FAILED);
// Test that we can write into the remapped mapping.
addr2[0] = 3;
CHECK_EQ(addr2[0], 3);
- addr2 = reinterpret_cast<int32_t*>(mremap(addr, kPageSize, 2 * kPageSize, MREMAP_MAYMOVE));
+ addr2 = reinterpret_cast<int32_t*>(mremap(addr, gPageSize, 2 * gPageSize, MREMAP_MAYMOVE));
CHECK_NE(addr2, MAP_FAILED);
// Test that we can write into the remapped mapping.
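A sketch of the mremap(old_size = 0) trick this test exercises: with a shareable mapping, mremap creates a second mapping of the same pages instead of moving the original (illustrative helper, not test code):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE  // mremap is a GNU extension on glibc.
#endif
#include <sys/mman.h>
#include <cstddef>

// Duplicates an existing MAP_SHARED mapping of one page; returns the new
// mapping or nullptr on failure.
void* DuplicateSharedMapping(void* existing, size_t page_size) {
  void* dup = mremap(existing, /*old_size=*/0, page_size, MREMAP_MAYMOVE);
  return dup == MAP_FAILED ? nullptr : dup;
}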
@@ -111,7 +111,7 @@ class TestZygoteMemory : public testing::Test {
// Zygote JIT memory only works on kernels that don't segfault on flush.
TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
std::string error_msg;
- size_t size = kPageSize;
+ size_t size = gPageSize;
int32_t* addr = nullptr;
int32_t* addr2 = nullptr;
{
@@ -120,7 +120,7 @@ class TestZygoteMemory : public testing::Test {
// Create a writable mapping.
addr = reinterpret_cast<int32_t*>(
- mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
CHECK(addr != nullptr);
CHECK_NE(addr, MAP_FAILED);
@@ -130,7 +130,7 @@ class TestZygoteMemory : public testing::Test {
// Create a read-only mapping.
addr2 = reinterpret_cast<int32_t*>(
- mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
CHECK(addr2 != nullptr);
// Protect the memory.
@@ -144,7 +144,7 @@ class TestZygoteMemory : public testing::Test {
android::base::unique_fd fd2(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
CHECK_NE(fd2.get(), -1);
std::atomic<int32_t>* shared = reinterpret_cast<std::atomic<int32_t>*>(
- mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));
// Values used for the tests below.
const int32_t parent_value = 66;
@@ -163,7 +163,7 @@ class TestZygoteMemory : public testing::Test {
CHECK_EQ(addr2[0], child_value);
  // Unmap the writable mapping.
- munmap(addr, kPageSize);
+ munmap(addr, gPageSize);
CHECK_EQ(addr2[0], child_value);
@@ -198,9 +198,9 @@ class TestZygoteMemory : public testing::Test {
CHECK_EQ(WEXITSTATUS(status), kReturnFromFault);
CHECK_EQ(addr[0], parent_value);
CHECK_EQ(addr2[0], parent_value);
- munmap(addr, kPageSize);
- munmap(addr2, kPageSize);
- munmap(shared, kPageSize);
+ munmap(addr, gPageSize);
+ munmap(addr2, gPageSize);
+ munmap(shared, gPageSize);
}
}
@@ -208,7 +208,7 @@ class TestZygoteMemory : public testing::Test {
// Zygote JIT memory only works on kernels that don't segfault on flush.
TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
std::string error_msg;
- size_t size = kPageSize;
+ size_t size = gPageSize;
int32_t* addr = nullptr;
int32_t* addr2 = nullptr;
{
@@ -217,10 +217,10 @@ class TestZygoteMemory : public testing::Test {
// Create a writable mapping.
addr = reinterpret_cast<int32_t*>(
- mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
CHECK(addr != nullptr);
CHECK_NE(addr, MAP_FAILED);
- CHECK_EQ(madvise(addr, kPageSize, MADV_DONTFORK), 0);
+ CHECK_EQ(madvise(addr, gPageSize, MADV_DONTFORK), 0);
// Test that we can write into the mapping.
addr[0] = 42;
@@ -228,7 +228,7 @@ class TestZygoteMemory : public testing::Test {
// Create a read-only mapping.
addr2 = reinterpret_cast<int32_t*>(
- mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
CHECK(addr2 != nullptr);
// Protect the memory.
@@ -242,7 +242,7 @@ class TestZygoteMemory : public testing::Test {
android::base::unique_fd fd2(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
CHECK_NE(fd2.get(), -1);
std::atomic<int32_t>* shared = reinterpret_cast<std::atomic<int32_t>*>(
- mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));
// Values used for the tests below.
const int32_t parent_value = 66;
@@ -288,9 +288,9 @@ class TestZygoteMemory : public testing::Test {
CHECK_EQ(addr[0], parent_value);
CHECK_EQ(addr2[0], parent_value);
- munmap(addr, kPageSize);
- munmap(addr2, kPageSize);
- munmap(shared, kPageSize);
+ munmap(addr, gPageSize);
+ munmap(addr2, gPageSize);
+ munmap(shared, gPageSize);
}
}
@@ -307,14 +307,14 @@ class TestZygoteMemory : public testing::Test {
return;
}
std::string error_msg;
- size_t size = kPageSize;
+ size_t size = gPageSize;
int32_t* addr = nullptr;
android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
CHECK_NE(fd.get(), -1);
// Create a writable mapping.
addr = reinterpret_cast<int32_t*>(
- mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
CHECK(addr != nullptr);
CHECK_NE(addr, MAP_FAILED);
@@ -326,7 +326,7 @@ class TestZygoteMemory : public testing::Test {
android::base::unique_fd fd2(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
CHECK_NE(fd2.get(), -1);
std::atomic<int32_t>* shared = reinterpret_cast<std::atomic<int32_t>*>(
- mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));
// Protect the memory.
CHECK(JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg));
@@ -342,7 +342,7 @@ class TestZygoteMemory : public testing::Test {
shared[0] = 0;
pid_t pid = fork();
if (pid == 0) {
- CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+ CHECK_EQ(mmap(addr, gPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
addr);
addr[0] = child_value;
exit(0);
@@ -361,18 +361,18 @@ class TestZygoteMemory : public testing::Test {
if (pid == 0) {
// Map it private with write access. MAP_FIXED will replace the existing
// mapping.
- CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+ CHECK_EQ(mmap(addr, gPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
addr);
addr[0] = child_value;
CHECK_EQ(addr[0], child_value);
// Check that mapping shared with write access fails.
- CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd.get(), 0),
+ CHECK_EQ(mmap(addr, gPageSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd.get(), 0),
MAP_FAILED);
CHECK_EQ(errno, EPERM);
// Map shared with read access.
- CHECK_EQ(mmap(addr, kPageSize, PROT_READ, MAP_SHARED | MAP_FIXED, fd.get(), 0), addr);
+ CHECK_EQ(mmap(addr, gPageSize, PROT_READ, MAP_SHARED | MAP_FIXED, fd.get(), 0), addr);
CHECK_NE(addr[0], child_value);
// Wait for the parent to notify.
@@ -385,13 +385,13 @@ class TestZygoteMemory : public testing::Test {
shared[0] = 2;
// Map it private again.
- CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+ CHECK_EQ(mmap(addr, gPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
addr);
addr[0] = child_value + 1;
CHECK_EQ(addr[0], child_value + 1);
// And map it back shared.
- CHECK_EQ(mmap(addr, kPageSize, PROT_READ, MAP_SHARED | MAP_FIXED, fd.get(), 0), addr);
+ CHECK_EQ(mmap(addr, gPageSize, PROT_READ, MAP_SHARED | MAP_FIXED, fd.get(), 0), addr);
while (shared[0] != 3) {
sched_yield();
}
@@ -425,7 +425,7 @@ class TestZygoteMemory : public testing::Test {
addr[0] = starting_value;
pid = fork();
if (pid == 0) {
- CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+ CHECK_EQ(mmap(addr, gPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
addr);
CHECK_EQ(addr[0], starting_value);
addr[0] = child_value;
@@ -442,7 +442,7 @@ class TestZygoteMemory : public testing::Test {
CHECK_EQ(addr[0], child_value);
// Test the buffer contains the parent data after a new mmap.
- CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+ CHECK_EQ(mmap(addr, gPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
addr);
CHECK_EQ(addr[0], parent_value);
exit(0);
@@ -467,7 +467,7 @@ class TestZygoteMemory : public testing::Test {
addr[0] = starting_value;
pid = fork();
if (pid == 0) {
- CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+ CHECK_EQ(mmap(addr, gPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
addr);
CHECK_EQ(addr[0], starting_value);
// Notify the parent for a new update of the buffer.
@@ -489,8 +489,8 @@ class TestZygoteMemory : public testing::Test {
CHECK(WIFEXITED(status)) << strerror(errno);
CHECK_EQ(addr[0], parent_value);
}
- munmap(addr, kPageSize);
- munmap(shared, kPageSize);
+ munmap(addr, gPageSize);
+ munmap(shared, gPageSize);
}
// Test that a readable mapping created before sealing future writes can be
@@ -499,7 +499,7 @@ class TestZygoteMemory : public testing::Test {
// Zygote JIT memory only works on kernels that don't segfault on flush.
TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
std::string error_msg;
- size_t size = kPageSize;
+ size_t size = gPageSize;
int32_t* addr = nullptr;
{
android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
@@ -507,7 +507,7 @@ class TestZygoteMemory : public testing::Test {
// Create a shared readable mapping.
addr = reinterpret_cast<int32_t*>(
- mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
CHECK(addr != nullptr);
CHECK_NE(addr, MAP_FAILED);
@@ -517,7 +517,7 @@ class TestZygoteMemory : public testing::Test {
}
// At this point, the fd has been dropped, but the memory mappings are still
// there.
- int res = mprotect(addr, kPageSize, PROT_WRITE);
+ int res = mprotect(addr, gPageSize, PROT_WRITE);
CHECK_EQ(res, 0);
}
@@ -526,7 +526,7 @@ class TestZygoteMemory : public testing::Test {
// Zygote JIT memory only works on kernels that don't segfault on flush.
TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
std::string error_msg;
- size_t size = kPageSize;
+ size_t size = gPageSize;
int32_t* addr = nullptr;
{
android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
@@ -538,13 +538,13 @@ class TestZygoteMemory : public testing::Test {
// Create a shared readable mapping.
addr = reinterpret_cast<int32_t*>(
- mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
+ mmap(nullptr, gPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
CHECK(addr != nullptr);
CHECK_NE(addr, MAP_FAILED);
}
// At this point, the fd has been dropped, but the memory mappings are still
// there.
- int res = mprotect(addr, kPageSize, PROT_WRITE);
+ int res = mprotect(addr, gPageSize, PROT_WRITE);
CHECK_EQ(res, -1);
CHECK_EQ(errno, EACCES);
}
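
Note: the hunks above all exercise the same pattern: create fd-backed zygote memory, map it at the runtime page size, and toggle between shared and private (copy-on-write) views across a fork. The following standalone sketch is illustrative only (not ART code, Linux-specific) and simply shows that pattern with the page size queried via sysconf() instead of a compile-time constant.

// Illustrative sketch only (not ART code, Linux-specific): anonymous fd-backed
// memory viewed through a shared mapping and a private copy-on-write mapping,
// sized by the page size queried at runtime.
#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  int fd = memfd_create("zygote-memory-sketch", /*flags=*/ 0);
  if (fd == -1 || ftruncate(fd, page_size) != 0) { std::perror("memfd"); return 1; }
  int32_t* shared = static_cast<int32_t*>(
      mmap(nullptr, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
  if (shared == MAP_FAILED) { std::perror("mmap shared"); return 1; }
  shared[0] = 42;  // Visible through every mapping backed by fd.
  int32_t* copy = static_cast<int32_t*>(
      mmap(nullptr, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0));
  if (copy == MAP_FAILED) { std::perror("mmap private"); return 1; }
  copy[0] = 7;     // Copy-on-write: the shared view still reads 42.
  std::printf("shared=%d private=%d\n", shared[0], copy[0]);
  munmap(copy, page_size);
  munmap(shared, page_size);
  close(fd);
  return 0;
}
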
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index 3d7f7c4b03..7f65925910 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -1555,10 +1555,10 @@ TEST_F(JniInternalTest, NewStringUTF_Validation) {
// For the following tests, allocate two pages, one R/W and the next inaccessible.
std::string error_msg;
MemMap head_map = MemMap::MapAnonymous(
- "head", 2 * kPageSize, PROT_READ | PROT_WRITE, /*low_4gb=*/ false, &error_msg);
+ "head", 2 * gPageSize, PROT_READ | PROT_WRITE, /*low_4gb=*/ false, &error_msg);
ASSERT_TRUE(head_map.IsValid()) << error_msg;
MemMap tail_map = head_map.RemapAtEnd(
- head_map.Begin() + kPageSize, "tail", PROT_NONE, &error_msg);
+ head_map.Begin() + gPageSize, "tail", PROT_NONE, &error_msg);
ASSERT_TRUE(tail_map.IsValid()) << error_msg;
char* utf_src = reinterpret_cast<char*>(head_map.Begin());
@@ -1572,28 +1572,28 @@ TEST_F(JniInternalTest, NewStringUTF_Validation) {
const JNINativeInterface* base_env = down_cast<JNIEnvExt*>(env_)->GetUncheckedFunctions();
// Start with a simple ASCII string consisting of 4095 characters 'x'.
- memset(utf_src, 'x', kPageSize - 1u);
- utf_src[kPageSize - 1u] = 0u;
+ memset(utf_src, 'x', gPageSize - 1u);
+ utf_src[gPageSize - 1u] = 0u;
jstring s = base_env->NewStringUTF(env_, utf_src);
- ASSERT_EQ(mirror::String::GetFlaggedCount(kPageSize - 1u, /* compressible= */ true),
+ ASSERT_EQ(mirror::String::GetFlaggedCount(gPageSize - 1u, /* compressible= */ true),
env_->GetIntField(s, count_fid));
const char* chars = env_->GetStringUTFChars(s, nullptr);
- for (size_t pos = 0; pos != kPageSize - 1u; ++pos) {
+ for (size_t pos = 0; pos != gPageSize - 1u; ++pos) {
ASSERT_EQ('x', chars[pos]) << pos;
}
env_->ReleaseStringUTFChars(s, chars);
// Replace the last character with an invalid character that requires continuation.
for (char invalid : { '\xc0', '\xe0', '\xf0' }) {
- utf_src[kPageSize - 2u] = invalid;
+ utf_src[gPageSize - 2u] = invalid;
s = base_env->NewStringUTF(env_, utf_src);
- ASSERT_EQ(mirror::String::GetFlaggedCount(kPageSize - 1u, /* compressible= */ true),
+ ASSERT_EQ(mirror::String::GetFlaggedCount(gPageSize - 1u, /* compressible= */ true),
env_->GetIntField(s, count_fid));
chars = env_->GetStringUTFChars(s, nullptr);
- for (size_t pos = 0; pos != kPageSize - 2u; ++pos) {
+ for (size_t pos = 0; pos != gPageSize - 2u; ++pos) {
ASSERT_EQ('x', chars[pos]) << pos;
}
- EXPECT_EQ('?', chars[kPageSize - 2u]);
+ EXPECT_EQ('?', chars[gPageSize - 2u]);
env_->ReleaseStringUTFChars(s, chars);
}
@@ -1601,14 +1601,14 @@ TEST_F(JniInternalTest, NewStringUTF_Validation) {
utf_src[0] = '\xc2';
utf_src[1] = '\x80';
s = base_env->NewStringUTF(env_, utf_src);
- ASSERT_EQ(mirror::String::GetFlaggedCount(kPageSize - 2u, /* compressible= */ false),
+ ASSERT_EQ(mirror::String::GetFlaggedCount(gPageSize - 2u, /* compressible= */ false),
env_->GetIntField(s, count_fid));
const jchar* jchars = env_->GetStringChars(s, nullptr);
EXPECT_EQ(jchars[0], 0x80u);
- for (size_t pos = 1; pos != kPageSize - 3u; ++pos) {
+ for (size_t pos = 1; pos != gPageSize - 3u; ++pos) {
ASSERT_EQ('x', jchars[pos]) << pos;
}
- EXPECT_EQ('?', jchars[kPageSize - 3u]);
+ EXPECT_EQ('?', jchars[gPageSize - 3u]);
env_->ReleaseStringChars(s, jchars);
// Replace the leading two-byte sequence with a two-byte sequence that decodes as ASCII (0x40).
@@ -1616,14 +1616,14 @@ TEST_F(JniInternalTest, NewStringUTF_Validation) {
utf_src[1] = '\x80';
s = base_env->NewStringUTF(env_, utf_src);
// Note: All invalid characters are replaced by the ASCII replacement character.
- ASSERT_EQ(mirror::String::GetFlaggedCount(kPageSize - 2u, /* compressible= */ true),
+ ASSERT_EQ(mirror::String::GetFlaggedCount(gPageSize - 2u, /* compressible= */ true),
env_->GetIntField(s, count_fid));
jchars = env_->GetStringChars(s, nullptr);
EXPECT_EQ('\x40', jchars[0]);
- for (size_t pos = 1; pos != kPageSize - 3u; ++pos) {
+ for (size_t pos = 1; pos != gPageSize - 3u; ++pos) {
ASSERT_EQ('x', jchars[pos]) << pos;
}
- EXPECT_EQ('?', jchars[kPageSize - 3u]);
+ EXPECT_EQ('?', jchars[gPageSize - 3u]);
env_->ReleaseStringChars(s, jchars);
// Replace the leading three bytes with a three-byte sequence that decodes as ASCII (0x40).
@@ -1632,44 +1632,44 @@ TEST_F(JniInternalTest, NewStringUTF_Validation) {
utf_src[2] = '\x80';
s = base_env->NewStringUTF(env_, utf_src);
// Note: All invalid characters are replaced by the ASCII replacement character.
- ASSERT_EQ(mirror::String::GetFlaggedCount(kPageSize - 3u, /* compressible= */ true),
+ ASSERT_EQ(mirror::String::GetFlaggedCount(gPageSize - 3u, /* compressible= */ true),
env_->GetIntField(s, count_fid));
jchars = env_->GetStringChars(s, nullptr);
EXPECT_EQ('\x40', jchars[0]);
- for (size_t pos = 1; pos != kPageSize - 4u; ++pos) {
+ for (size_t pos = 1; pos != gPageSize - 4u; ++pos) {
ASSERT_EQ('x', jchars[pos]) << pos;
}
- EXPECT_EQ('?', jchars[kPageSize - 4u]);
+ EXPECT_EQ('?', jchars[gPageSize - 4u]);
env_->ReleaseStringChars(s, jchars);
// Replace the last two characters with a valid two-byte sequence that decodes as 0.
- utf_src[kPageSize - 3u] = '\xc0';
- utf_src[kPageSize - 2u] = '\x80';
+ utf_src[gPageSize - 3u] = '\xc0';
+ utf_src[gPageSize - 2u] = '\x80';
s = base_env->NewStringUTF(env_, utf_src);
- ASSERT_EQ(mirror::String::GetFlaggedCount(kPageSize - 4u, /* compressible= */ false),
+ ASSERT_EQ(mirror::String::GetFlaggedCount(gPageSize - 4u, /* compressible= */ false),
env_->GetIntField(s, count_fid));
jchars = env_->GetStringChars(s, nullptr);
EXPECT_EQ('\x40', jchars[0]);
- for (size_t pos = 1; pos != kPageSize - 5u; ++pos) {
+ for (size_t pos = 1; pos != gPageSize - 5u; ++pos) {
ASSERT_EQ('x', jchars[pos]) << pos;
}
- EXPECT_EQ('\0', jchars[kPageSize - 5u]);
+ EXPECT_EQ('\0', jchars[gPageSize - 5u]);
env_->ReleaseStringChars(s, jchars);
// Replace the last three characters with a three-byte sequence that decodes as 0.
// This is an incorrect encoding but `NewStringUTF()` is permissive.
- utf_src[kPageSize - 4u] = '\xe0';
- utf_src[kPageSize - 3u] = '\x80';
- utf_src[kPageSize - 2u] = '\x80';
+ utf_src[gPageSize - 4u] = '\xe0';
+ utf_src[gPageSize - 3u] = '\x80';
+ utf_src[gPageSize - 2u] = '\x80';
s = base_env->NewStringUTF(env_, utf_src);
- ASSERT_EQ(mirror::String::GetFlaggedCount(kPageSize - 5u, /* compressible= */ false),
+ ASSERT_EQ(mirror::String::GetFlaggedCount(gPageSize - 5u, /* compressible= */ false),
env_->GetIntField(s, count_fid));
jchars = env_->GetStringChars(s, nullptr);
EXPECT_EQ('\x40', jchars[0]);
- for (size_t pos = 1; pos != kPageSize - 6u; ++pos) {
+ for (size_t pos = 1; pos != gPageSize - 6u; ++pos) {
ASSERT_EQ('x', jchars[pos]) << pos;
}
- EXPECT_EQ('\0', jchars[kPageSize - 6u]);
+ EXPECT_EQ('\0', jchars[gPageSize - 6u]);
env_->ReleaseStringChars(s, jchars);
}
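
Note: the NewStringUTF validation test above depends on a two-page layout: a readable/writable page holding the string, immediately followed by an inaccessible page so that any read past the terminating NUL faults rather than silently succeeding. A minimal sketch of that layout using plain POSIX mmap/mprotect (illustrative only, not the ART test):

// Illustrative sketch only (not the ART test): a buffer that fills one page
// exactly and is followed by a PROT_NONE page, so reading past the final NUL
// faults immediately.
#include <sys/mman.h>
#include <unistd.h>
#include <cstring>
#include <cstdio>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  char* head = static_cast<char*>(mmap(nullptr, 2 * page, PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (head == MAP_FAILED || mprotect(head + page, page, PROT_NONE) != 0) {
    std::perror("mmap/mprotect");
    return 1;
  }
  std::memset(head, 'x', page - 1);  // page-1 'x' characters ...
  head[page - 1] = '\0';             // ... plus the NUL fill the page exactly.
  std::printf("strlen=%zu\n", std::strlen(head));  // page - 1
  munmap(head, 2 * page);
  return 0;
}
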
diff --git a/runtime/jni/local_reference_table.cc b/runtime/jni/local_reference_table.cc
index f701c71347..73d37a2502 100644
--- a/runtime/jni/local_reference_table.cc
+++ b/runtime/jni/local_reference_table.cc
@@ -41,7 +41,7 @@ static constexpr bool kDumpStackOnNonLocalReference = false;
static constexpr bool kDebugLRT = false;
// Number of free lists in the allocator.
-static const size_t kNumLrtSlots = WhichPowerOf2(kPageSize / kInitialLrtBytes);
+static const size_t gNumLrtSlots = WhichPowerOf2(gPageSize / kInitialLrtBytes);
// Mmap an "indirect ref table region. Table_bytes is a multiple of a page size.
static inline MemMap NewLRTMap(size_t table_bytes, std::string* error_msg) {
@@ -53,17 +53,17 @@ static inline MemMap NewLRTMap(size_t table_bytes, std::string* error_msg) {
}
SmallLrtAllocator::SmallLrtAllocator()
- : free_lists_(kNumLrtSlots, nullptr),
+ : free_lists_(gNumLrtSlots, nullptr),
shared_lrt_maps_(),
lock_("Small LRT allocator lock", LockLevel::kGenericBottomLock) {
}
inline size_t SmallLrtAllocator::GetIndex(size_t size) {
DCHECK_GE(size, kSmallLrtEntries);
- DCHECK_LT(size, kPageSize / sizeof(LrtEntry));
+ DCHECK_LT(size, gPageSize / sizeof(LrtEntry));
DCHECK(IsPowerOfTwo(size));
size_t index = WhichPowerOf2(size / kSmallLrtEntries);
- DCHECK_LT(index, kNumLrtSlots);
+ DCHECK_LT(index, gNumLrtSlots);
return index;
}
@@ -71,17 +71,17 @@ LrtEntry* SmallLrtAllocator::Allocate(size_t size, std::string* error_msg) {
size_t index = GetIndex(size);
MutexLock lock(Thread::Current(), lock_);
size_t fill_from = index;
- while (fill_from != kNumLrtSlots && free_lists_[fill_from] == nullptr) {
+ while (fill_from != gNumLrtSlots && free_lists_[fill_from] == nullptr) {
++fill_from;
}
void* result = nullptr;
- if (fill_from != kNumLrtSlots) {
+ if (fill_from != gNumLrtSlots) {
// We found a slot with enough memory.
result = free_lists_[fill_from];
free_lists_[fill_from] = *reinterpret_cast<void**>(result);
} else {
// We need to allocate a new page and split it into smaller pieces.
- MemMap map = NewLRTMap(kPageSize, error_msg);
+ MemMap map = NewLRTMap(gPageSize, error_msg);
if (!map.IsValid()) {
return nullptr;
}
@@ -104,13 +104,13 @@ LrtEntry* SmallLrtAllocator::Allocate(size_t size, std::string* error_msg) {
void SmallLrtAllocator::Deallocate(LrtEntry* unneeded, size_t size) {
size_t index = GetIndex(size);
MutexLock lock(Thread::Current(), lock_);
- while (index < kNumLrtSlots) {
+ while (index < gNumLrtSlots) {
// Check if we can merge this free block with another block with the same size.
void** other = reinterpret_cast<void**>(
reinterpret_cast<uintptr_t>(unneeded) ^ (kInitialLrtBytes << index));
void** before = &free_lists_[index];
- if (index + 1u == kNumLrtSlots && *before == other && *other == nullptr) {
- // Do not unmap the page if we do not have other free blocks with index `kNumLrtSlots - 1`.
+ if (index + 1u == gNumLrtSlots && *before == other && *other == nullptr) {
+ // Do not unmap the page if we do not have other free blocks with index `gNumLrtSlots - 1`.
// (Keep at least one free block to avoid a situation where creating and destroying a single
// thread with no local references would map and unmap a page in the `SmallLrtAllocator`.)
break;
@@ -128,9 +128,9 @@ void SmallLrtAllocator::Deallocate(LrtEntry* unneeded, size_t size) {
unneeded = reinterpret_cast<LrtEntry*>(
reinterpret_cast<uintptr_t>(unneeded) & reinterpret_cast<uintptr_t>(other));
}
- if (index == kNumLrtSlots) {
+ if (index == gNumLrtSlots) {
// Free the entire page.
- DCHECK(free_lists_[kNumLrtSlots - 1u] != nullptr);
+ DCHECK(free_lists_[gNumLrtSlots - 1u] != nullptr);
auto match = [=](MemMap& map) { return unneeded == reinterpret_cast<LrtEntry*>(map.Begin()); };
auto it = std::find_if(shared_lrt_maps_.begin(), shared_lrt_maps_.end(), match);
DCHECK(it != shared_lrt_maps_.end());
@@ -627,12 +627,12 @@ void LocalReferenceTable::Trim() {
if (start_index != 0u) {
++mem_map_index;
LrtEntry* table = tables_[table_index];
- uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table[start_index]), kPageSize);
+ uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table[start_index]), gPageSize);
uint8_t* release_end = reinterpret_cast<uint8_t*>(&table[table_size]);
DCHECK_GE(reinterpret_cast<uintptr_t>(release_end),
reinterpret_cast<uintptr_t>(release_start));
- DCHECK_ALIGNED_PARAM(release_end, kPageSize);
- DCHECK_ALIGNED_PARAM(release_end - release_start, kPageSize);
+ DCHECK_ALIGNED_PARAM(release_end, gPageSize);
+ DCHECK_ALIGNED_PARAM(release_end - release_start, gPageSize);
if (release_start != release_end) {
madvise(release_start, release_end - release_start, MADV_DONTNEED);
}
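
Note: with this change the number of SmallLrtAllocator free lists (gNumLrtSlots) is derived from the runtime page size rather than a constant, using the formula shown in the hunk above. The sketch below only illustrates how that count scales; kInitialLrtBytes = 512 and the local WhichPowerOf2 are stand-ins for the example, not ART's actual definitions.

// Illustrative sketch only: how the free-list count scales with the runtime
// page size, mirroring gNumLrtSlots = WhichPowerOf2(gPageSize / kInitialLrtBytes).
#include <unistd.h>
#include <cstdio>

static size_t WhichPowerOf2(size_t x) {  // Valid only when x is a power of two.
  return static_cast<size_t>(__builtin_ctzll(x));
}

int main() {
  constexpr size_t kInitialLrtBytes = 512;  // Illustrative smallest block size.
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  const size_t num_slots = WhichPowerOf2(page_size / kInitialLrtBytes);
  // With these numbers: 4 KiB pages -> 3 free lists (512, 1024, 2048 bytes),
  // 16 KiB pages -> 5 free lists (512 ... 8192 bytes).
  std::printf("page=%zu bytes -> %zu free lists\n", page_size, num_slots);
  return 0;
}
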
diff --git a/runtime/jni/local_reference_table.h b/runtime/jni/local_reference_table.h
index 8dce271ff2..97993da061 100644
--- a/runtime/jni/local_reference_table.h
+++ b/runtime/jni/local_reference_table.h
@@ -400,7 +400,7 @@ class LocalReferenceTable {
}
static size_t MaxSmallTables() {
- return NumTablesForSize(kPageSize / sizeof(LrtEntry));
+ return NumTablesForSize(gPageSize / sizeof(LrtEntry));
}
LrtEntry* GetEntry(size_t entry_index) const {
diff --git a/runtime/jni/local_reference_table_test.cc b/runtime/jni/local_reference_table_test.cc
index abf87158af..5839c60d0a 100644
--- a/runtime/jni/local_reference_table_test.cc
+++ b/runtime/jni/local_reference_table_test.cc
@@ -541,14 +541,14 @@ TEST_F(LocalReferenceTableTest, BasicResizeTest) {
BasicResizeTest(/*check_jni=*/ false, 20u);
BasicResizeTest(/*check_jni=*/ false, /*max_count=*/ kSmallLrtEntries);
BasicResizeTest(/*check_jni=*/ false, /*max_count=*/ 2u * kSmallLrtEntries);
- BasicResizeTest(/*check_jni=*/ false, /*max_count=*/ kPageSize / sizeof(LrtEntry));
+ BasicResizeTest(/*check_jni=*/ false, /*max_count=*/ gPageSize / sizeof(LrtEntry));
}
TEST_F(LocalReferenceTableTest, BasicResizeTestCheckJNI) {
BasicResizeTest(/*check_jni=*/ true, 20u);
BasicResizeTest(/*check_jni=*/ true, /*max_count=*/ kSmallLrtEntries);
BasicResizeTest(/*check_jni=*/ true, /*max_count=*/ 2u * kSmallLrtEntries);
- BasicResizeTest(/*check_jni=*/ true, /*max_count=*/ kPageSize / sizeof(LrtEntry));
+ BasicResizeTest(/*check_jni=*/ true, /*max_count=*/ gPageSize / sizeof(LrtEntry));
}
void LocalReferenceTableTest::TestAddRemove(bool check_jni, size_t max_count, size_t fill_count) {
@@ -830,7 +830,7 @@ TEST_F(LocalReferenceTableTest, RegressionTestB276864369) {
// Add refs to fill all small tables and one bigger table.
const LRTSegmentState cookie0 = kLRTFirstSegment;
- const size_t refs_per_page = kPageSize / sizeof(LrtEntry);
+ const size_t refs_per_page = gPageSize / sizeof(LrtEntry);
std::vector<IndirectRef> refs;
for (size_t i = 0; i != 2 * refs_per_page; ++i) {
refs.push_back(lrt.Add(cookie0, c, &error_msg));
@@ -854,7 +854,7 @@ TEST_F(LocalReferenceTableTest, Trim) {
// Add refs to fill all small tables.
const LRTSegmentState cookie0 = kLRTFirstSegment;
- const size_t refs_per_page = kPageSize / sizeof(LrtEntry);
+ const size_t refs_per_page = gPageSize / sizeof(LrtEntry);
std::vector<IndirectRef> refs0;
for (size_t i = 0; i != refs_per_page; ++i) {
refs0.push_back(lrt.Add(cookie0, c, &error_msg));
@@ -978,7 +978,7 @@ TEST_F(LocalReferenceTableTest, PruneBeforeTrim) {
// Add refs to fill all small tables and one bigger table.
const LRTSegmentState cookie0 = kLRTFirstSegment;
- const size_t refs_per_page = kPageSize / sizeof(LrtEntry);
+ const size_t refs_per_page = gPageSize / sizeof(LrtEntry);
std::vector<IndirectRef> refs;
for (size_t i = 0; i != 2 * refs_per_page; ++i) {
refs.push_back(lrt.Add(cookie0, c, &error_msg));
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index c773f65b4b..6addf25b99 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -626,7 +626,7 @@ bool OatFileBase::Setup(int zip_fd,
// however not aligned to kElfSegmentAlignment. While technically this isn't
// correct as per requirement in the ELF header, it has to be supported for
// now. See also the comment at ImageHeader::RelocateImageReferences.
- if (!IsAlignedParam(bss_begin_, kPageSize) ||
+ if (!IsAlignedParam(bss_begin_, gPageSize) ||
!IsAlignedParam(bss_methods_, static_cast<size_t>(pointer_size)) ||
!IsAlignedParam(bss_roots_, static_cast<size_t>(pointer_size)) ||
!IsAligned<alignof(GcRoot<mirror::Object>)>(bss_end_)) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 06bf34234f..e3d1fdf1c4 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1495,7 +1495,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
using Opt = RuntimeArgumentMap;
Opt runtime_options(std::move(runtime_options_in));
ScopedTrace trace(__FUNCTION__);
- CHECK_EQ(static_cast<size_t>(sysconf(_SC_PAGE_SIZE)), kPageSize);
+ CHECK_EQ(static_cast<size_t>(sysconf(_SC_PAGE_SIZE)), gPageSize);
// Reload all the flags value (from system properties and device configs).
ReloadAllFlags(__FUNCTION__);
@@ -1526,10 +1526,10 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
// leading to logspam.
{
const uintptr_t sentinel_addr =
- RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
+ RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), gPageSize);
protected_fault_page_ = MemMap::MapAnonymous("Sentinel fault page",
reinterpret_cast<uint8_t*>(sentinel_addr),
- kPageSize,
+ gPageSize,
PROT_NONE,
/*low_4gb=*/ true,
/*reuse=*/ false,
@@ -3517,8 +3517,8 @@ void Runtime::MadviseFileForRange(size_t madvise_size_limit_bytes,
const uint8_t* map_begin,
const uint8_t* map_end,
const std::string& file_name) {
- map_begin = AlignDown(map_begin, kPageSize);
- map_size_bytes = RoundUp(map_size_bytes, kPageSize);
+ map_begin = AlignDown(map_begin, gPageSize);
+ map_size_bytes = RoundUp(map_size_bytes, gPageSize);
#ifdef ART_TARGET_ANDROID
// Short-circuit the madvise optimization for background processes. This
// avoids IO and memory contention with foreground processes, particularly
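
Note: madvise() operates on whole pages, which is why the MadviseFileForRange hunk above aligns the start address down and rounds the length up to the runtime page size before issuing advice. A minimal sketch of those two adjustments; AlignDown/RoundUp here are local stand-ins for the ART helpers of the same name and assume a power-of-two page size.

// Illustrative sketch only: page-aligning a range before madvise().
#include <unistd.h>
#include <cstdint>
#include <cstdio>

static const uint8_t* AlignDown(const uint8_t* p, size_t n) {
  return reinterpret_cast<const uint8_t*>(reinterpret_cast<uintptr_t>(p) & ~(n - 1));
}
static size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  const uint8_t* map_begin = reinterpret_cast<const uint8_t*>(0x7f0000001234);
  size_t map_size_bytes = 10000;
  map_begin = AlignDown(map_begin, page);
  map_size_bytes = RoundUp(map_size_bytes, page);
  // The real code would now call madvise(map_begin, map_size_bytes, ...).
  std::printf("aligned range: %p + %zu bytes\n",
              static_cast<const void*>(map_begin), map_size_bytes);
  return 0;
}
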
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 2ba73cf47a..e08d919c34 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -199,7 +199,7 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackJava)
TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
std::string error_msg;
MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
- 128 * kPageSize, // Just some small stack.
+ 128 * gPageSize, // Just some small stack.
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&error_msg);
diff --git a/runtime/runtime_globals.h b/runtime/runtime_globals.h
index 6daeba36f7..d4371d62af 100644
--- a/runtime/runtime_globals.h
+++ b/runtime/runtime_globals.h
@@ -27,7 +27,7 @@ static constexpr size_t kVRegSize = 4;
// Returns whether the given memory offset can be used for generating
// an implicit null check.
static inline bool CanDoImplicitNullCheckOn(uintptr_t offset) {
- return offset < kPageSize;
+ return offset < gPageSize;
}
// Required object alignment
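
Note: the CanDoImplicitNullCheckOn hunk above encodes why implicit null checks depend on the page size: a load from null + offset only reliably faults if the access cannot escape the unmapped first page, i.e. if offset < page size. A tiny illustrative stand-in (not ART code):

// Illustrative sketch only: the implicit-null-check bound at runtime page size.
#include <unistd.h>
#include <cstdint>
#include <cstdio>

static bool CanDoImplicitNullCheckOn(uintptr_t offset, size_t page_size) {
  return offset < page_size;
}

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  std::printf("offset 64     -> %d\n", CanDoImplicitNullCheckOn(64u, page));        // ok
  std::printf("offset page+8 -> %d\n", CanDoImplicitNullCheckOn(page + 8u, page));  // needs explicit check
  return 0;
}
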
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 1e34986814..d4019f1acc 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -153,7 +153,7 @@ static constexpr size_t kSuspendTimeDuringFlip = 5'000;
// of the stack (lowest memory). The higher portion of the memory
// is protected against reads and the lower is available for use while
// throwing the StackOverflow exception.
-static const size_t kStackOverflowProtectedSize = kMemoryToolStackGuardSizeScale * kPageSize;
+static const size_t gStackOverflowProtectedSize = kMemoryToolStackGuardSizeScale * gPageSize;
static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
@@ -734,7 +734,7 @@ static size_t FixStackSize(size_t stack_size) {
}
// Some systems require the stack size to be a multiple of the system page size, so round up.
- stack_size = RoundUp(stack_size, kPageSize);
+ stack_size = RoundUp(stack_size, gPageSize);
return stack_size;
}
@@ -743,26 +743,26 @@ static size_t FixStackSize(size_t stack_size) {
NO_INLINE
static uint8_t* FindStackTop() {
return reinterpret_cast<uint8_t*>(
- AlignDown(__builtin_frame_address(0), kPageSize));
+ AlignDown(__builtin_frame_address(0), gPageSize));
}
// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
// overflow is detected. It is located right below the stack_begin_.
ATTRIBUTE_NO_SANITIZE_ADDRESS
void Thread::InstallImplicitProtection() {
- uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
+ uint8_t* pregion = tlsPtr_.stack_begin - gStackOverflowProtectedSize;
// Page containing current top of stack.
uint8_t* stack_top = FindStackTop();
// Try to directly protect the stack.
VLOG(threads) << "installing stack protected region at " << std::hex <<
static_cast<void*>(pregion) << " to " <<
- static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
+ static_cast<void*>(pregion + gStackOverflowProtectedSize - 1);
if (ProtectStack(/* fatal_on_error= */ false)) {
// Tell the kernel that we won't be needing these pages any more.
// NB. madvise will probably write zeroes into the memory (on linux it does).
size_t unwanted_size =
- reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - kPageSize;
+ reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - gPageSize;
madvise(pregion, unwanted_size, MADV_DONTNEED);
return;
}
@@ -812,12 +812,12 @@ void Thread::InstallImplicitProtection() {
#endif
// Keep space uninitialized as it can overflow the stack otherwise (should Clang actually
// auto-initialize this local variable).
- volatile char space[kPageSize - (kAsanMultiplier * 256)] __attribute__((uninitialized));
+ volatile char space[gPageSize - (kAsanMultiplier * 256)] __attribute__((uninitialized));
[[maybe_unused]] char sink = space[zero];
// Remove tag from the pointer. Nop in non-hwasan builds.
uintptr_t addr = reinterpret_cast<uintptr_t>(
__hwasan_tag_pointer != nullptr ? __hwasan_tag_pointer(space, 0) : space);
- if (addr >= target + kPageSize) {
+ if (addr >= target + gPageSize) {
Touch(target);
}
zero *= 2; // Try to avoid tail recursion.
@@ -828,7 +828,7 @@ void Thread::InstallImplicitProtection() {
VLOG(threads) << "(again) installing stack protected region at " << std::hex <<
static_cast<void*>(pregion) << " to " <<
- static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
+ static_cast<void*>(pregion + gStackOverflowProtectedSize - 1);
// Protect the bottom of the stack to prevent read/write to it.
ProtectStack(/* fatal_on_error= */ true);
@@ -836,7 +836,7 @@ void Thread::InstallImplicitProtection() {
// Tell the kernel that we won't be needing these pages any more.
// NB. madvise will probably write zeroes into the memory (on linux it does).
size_t unwanted_size =
- reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - kPageSize;
+ reinterpret_cast<uintptr_t>(stack_top) - reinterpret_cast<uintptr_t>(pregion) - gPageSize;
madvise(pregion, unwanted_size, MADV_DONTNEED);
}
@@ -1363,9 +1363,9 @@ bool Thread::InitStackHwm() {
//
// On systems with 4K page size, typically the minimum stack size will be 4+8+4 = 16K.
// The thread won't be able to do much with this stack: even the GC takes between 8K and 12K.
- DCHECK_ALIGNED_PARAM(kStackOverflowProtectedSize, kPageSize);
- size_t min_stack = kStackOverflowProtectedSize +
- RoundUp(GetStackOverflowReservedBytes(kRuntimeISA) + 4 * KB, kPageSize);
+ DCHECK_ALIGNED_PARAM(gStackOverflowProtectedSize, gPageSize);
+ size_t min_stack = gStackOverflowProtectedSize +
+ RoundUp(GetStackOverflowReservedBytes(kRuntimeISA) + 4 * KB, gPageSize);
if (read_stack_size <= min_stack) {
// Note, as we know the stack is small, avoid operations that could use a lot of stack.
LogHelper::LogLineLowStack(__PRETTY_FUNCTION__,
@@ -1395,9 +1395,9 @@ bool Thread::InitStackHwm() {
// to install our own region so we need to move the limits
// of the stack to make room for it.
- tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
- tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
- tlsPtr_.stack_size -= read_guard_size + kStackOverflowProtectedSize;
+ tlsPtr_.stack_begin += read_guard_size + gStackOverflowProtectedSize;
+ tlsPtr_.stack_end += read_guard_size + gStackOverflowProtectedSize;
+ tlsPtr_.stack_size -= read_guard_size + gStackOverflowProtectedSize;
InstallImplicitProtection();
}
@@ -4640,14 +4640,14 @@ std::ostream& operator<<(std::ostream& os, const Thread& thread) {
}
bool Thread::ProtectStack(bool fatal_on_error) {
- void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
+ void* pregion = tlsPtr_.stack_begin - gStackOverflowProtectedSize;
VLOG(threads) << "Protecting stack at " << pregion;
- if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
+ if (mprotect(pregion, gStackOverflowProtectedSize, PROT_NONE) == -1) {
if (fatal_on_error) {
// b/249586057, LOG(FATAL) times out
LOG(ERROR) << "Unable to create protected region in stack for implicit overflow check. "
"Reason: "
- << strerror(errno) << " size: " << kStackOverflowProtectedSize;
+ << strerror(errno) << " size: " << gStackOverflowProtectedSize;
exit(1);
}
return false;
@@ -4656,9 +4656,9 @@ bool Thread::ProtectStack(bool fatal_on_error) {
}
bool Thread::UnprotectStack() {
- void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
+ void* pregion = tlsPtr_.stack_begin - gStackOverflowProtectedSize;
VLOG(threads) << "Unprotecting stack at " << pregion;
- return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
+ return mprotect(pregion, gStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
}
size_t Thread::NumberOfHeldMutexes() const {
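
Note: the Thread hunks above size and place the protected stack region from the runtime page size. The sketch below only illustrates the underlying idea: carving a PROT_NONE guard out of the low end of a manually allocated stack so an overflow faults instead of corrupting adjacent memory. It is not Thread::InstallImplicitProtection, and the one-page guard size is an assumption for the example.

// Illustrative sketch only: a guard region at the low end of an mmap'd stack.
#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  const size_t guard_size = page;        // Assumed guard size for illustration.
  const size_t stack_size = 64 * page;
  char* base = static_cast<char*>(mmap(nullptr, stack_size, PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (base == MAP_FAILED) { std::perror("mmap"); return 1; }
  // Stacks grow downwards, so the guard sits at the lowest addresses and the
  // usable portion begins just above it.
  if (mprotect(base, guard_size, PROT_NONE) != 0) { std::perror("mprotect"); return 1; }
  std::printf("usable stack: [%p, %p)\n",
              static_cast<void*>(base + guard_size),
              static_cast<void*>(base + stack_size));
  munmap(base, stack_size);
  return 0;
}
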
diff --git a/runtime/thread_android.cc b/runtime/thread_android.cc
index df4511b0d4..00604a9656 100644
--- a/runtime/thread_android.cc
+++ b/runtime/thread_android.cc
@@ -39,8 +39,8 @@ void Thread::MadviseAwayAlternateSignalStack() {
// create different arbitrary alternate signal stacks and we do not want to erroneously
// `madvise()` away pages that may hold data other than the alternate signal stack.
if ((old_ss.ss_flags & SS_DISABLE) == 0 &&
- IsAlignedParam(old_ss.ss_sp, kPageSize) &&
- IsAlignedParam(old_ss.ss_size, kPageSize)) {
+ IsAlignedParam(old_ss.ss_sp, gPageSize) &&
+ IsAlignedParam(old_ss.ss_size, gPageSize)) {
CHECK_EQ(old_ss.ss_flags & SS_ONSTACK, 0);
// Note: We're testing and benchmarking ART on devices with old kernels
// which may not support `MADV_FREE`, so we do not check the result.
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 98af0cd87a..de9ccbaba5 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -55,18 +55,18 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
// a guard page, so don't do anything special on Bionic libc.
if (kUseCustomThreadPoolStack) {
// Add an inaccessible page to catch stack overflow.
- stack_size += kPageSize;
+ stack_size += gPageSize;
stack_ = MemMap::MapAnonymous(name.c_str(),
stack_size,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
&error_msg);
CHECK(stack_.IsValid()) << error_msg;
- CHECK_ALIGNED_PARAM(stack_.Begin(), kPageSize);
+ CHECK_ALIGNED_PARAM(stack_.Begin(), gPageSize);
CheckedCall(mprotect,
"mprotect bottom page of thread pool worker stack",
stack_.Begin(),
- kPageSize,
+ gPageSize,
PROT_NONE);
}
const char* reason = "new thread pool worker thread";
diff --git a/test/305-other-fault-handler/fault_handler.cc b/test/305-other-fault-handler/fault_handler.cc
index 240827b0d8..e459a75da4 100644
--- a/test/305-other-fault-handler/fault_handler.cc
+++ b/test/305-other-fault-handler/fault_handler.cc
@@ -36,7 +36,7 @@ class TestFaultHandler final : public FaultHandler {
map_error_(),
target_map_(MemMap::MapAnonymous("test-305-mmap",
/* addr */ nullptr,
- /* byte_count */ kPageSize,
+ /* byte_count */ gPageSize,
/* prot */ PROT_NONE,
/* low_4gb */ false,
/* reuse */ false,
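
Note: the test hunk above maps a PROT_NONE page so a custom fault handler has something to catch. As a rough illustration of that mechanism only (not the ART FaultHandler machinery), a SIGSEGV handler can make the page accessible and let the faulting load retry; calling mprotect() from a signal handler is done here purely for demonstration.

// Illustrative sketch only: handling a fault on a deliberately unreadable page.
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

static void* g_page = nullptr;
static size_t g_page_size = 0;

static void Handler(int, siginfo_t* info, void*) {
  if (info->si_addr == g_page) {
    mprotect(g_page, g_page_size, PROT_READ | PROT_WRITE);  // Retry will succeed.
  } else {
    _exit(1);  // Not our fault: bail out.
  }
}

int main() {
  g_page_size = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  g_page = mmap(nullptr, g_page_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  struct sigaction sa = {};
  sa.sa_sigaction = Handler;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, nullptr);
  volatile int value = *static_cast<int*>(g_page);  // Faults once, then reads 0.
  std::printf("read %d after the handled fault\n", value);
  munmap(g_page, g_page_size);
  return 0;
}
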