author Ruben Ayrapetyan <ruben.ayrapetyan@arm.com> 2023-09-28 16:24:15 +0100
committer Treehugger Robot <android-test-infra-autosubmit@system.gserviceaccount.com> 2023-11-30 00:02:56 +0000
commit  14165e156cc23a5bbe7d70d606b3b260721e48dd (patch)
tree    fdbd20ab22084b81fbd6d197e5de22b3401554a9
parent  54417d2c7e5254f8941119f8f16476c1a45e028a (diff)
Initialize gPageSize at runtime via sysconf
This changes gPageSize to a global constant in the page size agnostic
configuration, dynamically initialized with the runtime-determined page
size value. This finishes adding basic support for the page size
agnostic ART configuration, though without yet enabling it. With page
size agnosticism disabled, gPageSize and the values derived from it in
the global scope are still constexpr.

As part of that, introduce helpers for gPageSize and the derived
constants in the global scope, in a way that guarantees correct static
initialization order:
- GlobalConst is a helper class that acts as a global constant;
- ART_PAGE_SIZE_AGNOSTIC_DECLARE etc. are helper macros for declaring
  the constants either as global constants or as constexpr, depending
  on the configuration.
The helpers are used for gPageSize and the derived values stored in the
global scope.

Test: Same as for I5430741a8494b340ed7fd2d8692c41a59ad9c530. The whole
patch chain was tested as a whole.
Change-Id: Id1c18004346ba5c6c94e02cdf8b0b0bb3b99af70
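The core of the change is the GlobalConst pattern: the value is gated
behind a function-local static, which C++ guarantees is initialized on
first use, so initialization order across translation units no longer
matters. A minimal, self-contained sketch of the idea (a standalone
illustration, not the ART sources themselves):

#include <cstddef>
#include <cstdint>
#include <unistd.h>

// Sketch of the hazard being avoided: a plain dynamically initialized
// global like `const size_t gPageSize = sysconf(_SC_PAGE_SIZE);` may be
// read as 0 by another global's initializer in a different translation
// unit. Routing the read through a function-local static fixes the order.
template <typename T, auto Initialize>
struct GlobalConst {
  operator T() const {
    static T data = Initialize();  // initialized on first use, thread-safe
    return data;
  }
};

inline size_t PageSizeInit() {
  return static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
}
GlobalConst<size_t, PageSizeInit> gPageSize;

inline size_t PMDSizeInit() {
  // Reading gPageSize here forces its initializer to run first, even if
  // this code runs during static initialization of some other global.
  size_t page_size = gPageSize;
  return (page_size / sizeof(uint64_t)) * page_size;
}
GlobalConst<size_t, PMDSizeInit> gPMDSize;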
-rw-r--r--  compiler/common_compiler_test.cc            |   2
-rw-r--r--  libartbase/base/globals.h                   | 103
-rw-r--r--  libartbase/base/globals_unix.cc             |   4
-rw-r--r--  libartbase/base/mem_map.cc                  |   7
-rw-r--r--  libartbase/base/mem_map_test.cc             |   2
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc  |   5
-rw-r--r--  runtime/jni/local_reference_table.cc        |   3
-rw-r--r--  runtime/thread.cc                           |   6
8 files changed, 102 insertions, 30 deletions
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 206020fa82..eb8c21c0d1 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -57,7 +57,7 @@ class CommonCompilerTestImpl::CodeAndMetadata {
: sizeof(OatQuickMethodHeader) + vmap_table.size();
OatQuickMethodHeader method_header(vmap_table_offset);
const size_t code_alignment = GetInstructionSetCodeAlignment(instruction_set);
- DCHECK_ALIGNED_PARAM(gPageSize, code_alignment);
+ DCHECK_ALIGNED_PARAM(static_cast<size_t>(gPageSize), code_alignment);
const uint32_t code_offset = RoundUp(vmap_table.size() + sizeof(method_header), code_alignment);
const uint32_t capacity = RoundUp(code_offset + code_size, gPageSize);
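The static_cast at call sites like this one is needed because gPageSize
is now a GlobalConst wrapper rather than a size_t: the wrapper's
implicit conversion is not considered during template argument
deduction, so check macros whose helpers deduce their operand type stop
compiling without an explicit cast. A sketch of the failure mode, where
IsAlignedParam is a stand-in assumed to deduce its operand type like the
real check helper:

#include <cstddef>

template <typename T, auto Initialize>
struct GlobalConst {
  operator T() const { static T data = Initialize(); return data; }
};

inline size_t FourK() { return 4096; }
GlobalConst<size_t, FourK> gPageSize;

// Stand-in for a DCHECK_ALIGNED_PARAM-style helper that deduces T.
template <typename T>
bool IsAlignedParam(T x, int alignment) {
  return (x & (alignment - 1)) == 0;
}

int main() {
  // IsAlignedParam(gPageSize, 16);  // T deduces to GlobalConst<...>;
  //                                 // `x & ...` then fails to compile.
  return IsAlignedParam(static_cast<size_t>(gPageSize), 16) ? 0 : 1;
}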
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index e2323f4747..45bb0260e4 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -20,6 +20,8 @@
#include <stddef.h>
#include <stdint.h>
+#include "base/macros.h"
+
namespace art {
static constexpr size_t KB = 1024;
@@ -34,10 +36,6 @@ static constexpr int kBitsPerIntPtrT = sizeof(intptr_t) * kBitsPerByte;
// Required stack alignment
static constexpr size_t kStackAlignment = 16;
-// System page size. We check this against sysconf(_SC_PAGE_SIZE) at runtime, but use a simple
-// compile-time constant so the compiler can generate better code.
-static constexpr size_t gPageSize = 4096;
-
// Minimum supported page size.
static constexpr size_t kMinPageSize = 4096;
@@ -57,17 +55,6 @@ static constexpr size_t kMaxPageSize = kMinPageSize;
// this is the value to be used in images files for aligning contents to page size.
static constexpr size_t kElfSegmentAlignment = kMaxPageSize;
-// Address range covered by 1 Page Middle Directory (PMD) entry in the page table
-extern const size_t gPMDSize;
-
-// Address range covered by 1 Page Upper Directory (PUD) entry in the page table
-extern const size_t gPUDSize;
-
-// Returns the ideal alignment corresponding to page-table levels for the
-// given size.
-static inline size_t BestPageTableAlignment(size_t size) {
- return size < gPUDSize ? gPMDSize : gPUDSize;
-}
// Clion, clang analyzer, etc can falsely believe that "if (kIsDebugBuild)" always
// returns the same value. By wrapping into a call to another constexpr function, we force it
// to realize that is not actually always evaluating to the same value.
@@ -143,6 +130,92 @@ static constexpr bool kHostStaticBuildEnabled = false;
static constexpr char kPhDisableCompactDex[] =
"persist.device_config.runtime_native_boot.disable_compact_dex";
+// Helper class that acts as a global constant which can be initialized with
+// a dynamically computed value while not being subject to static initialization
+// order issues via gating access to the value through a function which ensures
+// the value is initialized before being accessed.
+//
+// The Initialize function should return T type. It shouldn't have side effects
+// and should always return the same value.
+template<typename T, auto Initialize>
+struct GlobalConst {
+ operator T() const {
+ static T data = Initialize();
+ return data;
+ }
+};
+
+// Helper macros for declaring and defining page size agnostic global values
+// which are constants in page size agnostic configuration and constexpr
+// in non page size agnostic configuration.
+//
+// For the former case, this uses the GlobalConst class initializing it with given expression
+// which might be the same as for the non page size agnostic configuration (then
+// ART_PAGE_SIZE_AGNOSTIC_DECLARE is most suitable to avoid duplication) or might be different
+// (in which case ART_PAGE_SIZE_AGNOSTIC_DECLARE_ALT should be used).
+//
+// The motivation behind these helpers is mainly to provide a way to declare / define / initialize
+// the global constants protected from static initialization order issues.
+//
+// Adding a new value e.g. `const uint32_t gNewVal = function(gPageSize);` can be done,
+// for example, via:
+// - declaring it using ART_PAGE_SIZE_AGNOSTIC_DECLARE in this header;
+// - and defining it with ART_PAGE_SIZE_AGNOSTIC_DEFINE in the globals_unix.cc
+// or another suitable module.
+// The statements might look as follows:
+// ART_PAGE_SIZE_AGNOSTIC_DECLARE(uint32_t, gNewVal, function(gPageSize));
+// ART_PAGE_SIZE_AGNOSTIC_DEFINE(uint32_t, gNewVal);
+//
+// NOTE:
+// The initializer expressions shouldn't have side effects
+// and should always return the same value.
+
+#ifdef ART_PAGE_SIZE_AGNOSTIC
+// Declaration (page size agnostic version).
+#define ART_PAGE_SIZE_AGNOSTIC_DECLARE_ALT(type, name, page_size_agnostic_expr, const_expr) \
+ inline type __attribute__((visibility("default"))) \
+ name ## _Initializer(void) { \
+ return (page_size_agnostic_expr); \
+ } \
+ extern GlobalConst<type, name ## _Initializer> name
+// Definition (page size agnostic version).
+#define ART_PAGE_SIZE_AGNOSTIC_DEFINE(type, name) GlobalConst<type, name ## _Initializer> name
+#else
+// Declaration (non page size agnostic version).
+#define ART_PAGE_SIZE_AGNOSTIC_DECLARE_ALT(type, name, page_size_agnostic_expr, const_expr) \
+ static constexpr type name = (const_expr)
+// Definition (non page size agnostic version).
+#define ART_PAGE_SIZE_AGNOSTIC_DEFINE(type, name)
+#endif // ART_PAGE_SIZE_AGNOSTIC
+
+// ART_PAGE_SIZE_AGNOSTIC_DECLARE is same as ART_PAGE_SIZE_AGNOSTIC_DECLARE_ALT
+// for the case when the initializer expressions are the same.
+#define ART_PAGE_SIZE_AGNOSTIC_DECLARE(type, name, expr) \
+ ART_PAGE_SIZE_AGNOSTIC_DECLARE_ALT(type, name, expr, expr)
+
+// Declaration and definition combined.
+#define ART_PAGE_SIZE_AGNOSTIC_DECLARE_AND_DEFINE(type, name, expr) \
+ ART_PAGE_SIZE_AGNOSTIC_DECLARE(type, name, expr); \
+ ART_PAGE_SIZE_AGNOSTIC_DEFINE(type, name)
+
+// System page size. We check this against sysconf(_SC_PAGE_SIZE) at runtime,
+// but for non page size agnostic configuration we use a simple compile-time
+// constant so the compiler can generate better code.
+ART_PAGE_SIZE_AGNOSTIC_DECLARE_ALT(size_t, gPageSize, sysconf(_SC_PAGE_SIZE), 4096);
+
+// TODO: Kernels for arm and x86 in both, 32-bit and 64-bit modes use 512 entries per page-table
+// page. Find a way to confirm that in userspace.
+// Address range covered by 1 Page Middle Directory (PMD) entry in the page table
+ART_PAGE_SIZE_AGNOSTIC_DECLARE(size_t, gPMDSize, (gPageSize / sizeof(uint64_t)) * gPageSize);
+// Address range covered by 1 Page Upper Directory (PUD) entry in the page table
+ART_PAGE_SIZE_AGNOSTIC_DECLARE(size_t, gPUDSize, (gPageSize / sizeof(uint64_t)) * gPMDSize);
+
+// Returns the ideal alignment corresponding to page-table levels for the
+// given size.
+static inline size_t BestPageTableAlignment(size_t size) {
+ return size < gPUDSize ? gPMDSize : gPUDSize;
+}
+
} // namespace art
#endif // ART_LIBARTBASE_BASE_GLOBALS_H_
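To make the page-table arithmetic concrete: each page-table page holds
page_size / sizeof(uint64_t) entries, so with 4 KiB pages one PMD entry
covers 512 * 4 KiB = 2 MiB and one PUD entry covers 512 * 2 MiB = 1 GiB.
A small standalone check of the formula across page sizes:

#include <cstdint>
#include <cstdio>

// Worked example of the PMD/PUD formula above (illustration, not ART
// code): one page-table page holds page_size / 8 eight-byte entries, so
// a PMD entry maps entries * page_size bytes and a PUD entry maps
// entries * pmd_size bytes.
int main() {
  for (uint64_t page_size : {4096u, 16384u, 65536u}) {
    uint64_t entries = page_size / sizeof(uint64_t);
    uint64_t pmd = entries * page_size;  // 2 MiB for 4 KiB pages
    uint64_t pud = entries * pmd;        // 1 GiB for 4 KiB pages
    std::printf("page %6llu: PMD %12llu, PUD %16llu\n",
                (unsigned long long)page_size,
                (unsigned long long)pmd,
                (unsigned long long)pud);
  }
  return 0;
}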
diff --git a/libartbase/base/globals_unix.cc b/libartbase/base/globals_unix.cc
index 9a67dc49dd..07226241db 100644
--- a/libartbase/base/globals_unix.cc
+++ b/libartbase/base/globals_unix.cc
@@ -22,6 +22,10 @@
namespace art {
+ART_PAGE_SIZE_AGNOSTIC_DEFINE(size_t, gPageSize);
+ART_PAGE_SIZE_AGNOSTIC_DEFINE(size_t, gPMDSize);
+ART_PAGE_SIZE_AGNOSTIC_DEFINE(size_t, gPUDSize);
+
#ifndef ART_STATIC_LIBARTBASE
#ifdef __APPLE__
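Spelled out, the declaration/definition pair for gPageSize expands
roughly as follows (visibility attribute omitted; an illustration
derived from the macros above, not literal preprocessor output):

// With ART_PAGE_SIZE_AGNOSTIC defined:
//   globals.h:        inline size_t gPageSize_Initializer(void) {
//                       return (sysconf(_SC_PAGE_SIZE));
//                     }
//                     extern GlobalConst<size_t, gPageSize_Initializer> gPageSize;
//   globals_unix.cc:  GlobalConst<size_t, gPageSize_Initializer> gPageSize;
//
// Without it:
//   globals.h:        static constexpr size_t gPageSize = (4096);
//   globals_unix.cc:  (empty -- ART_PAGE_SIZE_AGNOSTIC_DEFINE expands to nothing)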
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 9bfcea7592..e4d1faa464 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -56,13 +56,6 @@ using AllocationTrackingMultiMap =
using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;
-// TODO: Kernels for arm and x86 in both, 32-bit and 64-bit modes use 512 entries per page-table
-// page. Find a way to confirm that in userspace.
-// Address range covered by 1 Page Middle Directory (PMD) entry in the page table
-const size_t gPMDSize = (gPageSize / sizeof(uint64_t)) * gPageSize;
-// Address range covered by 1 Page Upper Directory (PUD) entry in the page table
-const size_t gPUDSize = (gPageSize / sizeof(uint64_t)) * gPMDSize;
-
// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index 742836d517..73a5be7d83 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -346,7 +346,7 @@ TEST_F(MemMapTest, MapAnonymousFailNullError) {
CommonInit();
// Test that we don't crash with a null error_str when mapping at an invalid location.
MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
- reinterpret_cast<uint8_t*>(gPageSize),
+ reinterpret_cast<uint8_t*>(static_cast<size_t>(gPageSize)),
0x20000,
PROT_READ | PROT_WRITE,
/*low_4gb=*/ false,
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 33a1d6c639..da0d09c3da 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -71,9 +71,8 @@ class ImmuneSpacesTest : public CommonArtTest {
// do not need to cover the image spaces though.
for (size_t i = 0; i < kMaxBitmaps; ++i) {
accounting::ContinuousSpaceBitmap bitmap(
- accounting::ContinuousSpaceBitmap::Create("bitmap",
- reinterpret_cast<uint8_t*>(gPageSize),
- gPageSize));
+ accounting::ContinuousSpaceBitmap::Create(
+ "bitmap", reinterpret_cast<uint8_t*>(static_cast<size_t>(gPageSize)), gPageSize));
CHECK(bitmap.IsValid());
live_bitmaps_.push_back(std::move(bitmap));
}
diff --git a/runtime/jni/local_reference_table.cc b/runtime/jni/local_reference_table.cc
index 73d37a2502..93ad671655 100644
--- a/runtime/jni/local_reference_table.cc
+++ b/runtime/jni/local_reference_table.cc
@@ -41,7 +41,8 @@ static constexpr bool kDumpStackOnNonLocalReference = false;
static constexpr bool kDebugLRT = false;
// Number of free lists in the allocator.
-static const size_t gNumLrtSlots = WhichPowerOf2(gPageSize / kInitialLrtBytes);
+ART_PAGE_SIZE_AGNOSTIC_DECLARE_AND_DEFINE(size_t, gNumLrtSlots,
+ WhichPowerOf2(gPageSize / kInitialLrtBytes));
// Mmap an "indirect ref table region. Table_bytes is a multiple of a page size.
static inline MemMap NewLRTMap(size_t table_bytes, std::string* error_msg) {
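For scale: gNumLrtSlots is the log2 of how many initial-size tables fit
in one page, and under the agnostic configuration it now tracks the
runtime page size instead of being fixed at compile time. With an
assumed kInitialLrtBytes of 512 (a value chosen for this sketch, not
taken from the diff) and 4 KiB pages, that is WhichPowerOf2(8) = 3 free
lists:

#include <cstddef>

// Minimal stand-in for WhichPowerOf2 (assumes x is a power of two).
constexpr size_t WhichPowerOf2Sketch(size_t x) {
  size_t n = 0;
  while (x > 1) { x >>= 1; ++n; }
  return n;
}

int main() {
  const size_t kPageSize = 4096;        // runtime value in the real code
  const size_t kInitialLrtBytes = 512;  // assumption for illustration
  return WhichPowerOf2Sketch(kPageSize / kInitialLrtBytes) == 3 ? 0 : 1;
}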
diff --git a/runtime/thread.cc b/runtime/thread.cc
index d4019f1acc..73008eb183 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -153,7 +153,8 @@ static constexpr size_t kSuspendTimeDuringFlip = 5'000;
// of the stack (lowest memory). The higher portion of the memory
// is protected against reads and the lower is available for use while
// throwing the StackOverflow exception.
-static const size_t gStackOverflowProtectedSize = kMemoryToolStackGuardSizeScale * gPageSize;
+ART_PAGE_SIZE_AGNOSTIC_DECLARE_AND_DEFINE(size_t, gStackOverflowProtectedSize,
+ kMemoryToolStackGuardSizeScale * gPageSize);
static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
@@ -1363,7 +1364,8 @@ bool Thread::InitStackHwm() {
//
// On systems with 4K page size, typically the minimum stack size will be 4+8+4 = 16K.
// The thread won't be able to do much with this stack: even the GC takes between 8K and 12K.
- DCHECK_ALIGNED_PARAM(gStackOverflowProtectedSize, gPageSize);
+ DCHECK_ALIGNED_PARAM(static_cast<size_t>(gStackOverflowProtectedSize),
+ static_cast<int32_t>(gPageSize));
size_t min_stack = gStackOverflowProtectedSize +
RoundUp(GetStackOverflowReservedBytes(kRuntimeISA) + 4 * KB, gPageSize);
if (read_stack_size <= min_stack) {
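The minimum-stack arithmetic in this hunk, worked through for the 4 KiB
case the comment describes (a guard scale of 1 and an 8 KiB reserved
region are assumptions for this sketch):

#include <cstddef>

// Worked example of the min_stack computation in Thread::InitStackHwm,
// roughly matching the "4+8+4 = 16K" comment above.
constexpr size_t RoundUp(size_t x, size_t n) { return ((x + n - 1) / n) * n; }

int main() {
  const size_t kPageSize = 4096;
  const size_t kGuardScale = 1;                           // no memory tool
  const size_t protected_size = kGuardScale * kPageSize;  // 4 KiB guard
  const size_t reserved = 8 * 1024;                       // assumed 8 KiB
  const size_t min_stack =
      protected_size + RoundUp(reserved + 4 * 1024, kPageSize);
  return min_stack == 16 * 1024 ? 0 : 1;                  // 4 + 8 + 4 = 16 KiB
}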