author Ruben Ayrapetyan <ruben.ayrapetyan@arm.com> 2023-09-26 10:57:11 +0100
committer Treehugger Robot <android-test-infra-autosubmit@system.gserviceaccount.com> 2023-11-23 01:07:32 +0000
commit 24b01d435ddc2a04b80673824a3d8bf2aa3d56f8 (patch)
tree 4888a32a25f3702396e543ddc46db96a4420ca6c
parent 2503bcc1913b8c067fa563ec1ff9624586cb7cec (diff)
16k: Make page size agnostic rounding optional.
This is a performance optimization that removes unnecessary rounding
operations in the non-page-size-agnostic configuration.

Test: Same as for I5430741a8494b340ed7fd2d8692c41a59ad9c530. The patch
chain was tested as a whole.

Author: Branislav Rankov <branislav.rankov@arm.com>
Co-authored-by: Ruben Ayrapetyan <ruben.ayrapetyan@arm.com>
Change-Id: Ieaf8698de39d29cf5c2be2c51139279ff61b29d2
-rw-r--r--  libartbase/base/bit_utils.h       9
-rw-r--r--  libartbase/base/globals.h         2
-rw-r--r--  runtime/gc/space/image_space.cc   6
-rw-r--r--  runtime/oat_file.cc               4
4 files changed, 17 insertions(+), 4 deletions(-)
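The patch below introduces CondRoundUp, which chooses at compile time between the existing RoundUp and a plain pass-through. The following is a minimal standalone sketch of that behavior, not ART code: the RoundDown/RoundUp stand-ins here are simplified, and the numbers are made up for illustration.

#include <cstddef>
#include <type_traits>

// Simplified stand-ins for ART's RoundDown/RoundUp (illustration only).
template <typename T>
constexpr T RoundDown(T x, std::remove_reference_t<T> n) {
  return x - (x % n);
}

template <typename T>
constexpr T RoundUp(T x, std::remove_reference_t<T> n) {
  return RoundDown(x + n - 1, n);
}

// Mirrors the CondRoundUp added to bit_utils.h: round only when asked to.
template <bool kRoundUp, typename T>
constexpr T CondRoundUp(T x, std::remove_reference_t<T> n) {
  return kRoundUp ? RoundUp(x, n) : x;
}

// With rounding enabled the value is padded up to the next multiple of n;
// with it disabled the value passes through unchanged.
static_assert(CondRoundUp<true>(std::size_t{5000}, std::size_t{16384}) == 16384);
static_assert(CondRoundUp<false>(std::size_t{5000}, std::size_t{16384}) == 5000);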
diff --git a/libartbase/base/bit_utils.h b/libartbase/base/bit_utils.h
index ffaffc3a30..a6986857ff 100644
--- a/libartbase/base/bit_utils.h
+++ b/libartbase/base/bit_utils.h
@@ -175,6 +175,15 @@ constexpr T RoundUp(T x, std::remove_reference_t<T> n) {
return RoundDown(x + n - 1, n);
}
+template<bool kRoundUp, typename T>
+constexpr T CondRoundUp(T x, std::remove_reference_t<T> n) {
+  if (kRoundUp) {
+    return RoundUp(x, n);
+  } else {
+    return x;
+  }
+}
+
// For aligning pointers.
template<typename T>
inline T* AlignDown(T* x, uintptr_t n) WARN_UNUSED;
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index 06b1f7e0aa..8dcf1e13ae 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -42,9 +42,11 @@ static constexpr size_t kPageSize = 4096;
static constexpr size_t kMinPageSize = 4096;
#if defined(ART_PAGE_SIZE_AGNOSTIC)
+static constexpr bool kPageSizeAgnostic = true;
// Maximum supported page size.
static constexpr size_t kMaxPageSize = 16384;
#else
+static constexpr bool kPageSizeAgnostic = false;
// Maximum supported page size.
static constexpr size_t kMaxPageSize = kMinPageSize;
#endif
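The new kPageSizeAgnostic constant simply records whether ART_PAGE_SIZE_AGNOSTIC was defined. One way to read the #else branch is as the invariant below; this check is illustrative and not part of the patch.

// Illustrative only: when ART_PAGE_SIZE_AGNOSTIC is not defined, the minimum
// and maximum supported page sizes collapse to the same 4 KiB value.
static_assert(kPageSizeAgnostic || kMaxPageSize == kMinPageSize,
              "fixed-page-size builds support exactly one page size");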
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e55b5b4af9..0c96e2b8cf 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1002,7 +1002,8 @@ class ImageSpace::Loader {
// The reserved memory size is aligned up to kElfSegmentAlignment to ensure
// that the next reserved area will be aligned to the value.
return MemMap::MapFileAtAddress(address,
- RoundUp(image_header.GetImageSize(), kElfSegmentAlignment),
+ CondRoundUp<kPageSizeAgnostic>(image_header.GetImageSize(),
+ kElfSegmentAlignment),
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
fd,
@@ -1018,7 +1019,8 @@ class ImageSpace::Loader {
// The reserved memory size is aligned up to kElfSegmentAlignment to ensure
// that the next reserved area will be aligned to the value.
MemMap map = MemMap::MapAnonymous(image_location,
- RoundUp(image_header.GetImageSize(), kElfSegmentAlignment),
+ CondRoundUp<kPageSizeAgnostic>(image_header.GetImageSize(),
+ kElfSegmentAlignment),
PROT_READ | PROT_WRITE,
/*low_4gb=*/ true,
image_reservation,
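Both image_space.cc call sites, and the oat_file.cc one below, follow the same pattern: in page-size-agnostic builds the reserved size is still rounded up to kElfSegmentAlignment, while fixed-page-size builds forward the size unchanged. Reusing the standalone CondRoundUp sketch from above, with made-up numbers (the real kElfSegmentAlignment value is not shown in this diff):

constexpr std::size_t kIllustrativeAlignment = 16384;   // stand-in for kElfSegmentAlignment
constexpr std::size_t kIllustrativeImageSize = 100000;  // stand-in for image_header.GetImageSize()

// Page-size-agnostic build: the reserved size is padded to the next segment boundary.
static_assert(CondRoundUp<true>(kIllustrativeImageSize, kIllustrativeAlignment) == 114688);

// Fixed-page-size build: the size is forwarded unchanged, skipping the rounding work.
static_assert(CondRoundUp<false>(kIllustrativeImageSize, kIllustrativeAlignment) == 100000);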
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index ae861d202a..189d3b853f 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -1423,8 +1423,8 @@ bool DlOpenOatFile::Dlopen(const std::string& elf_filename,
// PROT_NONE. We need to unmap the memory when destroying this oat file.
// The reserved memory size is aligned up to kElfSegmentAlignment to ensure
// that the next reserved area will be aligned to the value.
- dlopen_mmaps_.push_back(reservation->TakeReservedMemory(RoundUp(context.max_size,
- kElfSegmentAlignment)));
+ dlopen_mmaps_.push_back(reservation->TakeReservedMemory(
+ CondRoundUp<kPageSizeAgnostic>(context.max_size, kElfSegmentAlignment)));
}
#else
static_assert(!kIsTargetBuild || kIsTargetLinux || kIsTargetFuchsia,