author Ruben Ayrapetyan <ruben.ayrapetyan@arm.com> 2023-12-22 13:04:00 +0000
committer Steven Moreland <smoreland@google.com> 2023-12-26 19:40:46 +0000
commit c411c29cbf80bb48d63cc5bd8fbb69ae97a95902
tree   2de967ad75544b3d2236d9e92b34690ba04ad872
parent c4c2517940348137a780ae924253589f1423c23c
Use runtime page size for large object alignment
In general, this removes `kLargeObjectAlignment` and replaces it with a
dynamic `LargeObjectSpace::ObjectAlignment()` static method. This avoids
consuming extra memory to align up large objects to `kMaxPageSize` (16K)
when the runtime page size is 4K.

However, in order to keep `SpaceBitmap<kAlignment>` optimized, this keeps
the internal bitmap alignment fixed to the minimal page size (4K). We then
accept that if the page size is 16K at runtime, only 1 bit out of every
4 bits will actually be in use. Assuming a heap capacity of 256M, the size
of the bitmap is as follows:

- 4K: (256M / 4K) -> 64K pages -> 8K needed to store one bit per page.
- 16K: (256M / 16K) -> 16K pages -> 2K needed to store one bit per page.

As a temporary solution, it's acceptable to consume 8K regardless of the
runtime page size. In the future, as 16K page sizes become more common, we
may need to re-think how large object spaces work, and whether it still
makes sense to have the large object alignment tied to the OS page size.

To cover testing, this CL turns the existing space bitmap unit tests into
type-parameterized tests, based on the SpaceBitmap implementation and the
object alignment they can support:

| SpaceBitmapType       | SpaceBitmap alignment | Object alignment   |
|-----------------------+-----------------------+--------------------|
| ContinuousSpaceBitmap | kObjectAlignment      | kObjectAlignment   |
| LargeObjectBitmap     | kMinPageSize (4K)     | kMinPageSize (4K)  |
| LargeObjectBitmap     | kMinPageSize (4K)     | kMaxPageSize (16K) |

In passing, this also removes the templated version of
MapAnonymousAligned(), as, outside of tests, it was only used to allocate
large objects. It also highlights that its alignment must be higher than
gPageSize. The image tests are also adapted accordingly.

Test: art/test/testrunner/testrunner.py -b --host on host
Test: art/test/testrunner/testrunner.py --target --64 \
      --optimizing --gcverify --gcstress
Test: art/tools/run-gtests.sh -- -j6
Test: art/tools/run-gtests.sh \
      /apex/com.android.art/bin/art/arm64/art_runtime_tests \
      -- --gtest_filter="*SpaceBitmap*" on target Pixel6

Author: Pierre Langlois <pierre.langlois@arm.com>
Change-Id: Ib1251c88be1380168e3b5cf9126611e5daef5db1
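For reference, the bitmap-size arithmetic quoted above can be checked with a
small standalone sketch (one mark bit per alignment unit; the BitmapBytes
helper below is illustrative only, not an ART API):

    #include <cstddef>
    #include <cstdio>

    constexpr size_t KB = 1024;
    constexpr size_t MB = 1024 * KB;

    // Bytes of bitmap needed to cover `capacity` with one mark bit per `alignment` unit.
    constexpr size_t BitmapBytes(size_t capacity, size_t alignment) {
      return (capacity / alignment) / 8;  // 8 bits per byte.
    }

    int main() {
      std::printf("4K pages:  %zu KB of bitmap\n", BitmapBytes(256 * MB, 4 * KB) / KB);   // prints 8
      std::printf("16K pages: %zu KB of bitmap\n", BitmapBytes(256 * MB, 16 * KB) / KB);  // prints 2
      return 0;
    }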
-rw-r--r--  libartbase/base/mem_map.cc                    12
-rw-r--r--  libartbase/base/mem_map.h                     26
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc          2
-rw-r--r--  runtime/gc/accounting/space_bitmap.h          15
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc   116
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc    16
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc    44
-rw-r--r--  runtime/gc/collector/mark_compact.cc           6
-rw-r--r--  runtime/gc/collector/mark_sweep.cc             3
-rw-r--r--  runtime/gc/collector/semi_space-inl.h          2
-rw-r--r--  runtime/gc/heap.h                              6
-rw-r--r--  runtime/gc/space/large_object_space.cc        63
-rw-r--r--  runtime/gc/space/large_object_space.h         14
-rw-r--r--  runtime/runtime_globals.h                      2
14 files changed, 179 insertions, 148 deletions
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index e4d1faa464..0d4150c50b 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -393,19 +393,7 @@ MemMap MemMap::MapAnonymousAligned(const char* name,
size_t alignment,
/*out=*/std::string* error_msg) {
DCHECK(IsPowerOfTwo(alignment));
-
-#ifdef ART_PAGE_SIZE_AGNOSTIC
- // In page size agnostic configuration, the gPageSize is not known
- // statically, so this interface has to support the case when alignment
- // requested is greater than minimum page size however lower or equal to
- // the actual page size.
- DCHECK_GT(alignment, kMinPageSize);
- if (alignment <= gPageSize) {
- return MapAnonymous(name, byte_count, prot, low_4gb, error_msg);
- }
-#else
DCHECK_GT(alignment, gPageSize);
-#endif
// Allocate extra 'alignment - gPageSize' bytes so that the mapping can be aligned.
MemMap ret = MapAnonymous(name,
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 120caa3b13..4321b4304d 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -141,9 +141,11 @@ class MemMap {
/*out*/std::string* error_msg,
bool use_debug_name = true);
- // Request an aligned anonymous region. We can't directly ask for a MAP_SHARED (anonymous or
- // otherwise) mapping to be aligned as in that case file offset is involved and could make
- // the starting offset to be out of sync with another mapping of the same file.
+ // Request an aligned anonymous region, where the alignment must be higher
+ // than the runtime gPageSize. We can't directly ask for a MAP_SHARED
+ // (anonymous or otherwise) mapping to be aligned as in that case file offset
+ // is involved and could make the starting offset to be out of sync with
+ // another mapping of the same file.
static MemMap MapAnonymousAligned(const char* name,
size_t byte_count,
int prot,
@@ -181,24 +183,6 @@ class MemMap {
error_msg);
}
- // Request an aligned anonymous region with statically known alignment.
- // This is a wrapper choosing between MapAnonymousAligned and MapAnonymous
- // depends on whether MapAnonymous would guarantee the requested alignment.
- template<size_t alignment>
- static MemMap MapAnonymousAligned(const char* name,
- size_t byte_count,
- int prot,
- bool low_4gb,
- /*out*/std::string* error_msg) {
- static_assert(IsPowerOfTwo(alignment));
-
- if (alignment <= kMinPageSize) {
- return MapAnonymous(name, byte_count, prot, low_4gb, error_msg);
- } else {
- return MapAnonymousAligned(name, byte_count, prot, low_4gb, alignment, error_msg);
- }
- }
-
// Create placeholder for a region allocated by direct call to mmap.
// This is useful when we do not have control over the code calling mmap,
// but when we still want to keep track of it in the list.
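With the templated wrapper gone, a caller whose alignment may or may not
exceed the runtime page size now has to branch itself. A minimal sketch of
that pattern, mirroring the ReserveImage() helper added to
immune_spaces_test.cc further down (ReserveAligned is a hypothetical name,
not part of this change):

    // Sketch only: pick the mapping call at runtime, since MapAnonymousAligned()
    // now requires alignment > gPageSize.
    MemMap ReserveAligned(const char* name,
                          size_t byte_count,
                          size_t alignment,
                          /*out*/ std::string* error_msg) {
      DCHECK(IsPowerOfTwo(alignment));
      if (alignment <= gPageSize) {
        // MapAnonymous() already returns gPageSize-aligned memory, which is enough.
        return MemMap::MapAnonymous(name, byte_count, PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ true, error_msg);
      }
      return MemMap::MapAnonymousAligned(name, byte_count, PROT_READ | PROT_WRITE,
                                         /*low_4gb=*/ true, alignment, error_msg);
    }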
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index e8dd5972f7..6d755aaa26 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -250,7 +250,7 @@ void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitm
}
template class SpaceBitmap<kObjectAlignment>;
-template class SpaceBitmap<kLargeObjectAlignment>;
+template class SpaceBitmap<kMinPageSize>;
} // namespace accounting
} // namespace gc
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 2f3ba91a47..5b9e55b5c9 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -274,7 +274,20 @@ class SpaceBitmap {
};
using ContinuousSpaceBitmap = SpaceBitmap<kObjectAlignment>;
-using LargeObjectBitmap = SpaceBitmap<kLargeObjectAlignment>;
+
+// We pick the lowest supported page size to ensure that it's a constexpr, so
+// that we can keep bitmap accesses optimized. However, this means that when the
+// large-object alignment is higher than kMinPageSize, then not all bits in the
+// bitmap are actually in use.
+// In practice, this happens when running with a kernel that uses 16kB as the
+// page size, where 1 out of every 4 bits of the bitmap is used.
+
+// TODO: In the future, we should consider alternative fixed alignments for
+// large objects, disassociated from the page size. This would allow us to keep
+// accesses optimized, while also packing the bitmap efficiently, and reducing
+// its size enough that it would no longer make sense to allocate it with
+// mmap().
+using LargeObjectBitmap = SpaceBitmap<kMinPageSize>;
template<size_t kAlignment>
std::ostream& operator << (std::ostream& stream, const SpaceBitmap<kAlignment>& bitmap);
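To make the "1 out of every 4 bits" comment concrete: a SpaceBitmap with
granularity kAlignment maps an object at heap offset off to bit index
off / kAlignment, so on a 16K-page kernel successive large objects only ever
land on every fourth bit of the 4K-granularity LargeObjectBitmap. A
standalone sketch of that arithmetic (illustrative only, not ART code):

    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr size_t kMinPageSize = 4 * 1024;   // bitmap granularity, fixed at compile time
      const size_t object_alignment = 16 * 1024;  // large-object alignment on a 16K-page kernel
      for (size_t off = 0; off < 4 * object_alignment; off += object_alignment) {
        // Prints bit indices 0, 4, 8, 12: only one bit in four can ever be set.
        std::printf("offset %zu -> bit %zu\n", off, off / kMinPageSize);
      }
      return 0;
    }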
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 277f95fb37..3641fb72ad 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -21,6 +21,7 @@
#include "base/common_art_test.h"
#include "base/mutex.h"
+#include "gc/space/large_object_space.h"
#include "runtime_globals.h"
#include "space_bitmap-inl.h"
@@ -28,19 +29,52 @@ namespace art {
namespace gc {
namespace accounting {
+template <typename T>
class SpaceBitmapTest : public CommonArtTest {};
-TEST_F(SpaceBitmapTest, Init) {
+// Main test parameters. For each test case, we pair together a SpaceBitmap
+// implementation with an object alignment. The object alignment may be larger
+// than the underlying SpaceBitmap alignment.
+template <typename T, size_t kAlignment>
+struct SpaceBitmapTestType {
+ using SpaceBitmap = T;
+ static const size_t gObjectAlignment = kAlignment;
+};
+
+// This is a special case where gObjectAlignment is set to large-object
+// alignment at runtime.
+template <typename T>
+struct SpaceBitmapTestPageSizeType {
+ using SpaceBitmap = T;
+ static const size_t gObjectAlignment;
+};
+
+template <typename T>
+const size_t SpaceBitmapTestPageSizeType<T>::gObjectAlignment =
+ space::LargeObjectSpace::ObjectAlignment();
+
+using SpaceBitmapTestTypes =
+ ::testing::Types<SpaceBitmapTestType<ContinuousSpaceBitmap, kObjectAlignment>,
+ // Large objects are aligned to the OS page size, try
+ // different supported values, including the current
+ // runtime page size.
+ SpaceBitmapTestType<LargeObjectBitmap, kMinPageSize>,
+ SpaceBitmapTestPageSizeType<LargeObjectBitmap>,
+ SpaceBitmapTestType<LargeObjectBitmap, kMaxPageSize>>;
+
+TYPED_TEST_CASE(SpaceBitmapTest, SpaceBitmapTestTypes);
+
+TYPED_TEST(SpaceBitmapTest, Init) {
uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
size_t heap_capacity = 16 * MB;
- ContinuousSpaceBitmap space_bitmap(
- ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
+ auto space_bitmap(TypeParam::SpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
EXPECT_TRUE(space_bitmap.IsValid());
}
+template <typename SpaceBitmap>
class BitmapVerify {
public:
- BitmapVerify(ContinuousSpaceBitmap* bitmap, const mirror::Object* begin,
+ BitmapVerify(SpaceBitmap* bitmap, const mirror::Object* begin,
const mirror::Object* end)
: bitmap_(bitmap),
begin_(begin),
@@ -52,23 +86,23 @@ class BitmapVerify {
EXPECT_EQ(bitmap_->Test(obj), ((reinterpret_cast<uintptr_t>(obj) & 0xF) != 0));
}
- ContinuousSpaceBitmap* const bitmap_;
+ SpaceBitmap* const bitmap_;
const mirror::Object* begin_;
const mirror::Object* end_;
};
-TEST_F(SpaceBitmapTest, ScanRange) {
+TYPED_TEST(SpaceBitmapTest, ScanRange) {
uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
size_t heap_capacity = 16 * MB;
+ const size_t gObjectAlignment = TypeParam::gObjectAlignment;
- ContinuousSpaceBitmap space_bitmap(
- ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
+ auto space_bitmap(TypeParam::SpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
EXPECT_TRUE(space_bitmap.IsValid());
// Set all the odd bits in the first BitsPerIntPtrT * 3 to one.
for (size_t j = 0; j < kBitsPerIntPtrT * 3; ++j) {
const mirror::Object* obj =
- reinterpret_cast<mirror::Object*>(heap_begin + j * kObjectAlignment);
+ reinterpret_cast<mirror::Object*>(heap_begin + j * gObjectAlignment);
if (reinterpret_cast<uintptr_t>(obj) & 0xF) {
space_bitmap.Set(obj);
}
@@ -79,35 +113,36 @@ TEST_F(SpaceBitmapTest, ScanRange) {
// words.
for (size_t i = 0; i < static_cast<size_t>(kBitsPerIntPtrT); ++i) {
mirror::Object* start =
- reinterpret_cast<mirror::Object*>(heap_begin + i * kObjectAlignment);
+ reinterpret_cast<mirror::Object*>(heap_begin + i * gObjectAlignment);
for (size_t j = 0; j < static_cast<size_t>(kBitsPerIntPtrT * 2); ++j) {
mirror::Object* end =
- reinterpret_cast<mirror::Object*>(heap_begin + (i + j) * kObjectAlignment);
+ reinterpret_cast<mirror::Object*>(heap_begin + (i + j) * gObjectAlignment);
BitmapVerify(&space_bitmap, start, end);
}
}
}
-TEST_F(SpaceBitmapTest, ClearRange) {
+TYPED_TEST(SpaceBitmapTest, ClearRange) {
uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
size_t heap_capacity = 16 * MB;
+ const size_t gObjectAlignment = TypeParam::gObjectAlignment;
- ContinuousSpaceBitmap bitmap(
- ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
+ auto bitmap(TypeParam::SpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
EXPECT_TRUE(bitmap.IsValid());
// Set all of the bits in the bitmap.
- for (size_t j = 0; j < heap_capacity; j += kObjectAlignment) {
+ for (size_t j = 0; j < heap_capacity; j += gObjectAlignment) {
const mirror::Object* obj = reinterpret_cast<mirror::Object*>(heap_begin + j);
bitmap.Set(obj);
}
std::vector<std::pair<uintptr_t, uintptr_t>> ranges = {
- {0, 10 * KB + kObjectAlignment},
- {kObjectAlignment, kObjectAlignment},
- {kObjectAlignment, 2 * kObjectAlignment},
- {kObjectAlignment, 5 * kObjectAlignment},
- {1 * KB + kObjectAlignment, 2 * KB + 5 * kObjectAlignment},
+ {0, RoundUp(10 * KB, gObjectAlignment) + gObjectAlignment},
+ {gObjectAlignment, gObjectAlignment},
+ {gObjectAlignment, 2 * gObjectAlignment},
+ {gObjectAlignment, 5 * gObjectAlignment},
+ {RoundUp(1 * KB, gObjectAlignment) + gObjectAlignment,
+ RoundUp(2 * KB, gObjectAlignment) + 5 * gObjectAlignment},
};
// Try clearing a few ranges.
for (const std::pair<uintptr_t, uintptr_t>& range : ranges) {
@@ -115,14 +150,14 @@ TEST_F(SpaceBitmapTest, ClearRange) {
const mirror::Object* obj_end = reinterpret_cast<mirror::Object*>(heap_begin + range.second);
bitmap.ClearRange(obj_begin, obj_end);
// Boundaries should still be marked.
- for (uintptr_t i = 0; i < range.first; i += kObjectAlignment) {
+ for (uintptr_t i = 0; i < range.first; i += gObjectAlignment) {
EXPECT_TRUE(bitmap.Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
}
- for (uintptr_t i = range.second; i < range.second + gPageSize; i += kObjectAlignment) {
+ for (uintptr_t i = range.second; i < range.second + gPageSize; i += gObjectAlignment) {
EXPECT_TRUE(bitmap.Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
}
// Everything inside should be cleared.
- for (uintptr_t i = range.first; i < range.second; i += kObjectAlignment) {
+ for (uintptr_t i = range.first; i < range.second; i += gObjectAlignment) {
EXPECT_FALSE(bitmap.Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
bitmap.Set(reinterpret_cast<mirror::Object*>(heap_begin + i));
}
@@ -151,7 +186,7 @@ class RandGen {
uint32_t val_;
};
-template <typename TestFn>
+template <typename SpaceBitmap, typename TestFn>
static void RunTest(size_t alignment, TestFn&& fn) NO_THREAD_SAFETY_ANALYSIS {
uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
size_t heap_capacity = 16 * MB;
@@ -160,8 +195,7 @@ static void RunTest(size_t alignment, TestFn&& fn) NO_THREAD_SAFETY_ANALYSIS {
RandGen r(0x1234);
for (int i = 0; i < 5 ; ++i) {
- ContinuousSpaceBitmap space_bitmap(
- ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
+ SpaceBitmap space_bitmap(SpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
for (int j = 0; j < 10000; ++j) {
size_t offset = RoundDown(r.next() % heap_capacity, alignment);
@@ -194,8 +228,9 @@ static void RunTest(size_t alignment, TestFn&& fn) NO_THREAD_SAFETY_ANALYSIS {
}
}
-static void RunTestCount(size_t alignment) {
- auto count_test_fn = [](ContinuousSpaceBitmap* space_bitmap,
+TYPED_TEST(SpaceBitmapTest, VisitorAlignment) {
+ using SpaceBitmap = typename TypeParam::SpaceBitmap;
+ auto count_test_fn = [](SpaceBitmap* space_bitmap,
uintptr_t range_begin,
uintptr_t range_end,
size_t manual_count) {
@@ -204,19 +239,12 @@ static void RunTestCount(size_t alignment) {
space_bitmap->VisitMarkedRange(range_begin, range_end, count_fn);
EXPECT_EQ(count, manual_count);
};
- RunTest(alignment, count_test_fn);
+ RunTest<SpaceBitmap>(TypeParam::gObjectAlignment, count_test_fn);
}
-TEST_F(SpaceBitmapTest, VisitorObjectAlignment) {
- RunTestCount(kObjectAlignment);
-}
-
-TEST_F(SpaceBitmapTest, VisitorPageAlignment) {
- RunTestCount(gPageSize);
-}
-
-void RunTestOrder(size_t alignment) {
- auto order_test_fn = [](ContinuousSpaceBitmap* space_bitmap,
+TYPED_TEST(SpaceBitmapTest, OrderAlignment) {
+ using SpaceBitmap = typename TypeParam::SpaceBitmap;
+ auto order_test_fn = [](SpaceBitmap* space_bitmap,
uintptr_t range_begin,
uintptr_t range_end,
size_t manual_count)
@@ -240,15 +268,7 @@ void RunTestOrder(size_t alignment) {
EXPECT_NE(nullptr, last_ptr);
}
};
- RunTest(alignment, order_test_fn);
-}
-
-TEST_F(SpaceBitmapTest, OrderObjectAlignment) {
- RunTestOrder(kObjectAlignment);
-}
-
-TEST_F(SpaceBitmapTest, OrderPageAlignment) {
- RunTestOrder(gPageSize);
+ RunTest<SpaceBitmap>(TypeParam::gObjectAlignment, order_test_fn);
}
} // namespace accounting
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 90d8cd4f4d..af4848a17b 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1217,9 +1217,9 @@ bool ConcurrentCopying::TestAndSetMarkBitForRef(mirror::Object* ref) {
DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
return true;
} else {
- // Should be a large object. Must be page aligned and the LOS must exist.
- if (kIsDebugBuild
- && (!IsAligned<kLargeObjectAlignment>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+ // Should be a large object. Must be aligned and the LOS must exist.
+ if (kIsDebugBuild && (!IsAlignedParam(ref, space::LargeObjectSpace::ObjectAlignment()) ||
+ heap_->GetLargeObjectsSpace() == nullptr)) {
// It must be heap corruption. Remove memory protection and dump data.
region_space_->Unprotect();
heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
@@ -1246,9 +1246,9 @@ bool ConcurrentCopying::TestMarkBitmapForRef(mirror::Object* ref) {
DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
return true;
} else {
- // Should be a large object. Must be page aligned and the LOS must exist.
- if (kIsDebugBuild
- && (!IsAligned<kLargeObjectAlignment>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+ // Should be a large object. Must be aligned and the LOS must exist.
+ if (kIsDebugBuild && (!IsAlignedParam(ref, space::LargeObjectSpace::ObjectAlignment()) ||
+ heap_->GetLargeObjectsSpace() == nullptr)) {
// It must be heap corruption. Remove memory protection and dump data.
region_space_->Unprotect();
heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
@@ -2305,7 +2305,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
heap_->GetNonMovingSpace()->GetMarkBitmap();
const bool is_los = !mark_bitmap->HasAddress(to_ref);
if (is_los) {
- if (!IsAligned<kLargeObjectAlignment>(to_ref)) {
+ if (!IsAlignedParam(to_ref, space::LargeObjectSpace::ObjectAlignment())) {
// Ref is a large object that is not aligned, it must be heap
// corruption. Remove memory protection and dump data before
// AtomicSetReadBarrierState since it will fault if the address is not
@@ -3680,7 +3680,7 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self,
accounting::LargeObjectBitmap* los_bitmap = nullptr;
const bool is_los = !mark_bitmap->HasAddress(ref);
if (is_los) {
- if (!IsAligned<kLargeObjectAlignment>(ref)) {
+ if (!IsAlignedParam(ref, space::LargeObjectSpace::ObjectAlignment())) {
// Ref is a large object that is not aligned, it must be heap
// corruption. Remove memory protection and dump data before
// AtomicSetReadBarrierState since it will fault if the address is not
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index da0d09c3da..0ea9888179 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -78,6 +78,26 @@ class ImmuneSpacesTest : public CommonArtTest {
}
}
+ MemMap ReserveImage(size_t image_size, /*out*/ std::string* error_str) {
+ // If the image is aligned to the current runtime page size, it will already
+ // be naturally aligned. On the other hand, MapAnonymousAligned() requires
+ // that the requested alignment is higher.
+ DCHECK_LE(gPageSize, kElfSegmentAlignment);
+ if (gPageSize == kElfSegmentAlignment) {
+ return MemMap::MapAnonymous("reserve",
+ image_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/true,
+ error_str);
+ }
+ return MemMap::MapAnonymousAligned("reserve",
+ image_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/true,
+ kElfSegmentAlignment,
+ error_str);
+ }
+
// Create an image space, the oat file is optional.
FakeImageSpace* CreateImageSpace(size_t image_size,
size_t oat_size,
@@ -193,12 +213,7 @@ TEST_F(ImmuneSpacesTest, AppendAfterImage) {
constexpr size_t kOtherSpaceSize = 100 * kElfSegmentAlignment;
std::string error_str;
- MemMap reservation = MemMap::MapAnonymousAligned<kElfSegmentAlignment>(
- "reserve",
- kImageSize + kImageOatSize + kOtherSpaceSize,
- PROT_READ | PROT_WRITE,
- /*low_4gb=*/ true,
- &error_str);
+ MemMap reservation = ReserveImage(kImageSize + kImageOatSize + kOtherSpaceSize, &error_str);
ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
MemMap image_reservation = reservation.TakeReservedMemory(kImageSize);
ASSERT_TRUE(image_reservation.IsValid());
@@ -257,8 +272,7 @@ TEST_F(ImmuneSpacesTest, MultiImage) {
constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
std::string error_str;
- MemMap reservation = MemMap::MapAnonymousAligned<kElfSegmentAlignment>(
- "reserve", kMemorySize, PROT_READ | PROT_WRITE, /*low_4gb=*/ true, &error_str);
+ MemMap reservation = ReserveImage(kMemorySize, &error_str);
ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size);
ASSERT_TRUE(image_reservation.IsValid());
@@ -328,12 +342,7 @@ TEST_F(ImmuneSpacesTest, MultiImage) {
constexpr size_t kImage4Size = kImageBytes - kElfSegmentAlignment;
constexpr size_t kImage4OatSize = kElfSegmentAlignment;
- reservation = MemMap::MapAnonymousAligned<kElfSegmentAlignment>(
- "reserve",
- kImage4Size + kImage4OatSize + kGuardSize * 2,
- PROT_READ | PROT_WRITE,
- /*low_4gb=*/ true,
- &error_str);
+ reservation = ReserveImage(kImage4Size + kImage4OatSize + kGuardSize * 2, &error_str);
ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
MemMap guard = reservation.TakeReservedMemory(kGuardSize);
ASSERT_TRUE(guard.IsValid());
@@ -368,12 +377,7 @@ TEST_F(ImmuneSpacesTest, MultiImage) {
// Layout: [guard page][image][oat][guard page]
constexpr size_t kImage5Size = kImageBytes + kElfSegmentAlignment;
constexpr size_t kImage5OatSize = kElfSegmentAlignment;
- reservation = MemMap::MapAnonymousAligned<kElfSegmentAlignment>(
- "reserve",
- kImage5Size + kImage5OatSize + kGuardSize * 2,
- PROT_READ | PROT_WRITE,
- /*low_4gb=*/ true,
- &error_str);
+ reservation = ReserveImage(kImage5Size + kImage5OatSize + kGuardSize * 2, &error_str);
ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
guard = reservation.TakeReservedMemory(kGuardSize);
ASSERT_TRUE(guard.IsValid());
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 27bb5f26a6..cf80840a1a 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -4281,8 +4281,8 @@ inline bool MarkCompact::MarkObjectNonNullNoPush(mirror::Object* obj,
return false;
} else {
// Must be a large-object space, otherwise it's a case of heap corruption.
- if (!IsAligned<kLargeObjectAlignment>(obj)) {
- // Objects in large-object space are aligned to kLargeObjectAlignment.
+ if (!IsAlignedParam(obj, space::LargeObjectSpace::ObjectAlignment())) {
+ // Objects in large-object space are aligned to the large-object alignment.
// So if we have an object which doesn't belong to any space and is not
// page-aligned as well, then it's memory corruption.
// TODO: implement protect/unprotect in bump-pointer space.
@@ -4379,7 +4379,7 @@ mirror::Object* MarkCompact::IsMarked(mirror::Object* obj) {
<< " doesn't belong to any of the spaces and large object space doesn't exist";
accounting::LargeObjectBitmap* los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
if (los_bitmap->HasAddress(obj)) {
- DCHECK(IsAligned<kLargeObjectAlignment>(obj));
+ DCHECK(IsAlignedParam(obj, space::LargeObjectSpace::ObjectAlignment()));
return los_bitmap->Test(obj) ? obj : nullptr;
} else {
// The given obj is not in any of the known spaces, so return null. This could
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index aadc9e43a5..fdaf5a2f57 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -440,7 +440,8 @@ class MarkSweep::MarkObjectSlowPath {
++mark_sweep_->large_object_mark_;
}
space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
- if (UNLIKELY(obj == nullptr || !IsAligned<kLargeObjectAlignment>(obj) ||
+ if (UNLIKELY(obj == nullptr ||
+ !IsAlignedParam(obj, space::LargeObjectSpace::ObjectAlignment()) ||
(kIsDebugBuild && large_object_space != nullptr &&
!large_object_space->Contains(obj)))) {
// Lowest priority logging first:
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 0353632c78..ef6df08b45 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -63,7 +63,7 @@ inline void SemiSpace::MarkObject(CompressedReferenceType* obj_ptr) {
auto slow_path = [this](const mirror::Object* ref) {
CHECK(!to_space_->HasAddress(ref)) << "Marking " << ref << " in to_space_";
// Marking a large object, make sure its aligned as a consistency check.
- CHECK_ALIGNED(ref, kLargeObjectAlignment);
+ CHECK_ALIGNED_PARAM(ref, space::LargeObjectSpace::ObjectAlignment());
};
if (!mark_bitmap_->Set(obj, slow_path)) {
// This object was not previously marked.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b823e6564a..06cf842fae 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -693,9 +693,9 @@ class Heap {
// Mark all the objects in the allocation stack in the specified bitmap.
// TODO: Refactor?
- void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
- accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
- accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
+ void MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
+ accounting::ContinuousSpaceBitmap* bitmap2,
+ accounting::LargeObjectBitmap* large_objects,
accounting::ObjectStack* stack)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index e0d747ff37..b0db30b176 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -138,10 +138,15 @@ LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
+ DCHECK_LE(ObjectAlignment(), gPageSize)
+ << "MapAnonymousAligned() should be used if the large-object alignment is larger than the "
+ "runtime page size";
std::string error_msg;
- MemMap mem_map = MemMap::MapAnonymousAligned<kLargeObjectAlignment>(
- "large object space allocation", num_bytes, PROT_READ | PROT_WRITE,
- /*low_4gb=*/ true, &error_msg);
+ MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
+ num_bytes,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/true,
+ &error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
return nullptr;
@@ -263,19 +268,20 @@ class AllocationInfo {
public:
AllocationInfo() : prev_free_(0), alloc_size_(0) {
}
- // Return the number of kLargeObjectAlignment-sized blocks that the allocation info covers.
+ // Return the number of blocks (each the size of the large-object alignment) that the
+ // allocation info covers.
size_t AlignSize() const {
return alloc_size_ & kFlagsMask;
}
// Returns the allocation size in bytes.
size_t ByteSize() const {
- return AlignSize() * kLargeObjectAlignment;
+ return AlignSize() * LargeObjectSpace::ObjectAlignment();
}
// Updates the allocation size and whether or not it is free.
void SetByteSize(size_t size, bool free) {
DCHECK_EQ(size & ~kFlagsMask, 0u);
- DCHECK_ALIGNED(size, kLargeObjectAlignment);
- alloc_size_ = (size / kLargeObjectAlignment) | (free ? kFlagFree : 0u);
+ DCHECK_ALIGNED_PARAM(size, LargeObjectSpace::ObjectAlignment());
+ alloc_size_ = (size / LargeObjectSpace::ObjectAlignment()) | (free ? kFlagFree : 0u);
}
// Returns true if the block is free.
bool IsFree() const {
@@ -308,29 +314,30 @@ class AllocationInfo {
mirror::Object* GetObjectAddress() {
return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
}
- // Return how many kLargeObjectAlignment units there are before the free block.
+ // Return how many units (each the size of the large-object alignment)
+ // there are before the free block.
size_t GetPrevFree() const {
return prev_free_;
}
// Returns how many free bytes there are before the block.
size_t GetPrevFreeBytes() const {
- return GetPrevFree() * kLargeObjectAlignment;
+ return GetPrevFree() * LargeObjectSpace::ObjectAlignment();
}
// Update the size of the free block prior to the allocation.
void SetPrevFreeBytes(size_t bytes) {
- DCHECK_ALIGNED(bytes, kLargeObjectAlignment);
- prev_free_ = bytes / kLargeObjectAlignment;
+ DCHECK_ALIGNED_PARAM(bytes, LargeObjectSpace::ObjectAlignment());
+ prev_free_ = bytes / LargeObjectSpace::ObjectAlignment();
}
private:
static constexpr uint32_t kFlagFree = 0x80000000; // If block is free.
static constexpr uint32_t kFlagZygote = 0x40000000; // If the large object is a zygote object.
static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote); // Combined flags for masking.
- // Contains the size of the previous free block with kLargeObjectAlignment as the unit. If 0 then
- // the allocation before us is not free.
+ // Contains the size of the previous free block with the large-object alignment value as the
+ // unit. If 0 then the allocation before us is not free.
// These variables are undefined in the middle of allocations / free blocks.
uint32_t prev_free_;
- // Allocation size of this object in kLargeObjectAlignment as the unit.
+ // Allocation size of this object, in units of the large-object alignment.
uint32_t alloc_size_;
};
@@ -358,10 +365,16 @@ inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
}
FreeListSpace* FreeListSpace::Create(const std::string& name, size_t size) {
- CHECK_EQ(size % kLargeObjectAlignment, 0U);
+ CHECK_ALIGNED_PARAM(size, ObjectAlignment());
+ DCHECK_LE(ObjectAlignment(), gPageSize)
+ << "MapAnonymousAligned() should be used if the large-object alignment is larger than the "
+ "runtime page size";
std::string error_msg;
- MemMap mem_map = MemMap::MapAnonymousAligned<kLargeObjectAlignment>(
- name.c_str(), size, PROT_READ | PROT_WRITE, /*low_4gb=*/ true, &error_msg);
+ MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+ size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/true,
+ &error_msg);
CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
}
@@ -374,8 +387,8 @@ FreeListSpace::FreeListSpace(const std::string& name,
mem_map_(std::move(mem_map)) {
const size_t space_capacity = end - begin;
free_end_ = space_capacity;
- CHECK_ALIGNED(space_capacity, kLargeObjectAlignment);
- const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kLargeObjectAlignment);
+ CHECK_ALIGNED_PARAM(space_capacity, ObjectAlignment());
+ const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / ObjectAlignment());
std::string error_msg;
allocation_info_map_ =
MemMap::MapAnonymous("large object free list space allocation info map",
@@ -389,7 +402,7 @@ FreeListSpace::FreeListSpace(const std::string& name,
void FreeListSpace::ClampGrowthLimit(size_t new_capacity) {
MutexLock mu(Thread::Current(), lock_);
- new_capacity = RoundUp(new_capacity, kLargeObjectAlignment);
+ new_capacity = RoundUp(new_capacity, ObjectAlignment());
CHECK_LE(new_capacity, Size());
size_t diff = Size() - new_capacity;
// If we don't have enough free-bytes at the end to clamp, then do the best
@@ -399,7 +412,7 @@ void FreeListSpace::ClampGrowthLimit(size_t new_capacity) {
diff = free_end_;
}
- size_t alloc_info_size = sizeof(AllocationInfo) * (new_capacity / kLargeObjectAlignment);
+ size_t alloc_info_size = sizeof(AllocationInfo) * (new_capacity / ObjectAlignment());
allocation_info_map_.SetSize(alloc_info_size);
mem_map_.SetSize(new_capacity);
// We don't need to change anything in 'free_blocks_' as the free block at
@@ -445,12 +458,12 @@ void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
<< reinterpret_cast<void*>(End());
- DCHECK_ALIGNED(obj, kLargeObjectAlignment);
+ DCHECK_ALIGNED_PARAM(obj, ObjectAlignment());
AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
DCHECK(!info->IsFree());
const size_t allocation_size = info->ByteSize();
DCHECK_GT(allocation_size, 0U);
- DCHECK_ALIGNED(allocation_size, kLargeObjectAlignment);
+ DCHECK_ALIGNED_PARAM(allocation_size, ObjectAlignment());
// madvise the pages without lock
madvise(obj, allocation_size, MADV_DONTNEED);
@@ -490,7 +503,7 @@ size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
AllocationInfo* next_next_info = next_info->GetNextInfo();
// Next next info can't be free since we always coalesce.
DCHECK(!next_next_info->IsFree());
- DCHECK_ALIGNED(next_next_info->ByteSize(), kLargeObjectAlignment);
+ DCHECK_ALIGNED_PARAM(next_next_info->ByteSize(), ObjectAlignment());
new_free_info = next_next_info;
new_free_size += next_next_info->GetPrevFreeBytes();
RemoveFreePrev(next_next_info);
@@ -522,7 +535,7 @@ size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
MutexLock mu(self, lock_);
- const size_t allocation_size = RoundUp(num_bytes, kLargeObjectAlignment);
+ const size_t allocation_size = RoundUp(num_bytes, ObjectAlignment());
AllocationInfo temp_info;
temp_info.SetPrevFreeBytes(allocation_size);
temp_info.SetByteSize(0, false);
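As a worked example of the AllocationInfo unit arithmetic above (assuming a
4K runtime page size; the snippet is an illustrative sketch, not ART code):
a 10000-byte request is rounded up to 12288 bytes, stored as 3 alignment
units, and recovered unchanged by ByteSize().

    #include <cstddef>
    #include <cstdint>

    int main() {
      const size_t alignment = 4 * 1024;  // LargeObjectSpace::ObjectAlignment() on a 4K kernel
      const size_t request = 10000;
      // FreeListSpace::Alloc() rounds the request up to the alignment: 12288 bytes.
      const size_t allocation_size = (request + alignment - 1) & ~(alignment - 1);
      const uint32_t align_size = allocation_size / alignment;  // 3 units, what SetByteSize() stores
      const size_t byte_size = static_cast<size_t>(align_size) * alignment;  // 12288, as ByteSize() returns
      return byte_size == allocation_size ? 0 : 1;  // exits 0: the round trip is lossless
    }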
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 497239dcae..5588f5f7c6 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -118,6 +118,16 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
// Clamp the space size to the given capacity.
virtual void ClampGrowthLimit(size_t capacity) = 0;
+ // The way large object spaces are implemented, the object alignment has to be
+ // the same as the *runtime* OS page size. However, in the future this may
+ // change so it is important to use LargeObjectSpace::ObjectAlignment() rather
+ // than gPageSize when appropriate.
+#if defined(ART_PAGE_SIZE_AGNOSTIC)
+ static ALWAYS_INLINE size_t ObjectAlignment() { return gPageSize; }
+#else
+ static constexpr size_t ObjectAlignment() { return kMinPageSize; }
+#endif
+
protected:
explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
const char* lock_name);
@@ -206,13 +216,13 @@ class FreeListSpace final : public LargeObjectSpace {
FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
size_t GetSlotIndexForAddress(uintptr_t address) const {
DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
- return (address - reinterpret_cast<uintptr_t>(Begin())) / kLargeObjectAlignment;
+ return (address - reinterpret_cast<uintptr_t>(Begin())) / ObjectAlignment();
}
size_t GetSlotIndexForAllocationInfo(const AllocationInfo* info) const;
AllocationInfo* GetAllocationInfoForAddress(uintptr_t address);
const AllocationInfo* GetAllocationInfoForAddress(uintptr_t address) const;
uintptr_t GetAllocationAddressForSlot(size_t slot) const {
- return reinterpret_cast<uintptr_t>(Begin()) + slot * kLargeObjectAlignment;
+ return reinterpret_cast<uintptr_t>(Begin()) + slot * ObjectAlignment();
}
uintptr_t GetAddressForAllocationInfo(const AllocationInfo* info) const {
return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
diff --git a/runtime/runtime_globals.h b/runtime/runtime_globals.h
index d4371d62af..468c75cd8a 100644
--- a/runtime/runtime_globals.h
+++ b/runtime/runtime_globals.h
@@ -33,8 +33,6 @@ static inline bool CanDoImplicitNullCheckOn(uintptr_t offset) {
// Required object alignment
static constexpr size_t kObjectAlignmentShift = 3;
static constexpr size_t kObjectAlignment = 1u << kObjectAlignmentShift;
-static constexpr size_t kLargeObjectAlignment = kMaxPageSize;
-static_assert(kLargeObjectAlignment <= 16 * KB, "Consider redesign if more than 16K is required.");
// Garbage collector constants.
static constexpr bool kMovingCollector = true;