author Hans Boehm <hboehm@google.com> 2024-09-04 00:27:53 +0000
committer Hans Boehm <hboehm@google.com> 2024-09-26 15:14:48 +0000
commit ea269f69d05fe333e4b36634b925c3c40fc8ce95 (patch)
tree db1feb8fb32fd875bf899a45409610da99aa30f0
parent f255d9c2c508443ae4a0fb57f903db46ff3e39be (diff)
Revert^4 "Object.clone() allocates more movable objects"
This reverts commit fc0ed57a5f25d8a7b3fbf200224bd880ed05eff5.

PS1 is identical to aosp/3231043
PS2 adds 2280-address-of test, re-allows non-movable objects to be
    allocated in LOS, and more generally tracks non-movable objects in
    other spaces.
PS3 Style tweak
PS4 Disable test on jvm

Bug: 355291033
Bug: 354087169
Bug: 360363656
Bug: 361327909
Bug: 364629185
Test: Build and boot AOSP
Test: testrunner.py --host -b --all-gc -t 2280-address-of
Change-Id: I4b2929c353a6ede916762de509056817f001d6f8
-rw-r--r-- compiler/optimizing/intrinsics.cc | 2
-rw-r--r-- libartbase/base/bit_vector.h | 6
-rw-r--r-- openjdkjvmti/ti_heap.cc | 4
-rw-r--r-- runtime/base/mutex.h | 3
-rw-r--r-- runtime/gc/heap.cc | 105
-rw-r--r-- runtime/gc/heap.h | 81
-rw-r--r-- runtime/gc/space/large_object_space.cc | 3
-rw-r--r-- runtime/jit/jit_code_cache.cc | 2
-rw-r--r-- runtime/jni/jni_internal.cc | 12
-rw-r--r-- runtime/jni/jni_internal_test.cc | 2
-rw-r--r-- runtime/mirror/array-alloc-inl.h | 10
-rw-r--r-- runtime/mirror/array.cc | 5
-rw-r--r-- runtime/mirror/array.h | 3
-rw-r--r-- runtime/mirror/object.cc | 8
-rw-r--r-- runtime/mirror/object_array-alloc-inl.h | 5
-rw-r--r-- runtime/native/dalvik_system_VMRuntime.cc | 29
-rw-r--r-- runtime/runtime.cc | 3
-rw-r--r-- runtime/runtime.h | 4
-rw-r--r-- test/070-nio-buffer/src/Main.java | 24
-rw-r--r-- test/2280-address-of/expected-stderr.txt | 0
-rw-r--r-- test/2280-address-of/expected-stdout.txt | 1
-rw-r--r-- test/2280-address-of/info.txt | 3
-rw-r--r-- test/2280-address-of/src-art/Main.java | 108
-rw-r--r-- test/knownfailures.json | 3
24 files changed, 359 insertions, 67 deletions
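Before the per-file diffs: the user-visible contract this revert restores is easiest to see from managed code. The following standalone sketch is not part of the commit; it assumes a configuration with a moving collector and uses only the dalvik.system.VMRuntime calls that the new 2280-address-of test exercises below.

import dalvik.system.VMRuntime;

public class NonMovableContractSketch {
    public static void main(String[] args) {
        VMRuntime rt = VMRuntime.getRuntime();

        // Explicitly non-movable allocation: addressOf() must return a stable,
        // nonzero address, even when the array is large enough to be placed in
        // the large object space rather than the non-moving space.
        int[] pinned = (int[]) rt.newNonMovableArray(int.class, 100_000);
        System.out.println("pinned @ " + rt.addressOf(pinned));

        // With this change, a clone of a non-movable array is still allocated
        // non-movable, so addressOf() keeps working on the copy.
        int[] pinnedCopy = pinned.clone();
        System.out.println("clone  @ " + rt.addressOf(pinnedCopy));

        // An ordinary array is movable under a moving collector; addressOf()
        // throws rather than handing out an address the GC could invalidate.
        int[] ordinary = new int[10];
        try {
            rt.addressOf(ordinary);
            System.out.println("ERROR: addressOf succeeded on a movable array");
        } catch (RuntimeException expected) {
            System.out.println("ordinary array is movable, as expected");
        }
    }
}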
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 06ea1c6ffb..8abe6a4563 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -218,7 +218,7 @@ void IntrinsicVisitor::AssertNonMovableStringClass() {
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::Class> string_class = GetClassRoot<mirror::String>();
- CHECK(!art::Runtime::Current()->GetHeap()->IsMovableObject(string_class));
+ CHECK(!art::Runtime::Current()->GetHeap()->ObjectMayMove(string_class));
}
}
diff --git a/libartbase/base/bit_vector.h b/libartbase/base/bit_vector.h
index ec94efb09f..2eed9702bf 100644
--- a/libartbase/base/bit_vector.h
+++ b/libartbase/base/bit_vector.h
@@ -31,9 +31,9 @@ class ArenaBitVector;
/*
* Expanding bitmap. Bits are numbered starting from zero. All operations on a BitVector are
- * unsynchronized. New BitVectors are not necessarily zeroed out. If the used allocator doesn't do
- * clear the vector (e.g. ScopedArenaAllocator), the responsibility of clearing it relies on the
- * caller (e.g. ArenaBitVector).
+ * unsynchronized. New BitVectors are not necessarily zeroed out. If the used allocator doesn't
+ * clear the vector (e.g. ScopedArenaAllocator), the caller is responsible for clearing it (e.g.
+ * ArenaBitVector).
*/
class BitVector {
public:
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index 80bfa0ff43..f8589f1d1a 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -1916,6 +1916,10 @@ jvmtiError HeapExtensions::ChangeArraySize(jvmtiEnv* env, jobject arr, jsize new
art::StackHandleScope<2> hs(self);
art::Handle<art::mirror::Array> old_arr(hs.NewHandle(soa.Decode<art::mirror::Array>(arr)));
art::MutableHandle<art::mirror::Array> new_arr(hs.NewHandle<art::mirror::Array>(nullptr));
+ if (!art::Runtime::Current()->GetHeap()->PossiblyAllocatedMovable(old_arr.Get())) {
+ JVMTI_LOG(INFO, env) << "Cannot resize a nonmovable array";
+ return ERR(ILLEGAL_ARGUMENT);
+ }
if (klass->IsObjectArrayClass()) {
new_arr.Assign(
art::mirror::ObjectArray<art::mirror::Object>::Alloc(self, old_arr->GetClass(), new_size));
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index e3d89e7449..fafa374a61 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -526,9 +526,10 @@ class SCOPED_CAPABILITY MutexLock {
// Pretend to acquire a mutex for checking purposes, without actually doing so. Use with
// extreme caution when it is known the condition that the mutex would guard against cannot arise.
+template <typename MutexType>
class SCOPED_CAPABILITY FakeMutexLock {
public:
- explicit FakeMutexLock(Mutex& mu) ACQUIRE(mu) NO_THREAD_SAFETY_ANALYSIS {}
+ explicit FakeMutexLock(MutexType& mu) ACQUIRE(mu) NO_THREAD_SAFETY_ANALYSIS {}
~FakeMutexLock() RELEASE() NO_THREAD_SAFETY_ANALYSIS {}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9bed9833b7..9342578d42 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -377,11 +377,10 @@ Heap::Heap(size_t initial_size,
* verification is enabled, we limit the size of allocation stacks to speed up their
* searching.
*/
- max_allocation_stack_size_(kGCALotMode
- ? kGcAlotAllocationStackSize
- : (kVerifyObjectSupport > kVerifyObjectModeFast)
- ? kVerifyObjectAllocationStackSize
- : kDefaultAllocationStackSize),
+ max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize :
+ (kVerifyObjectSupport > kVerifyObjectModeFast) ?
+ kVerifyObjectAllocationStackSize :
+ kDefaultAllocationStackSize),
current_allocator_(kAllocatorTypeDlMalloc),
current_non_moving_allocator_(kAllocatorTypeNonMoving),
bump_pointer_space_(nullptr),
@@ -432,7 +431,9 @@ Heap::Heap(size_t initial_size,
boot_image_spaces_(),
boot_images_start_address_(0u),
boot_images_size_(0u),
- pre_oome_gc_count_(0u) {
+ pre_oome_gc_count_(0u),
+ stray_non_movable_objects_(),
+ stray_objects_lock_("lock for stray_non_movable_objects", kGenericBottomLock) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
@@ -2380,7 +2381,7 @@ class ZygoteCompactingCollector final : public collector::SemiSpace {
bin_live_bitmap_ = space->GetLiveBitmap();
bin_mark_bitmap_ = space->GetMarkBitmap();
uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ Heap* heap = Runtime::Current()->GetHeap();
// Note: This requires traversing the space in increasing order of object addresses.
auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
@@ -2388,7 +2389,11 @@ class ZygoteCompactingCollector final : public collector::SemiSpace {
// Add the bin consisting of the end of the previous object to the start of the current object.
AddBin(bin_size, prev);
prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
+ if (!obj->IsClass()) {
+ heap->AddStrayNonMovableObject(obj);
+ }
};
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
bin_live_bitmap_->Walk(visitor);
// Add the last bin which spans after the last object to the end of the space.
AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
@@ -2493,7 +2498,15 @@ void Heap::IncrementFreedEver() {
// This has a large frame, but shouldn't be run anywhere near the stack limit.
// FIXME: BUT it did exceed... http://b/197647048
# pragma clang diagnostic ignored "-Wframe-larger-than="
-void Heap::PreZygoteFork() {
+void Heap::PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS /* TODO: Why? */ {
+ Thread* self = Thread::Current();
+ // Opportunistically log here; empirically logs from the initial PreZygoteFork() are lost.
+ // But for the main zygote, this is typically entered at least twice.
+ {
+ MutexLock mu(self, stray_objects_lock_);
+ LOG(INFO) << "PreZygoteFork(): stray_non_movable_objects_.size() = "
+ << stray_non_movable_objects_.size();
+ }
if (!HasZygoteSpace()) {
// We still want to GC in case there are some unreachable non-moving objects that could cause
// suboptimal bin packing when we compact the zygote space.
@@ -2504,7 +2517,6 @@ void Heap::PreZygoteFork() {
}
// We need to close userfaultfd fd for app/webview zygotes to avoid getattr
// (stat) on the fd during fork.
- Thread* self = Thread::Current();
MutexLock mu(self, zygote_creation_lock_);
// Try to see if we have any Zygote spaces.
if (HasZygoteSpace()) {
@@ -2521,6 +2533,18 @@ void Heap::PreZygoteFork() {
// there.
non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
const bool same_space = non_moving_space_ == main_space_;
+ // We create the ZygoteSpace by performing a semi-space collection to copy the main allocation
+ // space into what was the non-moving space. We do so by ignoring and overwriting the meta-
+ // information from the non-moving (dlmalloc) space. An initial pass identifies unused sections
+ // of the heap that we usually try to copy into first. We copy any remaining objects past the
+ previous end of the old non-moving space. Everything up to the last allocated object in the
+ // old non-moving space then becomes ZygoteSpace. Everything after that becomes the new
+ // non-moving space.
+ // There is a subtlety here in that Object.clone() treats objects allocated as non-movable
+ // differently from other objects, and this ZygoteSpace creation process doesn't automatically
+ // preserve that distinction. Thus we must explicitly track this in stray_non_movable_objects_.
+ // Otherwise we have to treat the entire ZygoteSpace as non-movable, which could cause some
+ // weird programming styles to eventually render most of the heap non-movable.
if (kCompactZygote) {
// Temporarily disable rosalloc verification because the zygote
// compaction will mess up the rosalloc internal metadata.
@@ -2549,6 +2573,12 @@ void Heap::PreZygoteFork() {
zygote_collector.SetToSpace(&target_space);
zygote_collector.SetSwapSemiSpaces(false);
zygote_collector.Run(kGcCauseCollectorTransition, false);
+ {
+ MutexLock mu(self, stray_objects_lock_);
+ uint32_t num_nonmovable = stray_non_movable_objects_.size();
+ // For an AOSP boot, we saw num_nonmovable around a dozen.
+ DCHECK_LT(num_nonmovable, 1000u) << " Too many nonmovable zygote objects?";
+ }
if (reset_main_space) {
main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
@@ -3741,7 +3771,62 @@ void Heap::SetIdealFootprint(size_t target_footprint) {
target_footprint_.store(target_footprint, std::memory_order_relaxed);
}
-bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
+void Heap::AddStrayNonMovableObject(mirror::Object* obj) {
+ MutexLock mu(Thread::Current(), stray_objects_lock_);
+ stray_non_movable_objects_.insert(
+ mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj));
+ if (kIsDebugBuild) {
+ size_t current_size = stray_non_movable_objects_.size();
+ static size_t last_reported_size /* GUARDED_BY(stray_objects_lock_) */ = 0;
+ if (current_size % 100 == 0 && current_size > last_reported_size) {
+ LOG(WARNING) << "stray_non_movable_objects_.size() = " << current_size;
+ last_reported_size = current_size;
+ }
+ }
+}
+
+void Heap::RemoveStrayNonMovableObject(mirror::Object* obj) {
+ MutexLock mu(Thread::Current(), stray_objects_lock_);
+ // FromMirrorPtr normally needs the mutator lock. But we only traffic in unreachable,
+ // nonmovable objects here, so we don't care.
+ FakeMutexLock mu2(*Locks::mutator_lock_);
+ auto iter = stray_non_movable_objects_.find(
+ mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj));
+ if (iter != stray_non_movable_objects_.end()) {
+ stray_non_movable_objects_.erase(iter);
+ }
+}
+
+bool Heap::IsNonMovable(ObjPtr<mirror::Object> obj) const {
+ DCHECK(!obj.Ptr()->IsClass()); // We do not correctly track classes in zygote space.
+ if (GetNonMovingSpace()->Contains(obj.Ptr())) {
+ return true;
+ }
+ if ((zygote_space_ != nullptr && zygote_space_->Contains(obj.Ptr())) ||
+ (GetLargeObjectsSpace() != nullptr && GetLargeObjectsSpace()->Contains(obj.Ptr()))) {
+ MutexLock mu(Thread::Current(), stray_objects_lock_);
+ return stray_non_movable_objects_.contains(
+ mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj.Ptr()));
+ }
+ return false; // E.g. in the main moving space.
+}
+
+bool Heap::PossiblyAllocatedMovable(ObjPtr<mirror::Object> obj) const {
+ // The CC collector may copy movable objects into NonMovingSpace. It does that only when it
+ // runs out of space, so we assume this does not affect ZygoteSpace.
+ if (!gUseReadBarrier && GetNonMovingSpace()->Contains(obj.Ptr())) {
+ return false;
+ }
+ if ((zygote_space_ != nullptr && zygote_space_->Contains(obj.Ptr())) ||
+ (GetLargeObjectsSpace() != nullptr && GetLargeObjectsSpace()->Contains(obj.Ptr()))) {
+ MutexLock mu(Thread::Current(), stray_objects_lock_);
+ return !stray_non_movable_objects_.contains(
+ mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj.Ptr()));
+ }
+ return true;
+}
+
+bool Heap::ObjectMayMove(ObjPtr<mirror::Object> obj) const {
if (kMovingCollector) {
space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
if (space != nullptr) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 58a73af74b..2edb0ee8a8 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -268,19 +268,32 @@ class Heap {
ObjPtr<mirror::Class> klass,
size_t num_bytes,
const PreFenceVisitor& pre_fence_visitor)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!*gc_complete_lock_,
- !*pending_task_lock_,
- !*backtrace_lock_,
- !process_state_update_lock_,
- !Roles::uninterruptible_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_,
+ !*pending_task_lock_,
+ !*backtrace_lock_,
+ !stray_objects_lock_,
+ !process_state_update_lock_,
+ !Roles::uninterruptible_) {
+ bool checked_large_object = kIsDebugBuild && ShouldAllocLargeObject(klass, num_bytes);
+
mirror::Object* obj = AllocObjectWithAllocator<kInstrumented>(self,
klass,
num_bytes,
GetCurrentNonMovingAllocator(),
pre_fence_visitor);
+ space::LargeObjectSpace* los = GetLargeObjectsSpace();
+ if (los != nullptr && los->Contains(obj)) {
+ // We cannot easily avoid this by leaving these in the NonMovable space.
+ // Empirically it is important to unmap these promptly once collected. (b/360363656)
+ DCHECK(checked_large_object);
+ AddStrayNonMovableObject(obj);
+ } else {
+ DCHECK(!checked_large_object || los == nullptr);
+ }
// Java Heap Profiler check and sample allocation.
if (GetHeapSampler().IsEnabled()) {
+ // TODO (b/365184044) This seems to double sample for large objects?
JHPCheckNonTlabSampleAllocation(self, obj, num_bytes);
}
return obj;
@@ -383,8 +396,29 @@ class Heap {
bool sorted = false)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- // Returns true if there is any chance that the object (obj) will move.
- bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+ // Approximates whether the object in question was explicitly requested to be nonmovable.
+ // May rarely err on the side of claiming immovability for objects that were allocated movable,
+ // but will not be moved.
+ // Returns true if and only if one of the following is true:
+ // 1) The object was allocated as nonmovable, whether or not it has been redirected to
+ // ZygoteSpace or LargeObjectsSpace.
+ // 2) All objects are being allocated in a non-movable space.
+ // 3) The CC collector decided to spuriously allocate in non-moving space because it ran
+ // out of memory at an inopportune time.
+ // This is used primarily to determine Object.clone() behavior, where (2)
+ // doesn't matter. (3) is unfortunate, but we can live with it.
+ // SHOULD NOT BE CALLED ON CLASS OBJECTS.
+ bool IsNonMovable(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // The negation of the above, but resolves ambiguous cases in the direction of assuming
+ // movability. Used for partial error checking where an object must be movable.
+ EXPORT bool PossiblyAllocatedMovable(ObjPtr<mirror::Object> obj) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Returns true if there is any chance that the object (obj) will move. Returns false for image
+ // and zygote space, since we don't actually move objects in those spaces. Unlike the preceding
+ // function, the result here depends on whether the object was moved to zygote or image space.
+ bool ObjectMayMove(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Enables us to disable compacting GC until objects are released.
EXPORT void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
@@ -684,7 +718,7 @@ class Heap {
return allocation_stack_.get();
}
- void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
+ void PreZygoteFork();
// Mark and empty stack.
EXPORT void FlushAllocStack() REQUIRES_SHARED(Locks::mutator_lock_)
@@ -1027,6 +1061,18 @@ class Heap {
return size < pud_size ? pmd_size : pud_size;
}
+ // Add a reference to the set of preexisting zygote and LOS nonmovable objects.
+ void AddStrayNonMovableObject(mirror::Object* obj) REQUIRES(!stray_objects_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Remove a reference from the set of preexisting zygote and LOS nonmovable objects. Do nothing
+ // if it was not present. The argument should no longer be accessible to the mutator or
+ // subsequent GCs.
+ void RemoveStrayNonMovableObject(mirror::Object* obj) REQUIRES(!stray_objects_lock_);
+
+ bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -1085,8 +1131,6 @@ class Heap {
collector_type == kCollectorTypeCMCBackground ||
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
- bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
- REQUIRES_SHARED(Locks::mutator_lock_);
// Checks whether we should garbage collect:
ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
@@ -1345,7 +1389,7 @@ class Heap {
std::vector<space::AllocSpace*> alloc_spaces_;
// A space where non-movable objects are allocated, when compaction is enabled it contains
- // Classes, ArtMethods, ArtFields, and non moving objects.
+ // Classes and non-moving objects.
space::MallocSpace* non_moving_space_;
// Space which we use for the kAllocatorTypeROSAlloc.
@@ -1759,6 +1803,19 @@ class Heap {
std::unique_ptr<Verification> verification_;
+ // Non-class immovable objects allocated in zygote space or LOS instead of the nonmovable
+ // object space.
+ // TODO: We may need a smaller data structure. Unfortunately, HashSet's minimum size is too big.
+ struct CRComparator {
+ bool operator()(mirror::CompressedReference<mirror::Object> x,
+ mirror::CompressedReference<mirror::Object> y) const {
+ return x.AsVRegValue() < y.AsVRegValue();
+ }
+ };
+ std::set<mirror::CompressedReference<mirror::Object>, CRComparator> stray_non_movable_objects_
+ GUARDED_BY(stray_objects_lock_);
+ mutable Mutex stray_objects_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
friend class collector::ConcurrentCopying;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a4b9d24184..e01c8ee3ae 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -201,6 +201,7 @@ size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
}
+ Runtime::Current()->GetHeap()->RemoveStrayNonMovableObject(ptr);
const size_t map_size = it->second.mem_map.BaseSize();
DCHECK_GE(num_bytes_allocated_, map_size);
size_t allocation_size = map_size;
@@ -465,6 +466,8 @@ size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
DCHECK_GT(allocation_size, 0U);
DCHECK_ALIGNED_PARAM(allocation_size, ObjectAlignment());
+ Runtime::Current()->GetHeap()->RemoveStrayNonMovableObject(obj);
+
// madvise the pages without lock
madvise(obj, allocation_size, MADV_DONTNEED);
if (kIsDebugBuild) {
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2b83eff44f..1b7cf7d76c 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -397,7 +397,7 @@ static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots
}
// Ensure that we don't put movable objects in the shared region.
if (is_shared_region) {
- CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
+ CHECK(Runtime::Current()->GetHeap()->IsNonMovable(object.Get()));
}
}
}
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index 51350dc713..48428f6c3d 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -2126,7 +2126,7 @@ class JNI {
ScopedObjectAccess soa(env);
ObjPtr<mirror::String> s = soa.Decode<mirror::String>(java_string);
gc::Heap* heap = Runtime::Current()->GetHeap();
- if (heap->IsMovableObject(s) || s->IsCompressed()) {
+ if (heap->ObjectMayMove(s) || s->IsCompressed()) {
jchar* chars = new jchar[s->GetLength()];
if (s->IsCompressed()) {
int32_t length = s->GetLength();
@@ -2174,7 +2174,7 @@ class JNI {
}
return chars;
} else {
- if (heap->IsMovableObject(s)) {
+ if (heap->ObjectMayMove(s)) {
StackHandleScope<1> hs(soa.Self());
HandleWrapperObjPtr<mirror::String> h(hs.NewHandleWrapper(&s));
if (!gUseReadBarrier && !gUseUserfaultfd) {
@@ -2202,7 +2202,7 @@ class JNI {
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
ObjPtr<mirror::String> s = soa.Decode<mirror::String>(java_string);
- if (!s->IsCompressed() && heap->IsMovableObject(s)) {
+ if (!s->IsCompressed() && heap->ObjectMayMove(s)) {
if (!gUseReadBarrier && !gUseUserfaultfd) {
heap->DecrementDisableMovingGC(soa.Self());
} else {
@@ -2369,7 +2369,7 @@ class JNI {
return nullptr;
}
gc::Heap* heap = Runtime::Current()->GetHeap();
- if (heap->IsMovableObject(array)) {
+ if (heap->ObjectMayMove(array)) {
if (!gUseReadBarrier && !gUseUserfaultfd) {
heap->IncrementDisableMovingGC(soa.Self());
} else {
@@ -2966,7 +2966,7 @@ class JNI {
return nullptr;
}
// Only make a copy if necessary.
- if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
+ if (Runtime::Current()->GetHeap()->ObjectMayMove(array)) {
if (is_copy != nullptr) {
*is_copy = JNI_TRUE;
}
@@ -3026,7 +3026,7 @@ class JNI {
if (mode != JNI_COMMIT) {
if (is_copy) {
delete[] reinterpret_cast<uint64_t*>(elements);
- } else if (heap->IsMovableObject(array)) {
+ } else if (heap->ObjectMayMove(array)) {
// Not copying a movable object's elements means we must have disabled the moving GC.
if (!gUseReadBarrier && !gUseUserfaultfd) {
heap->DecrementDisableMovingGC(soa.Self());
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index ed97e4d4c8..a575981d9b 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -1829,7 +1829,7 @@ TEST_F(JniInternalTest, GetStringChars_ReleaseStringChars) {
jboolean is_copy = JNI_FALSE;
chars = env_->GetStringChars(s, &is_copy);
- if (Runtime::Current()->GetHeap()->IsMovableObject(s_m)) {
+ if (Runtime::Current()->GetHeap()->ObjectMayMove(s_m)) {
EXPECT_EQ(JNI_TRUE, is_copy);
} else {
EXPECT_EQ(JNI_FALSE, is_copy);
diff --git a/runtime/mirror/array-alloc-inl.h b/runtime/mirror/array-alloc-inl.h
index b905fd1727..69d2e2017f 100644
--- a/runtime/mirror/array-alloc-inl.h
+++ b/runtime/mirror/array-alloc-inl.h
@@ -143,16 +143,14 @@ inline ObjPtr<Array> Array::Alloc(Thread* self,
ObjPtr<Array> result;
if (!kFillUsable) {
SetLengthVisitor visitor(component_count);
- result = ObjPtr<Array>::DownCast(
- heap->AllocObjectWithAllocator<kIsInstrumented>(
- self, array_class, size, allocator_type, visitor));
+ result = ObjPtr<Array>::DownCast(heap->AllocObjectWithAllocator<kIsInstrumented>(
+ self, array_class, size, allocator_type, visitor));
} else {
SetLengthToUsableSizeVisitor visitor(component_count,
DataOffset(1U << component_size_shift).SizeValue(),
component_size_shift);
- result = ObjPtr<Array>::DownCast(
- heap->AllocObjectWithAllocator<kIsInstrumented>(
- self, array_class, size, allocator_type, visitor));
+ result = ObjPtr<Array>::DownCast(heap->AllocObjectWithAllocator<kIsInstrumented>(
+ self, array_class, size, allocator_type, visitor));
}
if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) {
array_class = result->GetClass(); // In case the array class moved.
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index a4f6c88e4c..65371d0f5e 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -144,9 +144,8 @@ ObjPtr<Array> Array::CopyOf(Handle<Array> h_this, Thread* self, int32_t new_leng
CHECK(klass->IsPrimitiveArray()) << "Will miss write barriers";
DCHECK_GE(new_length, 0);
auto* heap = Runtime::Current()->GetHeap();
- gc::AllocatorType allocator_type = heap->IsMovableObject(h_this.Get())
- ? heap->GetCurrentAllocator()
- : heap->GetCurrentNonMovingAllocator();
+ DCHECK(!heap->IsNonMovable(h_this.Get()));
+ gc::AllocatorType allocator_type = heap->GetCurrentAllocator();
const auto component_size = klass->GetComponentSize();
const auto component_shift = klass->GetComponentSizeShift();
ObjPtr<Array> new_array =
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 7a0976ab48..876fbf061b 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -50,8 +50,7 @@ class MANAGED Array : public Object {
int32_t component_count,
size_t component_size_shift,
gc::AllocatorType allocator_type)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static ObjPtr<Array> CreateMultiArray(Thread* self,
Handle<Class> element_class,
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index b28978603c..448409c7a7 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -162,9 +162,11 @@ ObjPtr<Object> Object::Clone(Handle<Object> h_this, Thread* self) {
gc::Heap* heap = Runtime::Current()->GetHeap();
size_t num_bytes = h_this->SizeOf();
CopyObjectVisitor visitor(&h_this, num_bytes);
- ObjPtr<Object> copy = heap->IsMovableObject(h_this.Get())
- ? heap->AllocObject(self, h_this->GetClass(), num_bytes, visitor)
- : heap->AllocNonMovableObject(self, h_this->GetClass(), num_bytes, visitor);
+ // Unclear whether this should ever allocate a nonmovable object. This is conservative.
+ ObjPtr<Object> copy =
+ heap->IsNonMovable(h_this.Get()) ?
+ heap->AllocNonMovableObject(self, h_this->GetClass(), num_bytes, visitor) :
+ heap->AllocObject(self, h_this->GetClass(), num_bytes, visitor);
if (h_this->GetClass()->IsFinalizable()) {
heap->AddFinalizerReference(self, &copy);
}
diff --git a/runtime/mirror/object_array-alloc-inl.h b/runtime/mirror/object_array-alloc-inl.h
index e79d154f84..d3688762a4 100644
--- a/runtime/mirror/object_array-alloc-inl.h
+++ b/runtime/mirror/object_array-alloc-inl.h
@@ -66,9 +66,8 @@ inline ObjPtr<ObjectArray<T>> ObjectArray<T>::CopyOf(Handle<ObjectArray<T>> h_th
int32_t new_length) {
DCHECK_GE(new_length, 0);
gc::Heap* heap = Runtime::Current()->GetHeap();
- gc::AllocatorType allocator_type = heap->IsMovableObject(h_this.Get())
- ? heap->GetCurrentAllocator()
- : heap->GetCurrentNonMovingAllocator();
+ DCHECK(heap->PossiblyAllocatedMovable(h_this.Get()));
+ gc::AllocatorType allocator_type = heap->GetCurrentAllocator();
ObjPtr<ObjectArray<T>> new_array = Alloc(self, h_this->GetClass(), new_length, allocator_type);
if (LIKELY(new_array != nullptr)) {
new_array->AssignableMemcpy(0, h_this.Get(), 0, std::min(h_this->GetLength(), new_length));
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 0e9660aaac..1a29619430 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -21,13 +21,12 @@
#include <sys/time.h>
extern "C" void android_set_application_target_sdk_version(uint32_t version);
#endif
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
#include <inttypes.h>
-#include <limits>
#include <limits.h>
-#include "nativehelper/scoped_utf_chars.h"
-#include <android-base/stringprintf.h>
-#include <android-base/strings.h>
+#include <limits>
#include "android-base/properties.h"
#include "arch/instruction_set.h"
@@ -46,6 +45,7 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version);
#include "gc/heap.h"
#include "gc/space/dlmalloc_space.h"
#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
#include "gc/task_processor.h"
#include "intern_table.h"
#include "jit/jit.h"
@@ -58,6 +58,7 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version);
#include "native_util.h"
#include "nativehelper/jni_macros.h"
#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "runtime.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
@@ -115,12 +116,17 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle
if (UNLIKELY(array_class == nullptr)) {
return nullptr;
}
- gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentNonMovingAllocator();
- ObjPtr<mirror::Array> result = mirror::Array::Alloc(soa.Self(),
- array_class,
- length,
- array_class->GetComponentSizeShift(),
- allocator);
+ gc::Heap* heap = runtime->GetHeap();
+ gc::AllocatorType allocator = heap->GetCurrentNonMovingAllocator();
+ ObjPtr<mirror::Array> result = mirror::Array::Alloc(
+ soa.Self(), array_class, length, array_class->GetComponentSizeShift(), allocator);
+ gc::space::LargeObjectSpace* los = heap->GetLargeObjectsSpace();
+ if (los != nullptr && los->Contains(result.Ptr())) {
+ // We cannot easily avoid this by leaving these in the NonMovable space.
+ // Empirically it is important to unmap these promptly once collected. (b/360363656)
+ heap->AddStrayNonMovableObject(result.Ptr());
+ }
+ // TODO (b/365184044) This fails to sample small objects in JHP?
return soa.AddLocalReference<jobject>(result);
}
@@ -167,10 +173,11 @@ static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) {
ThrowIllegalArgumentException("not a primitive array");
return 0;
}
- if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
+ if (!Runtime::Current()->GetHeap()->IsNonMovable(array)) {
ThrowRuntimeException("Trying to get address of movable array object");
return 0;
}
+ DCHECK(!Runtime::Current()->GetHeap()->ObjectMayMove(array));
return reinterpret_cast<uintptr_t>(array->GetRawData(array->GetClass()->GetComponentSize(), 0));
}
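A hedged usage sketch for the hunk above: when a non-movable array request exceeds the large-object threshold (an internal heap parameter; the sizes below are illustrative), the allocation is redirected to LargeObjectSpace, and the AddStrayNonMovableObject() registration is what keeps addressOf() and IsNonMovable() correct for it.

import dalvik.system.VMRuntime;

public class LargeNonMovableSketch {
    public static void main(String[] args) {
        VMRuntime rt = VMRuntime.getRuntime();
        // Small request: expected to be served from the dedicated non-moving space.
        byte[] small = (byte[]) rt.newNonMovableArray(byte.class, 100);
        // Large request: likely redirected to the large object space, yet still
        // tracked as non-movable via the stray-object set.
        byte[] large = (byte[]) rt.newNonMovableArray(byte.class, 100_000);
        // Both must therefore report stable, nonzero addresses.
        boolean ok = rt.addressOf(small) != 0 && rt.addressOf(large) != 0;
        System.out.println(ok ? "both arrays are addressable" : "unexpected zero address");
    }
}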
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 53c1cb152f..46a73cd1e4 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1404,7 +1404,8 @@ static size_t OpenBootDexFiles(ArrayRef<const std::string> dex_filenames,
void Runtime::SetSentinel(ObjPtr<mirror::Object> sentinel) {
CHECK(sentinel_.Read() == nullptr);
CHECK(sentinel != nullptr);
- CHECK(!heap_->IsMovableObject(sentinel));
+ // IsNonMovable(sentinel) doesn't hold if it came from an image.
+ CHECK(!heap_->ObjectMayMove(sentinel));
sentinel_ = GcRoot<mirror::Object>(sentinel);
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index e43d1d3432..6cc768cee2 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -1206,8 +1206,8 @@ class Runtime {
// for differentiating between unfilled imt slots vs conflict slots in superclasses.
ArtMethod* imt_unimplemented_method_;
- // Special sentinel object used to invalid conditions in JNI (cleared weak references) and
- // JDWP (invalid references).
+ // Special sentinel object used to indicate invalid conditions in JNI (cleared weak references)
+ // and JDWP (invalid references).
GcRoot<mirror::Object> sentinel_;
InstructionSet instruction_set_;
diff --git a/test/070-nio-buffer/src/Main.java b/test/070-nio-buffer/src/Main.java
index a3eeb3fda6..3f4a59db9f 100644
--- a/test/070-nio-buffer/src/Main.java
+++ b/test/070-nio-buffer/src/Main.java
@@ -32,6 +32,7 @@ public class Main {
intFloatTest();
basicShortTest();
primTest();
+ largeBuffersTest();
}
/*
@@ -118,6 +119,29 @@ public class Main {
storeValues(directBuf);
}
+ /*
+ * Check that we can allocate half of maxMemory() in large direct ByteBuffers.
+ * Some apps need this, even though we enforce a lower limit for smaller buffers.
+ */
+ public static void largeBuffersTest() {
+ final int bufSize = 4_000_000;
+ final int nBuffers = (int) (Runtime.getRuntime().maxMemory() / bufSize) / 2;
+ ByteBuffer[] directBufs = new ByteBuffer[nBuffers];
+ for (int i = 0; i < directBufs.length; ++i) {
+ directBufs[i] = ByteBuffer.allocateDirect(bufSize);
+ directBufs[i].asIntBuffer().put(i, i);
+ }
+ for (int i = 0; i < directBufs.length; ++i) {
+ if (directBufs[i].asIntBuffer().get(i) != i) {
+ System.out.println("ERROR: Contents check failed for " + i);
+ }
+ if (i != 50 && directBufs[i].asIntBuffer().get(50) != 0) {
+ System.out.println("ERROR: Zero check failed for buffer " + i);
+ }
+ directBufs[i] = null;
+ }
+ }
+
static void storeValues(ByteBuffer directBuf) {
directBuf.position(0);
ShortBuffer shortBuf = directBuf.asShortBuffer();
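Why a NIO test rides along with a GC change: on Android, ByteBuffer.allocateDirect() is assumed here to be backed by VMRuntime.newNonMovableArray(byte.class, ...) (a libcore implementation detail), so each 4 MB buffer above becomes a non-movable large-object-space allocation, and b/360363656 concerns unmapping such allocations promptly once collected. A minimal churn sketch under that assumption:

import java.nio.ByteBuffer;

public class DirectBufferChurnSketch {
    public static void main(String[] args) {
        // Allocate and drop many large direct buffers. If freed large-object
        // allocations were not unmapped promptly (b/360363656), the dropped
        // buffers' backing memory would accumulate and this could OOM.
        for (int i = 0; i < 200; ++i) {
            ByteBuffer buf = ByteBuffer.allocateDirect(4_000_000);
            buf.putInt(0, i);  // Touch the buffer so pages are really committed.
            if (i % 50 == 0) {
                System.gc();  // Encourage reclamation of the dropped buffers.
            }
        }
        System.out.println("direct buffer churn completed");
    }
}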
diff --git a/test/2280-address-of/expected-stderr.txt b/test/2280-address-of/expected-stderr.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/2280-address-of/expected-stderr.txt
diff --git a/test/2280-address-of/expected-stdout.txt b/test/2280-address-of/expected-stdout.txt
new file mode 100644
index 0000000000..a965a70ed4
--- /dev/null
+++ b/test/2280-address-of/expected-stdout.txt
@@ -0,0 +1 @@
+Done
diff --git a/test/2280-address-of/info.txt b/test/2280-address-of/info.txt
new file mode 100644
index 0000000000..5279e0098a
--- /dev/null
+++ b/test/2280-address-of/info.txt
@@ -0,0 +1,3 @@
+Check for correct interaction of nonmovable allocation, clone and addressOf().
+
+Motivated by b/363849521.
diff --git a/test/2280-address-of/src-art/Main.java b/test/2280-address-of/src-art/Main.java
new file mode 100644
index 0000000000..a9a918310a
--- /dev/null
+++ b/test/2280-address-of/src-art/Main.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import dalvik.system.VMRuntime;
+
+public class Main {
+ private static final boolean SHOULD_PRINT = false; // True causes failure.
+
+ static VMRuntime runtime = VMRuntime.getRuntime();
+
+ static boolean objects_can_move = false; // Usually set to true in main.
+
+ public static void main(String[] args) throws Exception {
+ Object[] x = new Object[12];
+ int[] int_array = new int[10];
+ int[] large_int_array = new int[100_000];
+
+ try {
+ checkNonMovable(int_array);
+ } catch (RuntimeException e) {
+ if (!e.getMessage().contains(" movable ")) {
+ throw e;
+ }
+ objects_can_move = true;
+ }
+ // objects_can_move will be true for all collectors we care about.
+ for (int i = 0; i < 10; ++i) {
+ if (i != 0) {
+ for (Object y : x) {
+ checkNonMovable(y);
+ }
+ }
+ checkMovable(int_array);
+ checkMovable(large_int_array);
+ // For completeness, we test various primitive types.
+ // `addressOf` doesn't work for reference types, so we skip those here.
+ // We check both large-object-space and regular objects.
+ x[0] = runtime.newNonMovableArray(int.class, 100);
+ x[1] = runtime.newNonMovableArray(int.class, 100_000);
+ x[2] = runtime.newNonMovableArray(byte.class, 100);
+ x[3] = runtime.newNonMovableArray(byte.class, 100_000);
+ x[4] = runtime.newNonMovableArray(long.class, 100);
+ x[5] = runtime.newNonMovableArray(long.class, 100_000);
+ x[6] = runtime.newNonMovableArray(char.class, 100);
+ x[7] = runtime.newNonMovableArray(char.class, 100_000);
+ x[8] = runtime.newNonMovableArray(float.class, 100);
+ x[9] = runtime.newNonMovableArray(float.class, 100_000);
+ x[10] = runtime.newNonMovableArray(double.class, 100);
+ x[11] = runtime.newNonMovableArray(double.class, 100_000);
+ for (Object y : x) {
+ checkNonMovable(y);
+ }
+ if (i % 2 == 1) {
+ // Doesn't compile without casts because Object.clone() is protected.
+ x[0] = ((int[])x[0]).clone();
+ x[1] = ((int[])x[1]).clone();
+ x[2] = ((byte[])x[2]).clone();
+ x[3] = ((byte[])x[3]).clone();
+ x[4] = ((long[])x[4]).clone();
+ x[5] = ((long[])x[5]).clone();
+ int_array = int_array.clone();
+ } else {
+ x[6] = ((char[])x[6]).clone();
+ x[7] = ((char[])x[7]).clone();
+ x[8] = ((float[])x[8]).clone();
+ x[9] = ((float[])x[9]).clone();
+ x[10] = ((double[])x[10]).clone();
+ x[11] = ((double[])x[11]).clone();
+ large_int_array = large_int_array.clone();
+ }
+ System.gc();
+ System.runFinalization();
+ }
+ System.out.println("Done");
+ }
+
+ public static void checkNonMovable(Object x) {
+ // If things go wrong, this will throw instead. A zero return would be very strange indeed.
+ if (runtime.addressOf(x) == 0) {
+ System.out.println("Unexpectedly got 0 address");
+ }
+ }
+
+ public static void checkMovable(Object x) {
+ if (!objects_can_move) {
+ return;
+ }
+ try {
+ if (runtime.addressOf(x) != 0) {
+ System.out.println("addressOf succeeded on movable object");
+ } else {
+ System.out.println("Unexpectedly got 0 address in checkMovable");
+ }
+ } catch (RuntimeException e) { /* expected */ }
+ }
+}
diff --git a/test/knownfailures.json b/test/knownfailures.json
index e58a6cd4a2..eb12db4a9f 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -1236,7 +1236,8 @@
"2263-method-trace-jit",
"2270-mh-internal-hiddenapi-use",
"2271-profile-inline-cache",
- "2279-aconfig-flags"],
+ "2279-aconfig-flags",
+ "2280-address-of"],
"variant": "jvm",
"description": ["Doesn't run on RI."]
},