author     Hans Boehm <hboehm@google.com>   2024-07-26 13:09:10 -0700
committer  Hans Boehm <hboehm@google.com>   2024-08-14 20:07:27 +0000
commit     a5001fed23788c966fd87048d7f17ba8c0b51914
tree       fc0b0e72db8cc316703c80480c8a8163abb80953
parent     3b6024d5db60c891c5ef6dc18cc17f8ece56c796
Object.clone() allocates more movable objects
Make Object.clone() allocate an unmovable object only if the original was specifically allocated as nonmovable. Or at least get much closer to that. In the process, we stop allocating nonmovable objects in LargeObjectsSpace, so that we can better identify them.

Objects in image space cannot have been allocated as nonmovable: newNonMovableArray() involves a JNI call, and this will cause a transaction failure. This is good, since the act of including the object in an ImageSpace would move it.

The ZygoteSpace is allocated by copying objects into nonmoving space. To avoid having clone() treat the whole ZygoteSpace as nonmovable, we explicitly remember the non-class objects that were already there. Currently we use a std::set data structure for this. This seems a bit suboptimal; a sorted array may be an improvement. But empirically the set only contains a dozen or two elements for AOSP.

We do implicitly allocate classes using the nonmoving allocator. But those cannot be cloned. Thus we do not bother tracking them.

For Array::CopyOf, we DCHECK that the argument was movable, and fix one of the callers to fail in a more appropriate way if we would otherwise violate that.

Prevent jvmti from resizing a nonmovable array. I don't think anything good could have come of that anyway.

This should prevent us from creating unrequested nonmovable objects, except as a result of the CC collector using that as a backup when it otherwise runs out of space during copying.

Rename IsMovableObject() to somewhat clarify that it queries an implementation property, where IsNonMovable() is a query about intended object semantics, NOT implementation artifacts.

Various drive-by documentation fixes for issues I encountered while trying to understand the code.

Bug: 355291033
Bug: 354087169
Test: Build and boot AOSP
Change-Id: Ia24dd1c2623d3d588c397332f87be45cc0f4bf27
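The tracking idea can be illustrated with a standalone sketch; the ZygoteTracker class and the raw addresses below are purely illustrative stand-ins, not ART types. Nonmovable objects that already exist when ZygoteSpace is created are remembered in an ordered set keyed by address, so a later query can distinguish them from movable objects that were merely compacted into ZygoteSpace.

// Standalone model of the zygote tracking described above; addresses stand in
// for mirror::CompressedReference, and nothing here is actual ART code.
#include <cstdint>
#include <iostream>
#include <set>

class ZygoteTracker {
 public:
  // Recorded while building ZygoteSpace, for each pre-existing non-class
  // object that had been allocated in the nonmoving space.
  void AddNonMovableZygoteObject(uintptr_t addr) { non_movable_.insert(addr); }

  // Later query: was this ZygoteSpace object originally requested nonmovable?
  bool IsNonMovableZygoteObject(uintptr_t addr) const {
    return non_movable_.count(addr) != 0;
  }

 private:
  // A sorted array might do instead; empirically this holds only a dozen or
  // two entries for an AOSP boot.
  std::set<uintptr_t> non_movable_;
};

int main() {
  ZygoteTracker tracker;
  tracker.AddNonMovableZygoteObject(0x70000040);  // nonmovable allocation made before the fork
  std::cout << tracker.IsNonMovableZygoteObject(0x70000040) << '\n';  // 1
  std::cout << tracker.IsNonMovableZygoteObject(0x70000080) << '\n';  // 0: merely compacted in
}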
-rw-r--r--  compiler/optimizing/intrinsics.cc          |  2
-rw-r--r--  libartbase/base/bit_vector.h               |  6
-rw-r--r--  openjdkjvmti/ti_heap.cc                    |  4
-rw-r--r--  runtime/gc/heap.cc                         | 64
-rw-r--r--  runtime/gc/heap.h                          | 42
-rw-r--r--  runtime/jit/jit_code_cache.cc              |  2
-rw-r--r--  runtime/jni/jni_internal.cc                | 12
-rw-r--r--  runtime/jni/jni_internal_test.cc           |  2
-rw-r--r--  runtime/mirror/array-alloc-inl.h           | 10
-rw-r--r--  runtime/mirror/array.cc                    |  5
-rw-r--r--  runtime/mirror/array.h                     |  5
-rw-r--r--  runtime/mirror/object.cc                   |  8
-rw-r--r--  runtime/mirror/object_array-alloc-inl.h    |  5
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc  | 13
-rw-r--r--  runtime/runtime.cc                         |  3
-rw-r--r--  runtime/runtime.h                          |  4
16 files changed, 138 insertions, 49 deletions
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index b87f6f3975..8ae2494357 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -218,7 +218,7 @@ void IntrinsicVisitor::AssertNonMovableStringClass() {
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::Class> string_class = GetClassRoot<mirror::String>();
- CHECK(!art::Runtime::Current()->GetHeap()->IsMovableObject(string_class));
+ CHECK(!art::Runtime::Current()->GetHeap()->ObjectMayMove(string_class));
}
}
diff --git a/libartbase/base/bit_vector.h b/libartbase/base/bit_vector.h
index ec94efb09f..2eed9702bf 100644
--- a/libartbase/base/bit_vector.h
+++ b/libartbase/base/bit_vector.h
@@ -31,9 +31,9 @@ class ArenaBitVector;
/*
* Expanding bitmap. Bits are numbered starting from zero. All operations on a BitVector are
- * unsynchronized. New BitVectors are not necessarily zeroed out. If the used allocator doesn't do
- * clear the vector (e.g. ScopedArenaAllocator), the responsibility of clearing it relies on the
- * caller (e.g. ArenaBitVector).
+ * unsynchronized. New BitVectors are not necessarily zeroed out. If the used allocator doesn't
+ * clear the vector (e.g. ScopedArenaAllocator), the caller is responsible for clearing it (e.g.
+ * ArenaBitVector).
*/
class BitVector {
public:
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index 80bfa0ff43..f8589f1d1a 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -1916,6 +1916,10 @@ jvmtiError HeapExtensions::ChangeArraySize(jvmtiEnv* env, jobject arr, jsize new
art::StackHandleScope<2> hs(self);
art::Handle<art::mirror::Array> old_arr(hs.NewHandle(soa.Decode<art::mirror::Array>(arr)));
art::MutableHandle<art::mirror::Array> new_arr(hs.NewHandle<art::mirror::Array>(nullptr));
+ if (!art::Runtime::Current()->GetHeap()->PossiblyAllocatedMovable(old_arr.Get())) {
+ JVMTI_LOG(INFO, env) << "Cannot resize a nonmovable array";
+ return ERR(ILLEGAL_ARGUMENT);
+ }
if (klass->IsObjectArrayClass()) {
new_arr.Assign(
art::mirror::ObjectArray<art::mirror::Object>::Alloc(self, old_arr->GetClass(), new_size));
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ad3e690938..0f5dd99dc0 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -377,11 +377,10 @@ Heap::Heap(size_t initial_size,
* verification is enabled, we limit the size of allocation stacks to speed up their
* searching.
*/
- max_allocation_stack_size_(kGCALotMode
- ? kGcAlotAllocationStackSize
- : (kVerifyObjectSupport > kVerifyObjectModeFast)
- ? kVerifyObjectAllocationStackSize
- : kDefaultAllocationStackSize),
+ max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize :
+ (kVerifyObjectSupport > kVerifyObjectModeFast) ?
+ kVerifyObjectAllocationStackSize :
+ kDefaultAllocationStackSize),
current_allocator_(kAllocatorTypeDlMalloc),
current_non_moving_allocator_(kAllocatorTypeNonMoving),
bump_pointer_space_(nullptr),
@@ -432,7 +431,8 @@ Heap::Heap(size_t initial_size,
boot_image_spaces_(),
boot_images_start_address_(0u),
boot_images_size_(0u),
- pre_oome_gc_count_(0u) {
+ pre_oome_gc_count_(0u),
+ non_movable_zygote_objects_() {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
@@ -2378,7 +2378,7 @@ class ZygoteCompactingCollector final : public collector::SemiSpace {
bin_live_bitmap_ = space->GetLiveBitmap();
bin_mark_bitmap_ = space->GetMarkBitmap();
uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ Heap* heap = Runtime::Current()->GetHeap();
// Note: This requires traversing the space in increasing order of object addresses.
auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
@@ -2386,7 +2386,11 @@ class ZygoteCompactingCollector final : public collector::SemiSpace {
// Add the bin consisting of the end of the previous object to the start of the current object.
AddBin(bin_size, prev);
prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
+ if (!obj->IsClass()) {
+ heap->AddNonMovableZygoteObject(obj);
+ }
};
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
bin_live_bitmap_->Walk(visitor);
// Add the last bin which spans after the last object to the end of the space.
AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
@@ -2492,6 +2496,10 @@ void Heap::IncrementFreedEver() {
// FIXME: BUT it did exceed... http://b/197647048
# pragma clang diagnostic ignored "-Wframe-larger-than="
void Heap::PreZygoteFork() {
+ // Opportunistically log here; empirically logs from the initial PreZygoteFork() are lost.
+ // But for the main zygote, this is typically entered at least twice.
+ LOG(INFO) << "PreZygoteFork(): non_movable_zygote_objects_.size() = "
+ << non_movable_zygote_objects_.size();
if (!HasZygoteSpace()) {
// We still want to GC in case there are some unreachable non moving objects that could cause a
// suboptimal bin packing when we compact the zygote space.
@@ -2519,6 +2527,18 @@ void Heap::PreZygoteFork() {
// there.
non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
const bool same_space = non_moving_space_ == main_space_;
+ // We create the ZygoteSpace by performing a semi-space collection to copy the main allocation
+ // space into what was the non-moving space. We do so by ignoring and overwriting the meta-
+ // information from the non-moving (dlmalloc) space. An initial pass identifies unused sections
+ // of the heap that we usually try to copy into first. We copy any remaining objects past the
+ // previous end of the old non-moving space. Everything up to the last allocated object in the
+ // old non-moving space then becomes ZygoteSpace. Everything after that becomes the new
+ // non-moving space.
+ // There is a subtlety here in that Object.clone() treats objects allocated as non-movable
+ // differently from other objects, and this ZygoteSpace creation process doesn't automatically
+ // preserve that distinction. Thus we must explicitly track this in non_movable_zygote_objects_.
+ // Otherwise we have to treat the entire ZygoteSpace as non-movable, which could cause some
+ // weird programming styles to eventually render most of the heap non-movable.
if (kCompactZygote) {
// Temporarily disable rosalloc verification because the zygote
// compaction will mess up the rosalloc internal metadata.
@@ -2547,6 +2567,9 @@ void Heap::PreZygoteFork() {
zygote_collector.SetToSpace(&target_space);
zygote_collector.SetSwapSemiSpaces(false);
zygote_collector.Run(kGcCauseCollectorTransition, false);
+ uint32_t num_nonmovable = non_movable_zygote_objects_.size();
+ // For an AOSP boot, we saw num_nonmovable around a dozen.
+ DCHECK_LT(num_nonmovable, 1000u) << " Too many nonmovable zygote objects?";
if (reset_main_space) {
main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
@@ -3736,7 +3759,32 @@ void Heap::SetIdealFootprint(size_t target_footprint) {
target_footprint_.store(target_footprint, std::memory_order_relaxed);
}
-bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
+bool Heap::IsNonMovable(ObjPtr<mirror::Object> obj) const {
+ DCHECK(!obj.Ptr()->IsClass()); // We do not correctly track classes in zygote space.
+ if (GetNonMovingSpace()->Contains(obj.Ptr())) {
+ return true;
+ }
+ if (zygote_space_ != nullptr && zygote_space_->Contains(obj.Ptr())) {
+ return non_movable_zygote_objects_.contains(
+ mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj.Ptr()));
+ }
+ return false; // E.g. in LargeObjectsSpace.
+}
+
+bool Heap::PossiblyAllocatedMovable(ObjPtr<mirror::Object> obj) const {
+ // The CC collector may copy movable objects into NonMovingSpace. It does that only when it
+ // runs out of space, so we assume this does not affect ZygoteSpace.
+ if (!gUseReadBarrier && GetNonMovingSpace()->Contains(obj.Ptr())) {
+ return false;
+ }
+ if (zygote_space_ != nullptr && zygote_space_->Contains(obj.Ptr())) {
+ return !non_movable_zygote_objects_.contains(
+ mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj.Ptr()));
+ }
+ return true;
+}
+
+bool Heap::ObjectMayMove(ObjPtr<mirror::Object> obj) const {
if (kMovingCollector) {
space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
if (space != nullptr) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index bbddf57090..0593e45613 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -383,8 +383,28 @@ class Heap {
bool sorted = false)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- // Returns true if there is any chance that the object (obj) will move.
- bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+ // Approximates whether the object in question was explicitly requested to be nonmovable.
+ // May rarely err on the side of claiming immovability for objects that were allocated movable,
+ // but will not be moved.
+ // Returns true if and only if one of the following is true:
+ // 1) The object was allocated as nonmovable, whether or not it has moved to ZygoteSpace.
+ // 2) All objects are being allocated in a non-movable space.
+ // 3) The CC collector decided to spuriously allocate in non-moving space because it ran
+ // out of memory at an inopportune time.
+ // This is used primarily to determine Object.clone() behavior, where (2)
+ // doesn't matter. (3) is unfortunate, but we can live with it.
+ // SHOULD NOT BE CALLED ON CLASS OBJECTS.
+ bool IsNonMovable(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // The negation of the above, but resolves ambiguous cases in the direction of assuming
+ // movability. Used for partial error checking where an object must be movable.
+ EXPORT bool PossiblyAllocatedMovable(ObjPtr<mirror::Object> obj) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Returns true if there is any chance that the object (obj) will move. Returns false for image
+ // and zygote space, since we don't actually move objects in those spaces. Unlike the preceding
+ // function, the result here depends on whether the object was moved to zygote or image space.
+ bool ObjectMayMove(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Enables us to prevent compacting GC until objects are released.
EXPORT void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
@@ -1027,6 +1047,12 @@ class Heap {
return size < pud_size ? pmd_size : pud_size;
}
+ // Add a reference to the set of preexisting zygote nonmovable objects.
+ void AddNonMovableZygoteObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ non_movable_zygote_objects_.insert(
+ mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj));
+ }
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -1345,7 +1371,7 @@ class Heap {
std::vector<space::AllocSpace*> alloc_spaces_;
// A space where non-movable objects are allocated, when compaction is enabled it contains
- // Classes, ArtMethods, ArtFields, and non moving objects.
+ // Classes and non moving objects.
space::MallocSpace* non_moving_space_;
// Space which we use for the kAllocatorTypeROSAlloc.
@@ -1759,6 +1785,16 @@ class Heap {
std::unique_ptr<Verification> verification_;
+ // Non-class immovable objects allocated before we created zygote space.
+ // TODO: We may need a smaller data structure. Unfortunately, HashSet's minimum size is too big.
+ struct CRComparator {
+ bool operator()(mirror::CompressedReference<mirror::Object> x,
+ mirror::CompressedReference<mirror::Object> y) const {
+ return x.AsVRegValue() < y.AsVRegValue();
+ }
+ };
+ std::set<mirror::CompressedReference<mirror::Object>, CRComparator> non_movable_zygote_objects_;
+
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
friend class collector::ConcurrentCopying;
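The intended relationship between the three queries documented in heap.h above can be restated as a toy model. Space, Obj, and the predicate bodies below are illustrative stand-ins derived from the comments, not the real heap.cc implementations.

// Toy restatement of the three predicates documented above. Space membership
// and the zygote tracking set are faked; none of this is actual ART code.
#include <iostream>

enum class Space { kMovable, kNonMoving, kZygote, kImage };

struct Obj {
  Space space;
  bool tracked_nonmovable_in_zygote;  // stands in for non_movable_zygote_objects_
};

// Semantic query: was the object requested to be nonmovable?
bool IsNonMovable(const Obj& o) {
  if (o.space == Space::kNonMoving) return true;
  if (o.space == Space::kZygote) return o.tracked_nonmovable_in_zygote;
  return false;
}

// Negation that resolves ambiguous cases (e.g. CC fallback copies into
// nonmoving space) toward movability; suitable only for DCHECK-style checks.
bool PossiblyAllocatedMovable(const Obj& o, bool cc_collector) {
  if (o.space == Space::kNonMoving) return cc_collector;  // CC may have copied it there
  if (o.space == Space::kZygote) return !o.tracked_nonmovable_in_zygote;
  return true;
}

// Implementation query: could the GC actually relocate the object now?
bool ObjectMayMove(const Obj& o) { return o.space == Space::kMovable; }

int main() {
  // Allocated movable, then compacted into ZygoteSpace: still counts as
  // requested-movable, but the GC will never actually move it again.
  Obj in_zygote{Space::kZygote, /*tracked_nonmovable_in_zygote=*/false};
  std::cout << IsNonMovable(in_zygote) << ' '
            << PossiblyAllocatedMovable(in_zygote, /*cc_collector=*/true) << ' '
            << ObjectMayMove(in_zygote) << '\n';  // 0 1 0
}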
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2b83eff44f..1b7cf7d76c 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -397,7 +397,7 @@ static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots
}
// Ensure that we don't put movable objects in the shared region.
if (is_shared_region) {
- CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
+ CHECK(Runtime::Current()->GetHeap()->IsNonMovable(object.Get()));
}
}
}
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index a30b31f6c9..b26013a910 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -2126,7 +2126,7 @@ class JNI {
ScopedObjectAccess soa(env);
ObjPtr<mirror::String> s = soa.Decode<mirror::String>(java_string);
gc::Heap* heap = Runtime::Current()->GetHeap();
- if (heap->IsMovableObject(s) || s->IsCompressed()) {
+ if (heap->ObjectMayMove(s) || s->IsCompressed()) {
jchar* chars = new jchar[s->GetLength()];
if (s->IsCompressed()) {
int32_t length = s->GetLength();
@@ -2174,7 +2174,7 @@ class JNI {
}
return chars;
} else {
- if (heap->IsMovableObject(s)) {
+ if (heap->ObjectMayMove(s)) {
StackHandleScope<1> hs(soa.Self());
HandleWrapperObjPtr<mirror::String> h(hs.NewHandleWrapper(&s));
if (!gUseReadBarrier && !gUseUserfaultfd) {
@@ -2202,7 +2202,7 @@ class JNI {
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
ObjPtr<mirror::String> s = soa.Decode<mirror::String>(java_string);
- if (!s->IsCompressed() && heap->IsMovableObject(s)) {
+ if (!s->IsCompressed() && heap->ObjectMayMove(s)) {
if (!gUseReadBarrier && !gUseUserfaultfd) {
heap->DecrementDisableMovingGC(soa.Self());
} else {
@@ -2369,7 +2369,7 @@ class JNI {
return nullptr;
}
gc::Heap* heap = Runtime::Current()->GetHeap();
- if (heap->IsMovableObject(array)) {
+ if (heap->ObjectMayMove(array)) {
if (!gUseReadBarrier && !gUseUserfaultfd) {
heap->IncrementDisableMovingGC(soa.Self());
} else {
@@ -2965,7 +2965,7 @@ class JNI {
return nullptr;
}
// Only make a copy if necessary.
- if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
+ if (Runtime::Current()->GetHeap()->ObjectMayMove(array)) {
if (is_copy != nullptr) {
*is_copy = JNI_TRUE;
}
@@ -3025,7 +3025,7 @@ class JNI {
if (mode != JNI_COMMIT) {
if (is_copy) {
delete[] reinterpret_cast<uint64_t*>(elements);
- } else if (heap->IsMovableObject(array)) {
+ } else if (heap->ObjectMayMove(array)) {
// A non-copy of a movable object must mean that we had disabled the moving GC.
if (!gUseReadBarrier && !gUseUserfaultfd) {
heap->DecrementDisableMovingGC(soa.Self());
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index ed97e4d4c8..a575981d9b 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -1829,7 +1829,7 @@ TEST_F(JniInternalTest, GetStringChars_ReleaseStringChars) {
jboolean is_copy = JNI_FALSE;
chars = env_->GetStringChars(s, &is_copy);
- if (Runtime::Current()->GetHeap()->IsMovableObject(s_m)) {
+ if (Runtime::Current()->GetHeap()->ObjectMayMove(s_m)) {
EXPECT_EQ(JNI_TRUE, is_copy);
} else {
EXPECT_EQ(JNI_FALSE, is_copy);
diff --git a/runtime/mirror/array-alloc-inl.h b/runtime/mirror/array-alloc-inl.h
index b905fd1727..8e182a3158 100644
--- a/runtime/mirror/array-alloc-inl.h
+++ b/runtime/mirror/array-alloc-inl.h
@@ -115,7 +115,7 @@ class SetLengthToUsableSizeVisitor {
DISALLOW_COPY_AND_ASSIGN(SetLengthToUsableSizeVisitor);
};
-template <bool kIsInstrumented, bool kFillUsable>
+template <bool kIsInstrumented, bool kFillUsable, bool kCheckLargeObject>
inline ObjPtr<Array> Array::Alloc(Thread* self,
ObjPtr<Class> array_class,
int32_t component_count,
@@ -143,15 +143,15 @@ inline ObjPtr<Array> Array::Alloc(Thread* self,
ObjPtr<Array> result;
if (!kFillUsable) {
SetLengthVisitor visitor(component_count);
- result = ObjPtr<Array>::DownCast(
- heap->AllocObjectWithAllocator<kIsInstrumented>(
+ result =
+ ObjPtr<Array>::DownCast(heap->AllocObjectWithAllocator<kIsInstrumented, kCheckLargeObject>(
self, array_class, size, allocator_type, visitor));
} else {
SetLengthToUsableSizeVisitor visitor(component_count,
DataOffset(1U << component_size_shift).SizeValue(),
component_size_shift);
- result = ObjPtr<Array>::DownCast(
- heap->AllocObjectWithAllocator<kIsInstrumented>(
+ result =
+ ObjPtr<Array>::DownCast(heap->AllocObjectWithAllocator<kIsInstrumented, kCheckLargeObject>(
self, array_class, size, allocator_type, visitor));
}
if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) {
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index a4f6c88e4c..65371d0f5e 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -144,9 +144,8 @@ ObjPtr<Array> Array::CopyOf(Handle<Array> h_this, Thread* self, int32_t new_leng
CHECK(klass->IsPrimitiveArray()) << "Will miss write barriers";
DCHECK_GE(new_length, 0);
auto* heap = Runtime::Current()->GetHeap();
- gc::AllocatorType allocator_type = heap->IsMovableObject(h_this.Get())
- ? heap->GetCurrentAllocator()
- : heap->GetCurrentNonMovingAllocator();
+ DCHECK(!heap->IsNonMovable(h_this.Get()));
+ gc::AllocatorType allocator_type = heap->GetCurrentAllocator();
const auto component_size = klass->GetComponentSize();
const auto component_shift = klass->GetComponentSizeShift();
ObjPtr<Array> new_array =
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 7a0976ab48..1fb7f2e955 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -44,14 +44,13 @@ class MANAGED Array : public Object {
// Allocates an array with the given properties, if kFillUsable is true the array will be of at
// least component_count size, however, if there's usable space at the end of the allocation the
// array will fill it.
- template <bool kIsInstrumented = true, bool kFillUsable = false>
+ template <bool kIsInstrumented = true, bool kFillUsable = false, bool kCheckLargeObject = true>
ALWAYS_INLINE static ObjPtr<Array> Alloc(Thread* self,
ObjPtr<Class> array_class,
int32_t component_count,
size_t component_size_shift,
gc::AllocatorType allocator_type)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static ObjPtr<Array> CreateMultiArray(Thread* self,
Handle<Class> element_class,
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index b28978603c..448409c7a7 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -162,9 +162,11 @@ ObjPtr<Object> Object::Clone(Handle<Object> h_this, Thread* self) {
gc::Heap* heap = Runtime::Current()->GetHeap();
size_t num_bytes = h_this->SizeOf();
CopyObjectVisitor visitor(&h_this, num_bytes);
- ObjPtr<Object> copy = heap->IsMovableObject(h_this.Get())
- ? heap->AllocObject(self, h_this->GetClass(), num_bytes, visitor)
- : heap->AllocNonMovableObject(self, h_this->GetClass(), num_bytes, visitor);
+ // Unclear whether this should ever allocate a nonmovable object. This is conservative.
+ ObjPtr<Object> copy =
+ heap->IsNonMovable(h_this.Get()) ?
+ heap->AllocNonMovableObject(self, h_this->GetClass(), num_bytes, visitor) :
+ heap->AllocObject(self, h_this->GetClass(), num_bytes, visitor);
if (h_this->GetClass()->IsFinalizable()) {
heap->AddFinalizerReference(self, &copy);
}
diff --git a/runtime/mirror/object_array-alloc-inl.h b/runtime/mirror/object_array-alloc-inl.h
index e79d154f84..d3688762a4 100644
--- a/runtime/mirror/object_array-alloc-inl.h
+++ b/runtime/mirror/object_array-alloc-inl.h
@@ -66,9 +66,8 @@ inline ObjPtr<ObjectArray<T>> ObjectArray<T>::CopyOf(Handle<ObjectArray<T>> h_th
int32_t new_length) {
DCHECK_GE(new_length, 0);
gc::Heap* heap = Runtime::Current()->GetHeap();
- gc::AllocatorType allocator_type = heap->IsMovableObject(h_this.Get())
- ? heap->GetCurrentAllocator()
- : heap->GetCurrentNonMovingAllocator();
+ DCHECK(heap->PossiblyAllocatedMovable(h_this.Get()));
+ gc::AllocatorType allocator_type = heap->GetCurrentAllocator();
ObjPtr<ObjectArray<T>> new_array = Alloc(self, h_this->GetClass(), new_length, allocator_type);
if (LIKELY(new_array != nullptr)) {
new_array->AssignableMemcpy(0, h_this.Get(), 0, std::min(h_this->GetLength(), new_length));
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 0e9660aaac..66c0fe6d39 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -116,11 +116,11 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle
return nullptr;
}
gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentNonMovingAllocator();
- ObjPtr<mirror::Array> result = mirror::Array::Alloc(soa.Self(),
- array_class,
- length,
- array_class->GetComponentSizeShift(),
- allocator);
+ // To keep these allocations distinguishable, do not fall back to LargeObjectsSpace:
+ ObjPtr<mirror::Array> result = mirror::Array::Alloc</* kIsInstrumented= */ true,
+ /* kFillUsable= */ false,
+ /* kCheckLargeObject= */ false>(
+ soa.Self(), array_class, length, array_class->GetComponentSizeShift(), allocator);
return soa.AddLocalReference<jobject>(result);
}
@@ -167,10 +167,11 @@ static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) {
ThrowIllegalArgumentException("not a primitive array");
return 0;
}
- if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
+ if (!Runtime::Current()->GetHeap()->IsNonMovable(array)) {
ThrowRuntimeException("Trying to get address of movable array object");
return 0;
}
+ DCHECK(!Runtime::Current()->GetHeap()->ObjectMayMove(array));
return reinterpret_cast<uintptr_t>(array->GetRawData(array->GetClass()->GetComponentSize(), 0));
}
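A rough sketch of why the kCheckLargeObject=false argument matters for newNonMovableArray(): with the large-object check enabled, a big enough array would be routed to LargeObjectsSpace even though the caller asked for the nonmoving allocator. ChooseSpace and the threshold below are illustrative only, not ART's actual allocation path.

// Illustrative only: models the routing decision, not ART's real allocator.
#include <cstddef>
#include <iostream>

constexpr size_t kLargeObjectThreshold = 3 * 4096;  // illustrative value

const char* ChooseSpace(size_t byte_count, bool check_large_object) {
  if (check_large_object && byte_count >= kLargeObjectThreshold) {
    return "LargeObjectsSpace";  // the requested nonmoving allocator is bypassed
  }
  return "NonMovingSpace";       // nonmovable request stays identifiable
}

int main() {
  std::cout << ChooseSpace(64 * 1024, /*check_large_object=*/true) << '\n';   // LargeObjectsSpace
  std::cout << ChooseSpace(64 * 1024, /*check_large_object=*/false) << '\n';  // NonMovingSpace
}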
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 989763022f..edea356dab 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1404,7 +1404,8 @@ static size_t OpenBootDexFiles(ArrayRef<const std::string> dex_filenames,
void Runtime::SetSentinel(ObjPtr<mirror::Object> sentinel) {
CHECK(sentinel_.Read() == nullptr);
CHECK(sentinel != nullptr);
- CHECK(!heap_->IsMovableObject(sentinel));
+ // IsNonMovable(sentinel) doesn't hold if it came from an image.
+ CHECK(!heap_->ObjectMayMove(sentinel));
sentinel_ = GcRoot<mirror::Object>(sentinel);
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index afb8ebd3c0..d09c9fadec 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -1208,8 +1208,8 @@ class Runtime {
// for differentiating between unfilled imt slots vs conflict slots in superclasses.
ArtMethod* imt_unimplemented_method_;
- // Special sentinel object used to invalid conditions in JNI (cleared weak references) and
- // JDWP (invalid references).
+ // Special sentinel object used to indicate invalid conditions in JNI (cleared weak references)
+ // and JDWP (invalid references).
GcRoot<mirror::Object> sentinel_;
InstructionSet instruction_set_;