-rw-r--r--  compiler/optimizing/intrinsics.cc            2
-rw-r--r--  libartbase/base/bit_vector.h                 6
-rw-r--r--  openjdkjvmti/ti_heap.cc                      4
-rw-r--r--  runtime/gc/heap.cc                          64
-rw-r--r--  runtime/gc/heap.h                           42
-rw-r--r--  runtime/jit/jit_code_cache.cc                2
-rw-r--r--  runtime/jni/jni_internal.cc                 12
-rw-r--r--  runtime/jni/jni_internal_test.cc             2
-rw-r--r--  runtime/mirror/array-alloc-inl.h            10
-rw-r--r--  runtime/mirror/array.cc                      5
-rw-r--r--  runtime/mirror/array.h                       5
-rw-r--r--  runtime/mirror/object.cc                     8
-rw-r--r--  runtime/mirror/object_array-alloc-inl.h      5
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc   13
-rw-r--r--  runtime/runtime.cc                           3
-rw-r--r--  runtime/runtime.h                            4
16 files changed, 49 insertions, 138 deletions
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 8ae2494357..b87f6f3975 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -218,7 +218,7 @@ void IntrinsicVisitor::AssertNonMovableStringClass() {
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::Class> string_class = GetClassRoot<mirror::String>();
- CHECK(!art::Runtime::Current()->GetHeap()->ObjectMayMove(string_class));
+ CHECK(!art::Runtime::Current()->GetHeap()->IsMovableObject(string_class));
}
}
diff --git a/libartbase/base/bit_vector.h b/libartbase/base/bit_vector.h
index 2eed9702bf..ec94efb09f 100644
--- a/libartbase/base/bit_vector.h
+++ b/libartbase/base/bit_vector.h
@@ -31,9 +31,9 @@ class ArenaBitVector;
/*
* Expanding bitmap. Bits are numbered starting from zero. All operations on a BitVector are
- * unsynchronized. New BitVectors are not necessarily zeroed out. If the used allocator doesn't
- * clear the vector (e.g. ScopedArenaAllocator), the caller is responsible for clearing it (e.g.
- * ArenaBitVector).
+ * unsynchronized. New BitVectors are not necessarily zeroed out. If the used allocator doesn't
+ * clear the vector (e.g. ScopedArenaAllocator), the responsibility of clearing it lies with the
+ * caller (e.g. ArenaBitVector).
*/
class BitVector {
public:
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index f8589f1d1a..80bfa0ff43 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -1916,10 +1916,6 @@ jvmtiError HeapExtensions::ChangeArraySize(jvmtiEnv* env, jobject arr, jsize new
art::StackHandleScope<2> hs(self);
art::Handle<art::mirror::Array> old_arr(hs.NewHandle(soa.Decode<art::mirror::Array>(arr)));
art::MutableHandle<art::mirror::Array> new_arr(hs.NewHandle<art::mirror::Array>(nullptr));
- if (!art::Runtime::Current()->GetHeap()->PossiblyAllocatedMovable(old_arr.Get())) {
- JVMTI_LOG(INFO, env) << "Cannot resize a nonmovable array";
- return ERR(ILLEGAL_ARGUMENT);
- }
if (klass->IsObjectArrayClass()) {
new_arr.Assign(
art::mirror::ObjectArray<art::mirror::Object>::Alloc(self, old_arr->GetClass(), new_size));
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 0f5dd99dc0..ad3e690938 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -377,10 +377,11 @@ Heap::Heap(size_t initial_size,
* verification is enabled, we limit the size of allocation stacks to speed up their
* searching.
*/
- max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize :
- (kVerifyObjectSupport > kVerifyObjectModeFast) ?
- kVerifyObjectAllocationStackSize :
- kDefaultAllocationStackSize),
+ max_allocation_stack_size_(kGCALotMode
+ ? kGcAlotAllocationStackSize
+ : (kVerifyObjectSupport > kVerifyObjectModeFast)
+ ? kVerifyObjectAllocationStackSize
+ : kDefaultAllocationStackSize),
current_allocator_(kAllocatorTypeDlMalloc),
current_non_moving_allocator_(kAllocatorTypeNonMoving),
bump_pointer_space_(nullptr),
@@ -431,8 +432,7 @@ Heap::Heap(size_t initial_size,
boot_image_spaces_(),
boot_images_start_address_(0u),
boot_images_size_(0u),
- pre_oome_gc_count_(0u),
- non_movable_zygote_objects_() {
+ pre_oome_gc_count_(0u) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
@@ -2378,7 +2378,7 @@ class ZygoteCompactingCollector final : public collector::SemiSpace {
bin_live_bitmap_ = space->GetLiveBitmap();
bin_mark_bitmap_ = space->GetMarkBitmap();
uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
- Heap* heap = Runtime::Current()->GetHeap();
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// Note: This requires traversing the space in increasing order of object addresses.
auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
@@ -2386,11 +2386,7 @@ class ZygoteCompactingCollector final : public collector::SemiSpace {
// Add the bin consisting of the end of the previous object to the start of the current object.
AddBin(bin_size, prev);
prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
- if (!obj->IsClass()) {
- heap->AddNonMovableZygoteObject(obj);
- }
};
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
bin_live_bitmap_->Walk(visitor);
// Add the last bin which spans after the last object to the end of the space.
AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
@@ -2496,10 +2492,6 @@ void Heap::IncrementFreedEver() {
// FIXME: BUT it did exceed... http://b/197647048
# pragma clang diagnostic ignored "-Wframe-larger-than="
void Heap::PreZygoteFork() {
- // Opportunistically log here; empirically logs from the initial PreZygoteFork() are lost.
- // But for the main zygote, this is typically entered at least twice.
- LOG(INFO) << "PreZygoteFork(): non_movable_zygote_objects_.size() = "
- << non_movable_zygote_objects_.size();
if (!HasZygoteSpace()) {
// We still want to GC in case there is some unreachable non moving objects that could cause a
// suboptimal bin packing when we compact the zygote space.
@@ -2527,18 +2519,6 @@ void Heap::PreZygoteFork() {
// there.
non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
const bool same_space = non_moving_space_ == main_space_;
- // We create the ZygoteSpace by performing a semi-space collection to copy the main allocation
- // space into what was the non-moving space. We do so by ignoring and overwriting the meta-
- // information from the non-moving (dlmalloc) space. An initial pass identifies unused sections
- // of the heap that we usually try to copy into first. We copy any remaining objects past the
- // previous end of the old non-moving space. Eeverything up to the last allocated object in the
- // old non-moving space then becomes ZygoteSpace. Everything after that becomes the new
- // non-moving space.
- // There is a subtlety here in that Object.clone() treats objects allocated as non-movable
- // differently from other objects, and this ZygoteSpace creation process doesn't automatically
- // preserve that distinction. Thus we must explicitly track this in non_movable_zygote_objects_.
- // Otherwise we have to treat the entire ZygoteSpace as non-movable, which could cause some
- // weird programming styles to eventually render most of the heap non-movable.
if (kCompactZygote) {
// Temporarily disable rosalloc verification because the zygote
// compaction will mess up the rosalloc internal metadata.
@@ -2567,9 +2547,6 @@ void Heap::PreZygoteFork() {
zygote_collector.SetToSpace(&target_space);
zygote_collector.SetSwapSemiSpaces(false);
zygote_collector.Run(kGcCauseCollectorTransition, false);
- uint32_t num_nonmovable = non_movable_zygote_objects_.size();
- // For an AOSP boot, we saw num_nonmovable around a dozen.
- DCHECK_LT(num_nonmovable, 1000u) << " Too many nonmovable zygote objects?";
if (reset_main_space) {
main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
@@ -3759,32 +3736,7 @@ void Heap::SetIdealFootprint(size_t target_footprint) {
target_footprint_.store(target_footprint, std::memory_order_relaxed);
}
-bool Heap::IsNonMovable(ObjPtr<mirror::Object> obj) const {
- DCHECK(!obj.Ptr()->IsClass()); // We do not correctly track classes in zygote space.
- if (GetNonMovingSpace()->Contains(obj.Ptr())) {
- return true;
- }
- if (zygote_space_ != nullptr && zygote_space_->Contains(obj.Ptr())) {
- return non_movable_zygote_objects_.contains(
- mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj.Ptr()));
- }
- return false; // E.g. in LargeObjectsSpace.
-}
-
-bool Heap::PossiblyAllocatedMovable(ObjPtr<mirror::Object> obj) const {
- // The CC collector may copy movable objects into NonMovingSpace. It does that only when it
- // runs out of space, so we assume this does not affect ZygoteSpace.
- if (!gUseReadBarrier && GetNonMovingSpace()->Contains(obj.Ptr())) {
- return false;
- }
- if (zygote_space_ != nullptr && zygote_space_->Contains(obj.Ptr())) {
- return !non_movable_zygote_objects_.contains(
- mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj.Ptr()));
- }
- return true;
-}
-
-bool Heap::ObjectMayMove(ObjPtr<mirror::Object> obj) const {
+bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
if (kMovingCollector) {
space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
if (space != nullptr) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 0593e45613..bbddf57090 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -383,28 +383,8 @@ class Heap {
bool sorted = false)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- // Approximates whether the object in question was explicitly requested to be nonmovable.
- // May rarely err on the side of claiming immovability for objects that were allocated movable,
- // but will not be moved.
- // Returns true if and only if one of the following is true:
- // 1) The object was allocated as nonmovable, whether or not it has moved to ZygoteSpace.
- // 2) All objects are being allocated in a non-movable space.
- // 3) The CC collector decided to spuriously allocate in non-moving space because it ran
- // out of memory at an inopportune time.
- // This is used primarily to determine Object.clone() behavior, where (2)
- // doesn't matter. (3) is unfortunate, but we can live with it.
- // SHOULD NOT BE CALLED ON CLASS OBJECTS.
- bool IsNonMovable(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
-
- // The negation of the above, but resolves ambiguous cases in the direction of assuming
- // movability. Used for partial error checking where an object must be movable.
- EXPORT bool PossiblyAllocatedMovable(ObjPtr<mirror::Object> obj) const
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Returns true if there is any chance that the object (obj) will move. Returns false for image
- // and zygote space, since we don't actually move objects in those spaces. Unlike the preceding
- // function, the result here depends on whether the object was moved to zygote or image space.
- bool ObjectMayMove(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns true if there is any chance that the object (obj) will move.
+ bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Enables us to disable compacting GC until objects are released.
EXPORT void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
@@ -1047,12 +1027,6 @@ class Heap {
return size < pud_size ? pmd_size : pud_size;
}
- // Add a reference to the set of preexisting zygote nonmovable objects.
- void AddNonMovableZygoteObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
- non_movable_zygote_objects_.insert(
- mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj));
- }
-
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -1371,7 +1345,7 @@ class Heap {
std::vector<space::AllocSpace*> alloc_spaces_;
// A space where non-movable objects are allocated, when compaction is enabled it contains
- // Classes, and non moving objects.
+ // Classes, ArtMethods, ArtFields, and non moving objects.
space::MallocSpace* non_moving_space_;
// Space which we use for the kAllocatorTypeROSAlloc.
@@ -1785,16 +1759,6 @@ class Heap {
std::unique_ptr<Verification> verification_;
- // Non-class immovable objects allocated before we created zygote space.
- // TODO: We may need a smaller data structure. Unfortunately, HashSets minimum size is too big.
- struct CRComparator {
- bool operator()(mirror::CompressedReference<mirror::Object> x,
- mirror::CompressedReference<mirror::Object> y) const {
- return x.AsVRegValue() < y.AsVRegValue();
- }
- };
- std::set<mirror::CompressedReference<mirror::Object>, CRComparator> non_movable_zygote_objects_;
-
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
friend class collector::ConcurrentCopying;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 1b7cf7d76c..2b83eff44f 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -397,7 +397,7 @@ static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots
}
// Ensure that we don't put movable objects in the shared region.
if (is_shared_region) {
- CHECK(Runtime::Current()->GetHeap()->IsNonMovable(object.Get()));
+ CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
}
}
}
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index 48428f6c3d..51350dc713 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -2126,7 +2126,7 @@ class JNI {
ScopedObjectAccess soa(env);
ObjPtr<mirror::String> s = soa.Decode<mirror::String>(java_string);
gc::Heap* heap = Runtime::Current()->GetHeap();
- if (heap->ObjectMayMove(s) || s->IsCompressed()) {
+ if (heap->IsMovableObject(s) || s->IsCompressed()) {
jchar* chars = new jchar[s->GetLength()];
if (s->IsCompressed()) {
int32_t length = s->GetLength();
@@ -2174,7 +2174,7 @@ class JNI {
}
return chars;
} else {
- if (heap->ObjectMayMove(s)) {
+ if (heap->IsMovableObject(s)) {
StackHandleScope<1> hs(soa.Self());
HandleWrapperObjPtr<mirror::String> h(hs.NewHandleWrapper(&s));
if (!gUseReadBarrier && !gUseUserfaultfd) {
@@ -2202,7 +2202,7 @@ class JNI {
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
ObjPtr<mirror::String> s = soa.Decode<mirror::String>(java_string);
- if (!s->IsCompressed() && heap->ObjectMayMove(s)) {
+ if (!s->IsCompressed() && heap->IsMovableObject(s)) {
if (!gUseReadBarrier && !gUseUserfaultfd) {
heap->DecrementDisableMovingGC(soa.Self());
} else {
@@ -2369,7 +2369,7 @@ class JNI {
return nullptr;
}
gc::Heap* heap = Runtime::Current()->GetHeap();
- if (heap->ObjectMayMove(array)) {
+ if (heap->IsMovableObject(array)) {
if (!gUseReadBarrier && !gUseUserfaultfd) {
heap->IncrementDisableMovingGC(soa.Self());
} else {
@@ -2966,7 +2966,7 @@ class JNI {
return nullptr;
}
// Only make a copy if necessary.
- if (Runtime::Current()->GetHeap()->ObjectMayMove(array)) {
+ if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
if (is_copy != nullptr) {
*is_copy = JNI_TRUE;
}
@@ -3026,7 +3026,7 @@ class JNI {
if (mode != JNI_COMMIT) {
if (is_copy) {
delete[] reinterpret_cast<uint64_t*>(elements);
- } else if (heap->ObjectMayMove(array)) {
+ } else if (heap->IsMovableObject(array)) {
// A non-copy of a movable object must mean that we had disabled the moving GC.
if (!gUseReadBarrier && !gUseUserfaultfd) {
heap->DecrementDisableMovingGC(soa.Self());
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index a575981d9b..ed97e4d4c8 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -1829,7 +1829,7 @@ TEST_F(JniInternalTest, GetStringChars_ReleaseStringChars) {
jboolean is_copy = JNI_FALSE;
chars = env_->GetStringChars(s, &is_copy);
- if (Runtime::Current()->GetHeap()->ObjectMayMove(s_m)) {
+ if (Runtime::Current()->GetHeap()->IsMovableObject(s_m)) {
EXPECT_EQ(JNI_TRUE, is_copy);
} else {
EXPECT_EQ(JNI_FALSE, is_copy);
diff --git a/runtime/mirror/array-alloc-inl.h b/runtime/mirror/array-alloc-inl.h
index 8e182a3158..b905fd1727 100644
--- a/runtime/mirror/array-alloc-inl.h
+++ b/runtime/mirror/array-alloc-inl.h
@@ -115,7 +115,7 @@ class SetLengthToUsableSizeVisitor {
DISALLOW_COPY_AND_ASSIGN(SetLengthToUsableSizeVisitor);
};
-template <bool kIsInstrumented, bool kFillUsable, bool kCheckLargeObject>
+template <bool kIsInstrumented, bool kFillUsable>
inline ObjPtr<Array> Array::Alloc(Thread* self,
ObjPtr<Class> array_class,
int32_t component_count,
@@ -143,15 +143,15 @@ inline ObjPtr<Array> Array::Alloc(Thread* self,
ObjPtr<Array> result;
if (!kFillUsable) {
SetLengthVisitor visitor(component_count);
- result =
- ObjPtr<Array>::DownCast(heap->AllocObjectWithAllocator<kIsInstrumented, kCheckLargeObject>(
+ result = ObjPtr<Array>::DownCast(
+ heap->AllocObjectWithAllocator<kIsInstrumented>(
self, array_class, size, allocator_type, visitor));
} else {
SetLengthToUsableSizeVisitor visitor(component_count,
DataOffset(1U << component_size_shift).SizeValue(),
component_size_shift);
- result =
- ObjPtr<Array>::DownCast(heap->AllocObjectWithAllocator<kIsInstrumented, kCheckLargeObject>(
+ result = ObjPtr<Array>::DownCast(
+ heap->AllocObjectWithAllocator<kIsInstrumented>(
self, array_class, size, allocator_type, visitor));
}
if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) {
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 65371d0f5e..a4f6c88e4c 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -144,8 +144,9 @@ ObjPtr<Array> Array::CopyOf(Handle<Array> h_this, Thread* self, int32_t new_leng
CHECK(klass->IsPrimitiveArray()) << "Will miss write barriers";
DCHECK_GE(new_length, 0);
auto* heap = Runtime::Current()->GetHeap();
- DCHECK(!heap->IsNonMovable(h_this.Get()));
- gc::AllocatorType allocator_type = heap->GetCurrentAllocator();
+ gc::AllocatorType allocator_type = heap->IsMovableObject(h_this.Get())
+ ? heap->GetCurrentAllocator()
+ : heap->GetCurrentNonMovingAllocator();
const auto component_size = klass->GetComponentSize();
const auto component_shift = klass->GetComponentSizeShift();
ObjPtr<Array> new_array =
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 1fb7f2e955..7a0976ab48 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -44,13 +44,14 @@ class MANAGED Array : public Object {
// Allocates an array with the given properties, if kFillUsable is true the array will be of at
// least component_count size, however, if there's usable space at the end of the allocation the
// array will fill it.
- template <bool kIsInstrumented = true, bool kFillUsable = false, bool kCheckLargeObject = true>
+ template <bool kIsInstrumented = true, bool kFillUsable = false>
ALWAYS_INLINE static ObjPtr<Array> Alloc(Thread* self,
ObjPtr<Class> array_class,
int32_t component_count,
size_t component_size_shift,
gc::AllocatorType allocator_type)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
static ObjPtr<Array> CreateMultiArray(Thread* self,
Handle<Class> element_class,
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 448409c7a7..b28978603c 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -162,11 +162,9 @@ ObjPtr<Object> Object::Clone(Handle<Object> h_this, Thread* self) {
gc::Heap* heap = Runtime::Current()->GetHeap();
size_t num_bytes = h_this->SizeOf();
CopyObjectVisitor visitor(&h_this, num_bytes);
- // Unclear whether this should ever allocate a nonmovable object. This is conservative.
- ObjPtr<Object> copy =
- heap->IsNonMovable(h_this.Get()) ?
- heap->AllocNonMovableObject(self, h_this->GetClass(), num_bytes, visitor) :
- heap->AllocObject(self, h_this->GetClass(), num_bytes, visitor);
+ ObjPtr<Object> copy = heap->IsMovableObject(h_this.Get())
+ ? heap->AllocObject(self, h_this->GetClass(), num_bytes, visitor)
+ : heap->AllocNonMovableObject(self, h_this->GetClass(), num_bytes, visitor);
if (h_this->GetClass()->IsFinalizable()) {
heap->AddFinalizerReference(self, &copy);
}
diff --git a/runtime/mirror/object_array-alloc-inl.h b/runtime/mirror/object_array-alloc-inl.h
index d3688762a4..e79d154f84 100644
--- a/runtime/mirror/object_array-alloc-inl.h
+++ b/runtime/mirror/object_array-alloc-inl.h
@@ -66,8 +66,9 @@ inline ObjPtr<ObjectArray<T>> ObjectArray<T>::CopyOf(Handle<ObjectArray<T>> h_th
int32_t new_length) {
DCHECK_GE(new_length, 0);
gc::Heap* heap = Runtime::Current()->GetHeap();
- DCHECK(heap->PossiblyAllocatedMovable(h_this.Get()));
- gc::AllocatorType allocator_type = heap->GetCurrentAllocator();
+ gc::AllocatorType allocator_type = heap->IsMovableObject(h_this.Get())
+ ? heap->GetCurrentAllocator()
+ : heap->GetCurrentNonMovingAllocator();
ObjPtr<ObjectArray<T>> new_array = Alloc(self, h_this->GetClass(), new_length, allocator_type);
if (LIKELY(new_array != nullptr)) {
new_array->AssignableMemcpy(0, h_this.Get(), 0, std::min(h_this->GetLength(), new_length));
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 66c0fe6d39..0e9660aaac 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -116,11 +116,11 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle
return nullptr;
}
gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentNonMovingAllocator();
- // To keep these allocations distinguishable, do not fall back to LargeObjectsSpace:
- ObjPtr<mirror::Array> result = mirror::Array::Alloc</* kIsInstrumented= */ true,
- /* kfillUsable= */ false,
- /* kCheckLargeObject= */ false>(
- soa.Self(), array_class, length, array_class->GetComponentSizeShift(), allocator);
+ ObjPtr<mirror::Array> result = mirror::Array::Alloc(soa.Self(),
+ array_class,
+ length,
+ array_class->GetComponentSizeShift(),
+ allocator);
return soa.AddLocalReference<jobject>(result);
}
@@ -167,11 +167,10 @@ static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) {
ThrowIllegalArgumentException("not a primitive array");
return 0;
}
- if (!Runtime::Current()->GetHeap()->IsNonMovable(array)) {
+ if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
ThrowRuntimeException("Trying to get address of movable array object");
return 0;
}
- DCHECK(!Runtime::Current()->GetHeap()->ObjectMayMove(array));
return reinterpret_cast<uintptr_t>(array->GetRawData(array->GetClass()->GetComponentSize(), 0));
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index edea356dab..989763022f 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1404,8 +1404,7 @@ static size_t OpenBootDexFiles(ArrayRef<const std::string> dex_filenames,
void Runtime::SetSentinel(ObjPtr<mirror::Object> sentinel) {
CHECK(sentinel_.Read() == nullptr);
CHECK(sentinel != nullptr);
- // IsNonMovable(sentinel) doesn't hold if it came from an image.
- CHECK(!heap_->ObjectMayMove(sentinel));
+ CHECK(!heap_->IsMovableObject(sentinel));
sentinel_ = GcRoot<mirror::Object>(sentinel);
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index d09c9fadec..afb8ebd3c0 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -1208,8 +1208,8 @@ class Runtime {
// for differentiating between unfilled imt slots vs conflict slots in superclasses.
ArtMethod* imt_unimplemented_method_;
- // Special sentinel object used to indicate invalid conditions in JNI (cleared weak references)
- // and JDWP (invalid references).
+ // Special sentinel object used to indicate invalid conditions in JNI (cleared weak references) and
+ // JDWP (invalid references).
GcRoot<mirror::Object> sentinel_;
InstructionSet instruction_set_;