-rw-r--r-- | runtime/gc/heap-inl.h                 |  7
-rw-r--r-- | runtime/gc/heap.h                     |  9
-rw-r--r-- | runtime/globals.h                     |  3
-rw-r--r-- | runtime/mirror/object-inl.h           | 56
-rw-r--r-- | runtime/mirror/object.cc              |  2
-rw-r--r-- | runtime/mirror/object.h               | 91
-rw-r--r-- | runtime/mirror/object_reference-inl.h | 36
-rw-r--r-- | runtime/mirror/object_reference.h     |  8
-rw-r--r-- | runtime/obj_ptr.h                     |  3
-rw-r--r-- | runtime/runtime.cc                    | 11
-rw-r--r-- | runtime/runtime.h                     |  7
-rw-r--r-- | runtime/verify_object-inl.h           | 11
-rw-r--r-- | runtime/verify_object.h               |  5
13 files changed, 168 insertions, 81 deletions
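The common thread in this change is replacing raw mirror::Object* parameters with ObjPtr<mirror::Object> in the Object field setters, the heap write barrier, and object verification. ObjPtr is ART's debug-checked object pointer: in debug builds it packs a per-thread cookie next to the compressed address, so a pointer held across a point where the GC may move objects fails an assert instead of silently dereferencing a stale address. A reduced model of that idea, assuming 8-byte object alignment and a heap mapped in the low 4 GiB; gc_epoch and CheckedPtr are invented names, not ART's implementation:

// obj_ptr_sketch.cc -- illustrative only; requires C++17 for the inline variable.
#include <cassert>
#include <cstdint>

inline uint64_t gc_epoch = 0;  // bumped wherever objects may move

constexpr unsigned kAlignShift = 3;               // objects are 8-byte aligned
constexpr unsigned kAddrBits = 32 - kAlignShift;  // compressed address bits
constexpr uint64_t kAddrMask = (uint64_t{1} << kAddrBits) - 1;

template <typename T>
class CheckedPtr {
 public:
  explicit CheckedPtr(T* ptr)
      : bits_((reinterpret_cast<uint64_t>(ptr) >> kAlignShift) |
              (gc_epoch << kAddrBits)) {}

  // Decode, asserting the pointer was not held across a point where the
  // GC may have moved the object.
  T* Ptr() const {
    assert((bits_ >> kAddrBits) == gc_epoch && "stale object pointer");
    return reinterpret_cast<T*>((bits_ & kAddrMask) << kAlignShift);
  }

 private:
  uint64_t bits_;  // epoch cookie in the high bits, compressed address below
};

Bumping gc_epoch wherever a thread can be suspended makes every earlier CheckedPtr trip the assert on its next use, which is the discipline ObjPtr enforces for code running under Locks::mutator_lock_.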
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 6d61c647db..83789cc733 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -28,6 +28,7 @@
 #include "gc/space/large_object_space.h"
 #include "gc/space/region_space-inl.h"
 #include "gc/space/rosalloc_space-inl.h"
+#include "obj_ptr-inl.h"
 #include "runtime.h"
 #include "handle_scope-inl.h"
 #include "thread-inl.h"
@@ -433,6 +434,12 @@ inline void Heap::CheckConcurrentGC(Thread* self,
   }
 }
 
+inline void Heap::WriteBarrierField(ObjPtr<mirror::Object> dst,
+                                    MemberOffset offset ATTRIBUTE_UNUSED,
+                                    ObjPtr<mirror::Object> new_value ATTRIBUTE_UNUSED) {
+  card_table_->MarkCard(dst.Ptr());
+}
+
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index e32f05766e..678edff9c1 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -438,11 +438,10 @@ class Heap {
   // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
   // The call is not needed if null is stored in the field.
-  ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst,
-                                       MemberOffset offset ATTRIBUTE_UNUSED,
-                                       const mirror::Object* new_value ATTRIBUTE_UNUSED) {
-    card_table_->MarkCard(dst);
-  }
+  ALWAYS_INLINE void WriteBarrierField(ObjPtr<mirror::Object> dst,
+                                       MemberOffset offset,
+                                       ObjPtr<mirror::Object> new_value)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Write barrier for array operations that update many field positions
   ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst,
diff --git a/runtime/globals.h b/runtime/globals.h
index 28534e4708..616422585e 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -172,6 +172,9 @@ static constexpr bool kIsVdexEnabled = true;
 static constexpr bool kIsVdexEnabled = false;
 #endif
 
+// Size of a heap reference.
+static constexpr size_t kHeapReferenceSize = sizeof(uint32_t);
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_GLOBALS_H_
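WriteBarrierField above is a card-marking write barrier: after a reference field of dst changes, the card covering dst is dirtied so the GC rescans that small heap window for cross-space references. Moving the body out of heap.h into heap-inl.h is what lets the header take ObjPtr without dragging in the ObjPtr inlines. A minimal card-marking sketch; the 128-byte card size and 0x70 dirty value follow my reading of ART's gc/accounting/card_table.h and should be treated as assumptions:

#include <cstddef>
#include <cstdint>

class CardTable {
 public:
  static constexpr size_t kCardShift = 7;     // 128-byte cards (assumed)
  static constexpr uint8_t kCardDirty = 0x70; // dirty marker (assumed)

  explicit CardTable(uint8_t* biased_begin) : biased_begin_(biased_begin) {}

  // Mark the card covering `addr` dirty; the GC later rescans dirty cards
  // for references into the space it is collecting.
  void MarkCard(const void* addr) {
    biased_begin_[reinterpret_cast<uintptr_t>(addr) >> kCardShift] = kCardDirty;
  }

 private:
  // Card array biased by (heap_begin >> kCardShift), so the raw address can
  // be used as an index directly.
  uint8_t* const biased_begin_;
};

Note the comment retained in heap.h: a null store needs no barrier, because it can never create a reference the collector has to trace.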
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index ad7558c0ca..3e7bca789c 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -31,6 +31,7 @@
 #include "lock_word-inl.h"
 #include "monitor.h"
 #include "object_array-inl.h"
+#include "object_reference-inl.h"
 #include "obj_ptr-inl.h"
 #include "read_barrier-inl.h"
 #include "reference.h"
@@ -53,7 +54,7 @@ inline Class* Object::GetClass() {
 }
 
 template<VerifyObjectFlags kVerifyFlags>
-inline void Object::SetClass(Class* new_klass) {
+inline void Object::SetClass(ObjPtr<Class> new_klass) {
   // new_klass may be null prior to class linker initialization.
   // We don't mark the card as this occurs as part of object allocation. Not all objects have
   // backing cards, such as large objects.
@@ -159,7 +160,6 @@ inline Object* Object::GetReadBarrierPointerAcquire() {
 #endif
 }
 
-
 inline uint32_t Object::GetMarkBit() {
 #ifdef USE_READ_BARRIER
   return GetLockWord(false).MarkBitState();
@@ -895,18 +895,18 @@ inline T* Object::GetFieldObjectVolatile(MemberOffset field_offset) {
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
          bool kIsVolatile>
 inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
-                                                      Object* new_value) {
+                                                      ObjPtr<Object> new_value) {
   if (kCheckTransaction) {
     DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
   }
   if (kTransactionActive) {
-    mirror::Object* obj;
+    ObjPtr<Object> obj;
     if (kIsVolatile) {
       obj = GetFieldObjectVolatile<Object>(field_offset);
     } else {
       obj = GetFieldObject<Object>(field_offset);
     }
-    Runtime::Current()->RecordWriteFieldReference(this, field_offset, obj, true);
+    Runtime::Current()->RecordWriteFieldReference(this, field_offset, obj.Ptr(), true);
   }
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
@@ -919,17 +919,17 @@ inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
   if (kIsVolatile) {
     // TODO: Refactor to use a SequentiallyConsistent store instead.
     QuasiAtomic::ThreadFenceRelease();  // Ensure that prior accesses are visible before store.
-    objref_addr->Assign(new_value);
+    objref_addr->Assign(new_value.Ptr());
     QuasiAtomic::ThreadFenceSequentiallyConsistent();
                 // Ensure this store occurs before any volatile loads.
   } else {
-    objref_addr->Assign(new_value);
+    objref_addr->Assign(new_value.Ptr());
   }
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
          bool kIsVolatile>
-inline void Object::SetFieldObject(MemberOffset field_offset, Object* new_value) {
+inline void Object::SetFieldObject(MemberOffset field_offset, ObjPtr<Object> new_value) {
   SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction, kVerifyFlags,
       kIsVolatile>(field_offset, new_value);
   if (new_value != nullptr) {
@@ -940,7 +940,7 @@ inline void Object::SetFieldObject(MemberOffset field_offset, Object* new_value)
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void Object::SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value) {
+inline void Object::SetFieldObjectVolatile(MemberOffset field_offset, ObjPtr<Object> new_value) {
   SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(field_offset,
                                                                             new_value);
 }
@@ -956,7 +956,8 @@ inline HeapReference<Object>* Object::GetFieldObjectReferenceAddr(MemberOffset f
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline bool Object::CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset,
-                                                             Object* old_value, Object* new_value) {
+                                                             ObjPtr<Object> old_value,
+                                                             ObjPtr<Object> new_value) {
   bool success = CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
       kTransactionActive, kCheckTransaction, kVerifyFlags>(field_offset, old_value, new_value);
   if (success) {
@@ -967,7 +968,9 @@ inline bool Object::CasFieldWeakSequentiallyConsistentObject(MemberOffset field_
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline bool Object::CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(
-    MemberOffset field_offset, Object* old_value, Object* new_value) {
+    MemberOffset field_offset,
+    ObjPtr<Object> old_value,
+    ObjPtr<Object> new_value) {
   if (kCheckTransaction) {
     DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
   }
@@ -983,8 +986,8 @@ inline bool Object::CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(
   if (kTransactionActive) {
     Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
   }
-  HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
-  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+  HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
@@ -995,7 +998,8 @@ inline bool Object::CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline bool Object::CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset,
-                                                               Object* old_value, Object* new_value) {
+                                                               ObjPtr<Object> old_value,
+                                                               ObjPtr<Object> new_value) {
   bool success = CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<
       kTransactionActive, kCheckTransaction, kVerifyFlags>(field_offset, old_value, new_value);
   if (success) {
@@ -1006,7 +1010,9 @@ inline bool Object::CasFieldStrongSequentiallyConsistentObject(MemberOffset fiel
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline bool Object::CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(
-    MemberOffset field_offset, Object* old_value, Object* new_value) {
+    MemberOffset field_offset,
+    ObjPtr<Object> old_value,
+    ObjPtr<Object> new_value) {
   if (kCheckTransaction) {
     DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
   }
@@ -1022,8 +1028,8 @@ inline bool Object::CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrie
   if (kTransactionActive) {
     Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
   }
-  HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
-  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+  HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
@@ -1034,7 +1040,9 @@ inline bool Object::CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrie
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
-    MemberOffset field_offset, Object* old_value, Object* new_value) {
+    MemberOffset field_offset,
+    ObjPtr<Object> old_value,
+    ObjPtr<Object> new_value) {
   if (kCheckTransaction) {
     DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
   }
@@ -1050,8 +1058,8 @@ inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
   if (kTransactionActive) {
     Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
   }
-  HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
-  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+  HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
@@ -1062,7 +1070,9 @@ inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline bool Object::CasFieldStrongRelaxedObjectWithoutWriteBarrier(
-    MemberOffset field_offset, Object* old_value, Object* new_value) {
+    MemberOffset field_offset,
+    ObjPtr<Object> old_value,
+    ObjPtr<Object> new_value) {
   if (kCheckTransaction) {
     DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
   }
@@ -1078,8 +1088,8 @@ inline bool Object::CasFieldStrongRelaxedObjectWithoutWriteBarrier(
   if (kTransactionActive) {
     Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
   }
-  HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
-  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+  HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
   Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
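All of the CAS variants above share one shape: compare-and-swap the 32-bit compressed reference in place and, in the non-WithoutWriteBarrier wrappers, dirty the holder's card only when the swap succeeded. A reduced sketch using std::atomic; CasReferenceField and MarkCard are invented stand-ins, not ART's API:

#include <atomic>
#include <cstdint>

// Stand-in for the card-marking barrier (Heap::WriteBarrierField).
inline void MarkCard(void* /*holder*/) { /* dirty the card covering holder */ }

// Compare-and-swap a 32-bit compressed reference field, publishing to the
// GC only when the swap actually happened.
inline bool CasReferenceField(void* holder,
                              std::atomic<uint32_t>* field,
                              uint32_t expected_ref,
                              uint32_t new_ref) {
  bool success = field->compare_exchange_strong(expected_ref, new_ref,
                                                std::memory_order_seq_cst);
  if (success) {
    MarkCard(holder);  // cf. CasFieldStrongSequentiallyConsistentObject
  }
  return success;
}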
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 90b97fd143..fbb7c96b58 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -199,7 +199,7 @@ int32_t Object::IdentityHashCode() const {
   UNREACHABLE();
 }
 
-void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value) {
+void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> new_value) {
   Class* c = GetClass();
   Runtime* runtime = Runtime::Current();
   if (runtime->GetClassLinker() == nullptr || !runtime->IsStarted() ||
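CheckFieldAssignmentImpl is debug-only type verification for reference stores: it finds the instance field at field_offset and checks that new_value's class is assignable to the field's declared type. The core of such a check is a superclass walk; a toy version with an invented Klass type, ignoring interfaces and arrays:

// Toy model of the assignability rule; Klass is not ART's mirror::Class.
struct Klass {
  const Klass* super;  // nullptr for the root class
};

inline bool IsAssignableFrom(const Klass* field_type, const Klass* value_type) {
  // A store is legal when the value's class is the field's declared type
  // or one of its subclasses.
  for (const Klass* k = value_type; k != nullptr; k = k->super) {
    if (k == field_type) {
      return true;
    }
  }
  return false;
}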
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 10faf605fe..9ddf99500e 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -92,7 +92,7 @@ class MANAGED LOCKABLE Object {
   ALWAYS_INLINE Class* GetClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  void SetClass(Class* new_klass) REQUIRES_SHARED(Locks::mutator_lock_);
+  void SetClass(ObjPtr<Class> new_klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // TODO: Clean these up and change to return int32_t
   Object* GetReadBarrierPointer() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -283,54 +283,69 @@ class MANAGED LOCKABLE Object {
   ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<bool kTransactionActive, bool kCheckTransaction = true,
-      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
-  ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value)
+  template<bool kTransactionActive,
+           bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           bool kIsVolatile = false>
+  ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
+                                                       ObjPtr<Object> new_value)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<bool kTransactionActive, bool kCheckTransaction = true,
-      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
-  ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, Object* new_value)
+  template<bool kTransactionActive,
+           bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           bool kIsVolatile = false>
+  ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<bool kTransactionActive, bool kCheckTransaction = true,
-      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value)
+  template<bool kTransactionActive,
+           bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset,
+                                            ObjPtr<Object> new_value)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template<bool kTransactionActive, bool kCheckTransaction = true,
-      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
-                                                Object* new_value)
+  template<bool kTransactionActive,
+           bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset,
+                                                ObjPtr<Object> old_value,
+                                                ObjPtr<Object> new_value)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  template<bool kTransactionActive, bool kCheckTransaction = true,
-      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  template<bool kTransactionActive,
+           bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
-                                                                   Object* old_value,
-                                                                   Object* new_value)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  template<bool kTransactionActive, bool kCheckTransaction = true,
-      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
-                                                  Object* new_value)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  template<bool kTransactionActive, bool kCheckTransaction = true,
-      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+                                                                   ObjPtr<Object> old_value,
+                                                                   ObjPtr<Object> new_value)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  template<bool kTransactionActive,
+           bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset,
+                                                  ObjPtr<Object> old_value,
+                                                  ObjPtr<Object> new_value)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  template<bool kTransactionActive,
+           bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
-                                                                     Object* old_value,
-                                                                     Object* new_value)
+                                                                     ObjPtr<Object> old_value,
+                                                                     ObjPtr<Object> new_value)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  template<bool kTransactionActive, bool kCheckTransaction = true,
-      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  template<bool kTransactionActive,
+           bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
-                                                    Object* old_value,
-                                                    Object* new_value)
+                                                    ObjPtr<Object> old_value,
+                                                    ObjPtr<Object> new_value)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  template<bool kTransactionActive, bool kCheckTransaction = true,
-      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  template<bool kTransactionActive,
+           bool kCheckTransaction = true,
+           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
-                                                      Object* old_value,
-                                                      Object* new_value)
+                                                      ObjPtr<Object> old_value,
+                                                      ObjPtr<Object> new_value)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -585,9 +600,9 @@ class MANAGED LOCKABLE Object {
   // Verify the type correctness of stores to fields.
   // TODO: This can cause thread suspension and isn't moving GC safe.
-  void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
+  void CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> new_value)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void CheckFieldAssignment(MemberOffset field_offset, Object* new_value)
+  void CheckFieldAssignment(MemberOffset field_offset, ObjPtr<Object> new_value)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kCheckFieldAssignments) {
       CheckFieldAssignmentImpl(field_offset, new_value);
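The template parameters these declarations keep threading through (kTransactionActive, kCheckTransaction, kVerifyFlags, kIsVolatile) are compile-time switches, so each instantiation pays only for the checks it asks for. A reduced sketch of that dispatch; SetField and the plain int field are invented stand-ins:

#include <cstdio>

// When kTransactionActive is false, the recording branch is dead code and
// the setter compiles down to a plain store.
template <bool kTransactionActive>
void SetField(int* field, int new_value) {
  if (kTransactionActive) {
    std::printf("record old value %d for rollback\n", *field);
  }
  *field = new_value;
}

// dex2oat instantiates the recording variant inside class-initialization
// transactions; the normal runtime uses the plain one:
//   SetField</*kTransactionActive=*/true>(&f, 42);
//   SetField</*kTransactionActive=*/false>(&f, 42);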
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
new file mode 100644
index 0000000000..60955d60df
--- /dev/null
+++ b/runtime/mirror/object_reference-inl.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_OBJECT_REFERENCE_INL_H_
+#define ART_RUNTIME_MIRROR_OBJECT_REFERENCE_INL_H_
+
+#include "object_reference.h"
+
+#include "obj_ptr-inl.h"
+
+namespace art {
+namespace mirror {
+
+// References between objects within the managed heap.
+template<class MirrorType>
+HeapReference<MirrorType> HeapReference<MirrorType>::FromObjPtr(ObjPtr<MirrorType> ptr) {
+  return HeapReference<MirrorType>(ptr.Ptr());
+}
+
+}  // namespace mirror
+}  // namespace art
+
+#endif  // ART_RUNTIME_MIRROR_OBJECT_REFERENCE_INL_H_
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index f4a358018e..573cb308bd 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -19,6 +19,7 @@
 
 #include "base/mutex.h"  // For Locks::mutator_lock_.
 #include "globals.h"
+#include "obj_ptr.h"
 
 namespace art {
 namespace mirror {
@@ -86,11 +87,18 @@ class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, Mirr
       REQUIRES_SHARED(Locks::mutator_lock_) {
     return HeapReference<MirrorType>(mirror_ptr);
   }
+
+  static HeapReference<MirrorType> FromObjPtr(ObjPtr<MirrorType> ptr)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   explicit HeapReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
       : ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
 };
 
+static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+              "heap reference size does not match");
+
 // Standard compressed reference used in the runtime. Used for StackReference and GC roots.
 template<class MirrorType>
 class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {
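HeapReference is the on-heap representation that FromObjPtr converts into: a reference stored in 32 bits even on 64-bit hosts, which is what the new static_assert against kHeapReferenceSize pins down. A sketch of such a compressed reference with an invented CompressedRef type; the low-4-GiB heap placement is the assumption that makes the narrowing cast safe:

#include <cstdint>

template <typename T>
class CompressedRef {
 public:
  static CompressedRef FromPtr(T* ptr) {
    // Assumes the managed heap sits in the low 4 GiB of address space.
    return CompressedRef(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr)));
  }
  T* AsPtr() const { return reinterpret_cast<T*>(static_cast<uintptr_t>(ref_)); }

 private:
  explicit CompressedRef(uint32_t ref) : ref_(ref) {}
  uint32_t ref_;  // 4 bytes, the size the new static_assert pins down
};

static_assert(sizeof(CompressedRef<int>) == sizeof(uint32_t),
              "a compressed reference must stay 4 bytes");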
diff --git a/runtime/obj_ptr.h b/runtime/obj_ptr.h
index 74be44eb23..75a6f9fe55 100644
--- a/runtime/obj_ptr.h
+++ b/runtime/obj_ptr.h
@@ -22,7 +22,6 @@
 
 #include "base/mutex.h"  // For Locks::mutator_lock_.
 #include "globals.h"
-#include "mirror/object_reference.h"
 
 namespace art {
 
@@ -32,7 +31,7 @@ namespace art {
 template<class MirrorType, bool kPoison = kIsDebugBuild>
 class ObjPtr {
   static constexpr size_t kCookieShift =
-      sizeof(mirror::HeapReference<mirror::Object>) * kBitsPerByte - kObjectAlignmentShift;
+      kHeapReferenceSize * kBitsPerByte - kObjectAlignmentShift;
   static constexpr size_t kCookieBits = sizeof(uintptr_t) * kBitsPerByte - kCookieShift;
   static constexpr uintptr_t kCookieMask = (static_cast<uintptr_t>(1u) << kCookieBits) - 1;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 7d9d506a1b..9c0d2db873 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1899,11 +1899,16 @@ void Runtime::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset,
   preinitialization_transaction_->RecordWriteField64(obj, field_offset, value, is_volatile);
 }
 
-void Runtime::RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
-                                        mirror::Object* value, bool is_volatile) const {
+void Runtime::RecordWriteFieldReference(mirror::Object* obj,
+                                        MemberOffset field_offset,
+                                        ObjPtr<mirror::Object> value,
+                                        bool is_volatile) const {
   DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
-  preinitialization_transaction_->RecordWriteFieldReference(obj, field_offset, value, is_volatile);
+  preinitialization_transaction_->RecordWriteFieldReference(obj,
+                                                            field_offset,
+                                                            value.Ptr(),
+                                                            is_volatile);
 }
 
 void Runtime::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 5a95f78cad..66fd058bb2 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -502,8 +502,11 @@ class Runtime {
                           bool is_volatile) const;
   void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                           bool is_volatile) const;
-  void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
-                                 mirror::Object* value, bool is_volatile) const;
+  void RecordWriteFieldReference(mirror::Object* obj,
+                                 MemberOffset field_offset,
+                                 ObjPtr<mirror::Object> value,
+                                 bool is_volatile) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
       REQUIRES_SHARED(Locks::mutator_lock_);
   void RecordStrongStringInsertion(mirror::String* s) const
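Replacing sizeof(mirror::HeapReference<mirror::Object>) with the kHeapReferenceSize constant lets obj_ptr.h drop its include of mirror/object_reference.h, breaking a header dependency while keeping the same value. A worked check of the resulting cookie layout on a 64-bit build, mirroring the constants above:

#include <cstddef>
#include <cstdint>

constexpr size_t kBitsPerByte = 8;
constexpr size_t kObjectAlignmentShift = 3;  // 8-byte aligned objects
constexpr size_t kHeapReferenceSize = sizeof(uint32_t);

// 32 address bits minus 3 alignment bits leave 29 bits for the compressed
// reference; the remaining high bits of a uintptr_t hold the cookie.
constexpr size_t kCookieShift =
    kHeapReferenceSize * kBitsPerByte - kObjectAlignmentShift;  // 29
constexpr size_t kCookieBits = sizeof(uintptr_t) * kBitsPerByte - kCookieShift;
constexpr uintptr_t kCookieMask = (static_cast<uintptr_t>(1u) << kCookieBits) - 1;

static_assert(kCookieShift == 29, "compressed reference payload is 29 bits");
static_assert(sizeof(uintptr_t) != 8 || kCookieBits == 35,
              "35 cookie bits on a 64-bit build");

So the compressed reference occupies the low 29 bits of ObjPtr's word and the debug cookie the remaining high bits.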
diff --git a/runtime/verify_object-inl.h b/runtime/verify_object-inl.h
index f7a8249f19..4892b49533 100644
--- a/runtime/verify_object-inl.h
+++ b/runtime/verify_object-inl.h
@@ -21,31 +21,32 @@
 
 #include "gc/heap.h"
 #include "mirror/object-inl.h"
+#include "obj_ptr-inl.h"
 
 namespace art {
 
-inline void VerifyObject(mirror::Object* obj) {
+inline void VerifyObject(ObjPtr<mirror::Object> obj) {
   if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
     if (kVerifyObjectSupport > kVerifyObjectModeFast) {
       // Slow object verification, try the heap right away.
-      Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+      Runtime::Current()->GetHeap()->VerifyObjectBody(obj.Ptr());
     } else {
       // Fast object verification, only call the heap if our quick sanity tests fail. The heap will
       // print the diagnostic message.
-      bool failed = !IsAligned<kObjectAlignment>(obj);
+      bool failed = !IsAligned<kObjectAlignment>(obj.Ptr());
       if (!failed) {
         mirror::Class* c = obj->GetClass<kVerifyNone>();
         failed = failed || !IsAligned<kObjectAlignment>(c);
         failed = failed || !VerifyClassClass(c);
       }
       if (UNLIKELY(failed)) {
-        Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+        Runtime::Current()->GetHeap()->VerifyObjectBody(obj.Ptr());
       }
     }
   }
 }
 
-inline bool VerifyClassClass(mirror::Class* c) {
+inline bool VerifyClassClass(ObjPtr<mirror::Class> c) {
   if (UNLIKELY(c == nullptr)) {
     return false;
   }
diff --git a/runtime/verify_object.h b/runtime/verify_object.h
index 8e1653ddb9..384e56f7f4 100644
--- a/runtime/verify_object.h
+++ b/runtime/verify_object.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 
 #include "base/macros.h"
+#include "obj_ptr.h"
 
 namespace art {
 
@@ -52,10 +53,10 @@ static constexpr VerifyObjectFlags kDefaultVerifyFlags = kVerifyNone;
 static constexpr VerifyObjectMode kVerifyObjectSupport =
     kDefaultVerifyFlags != 0 ? kVerifyObjectModeFast : kVerifyObjectModeDisabled;
 
-ALWAYS_INLINE void VerifyObject(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+ALWAYS_INLINE void VerifyObject(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS;
 
 // Check that c.getClass() == c.getClass().getClass().
-ALWAYS_INLINE bool VerifyClassClass(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS;
+ALWAYS_INLINE bool VerifyClassClass(ObjPtr<mirror::Class> c) NO_THREAD_SAFETY_ANALYSIS;
 
 }  // namespace art
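The fast path of VerifyObject only does pointer sanity checks: the object and its class must be aligned, and the class must satisfy the c.getClass() == c.getClass().getClass() invariant that VerifyClassClass tests. A self-contained sketch of that shape with an invented FakeObject layout; real managed objects are more than a single class pointer:

#include <cstdint>

constexpr uintptr_t kObjectAlignment = 8;

inline bool IsAligned(const void* p) {
  return (reinterpret_cast<uintptr_t>(p) & (kObjectAlignment - 1)) == 0;
}

struct FakeObject {
  FakeObject* klass;  // stand-in: first field of every managed object is its class
};

inline bool LooksLikeObject(const FakeObject* obj) {
  if (obj == nullptr || !IsAligned(obj)) return false;
  const FakeObject* c = obj->klass;
  if (c == nullptr || !IsAligned(c)) return false;
  // "Class class" invariant: the class of a class object is java.lang.Class,
  // whose class is itself.
  const FakeObject* cc = c->klass;
  return cc != nullptr && cc->klass == cc;
}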