Object model changes to support 64bit.
Modify mirror objects so that references between them use an ObjectReference
value type rather than an Object*, allowing functionality to compress larger
references to be captured in the ObjectReference implementation.
ObjectReferences are 32bit and all other aspects of object layout remain as
they are currently.
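The rough shape of such a value type, as suggested by the HeapReference<Object>
usage in the diff below (AsMirrorPtr() and Assign() match the call sites; the
member name and the cast-based compression are assumptions for this sketch, not
the actual implementation):

  #include <cstdint>

  // Minimal sketch of a 32-bit reference value type.
  template <class MirrorType>
  class ObjectReference {
   public:
    MirrorType* AsMirrorPtr() const {
      // References currently fit in 32 bits, so decompression is a widening cast;
      // a compressing scheme could shift by the object alignment here instead.
      return reinterpret_cast<MirrorType*>(static_cast<uintptr_t>(reference_));
    }
    void Assign(MirrorType* other) {
      reference_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(other));
    }
   private:
    uint32_t reference_;  // Always 32 bits, regardless of native pointer width.
  };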
Expand fields in objects holding pointers so they can hold 64bit pointers. It's
expected the size of these will come down by improving where we hold compiler
meta-data.
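A hypothetical illustration of what widening such a field means (the struct and
its names are made up for the sketch; the real accessors live on the mirror
classes):

  #include <cstdint>

  // A 64-bit field can hold a native pointer on either target; the high half
  // is simply zero on 32-bit builds.
  struct NativePointerFieldSketch {
    uint64_t entry_point_;

    void SetEntryPoint(const void* ptr) {
      entry_point_ = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr));
    }
    const void* GetEntryPoint() const {
      return reinterpret_cast<const void*>(static_cast<uintptr_t>(entry_point_));
    }
  };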
Stub out x86_64 architecture specific runtime implementation.
Modify OutputStream so that reads and writes are of unsigned quantities.
Make the use of portable or quick code more explicit.
Templatize AtomicInteger to support more than just int32_t as a type.
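A minimal sketch of what the templatization amounts to, built on the gcc __sync
builtins; the method names (Load, FetchAndAdd, FetchAndSub, CompareAndSwap)
follow the call sites in the diff, everything else is an assumption about
atomic.h:

  template <typename T>
  class Atomic {
   public:
    explicit Atomic(T value = 0) : value_(value) {}

    T Load() const { return value_; }

    // Both return the old value, as the int32_t-only AtomicInteger did.
    T FetchAndAdd(T delta) { return __sync_fetch_and_add(&value_, delta); }
    T FetchAndSub(T delta) { return __sync_fetch_and_sub(&value_, delta); }

    // Returns true if the value was swapped from expected to desired.
    bool CompareAndSwap(T expected, T desired) {
      return __sync_bool_compare_and_swap(&value_, expected, desired);
    }

   private:
    volatile T value_;
  };

  // With the template in place, counters can be sized naturally, e.g.
  //   Atomic<size_t>   num_bytes_allocated_;
  //   Atomic<uint32_t> bitmap_index_;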
Add missing annotalysis information on the mutator lock, and fix issues
relating to it.
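For context, the annotations in question are the Clang thread-safety
(annotalysis) macros already used throughout the runtime; a representative
declaration from this change (mod_union_table.h below) looks like:

  // Verification reads object fields, so the mutator lock must be held shared;
  // walking the bitmap additionally requires the heap bitmap lock exclusively.
  void Verify()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);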
Refactor array copy so a single implementation is shared between System and
other uses elsewhere in the runtime.
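A hypothetical sketch of the kind of shared helper this implies (name and
signature are illustrative, not the runtime's actual API):

  #include <cstddef>
  #include <cstring>

  // One copy routine usable from both System.arraycopy and internal callers;
  // memmove so overlapping source/destination ranges behave correctly.
  inline void CopyPrimitiveArrayElements(void* dst, const void* src,
                                         size_t element_count, size_t element_size) {
    memmove(dst, src, element_count * element_size);
  }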
Fix numerous 64bit build issues.
Change-Id: I1a5694c251a42c9eff71084dfdd4b51fff716822
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 02e01b8..ea8f89c 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -19,7 +19,7 @@
#include <string>
-#include "atomic_integer.h"
+#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "UniquePtr.h"
@@ -165,7 +165,7 @@
void Init() {
std::string error_msg;
mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T),
- PROT_READ | PROT_WRITE, &error_msg));
+ PROT_READ | PROT_WRITE, false, &error_msg));
CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
byte* addr = mem_map_->Begin();
CHECK(addr != NULL);
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index e099137..714e6f7 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -57,7 +57,7 @@
std::string error_msg;
UniquePtr<MemMap> mem_map(MemMap::MapAnonymous("card table", NULL,
capacity + 256, PROT_READ | PROT_WRITE,
- &error_msg));
+ false, &error_msg));
CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
// don't clear the card table to avoid unnecessary pages being allocated
@@ -72,11 +72,11 @@
byte* biased_begin = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
(reinterpret_cast<uintptr_t>(heap_begin) >> kCardShift));
if (((uintptr_t)biased_begin & 0xff) != kCardDirty) {
- int delta = kCardDirty - (reinterpret_cast<int>(biased_begin) & 0xff);
+ int delta = kCardDirty - (reinterpret_cast<uintptr_t>(biased_begin) & 0xff);
offset = delta + (delta < 0 ? 0x100 : 0);
biased_begin += offset;
}
- CHECK_EQ(reinterpret_cast<int>(biased_begin) & 0xff, kCardDirty);
+ CHECK_EQ(reinterpret_cast<uintptr_t>(biased_begin) & 0xff, kCardDirty);
return new CardTable(mem_map.release(), biased_begin, offset);
}
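The cast change above is the recurring 64-bit pattern in this patch: a pointer
no longer fits in int, so byte-of-pointer arithmetic has to go through
uintptr_t. A small illustration, not part of the patch:

  #include <cstdint>

  // Extracts the low byte of a pointer portably. On LP64 a cast to int would
  // truncate the pointer (and is rejected by the compiler); uintptr_t is
  // always wide enough.
  inline unsigned PointerLowByte(const void* p) {
    return static_cast<unsigned>(reinterpret_cast<uintptr_t>(p) & 0xff);
  }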
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 6d9dde7..0225f29 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -82,9 +82,9 @@
if (ref != nullptr) {
Object* new_ref = visitor_(ref, arg_);
if (new_ref != ref) {
- // Use SetFieldPtr to avoid card mark as an optimization which reduces dirtied pages and
- // improves performance.
- obj->SetFieldPtr(offset, new_ref, true);
+ // Use SetFieldObjectWithoutWriteBarrier to avoid card mark as an optimization which
+ // reduces dirtied pages and improves performance.
+ obj->SetFieldObjectWithoutWriteBarrier(offset, new_ref, true);
}
}
}
@@ -122,9 +122,8 @@
class AddToReferenceArrayVisitor {
public:
explicit AddToReferenceArrayVisitor(ModUnionTableReferenceCache* mod_union_table,
- std::vector<Object**>* references)
- : mod_union_table_(mod_union_table),
- references_(references) {
+ std::vector<mirror::HeapReference<Object>*>* references)
+ : mod_union_table_(mod_union_table), references_(references) {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
@@ -133,19 +132,19 @@
// Only add the reference if it is non null and fits our criteria.
if (ref != nullptr && mod_union_table_->AddReference(obj, ref)) {
// Push the adddress of the reference.
- references_->push_back(obj->GetFieldObjectAddr(offset));
+ references_->push_back(obj->GetFieldObjectReferenceAddr(offset));
}
}
private:
ModUnionTableReferenceCache* const mod_union_table_;
- std::vector<Object**>* const references_;
+ std::vector<mirror::HeapReference<Object>*>* const references_;
};
class ModUnionReferenceVisitor {
public:
explicit ModUnionReferenceVisitor(ModUnionTableReferenceCache* const mod_union_table,
- std::vector<Object**>* references)
+ std::vector<mirror::HeapReference<Object>*>* references)
: mod_union_table_(mod_union_table),
references_(references) {
}
@@ -160,7 +159,7 @@
}
private:
ModUnionTableReferenceCache* const mod_union_table_;
- std::vector<Object**>* const references_;
+ std::vector<mirror::HeapReference<Object>*>* const references_;
};
class CheckReferenceVisitor {
@@ -173,7 +172,7 @@
// Extra parameters are required since we use this same visitor signature for checking objects.
// TODO: Fixme when anotatalysis works with visitors.
- void operator()(const Object* obj, const Object* ref,
+ void operator()(Object* obj, Object* ref,
const MemberOffset& /* offset */, bool /* is_static */) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
Heap* heap = mod_union_table_->GetHeap();
@@ -219,8 +218,8 @@
void ModUnionTableReferenceCache::Verify() {
// Start by checking that everything in the mod union table is marked.
for (const auto& ref_pair : references_) {
- for (Object** ref : ref_pair.second) {
- CHECK(heap_->IsLiveObjectLocked(*ref));
+ for (mirror::HeapReference<Object>* ref : ref_pair.second) {
+ CHECK(heap_->IsLiveObjectLocked(ref->AsMirrorPtr()));
}
}
@@ -231,8 +230,8 @@
const byte* card = ref_pair.first;
if (*card == CardTable::kCardClean) {
std::set<const Object*> reference_set;
- for (Object** obj_ptr : ref_pair.second) {
- reference_set.insert(*obj_ptr);
+ for (mirror::HeapReference<Object>* obj_ptr : ref_pair.second) {
+ reference_set.insert(obj_ptr->AsMirrorPtr());
}
ModUnionCheckReferences visitor(this, reference_set);
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
@@ -255,8 +254,8 @@
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
uintptr_t end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "->{";
- for (Object** ref : ref_pair.second) {
- os << reinterpret_cast<const void*>(*ref) << ",";
+ for (mirror::HeapReference<Object>* ref : ref_pair.second) {
+ os << reinterpret_cast<const void*>(ref->AsMirrorPtr()) << ",";
}
os << "},";
}
@@ -266,7 +265,7 @@
Heap* heap = GetHeap();
CardTable* card_table = heap->GetCardTable();
- std::vector<Object**> cards_references;
+ std::vector<mirror::HeapReference<Object>*> cards_references;
ModUnionReferenceVisitor add_visitor(this, &cards_references);
for (const auto& card : cleared_cards_) {
@@ -294,13 +293,13 @@
cleared_cards_.clear();
size_t count = 0;
for (const auto& ref : references_) {
- for (const auto& obj_ptr : ref.second) {
- Object* obj = *obj_ptr;
+ for (mirror::HeapReference<Object>* obj_ptr : ref.second) {
+ Object* obj = obj_ptr->AsMirrorPtr();
if (obj != nullptr) {
Object* new_obj = visitor(obj, arg);
// Avoid dirtying pages in the image unless necessary.
if (new_obj != obj) {
- *obj_ptr = new_obj;
+ obj_ptr->Assign(new_obj);
}
}
}
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 5a99f1b..a89dbd1 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -112,20 +112,23 @@
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
- void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void Verify()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
virtual bool AddReference(const mirror::Object* obj, const mirror::Object* ref) = 0;
- void Dump(std::ostream& os);
+ void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
// Cleared card array, used to update the mod-union table.
ModUnionTable::CardSet cleared_cards_;
// Maps from dirty cards to their corresponding alloc space references.
- SafeMap<const byte*, std::vector<mirror::Object**>, std::less<const byte*>,
- GcAllocator<std::pair<const byte*, std::vector<mirror::Object**> > > > references_;
+ SafeMap<const byte*, std::vector<mirror::HeapReference<mirror::Object>*>, std::less<const byte*>,
+ GcAllocator<std::pair<const byte*, std::vector<mirror::HeapReference<mirror::Object>*> > > >
+ references_;
};
// Card caching implementation. Keeps track of which cards we cleared and only this information.
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 01c70fa..d6d1b3e 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -37,9 +37,11 @@
old_word = *address;
// Fast path: The bit is already set.
if ((old_word & mask) != 0) {
+ DCHECK(Test(obj));
return true;
}
} while (!__sync_bool_compare_and_swap(address, old_word, old_word | mask));
+ DCHECK(Test(obj));
return false;
}
@@ -56,6 +58,15 @@
void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
const Visitor& visitor) const {
DCHECK_LT(visit_begin, visit_end);
+#ifdef __LP64__
+ // TODO: make the optimized code below work in the 64bit case.
+ for (uintptr_t i = visit_begin; i < visit_end; i += kAlignment) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(i);
+ if (Test(obj)) {
+ visitor(obj);
+ }
+ }
+#else
const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment;
const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment;
@@ -114,6 +125,7 @@
visitor(obj);
edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
}
+#endif
}
inline bool SpaceBitmap::Modify(const mirror::Object* obj, bool do_set) {
@@ -130,6 +142,7 @@
} else {
*address = old_word & ~mask;
}
+ DCHECK_EQ(Test(obj), do_set);
return (old_word & mask) != 0;
}
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index b831843..a080bee 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -64,7 +64,7 @@
size_t bitmap_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
std::string error_msg;
UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), NULL, bitmap_size,
- PROT_READ | PROT_WRITE, &error_msg));
+ PROT_READ | PROT_WRITE, false, &error_msg));
if (UNLIKELY(mem_map.get() == nullptr)) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
return NULL;
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 2d6cde5..aa074eb 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -72,8 +72,8 @@
}
// Pack the bits in backwards so they come out in address order when using CLZ.
- static word OffsetToMask(uintptr_t offset_) {
- return static_cast<uintptr_t>(kWordHighBitMask) >> ((offset_ / kAlignment) % kBitsPerWord);
+ static word OffsetToMask(uintptr_t offset) {
+ return static_cast<uintptr_t>(kWordHighBitMask) >> ((offset / kAlignment) % kBitsPerWord);
}
inline bool Set(const mirror::Object* obj) {
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 9c1c5dc..d148ae5 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -118,7 +118,7 @@
while (ref_offsets != 0) {
size_t right_shift = CLZ(ref_offsets);
MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
- mirror::Object* ref = obj->GetFieldObject<mirror::Object*>(field_offset, false);
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(field_offset, false);
visitor(obj, ref, field_offset, is_static);
ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
}
@@ -127,17 +127,17 @@
// walk up the class inheritance hierarchy and find reference
// offsets the hard way. In the static case, just consider this
// class.
- for (const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
- klass != NULL;
- klass = is_static ? NULL : klass->GetSuperClass()) {
+ for (mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
+ klass != nullptr;
+ klass = is_static ? nullptr : klass->GetSuperClass()) {
size_t num_reference_fields = (is_static
? klass->NumReferenceStaticFields()
: klass->NumReferenceInstanceFields());
for (size_t i = 0; i < num_reference_fields; ++i) {
mirror::ArtField* field = (is_static ? klass->GetStaticField(i)
- : klass->GetInstanceField(i));
+ : klass->GetInstanceField(i));
MemberOffset field_offset = field->GetOffset();
- mirror::Object* ref = obj->GetFieldObject<mirror::Object*>(field_offset, false);
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(field_offset, false);
visitor(obj, ref, field_offset, is_static);
}
}
@@ -150,7 +150,7 @@
const size_t length = static_cast<size_t>(array->GetLength());
for (size_t i = 0; i < length; ++i) {
mirror::Object* element = array->GetWithoutChecks(static_cast<int32_t>(i));
- const size_t width = sizeof(mirror::Object*);
+ const size_t width = sizeof(mirror::HeapReference<mirror::Object>);
MemberOffset offset(i * width + mirror::Array::DataOffset(width).Int32Value());
visitor(array, element, offset, false);
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 0c27a3b..bfedac7 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
-#include "atomic_integer.h"
+#include "atomic.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 3fb78b0..03307f5 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -600,9 +600,9 @@
if (new_address != ref) {
DCHECK(new_address != nullptr);
// Don't need to mark the card since we updating the object address and not changing the
- // actual objects its pointing to. Using SetFieldPtr is better in this case since it does not
- // dirty cards and use additional memory.
- obj->SetFieldPtr(offset, new_address, false);
+ // actual objects its pointing to. Using SetFieldObjectWithoutWriteBarrier is better in this
+ // case since it does not dirty cards and use additional memory.
+ obj->SetFieldObjectWithoutWriteBarrier(offset, new_address, false);
}
}, kMovingClasses);
mirror::Class* klass = obj->GetClass();
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index f81a7c2..685b33c 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
-#include "atomic_integer.h"
+#include "atomic.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 309adb7..b1bbfc6 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -231,7 +231,7 @@
std::string error_str;
post_zygote_non_moving_space_mem_map_.reset(
MemMap::MapAnonymous("post zygote non-moving space", nullptr, 64 * MB,
- PROT_READ | PROT_WRITE, &error_str));
+ PROT_READ | PROT_WRITE, true, &error_str));
CHECK(post_zygote_non_moving_space_mem_map_.get() != nullptr) << error_str;
heap_begin = std::min(post_zygote_non_moving_space_mem_map_->Begin(), heap_begin);
heap_end = std::max(post_zygote_non_moving_space_mem_map_->End(), heap_end);
@@ -653,15 +653,15 @@
bool Heap::IsEnqueued(mirror::Object* ref) const {
// Since the references are stored as cyclic lists it means that once enqueued, the pending next
// will always be non-null.
- return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
+ return ref->GetFieldObject<mirror::Object>(GetReferencePendingNextOffset(), false) != nullptr;
}
-bool Heap::IsEnqueuable(const mirror::Object* ref) const {
+bool Heap::IsEnqueuable(mirror::Object* ref) const {
DCHECK(ref != nullptr);
const mirror::Object* queue =
- ref->GetFieldObject<mirror::Object*>(GetReferenceQueueOffset(), false);
+ ref->GetFieldObject<mirror::Object>(GetReferenceQueueOffset(), false);
const mirror::Object* queue_next =
- ref->GetFieldObject<mirror::Object*>(GetReferenceQueueNextOffset(), false);
+ ref->GetFieldObject<mirror::Object>(GetReferenceQueueNextOffset(), false);
return queue != nullptr && queue_next == nullptr;
}
@@ -720,7 +720,7 @@
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
std::ostringstream oss;
- int64_t total_bytes_free = GetFreeMemory();
+ size_t total_bytes_free = GetFreeMemory();
oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
<< " free bytes";
// If the allocation failed due to fragmentation, print out the largest continuous allocation.
@@ -805,7 +805,7 @@
return FindSpaceFromObject(obj, true) != nullptr;
}
-bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack,
+bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
bool search_live_stack, bool sorted) {
if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
return false;
@@ -874,7 +874,7 @@
return false;
}
-void Heap::VerifyObjectImpl(const mirror::Object* obj) {
+void Heap::VerifyObjectImpl(mirror::Object* obj) {
if (Thread::Current() == NULL ||
Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
return;
@@ -887,9 +887,9 @@
// to run
const byte* raw_addr =
reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
- const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
+ mirror::Class* c_c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
- const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
+ mirror::Class* c_c_c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
return c_c == c_c_c;
}
@@ -910,7 +910,7 @@
}
}
-void Heap::VerifyObjectBody(const mirror::Object* obj) {
+void Heap::VerifyObjectBody(mirror::Object* obj) {
CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
// Ignore early dawn of the universe verifications.
if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.Load()) < 10 * KB)) {
@@ -918,7 +918,7 @@
}
const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
mirror::Object::ClassOffset().Int32Value();
- const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
+ mirror::Class* c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
if (UNLIKELY(c == NULL)) {
LOG(FATAL) << "Null class in object: " << obj;
} else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
@@ -949,7 +949,7 @@
GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}
-void Heap::RecordFree(int64_t freed_objects, int64_t freed_bytes) {
+void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
DCHECK_LE(freed_bytes, num_bytes_allocated_.Load());
num_bytes_allocated_.FetchAndSub(freed_bytes);
if (Runtime::Current()->HasStatsEnabled()) {
@@ -1059,9 +1059,9 @@
: classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
}
- void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void operator()(mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
for (size_t i = 0; i < classes_.size(); ++i) {
- const mirror::Class* instance_class = o->GetClass();
+ mirror::Class* instance_class = o->GetClass();
if (use_is_assignable_from_) {
if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
++counts_[i];
@@ -1103,11 +1103,11 @@
: class_(c), max_count_(max_count), instances_(instances) {
}
- void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const mirror::Class* instance_class = o->GetClass();
+ void operator()(mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* instance_class = o->GetClass();
if (instance_class == class_) {
if (max_count_ == 0 || instances_.size() < max_count_) {
- instances_.push_back(const_cast<mirror::Object*>(o));
+ instances_.push_back(o);
}
}
}
@@ -1190,8 +1190,8 @@
return;
}
uint64_t start_time = NanoTime();
- int32_t before_size = GetTotalMemory();
- int32_t before_allocated = num_bytes_allocated_.Load();
+ uint32_t before_size = GetTotalMemory();
+ uint32_t before_allocated = num_bytes_allocated_.Load();
ThreadList* tl = Runtime::Current()->GetThreadList();
Thread* self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
@@ -1718,7 +1718,7 @@
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
// analysis on visitors.
- void operator()(const mirror::Object* obj, const mirror::Object* ref,
+ void operator()(mirror::Object* obj, mirror::Object* ref,
const MemberOffset& offset, bool /* is_static */) const
NO_THREAD_SAFETY_ANALYSIS {
if (ref == nullptr || IsLive(ref)) {
@@ -1813,7 +1813,7 @@
}
}
- bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
return heap_->IsLiveObjectLocked(obj, true, false, true);
}
@@ -1898,7 +1898,7 @@
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
- void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
+ void operator()(mirror::Object* obj, mirror::Object* ref, const MemberOffset& offset,
bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
// Filter out class references since changing an object's class does not mark the card as dirty.
// Also handles large objects, since the only reference they hold is a class reference.
@@ -1926,13 +1926,13 @@
// Print which field of the object is dead.
if (!obj->IsObjectArray()) {
- const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
+ mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
CHECK(klass != NULL);
- const mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
- : klass->GetIFields();
+ mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
+ : klass->GetIFields();
CHECK(fields != NULL);
for (int32_t i = 0; i < fields->GetLength(); ++i) {
- const mirror::ArtField* cur = fields->Get(i);
+ mirror::ArtField* cur = fields->Get(i);
if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
<< PrettyField(cur);
@@ -1940,7 +1940,7 @@
}
}
} else {
- const mirror::ObjectArray<mirror::Object>* object_array =
+ mirror::ObjectArray<mirror::Object>* object_array =
obj->AsObjectArray<mirror::Object>();
for (int32_t i = 0; i < object_array->GetLength(); ++i) {
if (object_array->Get(i) == ref) {
@@ -2278,14 +2278,14 @@
mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
DCHECK(reference != NULL);
DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
- return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
+ return reference->GetFieldObject<mirror::Object>(reference_referent_offset_, true);
}
void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
ScopedObjectAccess soa(self);
JValue result;
ArgArray arg_array(NULL, 0);
- arg_array.Append(reinterpret_cast<uint32_t>(object));
+ arg_array.Append(object);
soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
}
@@ -2299,7 +2299,7 @@
ScopedObjectAccess soa(self);
JValue result;
ArgArray arg_array(NULL, 0);
- arg_array.Append(reinterpret_cast<uint32_t>(cleared_references_.GetList()));
+ arg_array.Append(cleared_references_.GetList());
soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
}
@@ -2477,8 +2477,8 @@
} while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
}
-int64_t Heap::GetTotalMemory() const {
- int64_t ret = 0;
+size_t Heap::GetTotalMemory() const {
+ size_t ret = 0;
for (const auto& space : continuous_spaces_) {
// Currently don't include the image space.
if (!space->IsImageSpace()) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 26d67a1..499d27c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -21,7 +21,7 @@
#include <string>
#include <vector>
-#include "atomic_integer.h"
+#include "atomic.h"
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
@@ -204,14 +204,14 @@
void ChangeCollector(CollectorType collector_type);
// The given reference is believed to be to an object in the Java heap, check the soundness of it.
- void VerifyObjectImpl(const mirror::Object* o);
- void VerifyObject(const mirror::Object* o) {
+ void VerifyObjectImpl(mirror::Object* o);
+ void VerifyObject(mirror::Object* o) {
if (o != nullptr && this != nullptr && verify_object_mode_ > kNoHeapVerification) {
VerifyObjectImpl(o);
}
}
// Check that c.getClass() == c.getClass().getClass().
- bool VerifyClassClass(const mirror::Class* c) const;
+ bool VerifyClassClass(const mirror::Class* c) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Check sanity of all live references.
void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
@@ -232,9 +232,9 @@
// Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
// Requires the heap lock to be held.
- bool IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack = true,
+ bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true,
bool search_live_stack = true, bool sorted = false)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Returns true if there is any chance that the object (obj) will move.
bool IsMovableObject(const mirror::Object* obj) const;
@@ -358,7 +358,7 @@
// Freed bytes can be negative in cases where we copy objects from a compacted space to a
// free-list backed space.
- void RecordFree(int64_t freed_objects, int64_t freed_bytes);
+ void RecordFree(size_t freed_objects, size_t freed_bytes);
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
// The call is not needed if NULL is stored in the field.
@@ -411,16 +411,16 @@
// consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
// were specified. Android apps start with a growth limit (small heap size) which is
// cleared/extended for large apps.
- int64_t GetMaxMemory() const {
+ size_t GetMaxMemory() const {
return growth_limit_;
}
// Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an
// application.
- int64_t GetTotalMemory() const;
+ size_t GetTotalMemory() const;
// Implements java.lang.Runtime.freeMemory.
- int64_t GetFreeMemory() const {
+ size_t GetFreeMemory() const {
return GetTotalMemory() - num_bytes_allocated_;
}
@@ -550,7 +550,8 @@
static bool IsCompactingGC(CollectorType collector_type) {
return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS;
}
- bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const;
+ bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
mirror::Object* obj);
@@ -596,8 +597,8 @@
}
void EnqueueClearedReferences();
// Returns true if the reference object has not yet been enqueued.
- bool IsEnqueuable(const mirror::Object* ref) const;
- bool IsEnqueued(mirror::Object* ref) const;
+ bool IsEnqueuable(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsEnqueued(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj, RootVisitor mark_visitor,
void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -644,7 +645,7 @@
// No thread saftey analysis since we call this everywhere and it is impossible to find a proper
// lock ordering for it.
- void VerifyObjectBody(const mirror::Object *obj) NO_THREAD_SAFETY_ANALYSIS;
+ void VerifyObjectBody(mirror::Object *obj) NO_THREAD_SAFETY_ANALYSIS;
static void VerificationCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(GlobalSychronization::heap_bitmap_lock_);
@@ -781,13 +782,13 @@
size_t total_objects_freed_ever_;
// Number of bytes allocated. Adjusted after each allocation and free.
- AtomicInteger num_bytes_allocated_;
+ Atomic<size_t> num_bytes_allocated_;
// Bytes which are allocated and managed by native code but still need to be accounted for.
- AtomicInteger native_bytes_allocated_;
+ Atomic<size_t> native_bytes_allocated_;
// Data structure GC overhead.
- AtomicInteger gc_memory_overhead_;
+ Atomic<size_t> gc_memory_overhead_;
// Heap verification flags.
const bool verify_missing_card_marks_;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index d006349..2d73a71 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -52,8 +52,7 @@
ref->SetFieldObject(pending_next_offset, ref, false);
list_ = ref;
} else {
- mirror::Object* head =
- list_->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+ mirror::Object* head = list_->GetFieldObject<mirror::Object>(pending_next_offset, false);
ref->SetFieldObject(pending_next_offset, head, false);
list_->SetFieldObject(pending_next_offset, ref, false);
}
@@ -62,7 +61,7 @@
mirror::Object* ReferenceQueue::DequeuePendingReference() {
DCHECK(!IsEmpty());
MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
- mirror::Object* head = list_->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+ mirror::Object* head = list_->GetFieldObject<mirror::Object>(pending_next_offset, false);
DCHECK(head != nullptr);
mirror::Object* ref;
// Note: the following code is thread-safe because it is only called from ProcessReferences which
@@ -71,7 +70,7 @@
ref = list_;
list_ = nullptr;
} else {
- mirror::Object* next = head->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+ mirror::Object* next = head->GetFieldObject<mirror::Object>(pending_next_offset, false);
list_->SetFieldObject(pending_next_offset, next, false);
ref = head;
}
@@ -84,11 +83,11 @@
os << "Reference starting at list_=" << list_ << "\n";
while (cur != nullptr) {
mirror::Object* pending_next =
- cur->GetFieldObject<mirror::Object*>(heap_->GetReferencePendingNextOffset(), false);
+ cur->GetFieldObject<mirror::Object>(heap_->GetReferencePendingNextOffset(), false);
os << "PendingNext=" << pending_next;
if (cur->GetClass()->IsFinalizerReferenceClass()) {
os << " Zombie=" <<
- cur->GetFieldObject<mirror::Object*>(heap_->GetFinalizerReferenceZombieOffset(), false);
+ cur->GetFieldObject<mirror::Object>(heap_->GetFinalizerReferenceZombieOffset(), false);
}
os << "\n";
cur = pending_next;
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 89589c3..3f3069e 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -21,7 +21,7 @@
#include <string>
#include <vector>
-#include "atomic_integer.h"
+#include "atomic.h"
#include "base/timing_logger.h"
#include "globals.h"
#include "gtest/gtest.h"
@@ -83,7 +83,7 @@
private:
// Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
// calling AtomicEnqueueIfNotEnqueued.
- Mutex lock_;
+ Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// The heap contains the reference offsets.
Heap* const heap_;
// The actual reference list. Not a root since it will be nullptr when the GC is not running.
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 4dc17df..a314d74 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -29,7 +29,7 @@
capacity = RoundUp(capacity, kPageSize);
std::string error_msg;
UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
- PROT_READ | PROT_WRITE, &error_msg));
+ PROT_READ | PROT_WRITE, true, &error_msg));
if (mem_map.get() == nullptr) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity) << " with message " << error_msg;
@@ -69,7 +69,7 @@
return ret;
}
-size_t BumpPointerSpace::AllocationSize(const mirror::Object* obj) {
+size_t BumpPointerSpace::AllocationSize(mirror::Object* obj) {
return AllocationSizeNonvirtual(obj);
}
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 3e25b6b..d73fe3b 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -49,8 +49,7 @@
mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
// Return the storage space required by obj.
- virtual size_t AllocationSize(const mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual size_t AllocationSize(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// NOPS unless we support free lists.
virtual size_t Free(Thread*, mirror::Object*) {
@@ -60,7 +59,7 @@
return 0;
}
- size_t AllocationSizeNonvirtual(const mirror::Object* obj)
+ size_t AllocationSizeNonvirtual(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return obj->SizeOf();
}
@@ -135,7 +134,6 @@
byte* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
- size_t InternalAllocationSize(const mirror::Object* obj);
mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 9ae6a33..931ed21 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -228,7 +228,7 @@
return dlmalloc_space->MoreCore(increment);
}
-size_t DlMallocSpace::AllocationSize(const mirror::Object* obj) {
+size_t DlMallocSpace::AllocationSize(mirror::Object* obj) {
return AllocationSizeNonvirtual(obj);
}
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 24308f7..4507c36 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -48,13 +48,15 @@
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
- virtual size_t AllocationSize(const mirror::Object* obj);
- virtual size_t Free(Thread* self, mirror::Object* ptr);
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+ virtual size_t AllocationSize(mirror::Object* obj);
+ virtual size_t Free(Thread* self, mirror::Object* ptr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated);
- size_t AllocationSizeNonvirtual(const mirror::Object* obj) {
+ size_t AllocationSizeNonvirtual(mirror::Object* obj) {
void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj));
return mspace_usable_size(obj_ptr) + kChunkOverhead;
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 4777cc6..ebad8dd 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -35,7 +35,7 @@
namespace gc {
namespace space {
-AtomicInteger ImageSpace::bitmap_index_(0);
+Atomic<uint32_t> ImageSpace::bitmap_index_(0);
ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map,
accounting::SpaceBitmap* live_bitmap)
@@ -171,7 +171,7 @@
byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
while (current < End()) {
DCHECK_ALIGNED(current, kObjectAlignment);
- const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(current);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(current);
CHECK(live_bitmap_->Test(obj));
CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
current += RoundUp(obj->SizeOf(), kObjectAlignment);
@@ -227,7 +227,7 @@
*error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
return nullptr;
}
- size_t bitmap_index = bitmap_index_.FetchAndAdd(1);
+ uint32_t bitmap_index = bitmap_index_.FetchAndAdd(1);
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_file_name,
bitmap_index));
UniquePtr<accounting::SpaceBitmap> bitmap(
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index c3f0ae6..9e19774 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -94,7 +94,7 @@
friend class Space;
- static AtomicInteger bitmap_index_;
+ static Atomic<uint32_t> bitmap_index_;
UniquePtr<accounting::SpaceBitmap> live_bitmap_;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 7fcfed4..987a655 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -60,7 +60,7 @@
size_t* bytes_allocated) {
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
- PROT_READ | PROT_WRITE, &error_msg);
+ PROT_READ | PROT_WRITE, true, &error_msg);
if (UNLIKELY(mem_map == NULL)) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
return NULL;
@@ -92,9 +92,9 @@
return allocation_size;
}
-size_t LargeObjectMapSpace::AllocationSize(const mirror::Object* obj) {
+size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj) {
MutexLock mu(Thread::Current(), lock_);
- MemMaps::iterator found = mem_maps_.find(const_cast<mirror::Object*>(obj));
+ MemMaps::iterator found = mem_maps_.find(obj);
CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
return found->second->Size();
}
@@ -134,7 +134,7 @@
CHECK_EQ(size % kAlignment, 0U);
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
- PROT_READ | PROT_WRITE, &error_msg);
+ PROT_READ | PROT_WRITE, true, &error_msg);
CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}
@@ -244,7 +244,7 @@
return mem_map_->HasAddress(obj);
}
-size_t FreeListSpace::AllocationSize(const mirror::Object* obj) {
+size_t FreeListSpace::AllocationSize(mirror::Object* obj) {
AllocationHeader* header = GetAllocationHeader(obj);
DCHECK(Contains(obj));
DCHECK(!header->IsFree());
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index cd7c383..5274c8d 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -92,7 +92,7 @@
static LargeObjectMapSpace* Create(const std::string& name);
// Return the storage space required by obj.
- size_t AllocationSize(const mirror::Object* obj);
+ size_t AllocationSize(mirror::Object* obj);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
size_t Free(Thread* self, mirror::Object* ptr);
void Walk(DlMallocSpace::WalkCallback, void* arg) LOCKS_EXCLUDED(lock_);
@@ -118,8 +118,7 @@
virtual ~FreeListSpace();
static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
- size_t AllocationSize(const mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ size_t AllocationSize(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
size_t Free(Thread* self, mirror::Object* obj);
bool Contains(const mirror::Object* obj) const;
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 6c6cb97..f90e6c7 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -87,7 +87,7 @@
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, *capacity,
- PROT_READ | PROT_WRITE, &error_msg);
+ PROT_READ | PROT_WRITE, true, &error_msg);
if (mem_map == nullptr) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(*capacity) << ": " << error_msg;
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 9a42e2c..f17bcd2 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -58,9 +58,11 @@
// Allocate num_bytes allowing the underlying space to grow.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;
// Return the storage space required by obj.
- virtual size_t AllocationSize(const mirror::Object* obj) = 0;
- virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;
+ virtual size_t AllocationSize(mirror::Object* obj) = 0;
+ virtual size_t Free(Thread* self, mirror::Object* ptr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
#ifndef NDEBUG
virtual void CheckMoreCoreForPrecondition() {} // to be overridden in the debug build.
@@ -136,7 +138,9 @@
virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
bool low_memory_mode) = 0;
- void RegisterRecentFree(mirror::Object* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void RegisterRecentFree(mirror::Object* ptr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {
return &SweepCallback;
@@ -163,7 +167,8 @@
size_t growth_limit_;
private:
- static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
+ static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(MallocSpace);
};
@@ -204,13 +209,14 @@
return result;
}
- virtual size_t AllocationSize(const mirror::Object* obj) {
- size_t result = BaseMallocSpaceType::AllocationSize(reinterpret_cast<const mirror::Object*>(
- reinterpret_cast<const byte*>(obj) - kValgrindRedZoneBytes));
+ virtual size_t AllocationSize(mirror::Object* obj) {
+ size_t result = BaseMallocSpaceType::AllocationSize(reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<byte*>(obj) - kValgrindRedZoneBytes));
return result - 2 * kValgrindRedZoneBytes;
}
- virtual size_t Free(Thread* self, mirror::Object* ptr) {
+ virtual size_t Free(Thread* self, mirror::Object* ptr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
void* obj_after_rdz = reinterpret_cast<void*>(ptr);
void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
// Make redzones undefined.
@@ -221,7 +227,8 @@
return freed - 2 * kValgrindRedZoneBytes;
}
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
size_t freed = 0;
for (size_t i = 0; i < num_ptrs; i++) {
freed += Free(self, ptrs[i]);
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 177e38e..86e441e 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -220,7 +220,7 @@
return rosalloc_space->MoreCore(increment);
}
-size_t RosAllocSpace::AllocationSize(const mirror::Object* obj) {
+size_t RosAllocSpace::AllocationSize(mirror::Object* obj) {
return AllocationSizeNonvirtual(obj);
}
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 555eb3c..4cd5a6d 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -47,13 +47,15 @@
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
- virtual size_t AllocationSize(const mirror::Object* obj);
- virtual size_t Free(Thread* self, mirror::Object* ptr);
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+ virtual size_t AllocationSize(mirror::Object* obj);
+ virtual size_t Free(Thread* self, mirror::Object* ptr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated);
- size_t AllocationSizeNonvirtual(const mirror::Object* obj)
+ size_t AllocationSizeNonvirtual(mirror::Object* obj)
NO_THREAD_SAFETY_ANALYSIS {
// TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj));
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 95a79ec..98e6f65 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -223,7 +223,7 @@
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;
// Return the storage space required by obj.
- virtual size_t AllocationSize(const mirror::Object* obj) = 0;
+ virtual size_t AllocationSize(mirror::Object* obj) = 0;
// Returns how many bytes were freed.
virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc
index 427d547..9989ffe 100644
--- a/runtime/gc/space/space_test.cc
+++ b/runtime/gc/space/space_test.cc
@@ -163,6 +163,7 @@
EXPECT_TRUE(ptr5 == NULL);
// Release some memory.
+ ScopedObjectAccess soa(self);
size_t free3 = space->AllocationSize(ptr3);
EXPECT_EQ(free3, ptr3_bytes_allocated);
EXPECT_EQ(free3, space->Free(self, ptr3));
@@ -257,6 +258,7 @@
EXPECT_TRUE(ptr5 == NULL);
// Release some memory.
+ ScopedObjectAccess soa(self);
size_t free3 = space->AllocationSize(ptr3);
EXPECT_EQ(free3, ptr3_bytes_allocated);
space->Free(self, ptr3);
@@ -354,30 +356,36 @@
for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
size_t allocation_size = 0;
lots_of_objects[i] = space->Alloc(self, 16, &allocation_size);
- EXPECT_TRUE(lots_of_objects[i] != NULL);
+ EXPECT_TRUE(lots_of_objects[i] != nullptr);
InstallClass(lots_of_objects[i], 16);
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
}
- // Release memory and check pointers are NULL
- space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
- for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
- EXPECT_TRUE(lots_of_objects[i] == NULL);
+ // Release memory and check pointers are NULL.
+ {
+ ScopedObjectAccess soa(self);
+ space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+ for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+ EXPECT_TRUE(lots_of_objects[i] == nullptr);
+ }
}
// Succeeds, fits by adjusting the max allowed footprint.
for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
size_t allocation_size = 0;
lots_of_objects[i] = space->AllocWithGrowth(self, 1024, &allocation_size);
- EXPECT_TRUE(lots_of_objects[i] != NULL);
+ EXPECT_TRUE(lots_of_objects[i] != nullptr);
InstallClass(lots_of_objects[i], 1024);
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
}
// Release memory and check pointers are NULL
- space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
- for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
- EXPECT_TRUE(lots_of_objects[i] == NULL);
+ {
+ ScopedObjectAccess soa(self);
+ space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+ for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+ EXPECT_TRUE(lots_of_objects[i] == nullptr);
+ }
}
}
@@ -491,28 +499,30 @@
break;
}
- // Free some objects
- for (size_t i = 0; i < last_object; i += free_increment) {
- mirror::Object* object = lots_of_objects.get()[i];
- if (object == NULL) {
- continue;
+ {
+ // Free some objects
+ ScopedObjectAccess soa(self);
+ for (size_t i = 0; i < last_object; i += free_increment) {
+ mirror::Object* object = lots_of_objects.get()[i];
+ if (object == NULL) {
+ continue;
+ }
+ size_t allocation_size = space->AllocationSize(object);
+ if (object_size > 0) {
+ EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
+ } else {
+ EXPECT_GE(allocation_size, 8u);
+ }
+ space->Free(self, object);
+ lots_of_objects.get()[i] = NULL;
+ amount_allocated -= allocation_size;
+ footprint = space->GetFootprint();
+ EXPECT_GE(space->Size(), footprint); // invariant
}
- size_t allocation_size = space->AllocationSize(object);
- if (object_size > 0) {
- EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
- } else {
- EXPECT_GE(allocation_size, 8u);
- }
- space->Free(self, object);
- lots_of_objects.get()[i] = NULL;
- amount_allocated -= allocation_size;
- footprint = space->GetFootprint();
- EXPECT_GE(space->Size(), footprint); // invariant
+
+ free_increment >>= 1;
}
-
- free_increment >>= 1;
}
-
// The space has become empty here before allocating a large object
// below. For RosAlloc, revoke thread-local runs, which are kept
// even when empty for a performance reason, so that they won't
@@ -540,8 +550,10 @@
EXPECT_LE(space->Size(), growth_limit);
// Clean up
- space->Free(self, large_object);
-
+ {
+ ScopedObjectAccess soa(self);
+ space->Free(self, large_object);
+ }
// Sanity check footprint
footprint = space->GetFootprint();
EXPECT_LE(footprint, growth_limit);
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 10a5492..e0035b3 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -54,7 +54,7 @@
LOG(FATAL) << "Unimplemented";
return nullptr;
}
- virtual size_t AllocationSize(const mirror::Object* obj) {
+ virtual size_t AllocationSize(mirror::Object* obj) {
LOG(FATAL) << "Unimplemented";
return 0;
}