 runtime/gc/space/large_object_space.cc | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 9616ec1d9a..d1b4d7cf02 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -425,7 +425,6 @@ void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
 }
 
 size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
-  MutexLock mu(self, lock_);
   DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
                         << reinterpret_cast<void*>(End());
   DCHECK_ALIGNED(obj, kAlignment);
@@ -434,6 +433,15 @@ size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
   const size_t allocation_size = info->ByteSize();
   DCHECK_GT(allocation_size, 0U);
   DCHECK_ALIGNED(allocation_size, kAlignment);
+
+  // madvise the pages without lock
+  madvise(obj, allocation_size, MADV_DONTNEED);
+  if (kIsDebugBuild) {
+    // Can't disallow reads since we use them to find next chunks during coalescing.
+    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ);
+  }
+
+  MutexLock mu(self, lock_);
   info->SetByteSize(allocation_size, true);  // Mark as free.
   // Look at the next chunk.
   AllocationInfo* next_info = info->GetNextInfo();
@@ -475,11 +483,6 @@ size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
   --num_objects_allocated_;
   DCHECK_LE(allocation_size, num_bytes_allocated_);
   num_bytes_allocated_ -= allocation_size;
-  madvise(obj, allocation_size, MADV_DONTNEED);
-  if (kIsDebugBuild) {
-    // Can't disallow reads since we use them to find next chunks during coalescing.
-    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ);
-  }
   return allocation_size;
 }
 
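The net effect of this change is a narrower critical section: the madvise(2) call, a potentially slow trip into the kernel, now runs before lock_ is acquired rather than while holding it, so concurrent allocations and frees in the large object space no longer stall behind the page release. This is safe because the chunk still belongs to the freeing thread until SetByteSize() publishes it as free under the lock; the debug-build mprotect keeps PROT_READ (not PROT_NONE) because coalescing reads neighboring chunk headers. Below is a minimal standalone sketch of the same lock-narrowing pattern, using std::mutex and a hypothetical LargeChunkArena type rather than the actual ART classes:

// Sketch only: LargeChunkArena and its members are hypothetical stand-ins
// for FreeListSpace in large_object_space.cc.
#include <cstddef>
#include <mutex>
#include <sys/mman.h>

class LargeChunkArena {
 public:
  size_t Free(void* chunk, size_t size) {
    // Return the physical pages to the kernel *before* taking the lock.
    // The virtual mapping stays valid; this is the expensive syscall that
    // previously ran inside the critical section. The chunk is still owned
    // by this thread, so no other thread can observe or reuse it yet.
    madvise(chunk, size, MADV_DONTNEED);

    // Only the free-list bookkeeping (marking the chunk free, coalescing,
    // updating counters) needs mutual exclusion.
    std::lock_guard<std::mutex> guard(lock_);
    num_bytes_allocated_ -= size;
    // ... mark the chunk free and coalesce with neighbors under the lock ...
    return size;
  }

 private:
  std::mutex lock_;
  size_t num_bytes_allocated_ = 0;
};

The ordering matters: moving the syscall after the lock release instead would let another thread allocate the chunk before its pages were discarded, so issuing it while the caller still exclusively owns the memory is the only safe lock-free placement.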