| author | 2014-12-01 15:00:27 -0800 |
|---|---|
| committer | 2014-12-01 17:06:33 -0800 |
| commit | 446f9ee5031cf89b8964e29eba2c9f10a4d4aaf1 (patch) |
| tree | 224af2e70163cdb99ce61662feba63a9b51a2caf |
| parent | 87e0aa4c72962a54bbf64fc020c1f59d13c3e30f (diff) |
Try normal allocation if large object allocation fails
If a large object allocation fails, we now try the normal allocators.
Bug: 18124612
(cherry picked from commit f1c4d0e3a27e9b39916750147ecdea1418fcc231)
Change-Id: Ib83ebe53fbdd83aa2d23fd10a8bb10e149f8918f
| -rw-r--r-- | runtime/gc/heap-inl.h | 22 |
| -rw-r--r-- | runtime/gc/heap.h | 2 |
2 files changed, 18 insertions, 6 deletions
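
For illustration, here is a minimal, self-contained C++ sketch of the fallback behaviour the commit message describes. The names (`TryAllocLarge`, `TryAllocNormal`, `ClearPendingOom`, `AllocWithFallback`) are hypothetical stand-ins, not ART APIs; the real change lives in `Heap::AllocObjectWithAllocator` in the diff below, where `self->ClearException()` plays the role of clearing the pending OOM before the retry.

```cpp
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-ins for the two allocation paths; the real ART code goes
// through Heap::AllocLargeObject and the per-space allocators instead.
static bool g_pending_oom = false;

static void* TryAllocLarge(std::size_t byte_count) {
  // Simulate a large object space that can fail (e.g. because of virtual
  // address space fragmentation) by rejecting every other request.
  static int attempts = 0;
  if (++attempts % 2 == 0) {
    g_pending_oom = true;  // the failed attempt leaves an OOM pending
    return nullptr;
  }
  return std::malloc(byte_count);
}

static void* TryAllocNormal(std::size_t byte_count) {
  // Stand-in for the main space / non-moving space.
  return std::malloc(byte_count);
}

static void ClearPendingOom() {  // analogous to self->ClearException()
  g_pending_oom = false;
}

// Prefer the large object space for big requests, but fall back to the
// normal spaces if that attempt fails -- the behaviour this commit adds.
static void* AllocWithFallback(std::size_t byte_count, std::size_t large_threshold) {
  if (byte_count >= large_threshold) {
    if (void* obj = TryAllocLarge(byte_count)) {
      return obj;
    }
    // We are going to retry, so the OOM from the failed attempt must not
    // stay pending.
    ClearPendingOom();
  }
  return TryAllocNormal(byte_count);
}

int main() {
  constexpr std::size_t kLargeThreshold = 12 * 1024;
  for (int i = 0; i < 4; ++i) {
    void* obj = AllocWithFallback(64 * 1024, kLargeThreshold);
    std::printf("allocation %d -> %p (pending OOM: %d)\n", i, obj, g_pending_oom);
    std::free(obj);
  }
  return 0;
}
```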
```diff
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 3101c68599..9d2f6d1238 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -48,11 +48,20 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
   }
   // Need to check that we arent the large object allocator since the large object allocation code
   // path this function. If we didn't check we would have an infinite loop.
+  mirror::Object* obj;
   if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
-    return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
-                                                            pre_fence_visitor);
+    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
+                                                           pre_fence_visitor);
+    if (obj != nullptr) {
+      return obj;
+    } else {
+      // There should be an OOM exception, since we are retrying, clear it.
+      self->ClearException();
+    }
+    // If the large object allocation failed, try to use the normal spaces (main space,
+    // non moving space). This can happen if there is significant virtual address space
+    // fragmentation.
   }
-  mirror::Object* obj;
   AllocationTimer alloc_timer(this, &obj);
   size_t bytes_allocated;
   size_t usable_size;
@@ -171,10 +180,13 @@ inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
 }
 
 template <bool kInstrumented, typename PreFenceVisitor>
-inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
+inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klass,
                                               size_t byte_count,
                                               const PreFenceVisitor& pre_fence_visitor) {
-  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
+  // Save and restore the class in case it moves.
+  StackHandleScope<1> hs(self);
+  auto klass_wrapper = hs.NewHandleWrapper(klass);
+  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                          kAllocatorTypeLOS,
                                                                          pre_fence_visitor);
 }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 69a573ef98..4e1a0ff242 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -654,7 +654,7 @@ class Heap {
 
   // We don't force this to be inlined since it is a slow path.
   template <bool kInstrumented, typename PreFenceVisitor>
-  mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
+  mirror::Object* AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count,
                                    const PreFenceVisitor& pre_fence_visitor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
```
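
The other half of the change is the switch from `mirror::Class*` to `mirror::Class**` together with the `StackHandleScope`/`NewHandleWrapper` pair, which keeps the caller's `klass` pointer valid if the retried allocation triggers a moving collection. Below is a self-contained toy sketch of that idea; `ScopedRootWrapper`, `AllocMayMove`, and the root list are invented stand-ins for ART's handle scopes, not real runtime APIs.

```cpp
#include <cstdio>
#include <vector>

// Toy "managed object"; in ART this would be a mirror::Class.
struct Object {
  int id;
};

// Toy root registry standing in for ART's StackHandleScope/HandleWrapper:
// any pointer slot registered here gets patched when its object moves.
static std::vector<Object**> g_roots;

class ScopedRootWrapper {
 public:
  explicit ScopedRootWrapper(Object** slot) { g_roots.push_back(slot); }
  ~ScopedRootWrapper() { g_roots.pop_back(); }
};

// Simulated moving collection: relocate every rooted object and patch the
// registered slots so callers keep seeing a valid pointer.
static void MoveRootedObjects() {
  for (Object** slot : g_roots) {
    Object* moved = new Object(**slot);
    delete *slot;
    *slot = moved;
  }
}

// An allocation that, like the retried allocation in the patch, may trigger
// a collection that moves existing objects before it returns.
static Object* AllocMayMove(int id) {
  MoveRootedObjects();
  return new Object{id};
}

// Takes Object** rather than Object* so the callee can keep the caller's
// pointer valid across the possibly-moving allocation -- the same reason
// AllocLargeObject now takes mirror::Class**.
static Object* AllocUsingClass(Object** klass, int id) {
  ScopedRootWrapper wrap(klass);   // cf. hs.NewHandleWrapper(klass)
  Object* obj = AllocMayMove(id);  // may relocate *klass
  std::printf("class %d is now at %p\n", (*klass)->id, static_cast<void*>(*klass));
  return obj;
}

int main() {
  Object* klass = new Object{1};
  Object* obj = AllocUsingClass(&klass, 2);
  std::printf("allocated object %d\n", obj->id);
  delete obj;
  delete klass;
  return 0;
}
```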