summary refs log tree commit diff
path: root/runtime/gc/heap-inl.h
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/gc/heap-inl.h')
-rw-r--r--  runtime/gc/heap-inl.h  |  11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 9e1524e657..922b58870d 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -209,13 +209,12 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
}
// IsGcConcurrent() isn't known at compile time so we can optimize by not checking it for the
// BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
- // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant
- // since the allocator_type should be constant propagated.
- if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()
- && UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) {
+ // optimized out.
+ if (IsGcConcurrent() && UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) {
need_gc = true;
}
GetMetrics()->TotalBytesAllocated()->Add(bytes_tl_bulk_allocated);
+ GetMetrics()->TotalBytesAllocatedDelta()->Add(bytes_tl_bulk_allocated);
}
}
if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
@@ -442,7 +441,7 @@ inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_co
return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}
-inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
+inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type ATTRIBUTE_UNUSED,
size_t alloc_size,
bool grow) {
size_t old_target = target_footprint_.load(std::memory_order_relaxed);
@@ -457,7 +456,7 @@ inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
return true;
}
// We are between target_footprint_ and growth_limit_ .
- if (AllocatorMayHaveConcurrentGC(allocator_type) && IsGcConcurrent()) {
+ if (IsGcConcurrent()) {
return false;
} else {
if (grow) {