Revert "Revert CC related changes."
Disable entrypoint switching in ResetQuickAllocEntryPointsForThread
instead of callers. Fixes bug where instrumentation would switch
to non CC entrypoints for non X86_64 architectures causing aborts.
Bug: 31018974
Test: test-art-host
Test: test/run-test 099
This reverts commit 96172e0172c5fca6e9a5ad4b857a24d8c7b064e5.
Change-Id: If206694ae35ff4446c6a8a97bfbcbf2dac35e3f9
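
For context, this is roughly the guard the change moves into
Thread::ResetQuickAllocEntryPointsForThread. The function lives in
thread.cc, which is not part of the diff below, so treat the exact
names and condition as an assumption based on current ART sources:

  void Thread::ResetQuickAllocEntryPointsForThread(bool is_marking) {
    if (kUseReadBarrier && kRuntimeISA != kX86_64) {
      // Entrypoint switching is only implemented for x86_64, so every
      // other architecture must stay on the CC (marking) entrypoints.
      is_marking = true;
    }
    ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints, is_marking);
  }

With the check centralized here, callers such as the instrumentation
code can request a reset unconditionally without switching non-x86_64
threads off the CC entrypoints.
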
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 97129e8..54f2210 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -247,7 +247,7 @@
if (allocator_type != kAllocatorTypeTLAB &&
allocator_type != kAllocatorTypeRegionTLAB &&
allocator_type != kAllocatorTypeRosAlloc &&
- UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
+ UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) {
return nullptr;
}
mirror::Object* ret;
@@ -267,8 +267,9 @@
if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
// If running on valgrind or asan, we should be using the instrumented path.
size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
- if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
- max_bytes_tl_bulk_allocated))) {
+ if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
+ max_bytes_tl_bulk_allocated,
+ kGrow))) {
return nullptr;
}
ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
@@ -277,14 +278,18 @@
DCHECK(!is_running_on_memory_tool_);
size_t max_bytes_tl_bulk_allocated =
rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
- if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
- max_bytes_tl_bulk_allocated))) {
+ if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
+ max_bytes_tl_bulk_allocated,
+ kGrow))) {
return nullptr;
}
if (!kInstrumented) {
DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
}
- ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
+ ret = rosalloc_space_->AllocNonvirtual(self,
+ alloc_size,
+ bytes_allocated,
+ usable_size,
bytes_tl_bulk_allocated);
}
break;
@@ -292,22 +297,34 @@
case kAllocatorTypeDlMalloc: {
if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
// If running on valgrind, we should be using the instrumented path.
- ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+ ret = dlmalloc_space_->Alloc(self,
+ alloc_size,
+ bytes_allocated,
+ usable_size,
bytes_tl_bulk_allocated);
} else {
DCHECK(!is_running_on_memory_tool_);
- ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
+ ret = dlmalloc_space_->AllocNonvirtual(self,
+ alloc_size,
+ bytes_allocated,
+ usable_size,
bytes_tl_bulk_allocated);
}
break;
}
case kAllocatorTypeNonMoving: {
- ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+ ret = non_moving_space_->Alloc(self,
+ alloc_size,
+ bytes_allocated,
+ usable_size,
bytes_tl_bulk_allocated);
break;
}
case kAllocatorTypeLOS: {
- ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+ ret = large_object_space_->Alloc(self,
+ alloc_size,
+ bytes_allocated,
+ usable_size,
bytes_tl_bulk_allocated);
// Note that the bump pointer spaces aren't necessarily next to
// the other continuous spaces like the non-moving alloc space or
@@ -315,80 +332,38 @@
DCHECK(ret == nullptr || large_object_space_->Contains(ret));
break;
}
- case kAllocatorTypeTLAB: {
- DCHECK_ALIGNED(alloc_size, space::BumpPointerSpace::kAlignment);
- if (UNLIKELY(self->TlabSize() < alloc_size)) {
- const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
- if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, new_tlab_size))) {
- return nullptr;
- }
- // Try allocating a new thread local buffer, if the allocaiton fails the space must be
- // full so return null.
- if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
- return nullptr;
- }
- *bytes_tl_bulk_allocated = new_tlab_size;
- } else {
- *bytes_tl_bulk_allocated = 0;
- }
- // The allocation can't fail.
- ret = self->AllocTlab(alloc_size);
- DCHECK(ret != nullptr);
- *bytes_allocated = alloc_size;
- *usable_size = alloc_size;
- break;
- }
case kAllocatorTypeRegion: {
DCHECK(region_space_ != nullptr);
alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
+ ret = region_space_->AllocNonvirtual<false>(alloc_size,
+ bytes_allocated,
+ usable_size,
bytes_tl_bulk_allocated);
break;
}
+ case kAllocatorTypeTLAB:
+ FALLTHROUGH_INTENDED;
case kAllocatorTypeRegionTLAB: {
- DCHECK(region_space_ != nullptr);
- DCHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
+ DCHECK_ALIGNED(alloc_size, kObjectAlignment);
+ static_assert(space::RegionSpace::kAlignment == space::BumpPointerSpace::kAlignment,
+ "mismatched alignments");
+ static_assert(kObjectAlignment == space::BumpPointerSpace::kAlignment,
+ "mismatched alignments");
if (UNLIKELY(self->TlabSize() < alloc_size)) {
- if (space::RegionSpace::kRegionSize >= alloc_size) {
- // Non-large. Check OOME for a tlab.
- if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, space::RegionSpace::kRegionSize))) {
- // Try to allocate a tlab.
- if (!region_space_->AllocNewTlab(self)) {
- // Failed to allocate a tlab. Try non-tlab.
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
- bytes_tl_bulk_allocated);
- return ret;
- }
- *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
- // Fall-through.
- } else {
- // Check OOME for a non-tlab allocation.
- if (!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size)) {
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
- bytes_tl_bulk_allocated);
- return ret;
- } else {
- // Neither tlab or non-tlab works. Give up.
- return nullptr;
- }
- }
- } else {
- // Large. Check OOME.
- if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
- bytes_tl_bulk_allocated);
- return ret;
- } else {
- return nullptr;
- }
- }
- } else {
- *bytes_tl_bulk_allocated = 0; // Allocated in an existing buffer.
+ // kAllocatorTypeTLAB may be the allocator for a region space TLAB if the GC is
+ // not marking; that is why the allocator type is not passed down.
+ return AllocWithNewTLAB(self,
+ alloc_size,
+ kGrow,
+ bytes_allocated,
+ usable_size,
+ bytes_tl_bulk_allocated);
}
// The allocation can't fail.
ret = self->AllocTlab(alloc_size);
DCHECK(ret != nullptr);
*bytes_allocated = alloc_size;
+ *bytes_tl_bulk_allocated = 0; // Allocated in an existing buffer.
*usable_size = alloc_size;
break;
}
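
The two deleted TLAB paths above are folded into the new
Heap::AllocWithNewTLAB helper, whose definition (in heap.cc) is not
part of this patch. The body below is a sketch reconstructed from the
deleted lines; the real implementation, including how it re-derives
the allocator type that is deliberately not passed down, is an
assumption:

  mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
                                         size_t alloc_size,
                                         bool grow,
                                         size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated) {
    // Assumed: the allocator is re-derived here rather than passed in, per the
    // comment at the call site.
    const AllocatorType allocator_type = GetCurrentAllocator();
    if (allocator_type == kAllocatorTypeTLAB) {
      // Bump pointer space TLAB: grow the buffer by a fixed increment.
      const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
      if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow)) ||
          !bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
        return nullptr;  // OOME, or the space is full.
      }
      *bytes_tl_bulk_allocated = new_tlab_size;
    } else {
      DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
      if (space::RegionSpace::kRegionSize >= alloc_size) {
        // Non-large. Check OOME for a tlab.
        if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
                                              space::RegionSpace::kRegionSize,
                                              grow))) {
          if (!region_space_->AllocNewTlab(self)) {
            // Failed to allocate a tlab. Try a non-tlab region allocation.
            return region_space_->AllocNonvirtual<false>(alloc_size,
                                                         bytes_allocated,
                                                         usable_size,
                                                         bytes_tl_bulk_allocated);
          }
          *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
          // Fall through to allocate from the fresh TLAB.
        } else if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
          // No room for a tlab; try a plain non-tlab allocation instead.
          return region_space_->AllocNonvirtual<false>(alloc_size,
                                                       bytes_allocated,
                                                       usable_size,
                                                       bytes_tl_bulk_allocated);
        } else {
          return nullptr;  // Neither tlab nor non-tlab works. Give up.
        }
      } else {
        // Large allocation: bypass the TLAB entirely.
        if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
          return region_space_->AllocNonvirtual<false>(alloc_size,
                                                       bytes_allocated,
                                                       usable_size,
                                                       bytes_tl_bulk_allocated);
        }
        return nullptr;
      }
    }
    // A TLAB with enough space now exists; this allocation cannot fail.
    mirror::Object* ret = self->AllocTlab(alloc_size);
    DCHECK(ret != nullptr);
    *bytes_allocated = alloc_size;
    *usable_size = alloc_size;
    return ret;
  }
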
@@ -408,15 +383,16 @@
return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}
-template <bool kGrow>
-inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
+inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
+ size_t alloc_size,
+ bool grow) {
size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
if (UNLIKELY(new_footprint > growth_limit_)) {
return true;
}
if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
- if (!kGrow) {
+ if (!grow) {
return true;
}
// TODO: Grow for allocation is racy, fix it.
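
Because IsOutOfMemoryOnAllocation stays inline, call sites that pass
the compile-time constant kGrow (as in the hunks above) should still
have the branch folded by the compiler; the gain is that a plain bool
can now be threaded through non-template helpers such as
AllocWithNewTLAB. A standalone sketch of the pattern, not ART code:

  #include <cstddef>
  #include <cstdio>

  // The bool template parameter becomes an ordinary argument on an inline
  // function: constant callers still fold the branch, runtime callers can
  // forward a variable.
  inline bool IsOutOfMemorySketch(size_t new_footprint, size_t limit, bool grow) {
    if (new_footprint <= limit) {
      return false;  // Within budget.
    }
    return !grow;  // Over budget: only a growing allocation may proceed.
  }

  int main(int argc, char**) {
    constexpr bool kGrow = true;         // Constant caller: branch folds at -O2.
    const bool runtime_grow = argc > 1;  // Runtime caller: same function works.
    std::printf("%d %d\n",
                IsOutOfMemorySketch(100, 50, kGrow),
                IsOutOfMemorySketch(100, 50, runtime_grow));
    return 0;
  }
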