ART: Refactor for bugprone-argument-comment
This change handles the runtime/ directory.
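
For reference, bugprone-argument-comment verifies that an argument comment of
the form /* param= */ matches the name of the corresponding parameter in the
callee's declaration; the trailing '=' marks the comment as an intentional
argument-name annotation. A minimal sketch of the before/after style
(hypothetical function, not taken from this change):

  void MapRegion(size_t capacity, bool low_4gb);

  // Old style, used throughout these files before this change.
  MapRegion(128, /* low_4gb */ false);

  // New style: clang-tidy reports a mismatch if "low_4gb" does not match
  // the parameter name in the declaration above.
  MapRegion(128, /* low_4gb= */ false);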
Bug: 116054210
Test: WITH_TIDY=1 mmma art
Change-Id: Ibc0d5086809d647f0ce4df5452eb84442d27ecf0
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 10af10d..313b2b4 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -253,10 +253,10 @@
void Init() {
std::string error_msg;
mem_map_ = MemMap::MapAnonymous(name_.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
capacity_ * sizeof(begin_[0]),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_.Begin();
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index bb2beaa..80c4c76 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -49,10 +49,10 @@
RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 7cddec6..9a5bde8 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -65,10 +65,10 @@
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("card table",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
capacity + 256,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 40dc6e1..b4026fc 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -462,7 +462,7 @@
for (mirror::HeapReference<mirror::Object>* obj_ptr : references) {
if (obj_ptr->AsMirrorPtr() != nullptr) {
all_null = false;
- visitor->MarkHeapReference(obj_ptr, /*do_atomic_update*/ false);
+ visitor->MarkHeapReference(obj_ptr, /*do_atomic_update=*/ false);
}
}
count += references.size();
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 8bdf6da..b369a66 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -40,10 +40,10 @@
static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
std::string error_msg;
mem_map_ = MemMap::MapAnonymous("read barrier table",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
<< "couldn't allocate read barrier table: " << error_msg;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 9dea2f8..fba62c3 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -75,7 +75,7 @@
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
*contains_reference_to_target_space_ = true;
- collector_->MarkHeapReference(ref_ptr, /*do_atomic_update*/ false);
+ collector_->MarkHeapReference(ref_ptr, /*do_atomic_update=*/ false);
DCHECK(!target_space_->HasAddress(ref_ptr->AsMirrorPtr()));
}
}
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 2946486..76d5d9d 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -85,10 +85,10 @@
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 0dbafde..8cc0c4e 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -92,10 +92,10 @@
size_t max_num_of_pages = max_capacity_ / kPageSize;
std::string error_msg;
page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
RoundUp(max_num_of_pages, kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
page_map_ = page_map_mem_map_.Begin();
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 3095f9f..8fd235f 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -76,8 +76,8 @@
// we can avoid an expensive CAS.
// For the baker case, an object is marked if either the mark bit marked or the bitmap bit is
// set.
- success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
- /* rb_state */ ReadBarrier::GrayState());
+ success = ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
+ /* rb_state= */ ReadBarrier::GrayState());
} else {
success = !bitmap->AtomicTestAndSet(ref);
}
@@ -113,8 +113,8 @@
}
// This may or may not succeed, which is ok because the object may already be gray.
bool success =
- ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
- /* rb_state */ ReadBarrier::GrayState());
+ ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
+ /* rb_state= */ ReadBarrier::GrayState());
if (success) {
MutexLock mu(self, immune_gray_stack_lock_);
immune_gray_stack_.push_back(ref);
@@ -186,7 +186,7 @@
region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(holder, offset, from_ref);
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
- heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
+ heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
UNREACHABLE();
}
} else {
@@ -209,8 +209,8 @@
if (UNLIKELY(mark_from_read_barrier_measurements_)) {
ret = MarkFromReadBarrierWithMeasurements(self, from_ref);
} else {
- ret = Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self,
- from_ref);
+ ret = Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
+ from_ref);
}
// Only set the mark bit for baker barrier.
if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 46cc79c..2ae4676 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -135,10 +135,10 @@
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"concurrent copying sweep array free buffer",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -488,7 +488,7 @@
TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
// Only change live bytes for full CC.
cc->region_space_->SetFromSpace(
- cc->rb_table_, evac_mode, /*clear_live_bytes*/ !cc->young_gen_);
+ cc->rb_table_, evac_mode, /*clear_live_bytes=*/ !cc->young_gen_);
}
cc->SwapStacks();
if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -601,7 +601,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
// If an object is not gray, it should only have references to things in the immune spaces.
if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
- obj->VisitReferences</*kVisitNativeRoots*/true,
+ obj->VisitReferences</*kVisitNativeRoots=*/true,
kDefaultVerifyFlags,
kWithoutReadBarrier>(visitor, visitor);
}
@@ -669,8 +669,8 @@
// Objects on clean cards should never have references to newly allocated regions. Note
// that aged cards are also not clean.
if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
- VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder*/ obj);
- obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
+ VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder=*/ obj);
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kVerifyNone, kWithoutReadBarrier>(
internal_visitor, internal_visitor);
}
};
@@ -742,7 +742,7 @@
TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings());
accounting::CardTable* const card_table = heap_->GetCardTable();
Thread* const self = Thread::Current();
- using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ true>;
+ using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ true>;
VisitorType visitor(self);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
@@ -769,11 +769,11 @@
: card;
},
/* card modified visitor */ VoidFunctor());
- card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
- space->Begin(),
- space->End(),
- visitor,
- gc::accounting::CardTable::kCardAged);
+ card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
+ gc::accounting::CardTable::kCardAged);
}
}
}
@@ -781,7 +781,7 @@
void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings());
accounting::CardTable* const card_table = heap_->GetCardTable();
- using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ false>;
+ using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ false>;
Thread* const self = Thread::Current();
VisitorType visitor(self);
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -791,11 +791,11 @@
// Don't need to scan aged cards since we did these before the pause. Note that scanning cards
// also handles the mod-union table cards.
- card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
- space->Begin(),
- space->End(),
- visitor,
- gc::accounting::CardTable::kCardDirty);
+ card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
+ gc::accounting::CardTable::kCardDirty);
if (table != nullptr) {
// Add the cards to the mod-union table so that we can clear cards to save RAM.
table->ProcessCards();
@@ -1376,7 +1376,7 @@
space::RegionSpace* region_space = RegionSpace();
CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
VerifyNoFromSpaceRefsFieldVisitor visitor(this);
- obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor,
visitor);
if (kUseBakerReadBarrier) {
@@ -1558,8 +1558,8 @@
MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
if (mark_stack_mode == kMarkStackModeThreadLocal) {
// Process the thread-local mark stacks and the GC mark stack.
- count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
- /* checkpoint_callback */ nullptr);
+ count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
+ /* checkpoint_callback= */ nullptr);
while (!gc_mark_stack_->IsEmpty()) {
mirror::Object* to_ref = gc_mark_stack_->PopBack();
ProcessMarkStackRef(to_ref);
@@ -1734,7 +1734,7 @@
CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space";
AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref);
AssertToSpaceInvariantFieldVisitor visitor(this);
- to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor,
visitor);
}
@@ -1769,7 +1769,7 @@
DisableWeakRefAccessCallback dwrac(this);
// Process the thread local mark stacks one last time after switching to the shared mark stack
// mode and disable weak ref accesses.
- ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ true, &dwrac);
+ ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true, &dwrac);
if (kVerboseMode) {
LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
}
@@ -1833,7 +1833,7 @@
void ConcurrentCopying::Sweep(bool swap_bitmaps) {
if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
// Only sweep objects on the live stack.
- SweepArray(heap_->GetLiveStack(), /* swap_bitmaps */ false);
+ SweepArray(heap_->GetLiveStack(), /* swap_bitmaps= */ false);
} else {
{
TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
@@ -2060,7 +2060,7 @@
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- Sweep(/* swap_bitmaps */ false);
+ Sweep(/* swap_bitmaps= */ false);
SwapBitmaps();
heap_->UnBindBitmaps();
@@ -2171,7 +2171,7 @@
LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
LOG(FATAL) << "Invalid reference " << ref
<< " referenced from object " << obj << " at offset " << offset;
}
@@ -2264,12 +2264,12 @@
LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
LOG(FATAL) << "Invalid reference " << ref;
}
} else {
// Check to-space invariant in non-moving space.
- AssertToSpaceInvariantInNonMovingSpace(/* obj */ nullptr, ref);
+ AssertToSpaceInvariantInNonMovingSpace(/* obj= */ nullptr, ref);
}
}
}
@@ -2440,7 +2440,7 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
- collector_->MarkRoot</*kGrayImmuneObject*/false>(thread_, root);
+ collector_->MarkRoot</*kGrayImmuneObject=*/false>(thread_, root);
}
private:
@@ -2462,7 +2462,7 @@
DCHECK_EQ(Thread::Current(), thread_running_gc_);
RefFieldsVisitor<kNoUnEvac> visitor(this, thread_running_gc_);
// Disable the read barrier for a performance reason.
- to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
thread_running_gc_->ModifyDebugDisallowReadBarrier(-1);
@@ -2476,10 +2476,10 @@
DCHECK_EQ(Thread::Current(), thread_running_gc_);
mirror::Object* ref = obj->GetFieldObject<
mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
- mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, kNoUnEvac, /*kFromGCThread*/true>(
+ mirror::Object* to_ref = Mark</*kGrayImmuneObject=*/false, kNoUnEvac, /*kFromGCThread=*/true>(
thread_running_gc_,
ref,
- /*holder*/ obj,
+ /*holder=*/ obj,
offset);
if (to_ref == ref) {
return;
@@ -2553,7 +2553,7 @@
mirror::CompressedReference<mirror::Object>* const root = roots[i];
if (!root->IsNull()) {
// kGrayImmuneObject is true because this is used for the thread flip.
- MarkRoot</*kGrayImmuneObject*/true>(self, root);
+ MarkRoot</*kGrayImmuneObject=*/true>(self, root);
}
}
}
@@ -2702,7 +2702,7 @@
if (UNLIKELY(klass == nullptr)) {
// Remove memory protection from the region space and log debugging information.
region_space_->Unprotect();
- heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
+ heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
}
// There must not be a read barrier to avoid nested RB that might violate the to-space invariant.
// Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta
@@ -2716,7 +2716,7 @@
size_t bytes_allocated = 0U;
size_t dummy;
bool fall_back_to_non_moving = false;
- mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac*/ true>(
+ mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac=*/ true>(
region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
bytes_allocated = region_space_bytes_allocated;
if (LIKELY(to_ref != nullptr)) {
@@ -2790,7 +2790,7 @@
DCHECK(region_space_->IsInToSpace(to_ref));
if (bytes_allocated > space::RegionSpace::kRegionSize) {
// Free the large alloc.
- region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated);
+ region_space_->FreeLarge</*kForEvac=*/ true>(to_ref, bytes_allocated);
} else {
// Record the lost copy for later reuse.
heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_relaxed);
@@ -3017,7 +3017,7 @@
// AtomicSetReadBarrierState since it will fault if the address is not
// valid.
region_space_->Unprotect();
- heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true);
+ heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true);
}
// Not marked nor on the allocation stack. Try to mark it.
// This may or may not succeed, which is ok.
@@ -3131,7 +3131,7 @@
} while (!field->CasWeakRelaxed(from_ref, to_ref));
} else {
// TODO: Why is this seq_cst when the above is relaxed? Document memory ordering.
- field->Assign</* kIsVolatile */ true>(to_ref);
+ field->Assign</* kIsVolatile= */ true>(to_ref);
}
}
return true;
@@ -3151,7 +3151,7 @@
// We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
+ /*concurrent=*/ true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
@@ -3169,7 +3169,8 @@
ScopedTrace tr(__FUNCTION__);
const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
mirror::Object* ret =
- Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self, from_ref);
+ Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
+ from_ref);
if (measure_read_barrier_slow_path_) {
rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed);
}
diff --git a/runtime/gc/collector/immune_spaces.cc b/runtime/gc/collector/immune_spaces.cc
index 3b59618..3c20e51 100644
--- a/runtime/gc/collector/immune_spaces.cc
+++ b/runtime/gc/collector/immune_spaces.cc
@@ -57,7 +57,7 @@
if (image_oat_file != nullptr) {
intervals.push_back(Interval(reinterpret_cast<uintptr_t>(image_oat_file->Begin()),
reinterpret_cast<uintptr_t>(image_oat_file->End()),
- /*image*/false));
+ /*image=*/false));
}
}
intervals.push_back(Interval(space_begin, space_end, /*is_heap*/true));
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 3f85c71..0e5fac1 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -32,7 +32,7 @@
class DummyOatFile : public OatFile {
public:
- DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
+ DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
begin_ = begin;
end_ = end;
}
@@ -45,7 +45,7 @@
std::unique_ptr<DummyOatFile>&& oat_file,
MemMap&& oat_map)
: ImageSpace("DummyImageSpace",
- /*image_location*/"",
+ /*image_location=*/"",
std::move(map),
std::move(live_bitmap),
map.End()),
@@ -87,7 +87,7 @@
image_begin,
image_size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
+ /*low_4gb=*/true,
&error_str);
if (!map.IsValid()) {
LOG(ERROR) << error_str;
@@ -100,7 +100,7 @@
oat_begin,
oat_size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
+ /*low_4gb=*/true,
&error_str);
if (!oat_map.IsValid()) {
LOG(ERROR) << error_str;
@@ -110,23 +110,23 @@
// Create image header.
ImageSection sections[ImageHeader::kSectionCount];
new (map.Begin()) ImageHeader(
- /*image_begin*/PointerToLowMemUInt32(map.Begin()),
- /*image_size*/map.Size(),
+ /*image_begin=*/PointerToLowMemUInt32(map.Begin()),
+ /*image_size=*/map.Size(),
sections,
- /*image_roots*/PointerToLowMemUInt32(map.Begin()) + 1,
- /*oat_checksum*/0u,
+ /*image_roots=*/PointerToLowMemUInt32(map.Begin()) + 1,
+ /*oat_checksum=*/0u,
// The oat file data in the header is always right after the image space.
- /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_begin*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_end*/PointerToLowMemUInt32(oat_begin + oat_size),
- /*oat_file_end*/PointerToLowMemUInt32(oat_begin + oat_size),
- /*boot_image_begin*/0u,
- /*boot_image_size*/0u,
- /*boot_oat_begin*/0u,
- /*boot_oat_size*/0u,
- /*pointer_size*/sizeof(void*),
+ /*oat_file_begin=*/PointerToLowMemUInt32(oat_begin),
+ /*oat_data_begin=*/PointerToLowMemUInt32(oat_begin),
+ /*oat_data_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*oat_file_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*boot_image_begin=*/0u,
+ /*boot_image_size=*/0u,
+ /*boot_oat_begin=*/0u,
+ /*boot_oat_size=*/0u,
+ /*pointer_size=*/sizeof(void*),
ImageHeader::kStorageModeUncompressed,
- /*storage_size*/0u);
+ /*data_size=*/0u);
return new DummyImageSpace(std::move(map),
std::move(live_bitmap),
std::move(oat_file),
@@ -138,10 +138,10 @@
static uint8_t* GetContinuousMemoryRegion(size_t size) {
std::string error_str;
MemMap map = MemMap::MapAnonymous("reserve",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/ true,
+ /*low_4gb=*/ true,
&error_str);
if (!map.IsValid()) {
LOG(ERROR) << "Failed to allocate memory region " << error_str;
@@ -163,7 +163,7 @@
space::kGcRetentionPolicyNeverCollect,
begin,
end,
- /*limit*/end) {}
+ /*limit=*/end) {}
space::SpaceType GetType() const override {
return space::kSpaceTypeMallocSpace;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 5f44a72..399f9ff 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -105,10 +105,10 @@
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"mark sweep sweep array free buffer",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -283,9 +283,9 @@
// cards (during the call to Heap::ProcessCard) are not reordered
// *after* marking actually starts?
heap_->ProcessCards(GetTimings(),
- /* use_rem_sets */ false,
- /* process_alloc_space_cards */ true,
- /* clear_alloc_space_cards */ GetGcType() != kGcTypeSticky);
+ /* use_rem_sets= */ false,
+ /* process_alloc_space_cards= */ true,
+ /* clear_alloc_space_cards= */ GetGcType() != kGcTypeSticky);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
MarkRoots(self);
MarkReachableObjects();
@@ -446,7 +446,7 @@
!large_object_space->Contains(obj)))) {
// Lowest priority logging first:
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
// Buffer the output in the string stream since it is more important than the stack traces
// and we want it to have log priority. The stack traces are printed from Runtime::Abort
// which is called from LOG(FATAL) but before the abort message.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c58b59d..19b1fc7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -728,7 +728,7 @@
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
MarkObjectVisitor visitor(this);
// Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build.)
- obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
}
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index af9000b..e253dfb 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -129,10 +129,10 @@
if (!self->IsExceptionPending()) {
// AllocObject will pick up the new allocator type, and instrumented as true is the safe
// default.
- return AllocObject</*kInstrumented*/true>(self,
- klass,
- byte_count,
- pre_fence_visitor);
+ return AllocObject</*kInstrumented=*/true>(self,
+ klass,
+ byte_count,
+ pre_fence_visitor);
}
return nullptr;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 78e8422..a31cbe7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -433,8 +433,8 @@
request_begin,
capacity_,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
- /* reuse */ false,
+ /* low_4gb= */ true,
+ /* reuse= */ false,
heap_reservation.IsValid() ? &heap_reservation : nullptr,
&error_str);
}
@@ -463,7 +463,7 @@
initial_size,
size,
size,
- /* can_move_objects */ false);
+ /* can_move_objects= */ false);
CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
<< non_moving_space_mem_map_begin;
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
@@ -505,11 +505,11 @@
// Create bump pointer spaces instead of a backup space.
main_mem_map_2.Reset();
bump_pointer_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
+ "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
CHECK(bump_pointer_space_ != nullptr);
AddSpace(bump_pointer_space_);
temp_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
+ "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
CHECK(temp_space_ != nullptr);
AddSpace(temp_space_);
} else if (main_mem_map_2.IsValid()) {
@@ -519,7 +519,7 @@
growth_limit_,
capacity_,
name,
- /* can_move_objects */ true));
+ /* can_move_objects= */ true));
CHECK(main_space_backup_.get() != nullptr);
// Add the space so its accounted for in the heap_begin and heap_end.
AddSpace(main_space_backup_.get());
@@ -634,13 +634,13 @@
}
if (MayUseCollector(kCollectorTypeCC)) {
concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
- /*young_gen*/false,
+ /*young_gen=*/false,
"",
measure_gc_performance);
if (kEnableGenerationalConcurrentCopyingCollection) {
young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
this,
- /*young_gen*/true,
+ /*young_gen=*/true,
"young",
measure_gc_performance);
}
@@ -671,7 +671,7 @@
bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
if (!no_gap) {
PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
- MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true);
LOG(FATAL) << "There's a gap between the image space and the non-moving space";
}
}
@@ -696,7 +696,7 @@
request_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb*/ true,
+ /* low_4gb=*/ true,
out_error_str);
if (map.IsValid() || request_begin == nullptr) {
return map;
@@ -1323,7 +1323,7 @@
// Invoke CC full compaction.
CollectGarbageInternal(collector::kGcTypeFull,
kGcCauseCollectorTransition,
- /*clear_soft_references*/false);
+ /*clear_soft_references=*/false);
} else {
VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
}
@@ -1783,7 +1783,7 @@
break;
}
// Try to transition the heap if the allocation failure was due to the space being full.
- if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
+ if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow=*/ false)) {
// If we aren't out of memory then the OOM was probably from the non moving space being
// full. Attempt to disable compaction and turn the main space into a non moving space.
DisableMovingGc();
@@ -3870,7 +3870,7 @@
// Trigger another GC because there have been enough native bytes
// allocated since the last GC.
if (IsGcConcurrent()) {
- RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
+ RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full=*/true);
} else {
CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
}
@@ -3916,7 +3916,7 @@
<< " IsVariableSize=" << c->IsVariableSize()
<< " ObjectSize=" << c->GetObjectSize()
<< " sizeof(Class)=" << sizeof(mirror::Class)
- << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
+ << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass");
CHECK_GE(byte_count, sizeof(mirror::Object));
}
@@ -4012,7 +4012,7 @@
{
static constexpr size_t kMaxFrames = 16u;
FixedSizeBacktrace<kMaxFrames> backtrace;
- backtrace.Collect(/* skip_frames */ 2);
+ backtrace.Collect(/* skip_count= */ 2);
uint64_t hash = backtrace.Hash();
MutexLock mu(self, *backtrace_lock_);
new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
@@ -4023,7 +4023,7 @@
if (new_backtrace) {
StackHandleScope<1> hs(self);
auto h = hs.NewHandleWrapper(obj);
- CollectGarbage(/* clear_soft_references */ false);
+ CollectGarbage(/* clear_soft_references= */ false);
unique_backtrace_count_.fetch_add(1);
} else {
seen_backtrace_count_.fetch_add(1);
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 05a04f2..a133a10 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -37,7 +37,7 @@
gc::Heap::kPreferredAllocSpaceBegin,
16 * KB,
PROT_READ,
- /*low_4gb*/ true,
+ /*low_4gb=*/ true,
&error_msg);
ASSERT_TRUE(reserved_.IsValid()) << error_msg;
CommonRuntimeTest::SetUp();
@@ -77,7 +77,7 @@
}
}
}
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
}
TEST_F(HeapTest, HeapBitmapCapacityTest) {
@@ -91,7 +91,7 @@
}
TEST_F(HeapTest, DumpGCPerformanceOnShutdown) {
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
Runtime::Current()->SetDumpGCPerformanceOnShutdown(true);
}
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index c212bad..d4af117 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -60,16 +60,16 @@
static inline void SetSlowPathFlag(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>();
MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class);
- reference_class->SetFieldBoolean</* kTransactionActive */ false, /* kCheckTransaction */ false>(
+ reference_class->SetFieldBoolean</* kTransactionActive= */ false, /* kCheckTransaction= */ false>(
slow_path_offset, enabled ? 1 : 0);
}
void ReferenceProcessor::EnableSlowPath() {
- SetSlowPathFlag(/* enabled */ true);
+ SetSlowPathFlag(/* enabled= */ true);
}
void ReferenceProcessor::DisableSlowPath(Thread* self) {
- SetSlowPathFlag(/* enabled */ false);
+ SetSlowPathFlag(/* enabled= */ false);
condition_.Broadcast(self);
}
@@ -238,13 +238,13 @@
mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
// do_atomic_update needs to be true because this happens outside of the reference processing
// phase.
- if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
+ if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update=*/true)) {
if (UNLIKELY(collector->IsTransactionActive())) {
// In transaction mode, keep the referent alive and avoid any reference processing to avoid the
// issue of rolling back reference processing. do_atomic_update needs to be true because this
// happens outside of the reference processing phase.
if (!referent->IsNull()) {
- collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
+ collector->MarkHeapReference(referent, /*do_atomic_update=*/ true);
}
return;
}
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index e25e279..5c11e50 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -136,7 +136,7 @@
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
// do_atomic_update is false because this happens during the reference processing phase where
// Reference.clear() would block.
- if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
// Referent is white, clear it.
if (Runtime::Current()->IsActiveTransaction()) {
ref->ClearReferent<true>();
@@ -158,7 +158,7 @@
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
// do_atomic_update is false because this happens during the reference processing phase where
// Reference.clear() would block.
- if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
@@ -187,7 +187,7 @@
if (referent_addr->AsMirrorPtr() != nullptr) {
// do_atomic_update is false because mutators can't access the referent due to the weak ref
// access blocking.
- visitor->MarkHeapReference(referent_addr, /*do_atomic_update*/ false);
+ visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ false);
}
ref = ref->GetPendingNext();
} while (LIKELY(ref != head));
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 80af700..497a0c2 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -32,7 +32,7 @@
requested_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 36d2161..73582a0 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -54,7 +54,7 @@
end,
limit,
growth_limit,
- /* create_bitmaps */ true,
+ /* create_bitmaps= */ true,
can_move_objects,
starting_size, initial_size),
mspace_(mspace) {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index b783cfe..a7f82f6 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -137,10 +137,10 @@
size_t* bytes_tl_bulk_allocated) {
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
num_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
@@ -353,7 +353,7 @@
requested_begin,
size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
@@ -372,10 +372,10 @@
std::string error_msg;
allocation_info_map_ =
MemMap::MapAnonymous("large object free list space allocation info map",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
alloc_info_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map" << error_msg;
allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 445560a..be75efe 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -109,7 +109,7 @@
requested_begin,
*capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index bda1f1c..8cb079d 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -409,7 +409,7 @@
} else {
DCHECK(reg->IsLargeTail());
}
- reg->Clear(/*zero_and_release_pages*/true);
+ reg->Clear(/*zero_and_release_pages=*/true);
if (kForEvac) {
--num_evac_regions_;
} else {
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index eba6fac..31bbfb8 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -58,7 +58,7 @@
requested_begin,
capacity + kRegionSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (mem_map.IsValid() || requested_begin == nullptr) {
break;
@@ -393,7 +393,7 @@
uint8_t* clear_block_begin = nullptr;
uint8_t* clear_block_end = nullptr;
auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
- r->Clear(/*zero_and_release_pages*/false);
+ r->Clear(/*zero_and_release_pages=*/false);
if (clear_block_end != r->Begin()) {
// Region `r` is not adjacent to the current clear block; zero and release
// pages within the current block and restart a new clear block at the
@@ -656,7 +656,7 @@
if (!r->IsFree()) {
--num_non_free_regions_;
}
- r->Clear(/*zero_and_release_pages*/true);
+ r->Clear(/*zero_and_release_pages=*/true);
}
SetNonFreeRegionLimit(0);
DCHECK_EQ(num_non_free_regions_, 0u);
@@ -735,7 +735,7 @@
RevokeThreadLocalBuffersLocked(self);
// Retain sufficient free regions for full evacuation.
- Region* r = AllocateRegion(/*for_evac*/ false);
+ Region* r = AllocateRegion(/*for_evac=*/ false);
if (r != nullptr) {
r->is_a_tlab_ = true;
r->thread_ = self;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 5af1dd3..cc371b8 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -206,12 +206,12 @@
// Go through all of the blocks and visit the continuous objects.
template <typename Visitor>
ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
- WalkInternal<false /* kToSpaceOnly */>(visitor);
+ WalkInternal</* kToSpaceOnly= */ false>(visitor);
}
template <typename Visitor>
ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
REQUIRES(Locks::mutator_lock_) {
- WalkInternal<true /* kToSpaceOnly */>(visitor);
+ WalkInternal</* kToSpaceOnly= */ true>(visitor);
}
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index f16ed2d..4fe8027 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -145,7 +145,7 @@
cswh.Set(GcRoot<mirror::Object>(s.Get()));
// Trigger a GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expect the holder to have been called.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -166,7 +166,7 @@
cswh.Set(GcRoot<mirror::Object>(mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
// Trigger a GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expect the holder to have been called.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -190,7 +190,7 @@
cswh.Set(GcRoot<mirror::Object>(s.Get()));
// Trigger a GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expect the holder to have been called.
ASSERT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -205,7 +205,7 @@
Runtime::Current()->RemoveSystemWeakHolder(&cswh);
// Trigger another GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expectation: no change in the numbers.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index 0281eee..47c54bd 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -87,7 +87,7 @@
bool fatal) const {
// Lowest priority logging first:
PrintFileToLog("/proc/self/maps", android::base::LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
// Buffer the output in the string stream since it is more important than the stack traces
// and we want it to have log priority. The stack traces are printed from Runtime::Abort