| field | value |
|---|---|
| author | 2018-01-30 11:35:11 +0000 |
| committer | 2018-02-09 17:50:28 +0000 |
| commit | 2ae376f5af8953d3524cd8ed915ebdacf505625c (patch) |
| tree | f6c1fa068b877054ea425bc4494462f2a9cbe955 |
| parent | dc46115ab283bac2453b4f9e454e66107e64e8ef (diff) |
Stylistic and aesthetic changes.
Test: art/test.py
Change-Id: Ic41aa80430d16af748994c80f049c5b479fd9980
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | compiler/jni/jni_compiler_test.cc | 3 |
| -rw-r--r-- | runtime/gc/accounting/space_bitmap-inl.h | 9 |
| -rw-r--r-- | runtime/gc/collector/concurrent_copying-inl.h | 7 |
| -rw-r--r-- | runtime/gc/collector/concurrent_copying.cc | 29 |
| -rw-r--r-- | runtime/gc/heap.cc | 12 |
| -rw-r--r-- | runtime/gc/space/region_space-inl.h | 42 |
| -rw-r--r-- | runtime/gc/space/region_space.cc | 28 |
| -rw-r--r-- | runtime/gc/space/region_space.h | 58 |
| -rw-r--r-- | runtime/gc/space/space.cc | 2 |
| -rw-r--r-- | runtime/gc/space/space.h | 4 |
| -rw-r--r-- | runtime/interpreter/interpreter_common.h | 5 |
| -rw-r--r-- | runtime/lock_word.h | 6 |
| -rw-r--r-- | runtime/mirror/object-readbarrier-inl.h | 2 |
| -rw-r--r-- | runtime/thread.cc | 7 |
| -rwxr-xr-x | test/etc/run-test-jar | 3 |
15 files changed, 129 insertions, 88 deletions
```diff
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index dc66234c6f..451a909965 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -523,7 +523,8 @@ struct ScopedDisableCheckNumStackReferences {
 bool ScopedDisableCheckNumStackReferences::sCheckNumStackReferences = true;
-// Check that the handle scope at the start of this block is the same as the handle scope at the end of the block.
+// Check that the handle scope at the start of this block is the same
+// as the handle scope at the end of the block.
 struct ScopedCheckHandleScope {
   ScopedCheckHandleScope() : handle_scope_(Thread::Current()->GetTopHandleScope()) {
   }
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index df9ee8c219..a3fd1ba84a 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -62,7 +62,8 @@ inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
   return (bitmap_begin_[OffsetToIndex(offset)].LoadRelaxed() & OffsetToMask(offset)) != 0;
 }
-template<size_t kAlignment> template<typename Visitor>
+template<size_t kAlignment>
+template<typename Visitor>
 inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
                                                       uintptr_t visit_end,
                                                       Visitor&& visitor) const {
@@ -157,7 +158,8 @@ inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
 #endif
 }
-template<size_t kAlignment> template<typename Visitor>
+template<size_t kAlignment>
+template<typename Visitor>
 void SpaceBitmap<kAlignment>::Walk(Visitor&& visitor) {
   CHECK(bitmap_begin_ != nullptr);
@@ -177,7 +179,8 @@ void SpaceBitmap<kAlignment>::Walk(Visitor&& visitor) {
   }
 }
-template<size_t kAlignment> template<bool kSetBit>
+template<size_t kAlignment>
+template<bool kSetBit>
 inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
   uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
   DCHECK_GE(addr, heap_begin_);
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 85a656ec51..20e754554e 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -52,7 +52,8 @@ inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
     // we can avoid an expensive CAS.
     // For the baker case, an object is marked if either the mark bit marked or the bitmap bit is
     // set.
-    success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
+    success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::WhiteState(),
+                                             /* rb_state */ ReadBarrier::GrayState());
   } else {
     success = !bitmap->AtomicTestAndSet(ref);
   }
@@ -86,8 +87,8 @@ inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
       return ref;
     }
     // This may or may not succeed, which is ok because the object may already be gray.
-    bool success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
-                                                  ReadBarrier::GrayState());
+    bool success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::WhiteState(),
+                                                  /* rb_state */ ReadBarrier::GrayState());
     if (success) {
       MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
       immune_gray_stack_.push_back(ref);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index e925d42ed9..8bb36c98bd 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -347,8 +347,9 @@ class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor
       // This must come before the revoke.
       size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
       concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
-      reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
-          FetchAndAddSequentiallyConsistent(thread_local_objects);
+      reinterpret_cast<Atomic<size_t>*>(
+          &concurrent_copying_->from_space_num_objects_at_first_pause_)->
+              FetchAndAddSequentiallyConsistent(thread_local_objects);
     } else {
       concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
     }
@@ -1534,7 +1535,8 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
                  !IsInToSpace(referent)))) {
     // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
     // will change it to white later in ReferenceQueue::DequeuePendingReference().
-    DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
+    DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr)
+        << "Left unenqueued ref gray " << to_ref;
   } else {
     // We may occasionally leave a reference white in the queue if its referent happens to be
     // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
@@ -1552,7 +1554,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
 #endif
   if (add_to_live_bytes) {
-    // Add to the live bytes per unevacuated from space. Note this code is always run by the
+    // Add to the live bytes per unevacuated from-space. Note this code is always run by the
     // GC-running thread (no synchronization required).
     DCHECK(region_space_bitmap_->Test(to_ref));
     size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags>();
@@ -1774,17 +1776,20 @@ void ConcurrentCopying::ReclaimPhase() {
     if (kVerboseMode) {
       LOG(INFO) << "RecordFree:"
                 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
-                << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
+                << " unevac_from_bytes=" << unevac_from_bytes
+                << " unevac_from_objects=" << unevac_from_objects
                 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                 << " from_space size=" << region_space_->FromSpaceSize()
                 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                 << " to_space size=" << region_space_->ToSpaceSize();
-      LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
+      LOG(INFO) << "(before) num_bytes_allocated="
+                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
     }
     RecordFree(ObjectBytePair(freed_objects, freed_bytes));
     if (kVerboseMode) {
-      LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
+      LOG(INFO) << "(after) num_bytes_allocated="
+                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
     }
   }
@@ -2051,11 +2056,13 @@ void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* o
         (is_los && los_bitmap->Test(ref))) {
       // OK.
     } else {
-      // If ref is on the allocation stack, then it may not be
+      // If `ref` is on the allocation stack, then it may not be
       // marked live, but considered marked/alive (but not
       // necessarily on the live stack).
-      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
-                                 << "obj=" << obj << " ref=" << ref;
+      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack."
+                                 << " obj=" << obj
+                                 << " ref=" << ref
+                                 << " is_los=" << std::boolalpha << is_los << std::noboolalpha;
     }
   }
 }
@@ -2136,7 +2143,7 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset)
       // It was updated by the mutator.
       break;
     }
-    // Use release cas to make sure threads reading the reference see contents of copied objects.
+    // Use release CAS to make sure threads reading the reference see contents of copied objects.
   } while (!obj->CasFieldWeakReleaseObjectWithoutWriteBarrier<false, false, kVerifyNone>(
       offset,
       expected_ref,
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f22d025c91..d8d215bf76 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1899,10 +1899,10 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
     MutexLock mu(self, *gc_complete_lock_);
     // Ensure there is only one GC at a time.
     WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
-    // Homogeneous space compaction is a copying transition, can't run it if the moving GC disable count
-    // is non zero.
-    // If the collector type changed to something which doesn't benefit from homogeneous space compaction,
-    // exit.
+    // Homogeneous space compaction is a copying transition, can't run it if the moving GC disable
+    // count is non zero.
+    // If the collector type changed to something which doesn't benefit from homogeneous space
+    // compaction, exit.
     if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
         !main_space_->CanMoveObjects()) {
       return kErrorReject;
     }
@@ -3445,8 +3445,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
   TraceHeapSize(bytes_allocated);
   uint64_t target_size;
   collector::GcType gc_type = collector_ran->GetGcType();
-  const double multiplier = HeapGrowthMultiplier();  // Use the multiplier to grow more for
-                                                     // foreground.
+  // Use the multiplier to grow more for foreground.
+  const double multiplier = HeapGrowthMultiplier();
   const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
   const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
   if (gc_type != collector::kGcTypeSticky) {
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index e74e9b169f..b4c8092ce1 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -24,26 +24,30 @@ namespace art {
 namespace gc {
 namespace space {
-inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
-                                          size_t* usable_size,
-                                          size_t* bytes_tl_bulk_allocated) {
+inline mirror::Object* RegionSpace::Alloc(Thread* self ATTRIBUTE_UNUSED,
+                                          size_t num_bytes,
+                                          /* out */ size_t* bytes_allocated,
+                                          /* out */ size_t* usable_size,
+                                          /* out */ size_t* bytes_tl_bulk_allocated) {
   num_bytes = RoundUp(num_bytes, kAlignment);
   return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
 }
-inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
-                                                      size_t* bytes_allocated,
-                                                      size_t* usable_size,
-                                                      size_t* bytes_tl_bulk_allocated) {
+inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self,
+                                                      size_t num_bytes,
+                                                      /* out */ size_t* bytes_allocated,
+                                                      /* out */ size_t* usable_size,
+                                                      /* out */ size_t* bytes_tl_bulk_allocated) {
   Locks::mutator_lock_->AssertExclusiveHeld(self);
   return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
 }
 template<bool kForEvac>
-inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
-                                                    size_t* usable_size,
-                                                    size_t* bytes_tl_bulk_allocated) {
+inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes,
+                                                    /* out */ size_t* bytes_allocated,
+                                                    /* out */ size_t* usable_size,
+                                                    /* out */ size_t* bytes_tl_bulk_allocated) {
   DCHECK_ALIGNED(num_bytes, kAlignment);
   mirror::Object* obj;
   if (LIKELY(num_bytes <= kRegionSize)) {
@@ -79,8 +83,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
     }
   } else {
     // Large object.
-    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size,
-                               bytes_tl_bulk_allocated);
+    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
     if (LIKELY(obj != nullptr)) {
       return obj;
     }
@@ -88,9 +91,10 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
   return nullptr;
 }
-inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
-                                                  size_t* usable_size,
-                                                  size_t* bytes_tl_bulk_allocated) {
+inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes,
+                                                  /* out */ size_t* bytes_allocated,
+                                                  /* out */ size_t* usable_size,
+                                                  /* out */ size_t* bytes_tl_bulk_allocated) {
   DCHECK(IsAllocated() && IsInToSpace());
   DCHECK_ALIGNED(num_bytes, kAlignment);
   uint8_t* old_top;
@@ -238,9 +242,9 @@ inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
 template<bool kForEvac>
 inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
-                                               size_t* bytes_allocated,
-                                               size_t* usable_size,
-                                               size_t* bytes_tl_bulk_allocated) {
+                                               /* out */ size_t* bytes_allocated,
+                                               /* out */ size_t* usable_size,
+                                               /* out */ size_t* bytes_tl_bulk_allocated) {
   DCHECK_ALIGNED(num_bytes, kAlignment);
   DCHECK_GT(num_bytes, kRegionSize);
   size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
@@ -270,7 +274,7 @@ inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
     }
   }
   if (found) {
-    // right points to the one region past the last free region.
+    // `right` points to the one region past the last free region.
     DCHECK_EQ(left + num_regs, right);
     Region* first_reg = &regions_[left];
     DCHECK(first_reg->IsFree());
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index d58b76bd6b..ce7576171a 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -27,7 +27,7 @@ namespace space {
 // If a region has live objects whose size is less than this percent
 // value of the region size, evaculate the region.
-static constexpr uint kEvaculateLivePercentThreshold = 75U;
+static constexpr uint kEvacuateLivePercentThreshold = 75U;
 // If we protect the cleared regions.
 // Only protect for target builds to prevent flaky test failures (b/63131961).
@@ -165,7 +165,7 @@ inline bool RegionSpace::Region::ShouldBeEvacuated() {
   if (is_newly_allocated_) {
     result = true;
   } else {
-    bool is_live_percent_valid = live_bytes_ != static_cast<size_t>(-1);
+    bool is_live_percent_valid = (live_bytes_ != static_cast<size_t>(-1));
     if (is_live_percent_valid) {
       DCHECK(IsInToSpace());
       DCHECK(!IsLargeTail());
@@ -177,10 +177,10 @@ inline bool RegionSpace::Region::ShouldBeEvacuated() {
         // Side node: live_percent == 0 does not necessarily mean
         // there's no live objects due to rounding (there may be a
         // few).
-        result = live_bytes_ * 100U < kEvaculateLivePercentThreshold * bytes_allocated;
+        result = (live_bytes_ * 100U < kEvacuateLivePercentThreshold * bytes_allocated);
       } else {
         DCHECK(IsLarge());
-        result = live_bytes_ == 0U;
+        result = (live_bytes_ == 0U);
       }
     } else {
       result = false;
     }
@@ -260,7 +260,8 @@ static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
   }
 }
-void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) {
+void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
+                                 /* out */ uint64_t* cleared_objects) {
   DCHECK(cleared_bytes != nullptr);
   DCHECK(cleared_objects != nullptr);
   *cleared_bytes = 0;
@@ -432,7 +433,7 @@ void RegionSpace::ClampGrowthLimit(size_t new_capacity) {
 void RegionSpace::Dump(std::ostream& os) const {
   os << GetName() << " "
-      << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
+     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
 }
 void RegionSpace::DumpRegionForObject(std::ostream& os, mirror::Object* obj) {
@@ -532,13 +533,18 @@ void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
 }
 void RegionSpace::Region::Dump(std::ostream& os) const {
-  os << "Region[" << idx_ << "]=" << reinterpret_cast<void*>(begin_) << "-"
-     << reinterpret_cast<void*>(Top())
+  os << "Region[" << idx_ << "]="
+     << reinterpret_cast<void*>(begin_)
+     << "-" << reinterpret_cast<void*>(Top())
      << "-" << reinterpret_cast<void*>(end_)
-     << " state=" << static_cast<uint>(state_) << " type=" << static_cast<uint>(type_)
+     << " state=" << static_cast<uint>(state_)
+     << " type=" << static_cast<uint>(type_)
      << " objects_allocated=" << objects_allocated_
-     << " alloc_time=" << alloc_time_ << " live_bytes=" << live_bytes_
-     << " is_newly_allocated=" << is_newly_allocated_ << " is_a_tlab=" << is_a_tlab_ << " thread=" << thread_ << "\n";
+     << " alloc_time=" << alloc_time_
+     << " live_bytes=" << live_bytes_
+     << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
+     << " is_a_tlab=" << std::boolalpha << is_a_tlab_ << std::noboolalpha
+     << " thread=" << thread_ << '\n';
 }
 size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index b4970ed96f..5ca364eb9d 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -46,24 +46,33 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
   static MemMap* CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
   static RegionSpace* Create(const std::string& name, MemMap* mem_map);
-  // Allocate num_bytes, returns null if the space is full.
-  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+  // Allocate `num_bytes`, returns null if the space is full.
+  mirror::Object* Alloc(Thread* self,
+                        size_t num_bytes,
+                        /* out */ size_t* bytes_allocated,
+                        /* out */ size_t* usable_size,
+                        /* out */ size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!region_lock_);
   // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
-  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+  mirror::Object* AllocThreadUnsafe(Thread* self,
+                                    size_t num_bytes,
+                                    /* out */ size_t* bytes_allocated,
+                                    /* out */ size_t* usable_size,
+                                    /* out */ size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
   // The main allocation routine.
   template<bool kForEvac>
-  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
-                                                size_t* usable_size,
-                                                size_t* bytes_tl_bulk_allocated)
+  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes,
+                                                /* out */ size_t* bytes_allocated,
+                                                /* out */ size_t* usable_size,
+                                                /* out */ size_t* bytes_tl_bulk_allocated)
      REQUIRES(!region_lock_);
   // Allocate/free large objects (objects that are larger than the region size).
   template<bool kForEvac>
-  mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
-                             size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
+  mirror::Object* AllocLarge(size_t num_bytes,
+                             /* out */ size_t* bytes_allocated,
+                             /* out */ size_t* usable_size,
+                             /* out */ size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
   template<bool kForEvac>
   void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
@@ -176,7 +185,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
   template <typename Visitor>
   ALWAYS_INLINE void WalkToSpace(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
-    WalkInternal<true>(visitor);
+    WalkInternal<true /* kToSpaceOnly */>(visitor);
   }
   accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
@@ -236,7 +245,8 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
   size_t FromSpaceSize() REQUIRES(!region_lock_);
   size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
   size_t ToSpaceSize() REQUIRES(!region_lock_);
-  void ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) REQUIRES(!region_lock_);
+  void ClearFromSpace(/* out */ uint64_t* cleared_bytes, /* out */ uint64_t* cleared_objects)
+      REQUIRES(!region_lock_);
   void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
     Region* reg = RefToRegionUnlocked(ref);
@@ -303,12 +313,13 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
     void Clear(bool zero_and_release_pages);
-    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
-                                        size_t* usable_size,
-                                        size_t* bytes_tl_bulk_allocated);
+    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes,
+                                        /* out */ size_t* bytes_allocated,
+                                        /* out */ size_t* usable_size,
+                                        /* out */ size_t* bytes_tl_bulk_allocated);
     bool IsFree() const {
-      bool is_free = state_ == RegionState::kRegionStateFree;
+      bool is_free = (state_ == RegionState::kRegionStateFree);
       if (is_free) {
         DCHECK(IsInNoSpace());
         DCHECK_EQ(begin_, Top());
@@ -341,7 +352,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
     // Large allocated.
     bool IsLarge() const {
-      bool is_large = state_ == RegionState::kRegionStateLarge;
+      bool is_large = (state_ == RegionState::kRegionStateLarge);
       if (is_large) {
         DCHECK_LT(begin_ + kRegionSize, Top());
       }
@@ -350,7 +361,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
     // Large-tail allocated.
     bool IsLargeTail() const {
-      bool is_large_tail = state_ == RegionState::kRegionStateLargeTail;
+      bool is_large_tail = (state_ == RegionState::kRegionStateLargeTail);
       if (is_large_tail) {
         DCHECK_EQ(begin_, Top());
       }
@@ -553,12 +564,15 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
   std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);  // The pointer to the region array.
+
   // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
-  // SetFromSpace(). Invariant: for all i >= non_free_region_index_limit_, regions_[i].IsFree() is
-  // true.
+  // RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace.
+  //
+  // Invariant (verified by RegionSpace::VerifyNonFreeRegionLimit):
+  //   for all `i >= non_free_region_index_limit_`, `regions_[i].IsFree()` is true.
   size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);
-  Region* current_region_;  // The region that's being allocated currently.
-  Region* evac_region_;  // The region that's being evacuated to currently.
+  Region* current_region_;  // The region that's being currently allocated.
+  Region* evac_region_;  // The region that's being currently evacuated to.
   Region full_region_;  // The dummy/sentinel region that looks full.
   // Mark bitmap used by the GC.
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index 2c6afa7eb8..82f9905683 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -125,7 +125,7 @@ bool ContinuousMemMapAllocSpace::HasBoundBitmaps() const {
 void ContinuousMemMapAllocSpace::UnBindBitmaps() {
   CHECK(HasBoundBitmaps());
-  // At this point, the temp_bitmap holds our old mark bitmap.
+  // At this point, `temp_bitmap_` holds our old mark bitmap.
   accounting::ContinuousSpaceBitmap* new_bitmap = temp_bitmap_.release();
   Runtime::Current()->GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap_.get(), new_bitmap);
   CHECK_EQ(mark_bitmap_.release(), live_bitmap_.get());
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 6b76048cb1..964824abd5 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -389,8 +389,8 @@ class MemMapSpace : public ContinuousSpace {
   }
  protected:
-  MemMapSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end, uint8_t* limit,
-              GcRetentionPolicy gc_retention_policy)
+  MemMapSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end,
+              uint8_t* limit, GcRetentionPolicy gc_retention_policy)
       : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
         mem_map_(mem_map) {
   }
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 8180222e22..39a1db85d4 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -176,7 +176,8 @@ static inline bool DoInvoke(Thread* self,
   }
   const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
   const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
-  ObjPtr<mirror::Object> receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
+  ObjPtr<mirror::Object> receiver =
+      (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
   ArtMethod* sf_method = shadow_frame.GetMethod();
   ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
       method_idx, &receiver, sf_method, self);
@@ -645,7 +646,7 @@ EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kVirtual);  // invoke-virtual
 // Explicitly instantiate all DoInvokeVirtualQuick functions.
 #define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range)                   \
-  template REQUIRES_SHARED(Locks::mutator_lock_)                                    \
+  template REQUIRES_SHARED(Locks::mutator_lock_)                                     \
   bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame,     \
                                        const Instruction* inst, uint16_t inst_data, \
                                        JValue* result)
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index fac1a7597d..e89beb6d41 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -34,7 +34,9 @@ class Monitor;
 /* The lock value itself as stored in mirror::Object::monitor_. The two most significant bits of
  * the state. The four possible states are fat locked, thin/unlocked, hash code, and forwarding
- * address. When the lock word is in the "thin" state and its bits are formatted as follows:
+ * address.
+ *
+ * When the lock word is in the "thin" state and its bits are formatted as follows:
  *
  * |33|2|2|222222221111|1111110000000000|
  * |10|9|8|765432109876|5432109876543210|
@@ -59,7 +61,7 @@ class Monitor;
  * |11|0| ForwardingAddress |
 *
  * The `r` bit stores the read barrier state.
- * The `m` bit stores the mark state.
+ * The `m` bit stores the mark bit state.
  */
 class LockWord {
  public:
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index d81fff0a22..126cb04cf1 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -187,7 +187,7 @@ inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_b
     expected_lw = lw;
     new_lw = lw;
     new_lw.SetMarkBitState(mark_bit);
-    // Since this is only set from the mutator, we can use the non release Cas.
+    // Since this is only set from the mutator, we can use the non-release CAS.
   } while (!CasLockWordWeakRelaxed(expected_lw, new_lw));
   return true;
 }
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7136fee613..2ee7f9deda 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -663,8 +663,8 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
   child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
   stack_size = FixStackSize(stack_size);
-  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
-  // assign it.
+  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing
+  // to assign it.
   env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
                     reinterpret_cast<jlong>(child_thread));
@@ -839,7 +839,8 @@ Thread* Thread::Attach(const char* thread_name,
   if (create_peer) {
     self->CreatePeer(thread_name, as_daemon, thread_group);
     if (self->IsExceptionPending()) {
-      // We cannot keep the exception around, as we're deleting self. Try to be helpful and log it.
+      // We cannot keep the exception around, as we're deleting self. Try to be helpful and log
+      // it.
       {
         ScopedObjectAccess soa(self);
         LOG(ERROR) << "Exception creating thread peer:";
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 055cffbbb4..89efd7c507 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -667,7 +667,8 @@ fi
 if [ "$HOST" = "y" ]; then
   max_filename_size=$(getconf NAME_MAX $DEX_LOCATION)
 else
-  # There is no getconf on device, fallback to standard value. See NAME_MAX in kernel <linux/limits.h>
+  # There is no getconf on device, fallback to standard value.
+  # See NAME_MAX in kernel <linux/limits.h>
   max_filename_size=255
 fi
 # Compute VDEX_NAME.
```