Diffstat (limited to 'runtime/gc')
 runtime/gc/accounting/mod_union_table-inl.h    |  2
 runtime/gc/accounting/mod_union_table.cc       |  4
 runtime/gc/accounting/mod_union_table.h        | 32
 runtime/gc/accounting/mod_union_table_test.cc  |  6
 runtime/gc/allocation_record.cc                |  2
 runtime/gc/collector/concurrent_copying.cc     | 18
 runtime/gc/collector/concurrent_copying.h      | 26
 runtime/gc/collector/immune_spaces_test.cc     |  8
 runtime/gc/collector/mark_sweep.cc             | 12
 runtime/gc/collector/mark_sweep.h              | 20
 runtime/gc/collector/partial_mark_sweep.h      |  4
 runtime/gc/collector/semi_space.h              | 18
 runtime/gc/collector/sticky_mark_sweep.h       | 12
 runtime/gc/heap.cc                             | 20
 runtime/gc/heap_test.cc                        |  2
 runtime/gc/space/bump_pointer_space.h          | 28
 runtime/gc/space/dlmalloc_space.h              | 42
 runtime/gc/space/image_space.h                 |  6
 runtime/gc/space/image_space_test.cc           |  6
 runtime/gc/space/large_object_space.cc         | 14
 runtime/gc/space/large_object_space.h          | 44
 runtime/gc/space/malloc_space.h                |  2
 runtime/gc/space/memory_tool_malloc_space.h    | 18
 runtime/gc/space/region_space.h                | 28
 runtime/gc/space/rosalloc_space.h              | 40
 runtime/gc/space/space.h                       | 12
 runtime/gc/space/zygote_space.h                | 24
 runtime/gc/system_weak.h                       |  6
 runtime/gc/system_weak_test.cc                 |  8
 runtime/gc/task_processor_test.cc              |  6
 runtime/gc/verification.cc                     |  2
 31 files changed, 236 insertions(+), 236 deletions(-)
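
The whole change is a mechanical substitution: ART's portability macros OVERRIDE and FINAL (assumed here to be plain aliases defined in runtime/base/macros.h) are replaced by the C++11 keywords they expand to. A minimal sketch of the resulting pattern, with a simplified int standing in for the real GcType enum:

    // Assumed macro definitions being retired (ART's base/macros.h era):
    //   #define OVERRIDE override
    //   #define FINAL final

    class GarbageCollector {
     public:
      virtual ~GarbageCollector() {}
      virtual int GetGcType() const = 0;  // returns GcType in the real code
    };

    // After this change, the standard keywords are written out directly:
    class StickyMarkSweep final : public GarbageCollector {  // was: FINAL
     public:
      int GetGcType() const override { return 2; }  // was: OVERRIDE
    };
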
diff --git a/runtime/gc/accounting/mod_union_table-inl.h b/runtime/gc/accounting/mod_union_table-inl.h
index 3a09634c0b..f0a82e0c88 100644
--- a/runtime/gc/accounting/mod_union_table-inl.h
+++ b/runtime/gc/accounting/mod_union_table-inl.h
@@ -33,7 +33,7 @@ class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache {
space::ContinuousSpace* space)
: ModUnionTableReferenceCache(name, heap, space) {}
- bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE ALWAYS_INLINE {
+ bool ShouldAddReference(const mirror::Object* ref) const override ALWAYS_INLINE {
return !space_->HasAddress(ref);
}
};
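
Beyond style, override is what makes declarations like ShouldAddReference above safe: a subclass method that fails to match a base virtual's signature becomes a compile error instead of a silent, unrelated new virtual. A self-contained sketch with hypothetical types, not taken from this diff:

    struct Visitor {
      virtual bool ShouldAddReference(const void* ref) const { return false; }
    };

    struct ZygoteVisitor : Visitor {
      // Dropping 'const' from the parameter would silently introduce an
      // unrelated overload if 'override' were absent; with it, the compiler
      // rejects the mismatch:
      //   bool ShouldAddReference(void* ref) const override;  // error: does not override
      bool ShouldAddReference(const void* ref) const override { return true; }
    };
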
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 0dd05cd6f0..40dc6e146a 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -329,8 +329,8 @@ class ModUnionCheckReferences {
class EmptyMarkObjectVisitor : public MarkObjectVisitor {
public:
- mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {return obj;}
- void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {}
+ mirror::Object* MarkObject(mirror::Object* obj) override {return obj;}
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {}
};
void ModUnionTable::FilterCards() {
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 7a3c06a281..ec6f144fd9 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -125,33 +125,33 @@ class ModUnionTableReferenceCache : public ModUnionTable {
virtual ~ModUnionTableReferenceCache() {}
// Clear and store cards for a space.
- void ProcessCards() OVERRIDE;
+ void ProcessCards() override;
// Update table based on cleared cards and mark all references to the other spaces.
- void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+ void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
- virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+ virtual void VisitObjects(ObjectCallback callback, void* arg) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
- void Verify() OVERRIDE
+ void Verify() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
- virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+ virtual bool ContainsCardFor(uintptr_t addr) override;
- virtual void Dump(std::ostream& os) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ virtual void Dump(std::ostream& os) override REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void SetCards() OVERRIDE;
+ virtual void SetCards() override;
- virtual void ClearTable() OVERRIDE;
+ virtual void ClearTable() override;
protected:
// Cleared card array, used to update the mod-union table.
@@ -172,27 +172,27 @@ class ModUnionTableCardCache : public ModUnionTable {
virtual ~ModUnionTableCardCache() {}
// Clear and store cards for a space.
- virtual void ProcessCards() OVERRIDE;
+ virtual void ProcessCards() override;
// Mark all references to the alloc space(s).
- virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+ virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+ virtual void VisitObjects(ObjectCallback callback, void* arg) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Nothing to verify.
- virtual void Verify() OVERRIDE {}
+ virtual void Verify() override {}
- virtual void Dump(std::ostream& os) OVERRIDE;
+ virtual void Dump(std::ostream& os) override;
- virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+ virtual bool ContainsCardFor(uintptr_t addr) override;
- virtual void SetCards() OVERRIDE;
+ virtual void SetCards() override;
- virtual void ClearTable() OVERRIDE;
+ virtual void ClearTable() override;
protected:
// Cleared card bitmap, used to update the mod-union table.
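
Note that many of the touched declarations, as in the hunks above, keep virtual alongside override. The two are orthogonal, but virtual is redundant once override is present; a compact sketch of the equivalent spellings:

    struct Table {
      virtual void ProcessCards() {}
    };

    struct CachedTable : Table {
      // These spellings are equivalent; 'override' alone is the usual
      // modern style, since overriding a virtual is itself virtual:
      //   virtual void ProcessCards();           // says nothing new
      //   virtual void ProcessCards() override;  // what much of this diff keeps
      void ProcessCards() override {}             // sufficient on its own
    };
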
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index d59ff71676..5aa55506a5 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -98,12 +98,12 @@ class CollectVisitedVisitor : public MarkObjectVisitor {
public:
explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update ATTRIBUTE_UNUSED) OVERRIDE
+ bool do_atomic_update ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(ref != nullptr);
MarkObject(ref->AsMirrorPtr());
}
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* obj) override
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
out_->insert(obj);
@@ -122,7 +122,7 @@ class ModUnionTableRefCacheToSpace : public ModUnionTableReferenceCache {
space::ContinuousSpace* target_space)
: ModUnionTableReferenceCache(name, heap, space), target_space_(target_space) {}
- bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE {
+ bool ShouldAddReference(const mirror::Object* ref) const override {
return target_space_->HasAddress(ref);
}
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index a1d198652e..b9c1dc61b6 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -196,7 +196,7 @@ class AllocRecordStackVisitor : public StackVisitor {
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
- bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
if (trace_->GetDepth() >= max_depth_) {
return false;
}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c7a5f79cb2..f73ecf1c49 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -229,7 +229,7 @@ class ConcurrentCopying::ActivateReadBarrierEntrypointsCheckpoint : public Closu
explicit ActivateReadBarrierEntrypointsCheckpoint(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {}
- void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -250,7 +250,7 @@ class ConcurrentCopying::ActivateReadBarrierEntrypointsCallback : public Closure
explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a race with ThreadList::Register().
CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
@@ -393,7 +393,7 @@ class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor
: concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
}
- virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ virtual void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -467,7 +467,7 @@ class ConcurrentCopying::FlipCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ virtual void Run(Thread* thread) override REQUIRES(Locks::mutator_lock_) {
ConcurrentCopying* cc = concurrent_copying_;
TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
// Note: self is not necessarily equal to thread since thread may be suspended.
@@ -1072,7 +1072,7 @@ class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -1096,7 +1096,7 @@ class ConcurrentCopying::DisableMarkingCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a race with ThreadList::Register().
CHECK(concurrent_copying_->is_marking_);
@@ -1291,7 +1291,7 @@ class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor
}
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(root != nullptr);
operator()(root);
}
@@ -1457,7 +1457,7 @@ class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
disable_weak_ref_access_(disable_weak_ref_access) {
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -1727,7 +1727,7 @@ class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a deadlock b/31500969.
CHECK(concurrent_copying_->weak_ref_access_enabled_);
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 0ebe6f0c25..a956d3807a 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -71,7 +71,7 @@ class ConcurrentCopying : public GarbageCollector {
bool measure_read_barrier_slow_path = false);
~ConcurrentCopying();
- virtual void RunPhases() OVERRIDE
+ virtual void RunPhases() override
REQUIRES(!immune_gray_stack_lock_,
!mark_stack_lock_,
!rb_slow_path_histogram_lock_,
@@ -87,15 +87,15 @@ class ConcurrentCopying : public GarbageCollector {
void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return (kEnableGenerationalConcurrentCopyingCollection && young_gen_)
? kGcTypeSticky
: kGcTypePartial;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ virtual CollectorType GetCollectorType() const override {
return kCollectorTypeCC;
}
- virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
+ virtual void RevokeAllThreadLocalBuffers() override;
void SetRegionSpace(space::RegionSpace* region_space) {
DCHECK(region_space != nullptr);
region_space_ = region_space;
@@ -144,7 +144,7 @@ class ConcurrentCopying : public GarbageCollector {
void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
+ virtual mirror::Object* IsMarked(mirror::Object* from_ref) override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -167,7 +167,7 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
template<bool kGrayImmuneObject>
void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
@@ -175,12 +175,12 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
accounting::ObjectStack* GetAllocationStack();
accounting::ObjectStack* GetLiveStack();
- virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ virtual void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -205,20 +205,20 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES(!mark_stack_lock_);
void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
- ObjPtr<mirror::Reference> reference) OVERRIDE
+ ObjPtr<mirror::Reference> reference) override
REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
- virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* from_ref) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_);
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
@@ -293,7 +293,7 @@ class ConcurrentCopying : public GarbageCollector {
mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
+ void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
// Set the read barrier mark entrypoints to non-null.
void ActivateReadBarrierEntrypoints();
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 145bd0208d..677e3f3a05 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -167,19 +167,19 @@ class DummySpace : public space::ContinuousSpace {
end,
/*limit*/end) {}
- space::SpaceType GetType() const OVERRIDE {
+ space::SpaceType GetType() const override {
return space::kSpaceTypeMallocSpace;
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return nullptr;
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return nullptr;
}
};
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 58a75ee189..840a4b03dc 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -578,7 +578,7 @@ class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor {
public:
explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
- void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+ void VisitRoot(mirror::Object* root, const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
}
@@ -607,7 +607,7 @@ class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
public:
explicit VerifyRootVisitor(std::ostream& os) : os_(os) {}
- void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+ void VisitRoot(mirror::Object* root, const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// See if the root is on any space bitmap.
auto* heap = Runtime::Current()->GetHeap();
@@ -1110,7 +1110,7 @@ class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor {
explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
virtual mirror::Object* IsMarked(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
mark_sweep_->VerifyIsLive(obj);
return obj;
@@ -1144,7 +1144,7 @@ class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
@@ -1154,14 +1154,14 @@ class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
}
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
ScopedTrace trace("Marking thread roots");
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* const self = Thread::Current();
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index af2bb973c9..012e17932f 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -57,7 +57,7 @@ class MarkSweep : public GarbageCollector {
~MarkSweep() {}
- virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
+ virtual void RunPhases() override REQUIRES(!mark_stack_lock_);
void InitializePhase();
void MarkingPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
@@ -72,11 +72,11 @@ class MarkSweep : public GarbageCollector {
return is_concurrent_;
}
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return kGcTypeFull;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ virtual CollectorType GetCollectorType() const override {
return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
}
@@ -188,24 +188,24 @@ class MarkSweep : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
- const RootInfo& info) OVERRIDE
+ const RootInfo& info) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Marks an object.
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* obj) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -216,7 +216,7 @@ class MarkSweep : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -231,7 +231,7 @@ class MarkSweep : public GarbageCollector {
protected:
// Returns object if the object is marked in the heap bitmap, otherwise null.
- virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ virtual mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void MarkObjectNonNull(mirror::Object* obj,
@@ -279,7 +279,7 @@ class MarkSweep : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
virtual void ProcessMarkStack()
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 8b0d3ddf42..308699bf7e 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -26,7 +26,7 @@ namespace collector {
class PartialMarkSweep : public MarkSweep {
public:
// Virtual as overridden by StickyMarkSweep.
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return kGcTypePartial;
}
@@ -37,7 +37,7 @@ class PartialMarkSweep : public MarkSweep {
// Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
// collections, ie the Zygote space. Also mark this space is immune. Virtual as overridden by
// StickyMarkSweep.
- virtual void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ virtual void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index d1d45c8df6..49cd02e99a 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -63,7 +63,7 @@ class SemiSpace : public GarbageCollector {
~SemiSpace() {}
- virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+ virtual void RunPhases() override NO_THREAD_SAFETY_ANALYSIS;
virtual void InitializePhase();
virtual void MarkingPhase() REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
@@ -72,10 +72,10 @@ class SemiSpace : public GarbageCollector {
virtual void FinishPhase() REQUIRES(Locks::mutator_lock_);
void MarkReachableObjects()
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return kGcTypePartial;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ virtual CollectorType GetCollectorType() const override {
return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
}
@@ -106,11 +106,11 @@ class SemiSpace : public GarbageCollector {
void MarkObjectIfNotInToSpace(CompressedReferenceType* obj_ptr)
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* root) override
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void ScanObject(mirror::Object* obj)
@@ -145,11 +145,11 @@ class SemiSpace : public GarbageCollector {
void SweepSystemWeaks()
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info) OVERRIDE
+ const RootInfo& info) override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
@@ -162,12 +162,12 @@ class SemiSpace : public GarbageCollector {
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
- virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ virtual mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 45f912f63a..f92a103b13 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -24,9 +24,9 @@ namespace art {
namespace gc {
namespace collector {
-class StickyMarkSweep FINAL : public PartialMarkSweep {
+class StickyMarkSweep final : public PartialMarkSweep {
public:
- GcType GetGcType() const OVERRIDE {
+ GcType GetGcType() const override {
return kGcTypeSticky;
}
@@ -34,7 +34,7 @@ class StickyMarkSweep FINAL : public PartialMarkSweep {
~StickyMarkSweep() {}
virtual void MarkConcurrentRoots(VisitRootFlags flags)
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -42,15 +42,15 @@ class StickyMarkSweep FINAL : public PartialMarkSweep {
protected:
// Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the
// alloc space will be marked as immune.
- void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
void MarkReachableObjects()
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void Sweep(bool swap_bitmaps)
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
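
The FINAL → final rewrite on classes like StickyMarkSweep follows the same pattern and carries a practical payoff: no subclass can exist, so calls through a pointer of that exact static type can be devirtualized. A sketch under those assumptions, with hypothetical names:

    class Collector {
     public:
      virtual ~Collector() {}
      virtual void RunPhases() = 0;
    };

    class StickySweep final : public Collector {  // 'final': no further derivation
     public:
      void RunPhases() override {}
    };

    void Drive(StickySweep* s) {
      // Because StickySweep is final, the compiler knows the dynamic type
      // and may call StickySweep::RunPhases directly, bypassing the vtable.
      s->RunPhases();
    }
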
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bf06cf9758..16fd78630d 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1327,7 +1327,7 @@ class TrimIndirectReferenceTableClosure : public Closure {
public:
explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
thread->GetJniEnv()->TrimLocals();
// If thread is a running mutator, then act on behalf of the trim thread.
// See the code in ThreadList::RunCheckpoint.
@@ -2213,7 +2213,7 @@ void Heap::ChangeCollector(CollectorType collector_type) {
}
// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
-class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
+class ZygoteCompactingCollector final : public collector::SemiSpace {
public:
ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
: SemiSpace(heap, false, "zygote collector"),
@@ -2769,7 +2769,7 @@ class RootMatchesObjectVisitor : public SingleRootVisitor {
explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
void VisitRoot(mirror::Object* root, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == obj_) {
LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
}
@@ -2826,7 +2826,7 @@ class VerifyReferenceVisitor : public SingleRootVisitor {
root->AsMirrorPtr(), RootInfo(kRootVMInternal));
}
- virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
+ virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == nullptr) {
LOG(ERROR) << "Root is null with info " << root_info.GetType();
@@ -3259,10 +3259,10 @@ void Heap::ProcessCards(TimingLogger* timings,
}
struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
+ virtual mirror::Object* MarkObject(mirror::Object* obj) override {
return obj;
}
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
}
};
@@ -3633,7 +3633,7 @@ class Heap::ConcurrentGCTask : public HeapTask {
public:
ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
: HeapTask(target_time), cause_(cause), force_full_(force_full) {}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->ConcurrentGC(self, cause_, force_full_);
heap->ClearConcurrentGCRequest();
@@ -3691,7 +3691,7 @@ class Heap::CollectorTransitionTask : public HeapTask {
public:
explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->DoPendingCollectorTransition();
heap->ClearPendingCollectorTransition(self);
@@ -3733,7 +3733,7 @@ void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint
class Heap::HeapTrimTask : public HeapTask {
public:
explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->Trim(self);
heap->ClearPendingTrim(self);
@@ -4176,7 +4176,7 @@ void Heap::VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, si
class Heap::TriggerPostForkCCGcTask : public HeapTask {
public:
explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
// Trigger a GC, if not already done. The first GC after fork, whenever
// takes place, will adjust the thresholds to normal levels.
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 8720a3e014..7cbad3b523 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -29,7 +29,7 @@ namespace gc {
class HeapTest : public CommonRuntimeTest {
public:
- void SetUp() OVERRIDE {
+ void SetUp() override {
MemMap::Init();
std::string error_msg;
// Reserve the preferred address to force the heap to use another one for testing.
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 9b315584fb..02e84b509e 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -35,11 +35,11 @@ namespace space {
// A bump pointer space allocates by incrementing a pointer, it doesn't provide a free
// implementation as its intended to be evacuated.
-class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
+class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeBumpPointerSpace;
}
@@ -51,27 +51,27 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_);
+ override REQUIRES(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(size_t num_bytes);
mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES_SHARED(Locks::mutator_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
// NOPS unless we support free lists.
- size_t Free(Thread*, mirror::Object*) OVERRIDE {
+ size_t Free(Thread*, mirror::Object*) override {
return 0;
}
- size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+ size_t FreeList(Thread*, size_t, mirror::Object**) override {
return 0;
}
@@ -94,16 +94,16 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return GetMemMap()->Size();
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return nullptr;
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return nullptr;
}
// Reset the space to empty.
- void Clear() OVERRIDE REQUIRES(!block_lock_);
+ void Clear() override REQUIRES(!block_lock_);
void Dump(std::ostream& os) const;
@@ -122,7 +122,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return Begin() == End();
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return true;
}
@@ -141,7 +141,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// Allocate a new TLAB, returns false if the allocation failed.
bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
- BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
+ BumpPointerSpace* AsBumpPointerSpace() override {
return this;
}
@@ -151,7 +151,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!block_lock_);
- accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;
// Record objects / bytes freed.
void RecordFree(int32_t objects, int32_t bytes) {
@@ -159,7 +159,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
bytes_allocated_.fetch_sub(bytes, std::memory_order_seq_cst);
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
// Object alignment within the space.
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 66537d5dac..09f3970408 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -53,36 +53,36 @@ class DlMallocSpace : public MallocSpace {
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
+ override REQUIRES(!lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_) {
+ override REQUIRES(!lock_) {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return AllocationSizeNonvirtual(obj, usable_size);
}
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ virtual size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
return num_bytes;
}
// DlMallocSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
@@ -103,23 +103,23 @@ class DlMallocSpace : public MallocSpace {
return mspace_;
}
- size_t Trim() OVERRIDE;
+ size_t Trim() override;
// Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
// in use, indicated by num_bytes equaling zero.
- void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+ void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
// Returns the number of bytes that the space has currently obtained from the system. This is
// greater or equal to the amount of live data in the space.
- size_t GetFootprint() OVERRIDE;
+ size_t GetFootprint() override;
// Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
- size_t GetFootprintLimit() OVERRIDE;
+ size_t GetFootprintLimit() override;
// Set the maximum number of bytes that the heap is allowed to obtain from the system via
// MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
// allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
- void SetFootprintLimit(size_t limit) OVERRIDE;
+ void SetFootprintLimit(size_t limit) override;
MallocSpace* CreateInstance(MemMap&& mem_map,
const std::string& name,
@@ -128,22 +128,22 @@ class DlMallocSpace : public MallocSpace {
uint8_t* end,
uint8_t* limit,
size_t growth_limit,
- bool can_move_objects) OVERRIDE;
+ bool can_move_objects) override;
- uint64_t GetBytesAllocated() OVERRIDE;
- uint64_t GetObjectsAllocated() OVERRIDE;
+ uint64_t GetBytesAllocated() override;
+ uint64_t GetObjectsAllocated() override;
- virtual void Clear() OVERRIDE;
+ virtual void Clear() override;
- bool IsDlMallocSpace() const OVERRIDE {
+ bool IsDlMallocSpace() const override {
return true;
}
- DlMallocSpace* AsDlMallocSpace() OVERRIDE {
+ DlMallocSpace* AsDlMallocSpace() override {
return this;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
@@ -165,7 +165,7 @@ class DlMallocSpace : public MallocSpace {
REQUIRES(lock_);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
- size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE {
+ size_t /*maximum_size*/, bool /*low_memory_mode*/) override {
return CreateMspace(base, morecore_start, initial_size);
}
static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 20bce66957..93cf947218 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -86,11 +86,11 @@ class ImageSpace : public MemMapSpace {
return image_location_;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
// ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
// special cases to test against.
return live_bitmap_.get();
@@ -102,7 +102,7 @@ class ImageSpace : public MemMapSpace {
void Sweep(bool /* swap_bitmaps */, size_t* /* freed_objects */, size_t* /* freed_bytes */) {
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index a1ffa067d0..d93385de3a 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -113,7 +113,7 @@ TEST_F(DexoptTest, ValidateOatFile) {
template <bool kImage, bool kRelocate, bool kPatchoat, bool kImageDex2oat>
class ImageSpaceLoadingTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
if (kImage) {
options->emplace_back(android::base::StringPrintf("-Ximage:%s", GetCoreArtLocation().c_str()),
nullptr);
@@ -152,7 +152,7 @@ TEST_F(ImageSpaceNoRelocateNoDex2oatNoPatchoatTest, Test) {
class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false, true> {
protected:
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
const char* android_data = getenv("ANDROID_DATA");
CHECK(android_data != nullptr);
old_android_data_ = android_data;
@@ -172,7 +172,7 @@ class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false,
ImageSpaceLoadingTest<false, true, false, true>::SetUpRuntimeOptions(options);
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
int result = unlink(bad_dalvik_cache_.c_str());
CHECK_EQ(result, 0) << strerror(errno);
result = rmdir(bad_android_data_.c_str());
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 76ea9fda29..09d02518a3 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -39,12 +39,12 @@ namespace art {
namespace gc {
namespace space {
-class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+class MemoryToolLargeObjectMapSpace final : public LargeObjectMapSpace {
public:
explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
}
- ~MemoryToolLargeObjectMapSpace() OVERRIDE {
+ ~MemoryToolLargeObjectMapSpace() override {
// Historical note: We were deleting large objects to keep Valgrind happy if there were
// any large objects such as Dex cache arrays which aren't freed since they are held live
// by the class linker.
@@ -52,7 +52,7 @@ class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE {
+ override {
mirror::Object* obj =
LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
@@ -68,21 +68,21 @@ class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
return object_without_rdz;
}
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
}
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE {
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override {
return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
}
- size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
+ size_t Free(Thread* self, mirror::Object* obj) override {
mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
return LargeObjectMapSpace::Free(self, object_with_rdz);
}
- bool Contains(const mirror::Object* obj) const OVERRIDE {
+ bool Contains(const mirror::Object* obj) const override {
return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
}
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index b69bd91162..39ff2c3e43 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -41,7 +41,7 @@ enum class LargeObjectSpaceType {
// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
public:
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeLargeObjectSpace;
}
void SwapBitmaps();
@@ -49,10 +49,10 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
virtual ~LargeObjectSpace() {}
- uint64_t GetBytesAllocated() OVERRIDE {
+ uint64_t GetBytesAllocated() override {
return num_bytes_allocated_;
}
- uint64_t GetObjectsAllocated() OVERRIDE {
+ uint64_t GetObjectsAllocated() override {
return num_objects_allocated_;
}
uint64_t GetTotalBytesAllocated() const {
@@ -61,22 +61,22 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
uint64_t GetTotalObjectsAllocated() const {
return total_objects_allocated_;
}
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
// LargeObjectSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
- bool IsAllocSpace() const OVERRIDE {
+ bool IsAllocSpace() const override {
return true;
}
- AllocSpace* AsAllocSpace() OVERRIDE {
+ AllocSpace* AsAllocSpace() override {
return this;
}
collector::ObjectBytePair Sweep(bool swap_bitmaps);
- virtual bool CanMoveObjects() const OVERRIDE {
+ virtual bool CanMoveObjects() const override {
return false;
}
// Current address at which the space begins, which may vary as the space is filled.
@@ -96,7 +96,7 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return Begin() <= byte_obj && byte_obj < End();
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if the large object is a zygote large object. Potentially slow.
@@ -140,11 +140,11 @@ class LargeObjectMapSpace : public LargeObjectSpace {
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
REQUIRES(!lock_);
size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
- void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
- std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+ std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
protected:
struct LargeObject {
@@ -154,8 +154,8 @@ class LargeObjectMapSpace : public LargeObjectSpace {
explicit LargeObjectMapSpace(const std::string& name);
virtual ~LargeObjectMapSpace() {}
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
// Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -164,22 +164,22 @@ class LargeObjectMapSpace : public LargeObjectSpace {
};
// A continuous large object space with a free-list to handle holes.
-class FreeListSpace FINAL : public LargeObjectSpace {
+class FreeListSpace final : public LargeObjectSpace {
public:
static constexpr size_t kAlignment = kPageSize;
virtual ~FreeListSpace();
static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
- size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
- void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+ override REQUIRES(!lock_);
+ size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
void Dump(std::ostream& os) const REQUIRES(!lock_);
- std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+ std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
protected:
FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
@@ -198,8 +198,8 @@ class FreeListSpace FINAL : public LargeObjectSpace {
}
// Removes header from the free blocks set by finding the corresponding iterator and erasing it.
void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
class SortByPrevFree {
public:
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index e4a6f158ec..6bf2d71c7c 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -133,7 +133,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return can_move_objects_;
}
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index 32bd204354..33bddfa4c8 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -29,28 +29,28 @@ template <typename BaseMallocSpaceType,
size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
-class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
+class MemoryToolMallocSpace final : public BaseMallocSpaceType {
public:
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE;
+ override;
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_);
+ override REQUIRES(Locks::mutator_lock_);
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES_SHARED(Locks::mutator_lock_);
- void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
+ void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) override {}
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override;
template <typename... Params>
MemoryToolMallocSpace(MemMap&& mem_map, size_t initial_size, Params... params);
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 8ad26baff1..0bf4f38a4b 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -39,7 +39,7 @@ namespace space {
static constexpr bool kCyclicRegionAllocation = true;
// A space that consists of equal-sized regions.
-class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
+class RegionSpace final : public ContinuousMemMapAllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
@@ -49,7 +49,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
kEvacModeForceAll,
};
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeRegionSpace;
}
@@ -65,14 +65,14 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
/* out */ size_t* bytes_allocated,
/* out */ size_t* usable_size,
/* out */ size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!region_lock_);
+ override REQUIRES(!region_lock_);
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self,
size_t num_bytes,
/* out */ size_t* bytes_allocated,
/* out */ size_t* usable_size,
/* out */ size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+ override REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
// The main allocation routine.
template<bool kForEvac>
ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes,
@@ -90,29 +90,29 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
- size_t Free(Thread*, mirror::Object*) OVERRIDE {
+ size_t Free(Thread*, mirror::Object*) override {
UNIMPLEMENTED(FATAL);
return 0;
}
- size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+ size_t FreeList(Thread*, size_t, mirror::Object**) override {
UNIMPLEMENTED(FATAL);
return 0;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return mark_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return mark_bitmap_.get();
}
- void Clear() OVERRIDE REQUIRES(!region_lock_);
+ void Clear() override REQUIRES(!region_lock_);
// Remove read and write memory protection from the whole region space,
// i.e. make memory pages backing the region area not readable and not
@@ -188,7 +188,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return num_regions_;
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return true;
}
@@ -197,7 +197,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return byte_obj >= Begin() && byte_obj < Limit();
}
- RegionSpace* AsRegionSpace() OVERRIDE {
+ RegionSpace* AsRegionSpace() override {
return this;
}
@@ -212,10 +212,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
WalkInternal<true /* kToSpaceOnly */>(visitor);
}
- accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return nullptr;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
// Object alignment within the space.
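
Note the consistent ordering in these signatures: the virt-specifier comes first, then the Clang thread-safety annotations, as in void Clear() override REQUIRES(!region_lock_). A minimal standalone sketch of that combination; the macro expansions below follow the Clang thread-safety documentation rather than ART's own headers, so treat them as assumptions:

    // Compile with: clang++ -std=c++14 -Wthread-safety -Wthread-safety-negative -c
    #define CAPABILITY(x)  __attribute__((capability(x)))
    #define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))

    struct CAPABILITY("mutex") Mutex {
      void Lock() ACQUIRE();
      void Unlock() RELEASE();
    };

    struct Collector {
      virtual ~Collector() = default;
      virtual void Clear() = 0;
    };

    struct RegionLike : Collector {
      Mutex region_lock_;
      // 'override' precedes the annotation; the negative capability says
      // callers must NOT already hold region_lock_ when calling Clear().
      void Clear() override REQUIRES(!region_lock_);
    };
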
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index c630826f48..5162a064d1 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -52,24 +52,24 @@ class RosAllocSpace : public MallocSpace {
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
+ override REQUIRES(!lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ override REQUIRES(Locks::mutator_lock_) {
return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return AllocationSizeNonvirtual<true>(obj, usable_size);
}
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -93,7 +93,7 @@ class RosAllocSpace : public MallocSpace {
// run without allocating a new run.
ALWAYS_INLINE mirror::Object* AllocThreadLocal(Thread* self, size_t num_bytes,
size_t* bytes_allocated);
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
return MaxBytesBulkAllocatedForNonvirtual(num_bytes);
}
ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);
@@ -107,13 +107,13 @@ class RosAllocSpace : public MallocSpace {
return rosalloc_;
}
- size_t Trim() OVERRIDE;
- void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
- size_t GetFootprint() OVERRIDE;
- size_t GetFootprintLimit() OVERRIDE;
- void SetFootprintLimit(size_t limit) OVERRIDE;
+ size_t Trim() override;
+ void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
+ size_t GetFootprint() override;
+ size_t GetFootprintLimit() override;
+ void SetFootprintLimit(size_t limit) override;
- void Clear() OVERRIDE;
+ void Clear() override;
MallocSpace* CreateInstance(MemMap&& mem_map,
const std::string& name,
@@ -122,10 +122,10 @@ class RosAllocSpace : public MallocSpace {
uint8_t* end,
uint8_t* limit,
size_t growth_limit,
- bool can_move_objects) OVERRIDE;
+ bool can_move_objects) override;
- uint64_t GetBytesAllocated() OVERRIDE;
- uint64_t GetObjectsAllocated() OVERRIDE;
+ uint64_t GetBytesAllocated() override;
+ uint64_t GetObjectsAllocated() override;
size_t RevokeThreadLocalBuffers(Thread* thread);
size_t RevokeAllThreadLocalBuffers();
@@ -135,11 +135,11 @@ class RosAllocSpace : public MallocSpace {
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
- bool IsRosAllocSpace() const OVERRIDE {
+ bool IsRosAllocSpace() const override {
return true;
}
- RosAllocSpace* AsRosAllocSpace() OVERRIDE {
+ RosAllocSpace* AsRosAllocSpace() override {
return this;
}
@@ -149,7 +149,7 @@ class RosAllocSpace : public MallocSpace {
virtual ~RosAllocSpace();
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE {
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override {
rosalloc_->LogFragmentationAllocFailure(os, failed_alloc_bytes);
}
@@ -174,7 +174,7 @@ class RosAllocSpace : public MallocSpace {
size_t* usable_size, size_t* bytes_tl_bulk_allocated);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
- size_t maximum_size, bool low_memory_mode) OVERRIDE {
+ size_t maximum_size, bool low_memory_mode) override {
return CreateRosAlloc(
base, morecore_start, initial_size, maximum_size, low_memory_mode, kRunningOnMemoryTool);
}
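
RosAllocSpace pairs nearly every virtual entry point with a nonvirtual twin (Alloc/AllocNonvirtual, AllocationSize/AllocationSizeNonvirtual, MaxBytesBulkAllocatedFor and its Nonvirtual variant): the override is a one-line forwarder, while callers that already hold the concrete type use the nonvirtual version and get inlining instead of dispatch. A reduced sketch of the pattern under stand-in names:

    #include <cstddef>
    #include <new>

    class AllocSpaceBase {
     public:
      virtual ~AllocSpaceBase() = default;
      virtual void* Alloc(size_t num_bytes) = 0;
    };

    class RosLikeSpace final : public AllocSpaceBase {
     public:
      // Virtual entry point: a thin forwarder for generic callers.
      void* Alloc(size_t num_bytes) override { return AllocNonvirtual(num_bytes); }

      // Nonvirtual twin: hot paths that statically know the type call this
      // directly, so the fast path can be inlined.
      void* AllocNonvirtual(size_t num_bytes) {
        return ::operator new(num_bytes);  // stand-in for the real allocator
      }
    };
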
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 4e173a86f1..2fe1f82547 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -352,7 +352,7 @@ class DiscontinuousSpace : public Space {
return mark_bitmap_.get();
}
- virtual bool IsDiscontinuousSpace() const OVERRIDE {
+ virtual bool IsDiscontinuousSpace() const override {
return true;
}
@@ -409,14 +409,14 @@ class MemMapSpace : public ContinuousSpace {
// Used by the heap compaction interface to enable copying from one type of alloc space to another.
class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
public:
- bool IsAllocSpace() const OVERRIDE {
+ bool IsAllocSpace() const override {
return true;
}
- AllocSpace* AsAllocSpace() OVERRIDE {
+ AllocSpace* AsAllocSpace() override {
return this;
}
- bool IsContinuousMemMapAllocSpace() const OVERRIDE {
+ bool IsContinuousMemMapAllocSpace() const override {
return true;
}
ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
@@ -435,11 +435,11 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
// Clear the space back to an empty space.
virtual void Clear() = 0;
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return mark_bitmap_.get();
}
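
The Is*/As* pairs recurring through space.h are ART's hand-rolled checked downcast: the base class answers false (and its As* must never be reached), each concrete space overrides both to answer true and return this, and callers branch on Is* instead of paying for dynamic_cast. A minimal sketch with stand-in names and an aborting base As*:

    #include <cstdlib>

    class AllocLike;

    class SpaceLike {
     public:
      virtual ~SpaceLike() = default;
      virtual bool IsAllocSpace() const { return false; }
      virtual AllocLike* AsAllocSpace() { std::abort(); }  // wrong type: fatal
    };

    class AllocLike : public SpaceLike {
     public:
      bool IsAllocSpace() const override { return true; }
      AllocLike* AsAllocSpace() override { return this; }
    };

    void Visit(SpaceLike* space) {
      if (space->IsAllocSpace()) {
        AllocLike* alloc = space->AsAllocSpace();  // cheap, no dynamic_cast
        static_cast<void>(alloc);
      }
    }
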
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 200c79f00c..1f73577a3a 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -27,7 +27,7 @@ namespace gc {
namespace space {
// A zygote space is a space which you cannot allocate into or free from.
-class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
+class ZygoteSpace final : public ContinuousMemMapAllocSpace {
public:
// Returns the remaining storage in the out_map field.
static ZygoteSpace* Create(const std::string& name,
@@ -38,28 +38,28 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
void Dump(std::ostream& os) const;
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeZygoteSpace;
}
- ZygoteSpace* AsZygoteSpace() OVERRIDE {
+ ZygoteSpace* AsZygoteSpace() override {
return this;
}
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE;
+ size_t Free(Thread* self, mirror::Object* ptr) override;
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
// ZygoteSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
@@ -71,13 +71,13 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
return objects_allocated_.load(std::memory_order_seq_cst);
}
- void Clear() OVERRIDE;
+ void Clear() override;
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
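
RegionSpace above and ZygoteSpace here both keep overrides for operations the space cannot support; the convention is to override anyway and either fail loudly (UNIMPLEMENTED(FATAL)) or answer with a harmless constant (the 0U from the Revoke* methods). A stripped-down sketch of the idea, with the runtime's fatal logging replaced by std::abort():

    #include <cstddef>
    #include <cstdlib>

    class SpaceIface {
     public:
      virtual ~SpaceIface() = default;
      virtual size_t Free(void* ptr) = 0;
      virtual size_t RevokeThreadLocalBuffers() = 0;
    };

    class FrozenSpace final : public SpaceIface {
     public:
      // Freeing from this space is a bug, so fail loudly.
      size_t Free(void* /*ptr*/) override { std::abort(); }

      // No thread-local allocation state, so revoking is a no-op.
      size_t RevokeThreadLocalBuffers() override { return 0U; }
    };
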
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 60105f4e4f..ef85b3942f 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -45,7 +45,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
}
virtual ~SystemWeakHolder() {}
- void Allow() OVERRIDE
+ void Allow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
CHECK(!kUseReadBarrier);
@@ -54,7 +54,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
new_weak_condition_.Broadcast(Thread::Current());
}
- void Disallow() OVERRIDE
+ void Disallow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
CHECK(!kUseReadBarrier);
@@ -62,7 +62,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
allow_new_system_weak_ = false;
}
- void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE
+ void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) override
REQUIRES(!allow_disallow_lock_) {
MutexLock mu(Thread::Current(), allow_disallow_lock_);
new_weak_condition_.Broadcast(Thread::Current());
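
The Allow/Disallow/Broadcast trio that SystemWeakHolder overrides is a gate: during parts of the GC cycle readers of system weaks are held back, and Allow() flips the flag and broadcasts the condition so they wake. A sketch of the same gate using standard-library primitives in place of ART's Mutex and ConditionVariable, so an analogy rather than the runtime's code:

    #include <condition_variable>
    #include <mutex>

    class WeakAccessGate {
     public:
      void Disallow() {
        std::lock_guard<std::mutex> lock(mu_);
        allowed_ = false;
      }
      void Allow() {
        std::lock_guard<std::mutex> lock(mu_);
        allowed_ = true;
        cv_.notify_all();  // plays the role of new_weak_condition_.Broadcast()
      }
      void WaitUntilAllowed() {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return allowed_; });
      }

     private:
      std::mutex mu_;
      std::condition_variable cv_;
      bool allowed_ = true;
    };
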
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index 897ab01251..07725b9a56 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -44,7 +44,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
disallow_count_(0),
sweep_count_(0) {}
- void Allow() OVERRIDE
+ void Allow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Allow();
@@ -52,7 +52,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
allow_count_++;
}
- void Disallow() OVERRIDE
+ void Disallow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Disallow();
@@ -60,7 +60,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
disallow_count_++;
}
- void Broadcast(bool broadcast_for_checkpoint) OVERRIDE
+ void Broadcast(bool broadcast_for_checkpoint) override
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Broadcast(broadcast_for_checkpoint);
@@ -70,7 +70,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
}
}
- void Sweep(IsMarkedVisitor* visitor) OVERRIDE
+ void Sweep(IsMarkedVisitor* visitor) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
MutexLock mu(Thread::Current(), allow_disallow_lock_);
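
The counting holder in this test is a thin recording wrapper: each override first delegates to the real SystemWeakHolder behavior, then bumps a counter for the assertions to read. The shape, reduced to stand-in types:

    #include <cstddef>

    struct Holder {
      virtual ~Holder() = default;
      virtual void Allow() {}
    };

    struct CountingHolder : Holder {
      size_t allow_count = 0;
      void Allow() override {
        Holder::Allow();  // keep the real behavior...
        ++allow_count;    // ...then record that it ran
      }
    };
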
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 38581ce807..caa8802823 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -33,7 +33,7 @@ class RecursiveTask : public HeapTask {
: HeapTask(NanoTime() + MsToNs(10)), task_processor_(task_processor), counter_(counter),
max_recursion_(max_recursion) {
}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
if (max_recursion_ > 0) {
task_processor_->AddTask(self,
new RecursiveTask(task_processor_, counter_, max_recursion_ - 1));
@@ -52,7 +52,7 @@ class WorkUntilDoneTask : public SelfDeletingTask {
WorkUntilDoneTask(TaskProcessor* task_processor, Atomic<bool>* done_running)
: task_processor_(task_processor), done_running_(done_running) {
}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
task_processor_->RunAllTasks(self);
done_running_->store(true, std::memory_order_seq_cst);
}
@@ -105,7 +105,7 @@ class TestOrderTask : public HeapTask {
TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
: HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
}
- virtual void Run(Thread* thread ATTRIBUTE_UNUSED) OVERRIDE {
+ virtual void Run(Thread* thread ATTRIBUTE_UNUSED) override {
ASSERT_EQ(*counter_, expected_counter_);
++*counter_;
}
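
One wrinkle these test files preserve: Run() is still spelled virtual void Run(...) override. That is legal, but virtual is redundant once override is present, and style guides generally drop it. A small sketch of the two equivalent spellings, using a stand-in task type:

    struct TaskBase {
      virtual ~TaskBase() = default;
      virtual void Run() = 0;
    };

    struct MyTask : TaskBase {
      // Redundant but legal, as kept in the tests above:
      virtual void Run() override {}
      // Equivalent and terser:
      //   void Run() override {}
    };
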
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index d6a2fa0cb5..5d234eaac3 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -198,7 +198,7 @@ class Verification::CollectRootVisitor : public SingleRootVisitor {
CollectRootVisitor(ObjectSet* visited, WorkQueue* work) : visited_(visited), work_(work) {}
void VisitRoot(mirror::Object* obj, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (obj != nullptr && visited_->insert(obj).second) {
std::ostringstream oss;
oss << info.ToString() << " = " << obj << "(" << obj->PrettyTypeOf() << ")";