author Yi Kong <yikong@google.com> 2019-03-24 02:47:16 -0700
committer Treehugger Robot <treehugger-gerrit@google.com> 2019-03-29 18:18:00 +0000
commit  39402548017c99f9b33afaa851f412371013e911 (patch)
tree    80c4d53cb7ae12ab9f4800cb358e53d5109593da
parent  2fde9bdc252e209d722f83410f2ced42ab73e8a4 (diff)
Modernise code to use override specifier
Generated by clang-tidy, with IgnoreDestructors option enabled.

Test: m checkbuild
Bug: 116509795
Change-Id: I5dafa10c2cf605165581b8cf7dd2633ed101ed65
-rw-r--r--  build/Android.bp | 4
-rw-r--r--  compiler/optimizing/codegen_test_utils.h | 2
-rw-r--r--  compiler/optimizing/nodes.h | 4
-rw-r--r--  compiler/optimizing/register_allocator_graph_color.h | 2
-rw-r--r--  compiler/optimizing/scheduler_arm.h | 2
-rw-r--r--  compiler/optimizing/scheduler_arm64.h | 2
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.h | 2
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h | 2
-rw-r--r--  compiler/utils/mips/assembler_mips.h | 4
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h | 4
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.h | 2
-rw-r--r--  dex2oat/linker/image_test.h | 2
-rw-r--r--  libartbase/base/indenter.h | 2
-rw-r--r--  runtime/base/mutex.h | 8
-rw-r--r--  runtime/dexopt_test.h | 2
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 4
-rw-r--r--  runtime/gc/collector/semi_space.h | 6
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 16
-rw-r--r--  runtime/gc/space/dlmalloc_space.h | 2
-rw-r--r--  runtime/gc/space/image_space.h | 4
-rw-r--r--  runtime/gc/space/large_object_space.h | 12
-rw-r--r--  runtime/gc/space/malloc_space.h | 24
-rw-r--r--  runtime/gc/space/region_space.h | 12
-rw-r--r--  runtime/gc/space/rosalloc_space.h | 4
-rw-r--r--  runtime/gc/space/space.h | 2
-rw-r--r--  runtime/gc/space/zygote_space.h | 8
-rw-r--r--  runtime/thread.cc | 2
-rw-r--r--  tools/dexanalyze/dexanalyze_experiments.h | 2
28 files changed, 73 insertions, 69 deletions
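
For reference, a minimal sketch of the rewrite that clang-tidy's modernize-use-override check applies throughout the diff below (the class and method names here are illustrative, not taken from this change):

    // Before: the overriding function restates virtual but carries no
    // override specifier, which is what the check flags.
    class Base {
     public:
      virtual ~Base() {}
      virtual void Process() = 0;
    };

    class Derived : public Base {
     public:
      virtual void Process() {}  // flagged: overrides Base::Process()
    };

    // After: the fix drops the redundant virtual and adds override.
    // With IgnoreDestructors enabled, as in this change, destructors such as
    // virtual ~MipsAssembler() are deliberately left untouched.
    class DerivedFixed : public Base {
     public:
      void Process() override {}
    };
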
diff --git a/build/Android.bp b/build/Android.bp
index 3ee7f92f6e..7b807d56b7 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -100,6 +100,10 @@ art_global_defaults {
// We use it to implement OFFSETOF_MEMBER - see macros.h.
"-Wno-invalid-offsetof",
+ // Enable inconsistent-missing-override warning. This warning is disabled by default in
+ // Android.
+ "-Winconsistent-missing-override",
+
// Enable thread annotations for std::mutex, etc.
"-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
],
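
The flag added above backs up the rest of this change: -Winconsistent-missing-override warns when a function overrides a base-class virtual without the override specifier while other members of the same class already use it. A hedged illustration with hypothetical names:

    class Visitor {
     public:
      virtual ~Visitor() {}
      virtual void VisitAdd() {}
      virtual void VisitMul() {}
    };

    class LatencyVisitor : public Visitor {
     public:
      void VisitAdd() override {}
      // Expected to trigger -Winconsistent-missing-override: this function
      // overrides Visitor::VisitMul() but is not marked override, while
      // VisitAdd() in the same class is.
      virtual void VisitMul() {}
    };
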
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 0289e9c4a7..dde39d46f3 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -176,7 +176,7 @@ class InternalCodeAllocator : public CodeAllocator {
public:
InternalCodeAllocator() : size_(0) { }
- virtual uint8_t* Allocate(size_t size) {
+ uint8_t* Allocate(size_t size) override {
size_ = size;
memory_.reset(new uint8_t[size]);
return memory_.get();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 4670b3f6ff..fedad0c69a 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2934,7 +2934,7 @@ class HNullConstant final : public HConstant {
size_t ComputeHashCode() const override { return 0; }
// The null constant representation is a 0-bit pattern.
- virtual bool IsZeroBitPattern() const { return true; }
+ bool IsZeroBitPattern() const override { return true; }
DECLARE_INSTRUCTION(NullConstant);
@@ -6316,7 +6316,7 @@ class HLoadClass final : public HInstruction {
bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const;
+ bool InstructionDataEquals(const HInstruction* other) const override;
size_t ComputeHashCode() const override { return type_index_.index_; }
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
index 16131e1c71..f0e7e55863 100644
--- a/compiler/optimizing/register_allocator_graph_color.h
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -94,7 +94,7 @@ class RegisterAllocatorGraphColor : public RegisterAllocator {
void AllocateRegisters() override;
- bool Validate(bool log_fatal_on_failure);
+ bool Validate(bool log_fatal_on_failure) override;
private:
// Collect all intervals and prepare for register allocation.
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index 875593bbf0..4c7a3bb4d6 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -55,7 +55,7 @@ class SchedulingLatencyVisitorARM : public SchedulingLatencyVisitor {
: codegen_(down_cast<CodeGeneratorARMType*>(codegen)) {}
// Default visitor for instructions not handled specifically below.
- void VisitInstruction(HInstruction* ATTRIBUTE_UNUSED) {
+ void VisitInstruction(HInstruction* ATTRIBUTE_UNUSED) override {
last_visited_latency_ = kArmIntegerOpLatency;
}
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 7f6549dcfe..ba5a743545 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -58,7 +58,7 @@ static constexpr uint32_t kArm64SIMDTypeConversionInt2FPLatency = 10;
class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
public:
// Default visitor for instructions not handled specifically below.
- void VisitInstruction(HInstruction* ATTRIBUTE_UNUSED) {
+ void VisitInstruction(HInstruction* ATTRIBUTE_UNUSED) override {
last_visited_latency_ = kArm64IntegerOpLatency;
}
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 674bf12f89..0b1b6d2ba9 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -188,7 +188,7 @@ class ArmVIXLJNIMacroAssembler final
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
// Create a new label that can be used with Jump/Bind calls.
std::unique_ptr<JNIMacroLabel> CreateLabel() override;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index fdecab8251..9e01a70ea9 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -81,7 +81,7 @@ class Arm64Assembler final : public Assembler {
const uint8_t* CodeBufferBaseAddress() const override;
// Copy instructions out of assembly buffer into the given region of memory.
- void FinalizeInstructions(const MemoryRegion& region);
+ void FinalizeInstructions(const MemoryRegion& region) override;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs);
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 69189a49aa..a24071d694 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -287,7 +287,7 @@ class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSi
size_t CodeSize() const override { return Assembler::CodeSize(); }
size_t CodePosition() override;
- DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
+ DebugFrameOpCodeWriterForAssembler& cfi() override { return Assembler::cfi(); }
virtual ~MipsAssembler() {
for (auto& branch : branches_) {
@@ -1372,7 +1372,7 @@ class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSi
void FinalizeCode() override;
// Emit branches and finalize all instructions.
- void FinalizeInstructions(const MemoryRegion& region);
+ void FinalizeInstructions(const MemoryRegion& region) override;
// Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS,
// must be used instead of MipsLabel::GetPosition()).
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 2f991e92c5..b331cee33d 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -440,7 +440,7 @@ class Mips64Assembler final : public Assembler, public JNIMacroAssembler<Pointer
}
size_t CodeSize() const override { return Assembler::CodeSize(); }
- DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
+ DebugFrameOpCodeWriterForAssembler& cfi() override { return Assembler::cfi(); }
// Emit Machine Instructions.
void Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
@@ -1437,7 +1437,7 @@ class Mips64Assembler final : public Assembler, public JNIMacroAssembler<Pointer
void FinalizeCode() override;
// Emit branches and finalize all instructions.
- void FinalizeInstructions(const MemoryRegion& region);
+ void FinalizeInstructions(const MemoryRegion& region) override;
// Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS64,
// must be used instead of Mips64Label::GetPosition()).
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index 465ebbe6c3..4c2fd8fc73 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -91,7 +91,7 @@ class X86_64JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86_64Assemble
void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
// Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size);
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset64 thr_offs,
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 7209fbfce5..a0f1093dbc 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -75,7 +75,7 @@ struct CompilationHelper {
class ImageTest : public CommonCompilerDriverTest {
protected:
- virtual void SetUp() {
+ void SetUp() override {
ReserveImageSpace();
CommonCompilerTest::SetUp();
}
diff --git a/libartbase/base/indenter.h b/libartbase/base/indenter.h
index 81d55fc2f3..215bf88ce3 100644
--- a/libartbase/base/indenter.h
+++ b/libartbase/base/indenter.h
@@ -65,7 +65,7 @@ class Indenter : public std::streambuf {
return c;
}
- int sync() {
+ int sync() override {
return out_sbuf_->pubsync();
}
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index aaa1ee6d6f..39fd8c82a3 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -160,7 +160,7 @@ class LOCKABLE Mutex : public BaseMutex {
explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
~Mutex();
- virtual bool IsMutex() const { return true; }
+ bool IsMutex() const override { return true; }
// Block until mutex is free then acquire exclusive access.
void ExclusiveLock(Thread* self) ACQUIRE();
@@ -200,7 +200,7 @@ class LOCKABLE Mutex : public BaseMutex {
return recursion_count_;
}
- virtual void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
// For negative capabilities in clang annotations.
const Mutex& operator!() const { return *this; }
@@ -249,7 +249,7 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
~ReaderWriterMutex();
- virtual bool IsReaderWriterMutex() const { return true; }
+ bool IsReaderWriterMutex() const override { return true; }
// Block until ReaderWriterMutex is free then acquire exclusive access.
void ExclusiveLock(Thread* self) ACQUIRE();
@@ -321,7 +321,7 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
// one or more readers.
pid_t GetExclusiveOwnerTid() const;
- virtual void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
// For negative capabilities in clang annotations.
const ReaderWriterMutex& operator!() const { return *this; }
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 70f35c857a..bfae8a180d 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -28,7 +28,7 @@ class DexoptTest : public Dex2oatEnvironmentTest {
public:
void SetUp() override;
- virtual void PreRuntimeCreate();
+ void PreRuntimeCreate() override;
void PostRuntimeCreate() override;
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index ff9597cfe7..75cfdba54e 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -227,7 +227,7 @@ class MarkSweep : public GarbageCollector {
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
- REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ override REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns object if the object is marked in the heap bitmap, otherwise null.
@@ -301,7 +301,7 @@ class MarkSweep : public GarbageCollector {
void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
// Revoke all the thread-local buffers.
- void RevokeAllThreadLocalBuffers();
+ void RevokeAllThreadLocalBuffers() override;
// Whether or not we count how many of each type of object were scanned.
static constexpr bool kCountScannedTypes = false;
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 6fab371d73..f23d4167dd 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -158,7 +158,7 @@ class SemiSpace : public GarbageCollector {
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
- REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ override REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
@@ -192,14 +192,14 @@ class SemiSpace : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
// Recursively blackens objects on the mark stack.
- void ProcessMarkStack()
+ void ProcessMarkStack() override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Revoke all the thread-local buffers.
- void RevokeAllThreadLocalBuffers();
+ void RevokeAllThreadLocalBuffers() override;
// Current space, we check this space first to avoid searching for the appropriate space for an
// object.
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 6d9fd047f4..3e4961a9f0 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -87,12 +87,12 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
}
// Override capacity so that we only return the possibly limited capacity
- size_t Capacity() const {
+ size_t Capacity() const override {
return growth_end_ - begin_;
}
// The total amount of memory reserved for the space.
- size_t NonGrowthLimitCapacity() const {
+ size_t NonGrowthLimitCapacity() const override {
return GetMemMap()->Size();
}
@@ -107,18 +107,18 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
// Reset the space to empty.
void Clear() override REQUIRES(!block_lock_);
- void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
- size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!block_lock_);
- size_t RevokeAllThreadLocalBuffers()
+ size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!block_lock_);
+ size_t RevokeAllThreadLocalBuffers() override
REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_);
void AssertAllThreadLocalBuffersAreRevoked()
REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
- uint64_t GetBytesAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
+ uint64_t GetBytesAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
- uint64_t GetObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
+ uint64_t GetObjectsAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
bool IsEmpty() const {
return Begin() == End();
@@ -128,7 +128,7 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
return true;
}
- bool Contains(const mirror::Object* obj) const {
+ bool Contains(const mirror::Object* obj) const override {
const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return byte_obj >= Begin() && byte_obj < End();
}
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index e91602f607..930f557125 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -102,7 +102,7 @@ class DlMallocSpace : public MallocSpace {
#ifndef NDEBUG
// Override only in the debug build.
- void CheckMoreCoreForPrecondition();
+ void CheckMoreCoreForPrecondition() override;
#endif
void* GetMspace() const {
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index e27810d230..c020dc1d86 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -35,7 +35,7 @@ namespace space {
// An image space is a space backed with a memory mapped image.
class ImageSpace : public MemMapSpace {
public:
- SpaceType GetType() const {
+ SpaceType GetType() const override {
return kSpaceTypeImageSpace;
}
@@ -107,7 +107,7 @@ class ImageSpace : public MemMapSpace {
return live_bitmap_.get();
}
- void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
// Sweeping image spaces is a NOP.
void Sweep(bool /* swap_bitmaps */, size_t* /* freed_objects */, size_t* /* freed_bytes */) {
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 86ecd85c83..4d1cbc0dd0 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -97,7 +97,7 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
return End() - Begin();
}
// Return true if we contain the specified address.
- bool Contains(const mirror::Object* obj) const {
+ bool Contains(const mirror::Object* obj) const override {
const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return Begin() <= byte_obj && byte_obj < End();
}
@@ -153,14 +153,14 @@ class LargeObjectMapSpace : public LargeObjectSpace {
// of malloc.
static LargeObjectMapSpace* Create(const std::string& name);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) REQUIRES(!lock_);
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override REQUIRES(!lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override
REQUIRES(!lock_);
- size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
+ size_t Free(Thread* self, mirror::Object* ptr) override REQUIRES(!lock_);
void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
- bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
+ bool Contains(const mirror::Object* obj) const override NO_THREAD_SAFETY_ANALYSIS;
void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
@@ -193,7 +193,7 @@ class FreeListSpace final : public LargeObjectSpace {
override REQUIRES(!lock_);
size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
- void Dump(std::ostream& os) const REQUIRES(!lock_);
+ void Dump(std::ostream& os) const override REQUIRES(!lock_);
void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 9a90dfd2ac..50006568ca 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -40,7 +40,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
- SpaceType GetType() const {
+ SpaceType GetType() const override {
return kSpaceTypeMallocSpace;
}
@@ -49,14 +49,14 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated) = 0;
// Allocate num_bytes without allowing the underlying space to grow.
- virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
+ mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override = 0;
// Return the storage space required by obj. If usable_size isn't null then it is set to the
// amount of the storage space that may be used by obj.
- virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
- virtual size_t Free(Thread* self, mirror::Object* ptr)
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override = 0;
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Returns the maximum bytes that could be allocated for the given
@@ -98,12 +98,12 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
}
// Override capacity so that we only return the possibly limited capacity
- size_t Capacity() const {
+ size_t Capacity() const override {
return growth_limit_;
}
// The total amount of memory reserved for the alloc space.
- size_t NonGrowthLimitCapacity() const {
+ size_t NonGrowthLimitCapacity() const override {
return GetMemMap()->Size();
}
@@ -111,7 +111,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// shrinking is supported.
void ClampGrowthLimit();
- void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
void SetGrowthLimit(size_t growth_limit);
@@ -129,8 +129,8 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// aggressive in releasing unused pages. Invalidates the space its called on.
ZygoteSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode,
MallocSpace** out_malloc_space) NO_THREAD_SAFETY_ANALYSIS;
- virtual uint64_t GetBytesAllocated() = 0;
- virtual uint64_t GetObjectsAllocated() = 0;
+ uint64_t GetBytesAllocated() override = 0;
+ uint64_t GetObjectsAllocated() override = 0;
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
@@ -170,7 +170,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(lock_);
- virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return &SweepCallback;
}
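
One detail worth noting in the malloc_space.h hunks above: override can be combined with = 0, because a function that overrides a base-class virtual may itself remain pure virtual. A minimal sketch with hypothetical names:

    #include <cstdint>

    class AllocSpaceBase {
     public:
      virtual ~AllocSpaceBase() {}
      virtual std::uint64_t GetBytesAllocated() = 0;
    };

    class MallocSpaceBase : public AllocSpaceBase {
     public:
      // Still abstract: override documents the relationship to the base
      // class, while = 0 leaves the definition to concrete subclasses.
      std::uint64_t GetBytesAllocated() override = 0;
    };
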
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index d8b54e26ed..0bbc76a581 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -134,15 +134,15 @@ class RegionSpace final : public ContinuousMemMapAllocSpace {
// growth limit.
void ClampGrowthLimit(size_t new_capacity) REQUIRES(!region_lock_);
- void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
// Dump region containing object `obj`. Precondition: `obj` is in the region space.
void DumpRegionForObject(std::ostream& os, mirror::Object* obj) REQUIRES(!region_lock_);
void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);
- size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_);
+ size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!region_lock_);
void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
- size_t RevokeAllThreadLocalBuffers()
+ size_t RevokeAllThreadLocalBuffers() override
REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
void AssertAllThreadLocalBuffersAreRevoked()
@@ -165,10 +165,10 @@ class RegionSpace final : public ContinuousMemMapAllocSpace {
template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
- uint64_t GetBytesAllocated() REQUIRES(!region_lock_) {
+ uint64_t GetBytesAllocated() override REQUIRES(!region_lock_) {
return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
}
- uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) {
+ uint64_t GetObjectsAllocated() override REQUIRES(!region_lock_) {
return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
}
uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
@@ -194,7 +194,7 @@ class RegionSpace final : public ContinuousMemMapAllocSpace {
return true;
}
- bool Contains(const mirror::Object* obj) const {
+ bool Contains(const mirror::Object* obj) const override {
const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return byte_obj >= Begin() && byte_obj < Limit();
}
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 9e95c16cb3..00f5ab2da9 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -130,8 +130,8 @@ class RosAllocSpace : public MallocSpace {
uint64_t GetBytesAllocated() override;
uint64_t GetObjectsAllocated() override;
- size_t RevokeThreadLocalBuffers(Thread* thread);
- size_t RevokeAllThreadLocalBuffers();
+ size_t RevokeThreadLocalBuffers(Thread* thread) override;
+ size_t RevokeAllThreadLocalBuffers() override;
void AssertThreadLocalBuffersAreRevoked(Thread* thread);
void AssertAllThreadLocalBuffersAreRevoked();
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 3a42f9847c..05ff55b9f1 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -419,7 +419,7 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
bool IsContinuousMemMapAllocSpace() const override {
return true;
}
- ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
+ ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() override {
return this;
}
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 03e2ec8977..09db40e0d7 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -36,7 +36,7 @@ class ZygoteSpace final : public ContinuousMemMapAllocSpace {
accounting::ContinuousSpaceBitmap* mark_bitmap)
REQUIRES_SHARED(Locks::mutator_lock_);
- void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
SpaceType GetType() const override {
return kSpaceTypeZygoteSpace;
@@ -63,11 +63,11 @@ class ZygoteSpace final : public ContinuousMemMapAllocSpace {
return 0U;
}
- uint64_t GetBytesAllocated() {
+ uint64_t GetBytesAllocated() override {
return Size();
}
- uint64_t GetObjectsAllocated() {
+ uint64_t GetObjectsAllocated() override {
return objects_allocated_.load();
}
@@ -81,7 +81,7 @@ class ZygoteSpace final : public ContinuousMemMapAllocSpace {
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
- virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return &SweepCallback;
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index f4222aeba4..bdbb69777d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -4235,7 +4235,7 @@ void Thread::SetReadBarrierEntrypoints() {
void Thread::ClearAllInterpreterCaches() {
static struct ClearInterpreterCacheClosure : Closure {
- virtual void Run(Thread* thread) {
+ void Run(Thread* thread) override {
thread->GetInterpreterCache()->Clear(thread);
}
} closure;
diff --git a/tools/dexanalyze/dexanalyze_experiments.h b/tools/dexanalyze/dexanalyze_experiments.h
index 55d2f44e99..1d600d755f 100644
--- a/tools/dexanalyze/dexanalyze_experiments.h
+++ b/tools/dexanalyze/dexanalyze_experiments.h
@@ -94,7 +94,7 @@ class CountDexIndices : public Experiment {
void ProcessDexFile(const DexFile& dex_file) override;
void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
- void Dump(std::ostream& os, uint64_t total_size) const;
+ void Dump(std::ostream& os, uint64_t total_size) const override;
private:
// Total string ids loaded from dex code.