Remove superfluous 'virtual' specifiers in ART.
Remove the 'virtual' specifier from methods that already bear the
'override' specifier; 'override' implies the method is virtual, so the
extra keyword adds no information.
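
As a minimal illustration of the pattern (hypothetical class names, not
taken from this patch):

  class Base {
   public:
    virtual ~Base() {}
    virtual void Run();
  };

  class Derived : public Base {
   public:
    // Before: virtual void Run() override;  // 'virtual' is superfluous.
    void Run() override;                     // 'override' alone suffices.
  };

clang-tidy's modernize-use-override check can flag and strip these
redundant specifiers automatically.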
Test: mmma art
Change-Id: I114930969a5ca048d88de9ecd18e2c6403593e31
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index acf13c4..6bd6263 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -53,7 +53,7 @@
bool HasAtLeast(const InstructionSetFeatures* other) const override;
- virtual InstructionSet GetInstructionSet() const override {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kX86;
}
@@ -69,7 +69,7 @@
protected:
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
- virtual std::unique_ptr<const InstructionSetFeatures>
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
std::string* error_msg) const override {
return AddFeaturesFromSplitString(features, false, error_msg);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 52ddd13..ab7182a 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -450,7 +450,7 @@
class ClassLinkerMethodHandlesTest : public ClassLinkerTest {
protected:
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonRuntimeTest::SetUpRuntimeOptions(options);
}
};
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index bf17e64..a5157df 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -157,11 +157,11 @@
virtual ~CommonRuntimeTestBase() {}
protected:
- virtual void SetUp() override {
+ void SetUp() override {
CommonRuntimeTestImpl::SetUp();
}
- virtual void TearDown() override {
+ void TearDown() override {
CommonRuntimeTestImpl::TearDown();
}
};
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 0b99722..2cbf557 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -42,7 +42,7 @@
// Test class that provides some helpers to set a test up for compilation using dex2oat.
class Dex2oatEnvironmentTest : public CommonRuntimeTest {
public:
- virtual void SetUp() override {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
const ArtDexFileLoader dex_file_loader;
@@ -106,7 +106,7 @@
ASSERT_NE(multi1[1]->GetLocationChecksum(), multi2[1]->GetLocationChecksum());
}
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
// options->push_back(std::make_pair("-verbose:oat", nullptr));
// Set up the image location.
@@ -117,7 +117,7 @@
callbacks_.reset();
}
- virtual void TearDown() override {
+ void TearDown() override {
ClearDirectory(odex_dir_.c_str());
ASSERT_EQ(0, rmdir(odex_dir_.c_str()));
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index b4e52ac..df7181a 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -26,11 +26,11 @@
class DexoptTest : public Dex2oatEnvironmentTest {
public:
- virtual void SetUp() override;
+ void SetUp() override;
virtual void PreRuntimeCreate();
- virtual void PostRuntimeCreate() override;
+ void PostRuntimeCreate() override;
// Generate an oat file for the purposes of test.
// The oat file will be generated for dex_location in the given oat_location
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index ec6f144..8c471bc 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -132,7 +132,7 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
- virtual void VisitObjects(ObjectCallback callback, void* arg) override
+ void VisitObjects(ObjectCallback callback, void* arg) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -145,13 +145,13 @@
// Function that tells whether or not to add a reference to the table.
virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
- virtual bool ContainsCardFor(uintptr_t addr) override;
+ bool ContainsCardFor(uintptr_t addr) override;
- virtual void Dump(std::ostream& os) override REQUIRES_SHARED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) override REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void SetCards() override;
+ void SetCards() override;
- virtual void ClearTable() override;
+ void ClearTable() override;
protected:
// Cleared card array, used to update the mod-union table.
@@ -172,27 +172,27 @@
virtual ~ModUnionTableCardCache() {}
// Clear and store cards for a space.
- virtual void ProcessCards() override;
+ void ProcessCards() override;
// Mark all references to the alloc space(s).
- virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
+ void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitObjects(ObjectCallback callback, void* arg) override
+ void VisitObjects(ObjectCallback callback, void* arg) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Nothing to verify.
- virtual void Verify() override {}
+ void Verify() override {}
- virtual void Dump(std::ostream& os) override;
+ void Dump(std::ostream& os) override;
- virtual bool ContainsCardFor(uintptr_t addr) override;
+ bool ContainsCardFor(uintptr_t addr) override;
- virtual void SetCards() override;
+ void SetCards() override;
- virtual void ClearTable() override;
+ void ClearTable() override;
protected:
// Cleared card bitmap, used to update the mod-union table.
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 5aa5550..2a382d7 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -97,13 +97,13 @@
class CollectVisitedVisitor : public MarkObjectVisitor {
public:
explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update ATTRIBUTE_UNUSED) override
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(ref != nullptr);
MarkObject(ref->AsMirrorPtr());
}
- virtual mirror::Object* MarkObject(mirror::Object* obj) override
+ mirror::Object* MarkObject(mirror::Object* obj) override
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
out_->insert(obj);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index f73ecf1..3ea88d6 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -393,7 +393,7 @@
: concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
}
- virtual void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -467,7 +467,7 @@
: concurrent_copying_(concurrent_copying) {
}
- virtual void Run(Thread* thread) override REQUIRES(Locks::mutator_lock_) {
+ void Run(Thread* thread) override REQUIRES(Locks::mutator_lock_) {
ConcurrentCopying* cc = concurrent_copying_;
TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
// Note: self is not necessarily equal to thread since thread may be suspended.
@@ -1457,7 +1457,7 @@
disable_weak_ref_access_(disable_weak_ref_access) {
}
- virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index a956d38..1a7464a 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -71,7 +71,7 @@
bool measure_read_barrier_slow_path = false);
~ConcurrentCopying();
- virtual void RunPhases() override
+ void RunPhases() override
REQUIRES(!immune_gray_stack_lock_,
!mark_stack_lock_,
!rb_slow_path_histogram_lock_,
@@ -87,15 +87,15 @@
void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
- virtual GcType GetGcType() const override {
+ GcType GetGcType() const override {
return (kEnableGenerationalConcurrentCopyingCollection && young_gen_)
? kGcTypeSticky
: kGcTypePartial;
}
- virtual CollectorType GetCollectorType() const override {
+ CollectorType GetCollectorType() const override {
return kCollectorTypeCC;
}
- virtual void RevokeAllThreadLocalBuffers() override;
+ void RevokeAllThreadLocalBuffers() override;
void SetRegionSpace(space::RegionSpace* region_space) {
DCHECK(region_space != nullptr);
region_space_ = region_space;
@@ -144,7 +144,7 @@
void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- virtual mirror::Object* IsMarked(mirror::Object* from_ref) override
+ mirror::Object* IsMarked(mirror::Object* from_ref) override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -166,21 +166,22 @@
void Process(mirror::Object* obj, MemberOffset offset)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_, !immune_gray_stack_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- override REQUIRES_SHARED(Locks::mutator_lock_)
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
template<bool kGrayImmuneObject>
void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info)
- override REQUIRES_SHARED(Locks::mutator_lock_)
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info) override
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
accounting::ObjectStack* GetAllocationStack();
accounting::ObjectStack* GetLiveStack();
- virtual void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
+ void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -204,21 +205,21 @@
void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
- ObjPtr<mirror::Reference> reference) override
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) override
REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
- virtual mirror::Object* MarkObject(mirror::Object* from_ref) override
+ mirror::Object* MarkObject(mirror::Object* from_ref) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
- bool do_atomic_update) override
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
+ bool do_atomic_update) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
- bool do_atomic_update) override
+ bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) override
REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 840a4b0..23b2719 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1109,8 +1109,7 @@
public:
explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
- virtual mirror::Object* IsMarked(mirror::Object* obj)
- override
+ mirror::Object* IsMarked(mirror::Object* obj) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
mark_sweep_->VerifyIsLive(obj);
return obj;
@@ -1161,7 +1160,7 @@
}
}
- virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
ScopedTrace trace("Marking thread roots");
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* const self = Thread::Current();
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 012e179..ff9597c 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -57,7 +57,7 @@
~MarkSweep() {}
- virtual void RunPhases() override REQUIRES(!mark_stack_lock_);
+ void RunPhases() override REQUIRES(!mark_stack_lock_);
void InitializePhase();
void MarkingPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
@@ -72,11 +72,11 @@
return is_concurrent_;
}
- virtual GcType GetGcType() const override {
+ GcType GetGcType() const override {
return kGcTypeFull;
}
- virtual CollectorType GetCollectorType() const override {
+ CollectorType GetCollectorType() const override {
return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
}
@@ -187,25 +187,25 @@
void VerifyIsLive(const mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update) override
+ bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
- size_t count,
- const RootInfo& info) override
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Marks an object.
- virtual mirror::Object* MarkObject(mirror::Object* obj) override
+ mirror::Object* MarkObject(mirror::Object* obj) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -215,8 +215,8 @@
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update) override
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -231,7 +231,7 @@
protected:
// Returns object if the object is marked in the heap bitmap, otherwise null.
- virtual mirror::Object* IsMarked(mirror::Object* object) override
+ mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void MarkObjectNonNull(mirror::Object* obj,
@@ -278,8 +278,7 @@
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void ProcessMarkStack()
- override
+ void ProcessMarkStack() override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 308699b..76c44a3 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -26,7 +26,7 @@
class PartialMarkSweep : public MarkSweep {
public:
// Virtual as overridden by StickyMarkSweep.
- virtual GcType GetGcType() const override {
+ GcType GetGcType() const override {
return kGcTypePartial;
}
@@ -37,7 +37,7 @@
// Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
// collections, ie the Zygote space. Also mark this space is immune. Virtual as overridden by
// StickyMarkSweep.
- virtual void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
+ void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 49cd02e..bb42be6 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -63,7 +63,7 @@
~SemiSpace() {}
- virtual void RunPhases() override NO_THREAD_SAFETY_ANALYSIS;
+ void RunPhases() override NO_THREAD_SAFETY_ANALYSIS;
virtual void InitializePhase();
virtual void MarkingPhase() REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
@@ -72,10 +72,10 @@
virtual void FinishPhase() REQUIRES(Locks::mutator_lock_);
void MarkReachableObjects()
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual GcType GetGcType() const override {
+ GcType GetGcType() const override {
return kGcTypePartial;
}
- virtual CollectorType GetCollectorType() const override {
+ CollectorType GetCollectorType() const override {
return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
}
@@ -106,11 +106,11 @@
void MarkObjectIfNotInToSpace(CompressedReferenceType* obj_ptr)
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual mirror::Object* MarkObject(mirror::Object* root) override
+ mirror::Object* MarkObject(mirror::Object* root) override
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
- bool do_atomic_update) override
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void ScanObject(mirror::Object* obj)
@@ -145,11 +145,12 @@
void SweepSystemWeaks()
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info) override
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info) override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
@@ -162,12 +163,12 @@
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
- virtual mirror::Object* IsMarked(mirror::Object* object) override
+ mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
- virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
- bool do_atomic_update) override
+ bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ bool do_atomic_update) override
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index f92a103..f65413d 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -33,8 +33,7 @@
StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
~StickyMarkSweep() {}
- virtual void MarkConcurrentRoots(VisitRootFlags flags)
- override
+ void MarkConcurrentRoots(VisitRootFlags flags) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 16fd786..b74071b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1327,7 +1327,7 @@
public:
explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
}
- virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
thread->GetJniEnv()->TrimLocals();
// If thread is a running mutator, then act on behalf of the trim thread.
// See the code in ThreadList::RunCheckpoint.
@@ -2826,7 +2826,7 @@
root->AsMirrorPtr(), RootInfo(kRootVMInternal));
}
- virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
+ void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == nullptr) {
LOG(ERROR) << "Root is null with info " << root_info.GetType();
@@ -3259,10 +3259,10 @@
}
struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
- virtual mirror::Object* MarkObject(mirror::Object* obj) override {
+ mirror::Object* MarkObject(mirror::Object* obj) override {
return obj;
}
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
}
};
@@ -3633,7 +3633,7 @@
public:
ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
: HeapTask(target_time), cause_(cause), force_full_(force_full) {}
- virtual void Run(Thread* self) override {
+ void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->ConcurrentGC(self, cause_, force_full_);
heap->ClearConcurrentGCRequest();
@@ -3691,7 +3691,7 @@
public:
explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
- virtual void Run(Thread* self) override {
+ void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->DoPendingCollectorTransition();
heap->ClearPendingCollectorTransition(self);
@@ -3733,7 +3733,7 @@
class Heap::HeapTrimTask : public HeapTask {
public:
explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
- virtual void Run(Thread* self) override {
+ void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->Trim(self);
heap->ClearPendingTrim(self);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 09f3970..c63ff71 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -50,27 +50,30 @@
size_t capacity, uint8_t* requested_begin, bool can_move_objects);
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated)
- override REQUIRES(!lock_);
+ mirror::Object* AllocWithGrowth(Thread* self,
+ size_t num_bytes,
+ size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) override REQUIRES(!lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- override REQUIRES(!lock_) {
+ mirror::Object* Alloc(Thread* self,
+ size_t num_bytes,
+ size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) override REQUIRES(!lock_) {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return AllocationSizeNonvirtual(obj, usable_size);
}
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t Free(Thread* self, mirror::Object* ptr) override
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -133,7 +136,7 @@
uint64_t GetBytesAllocated() override;
uint64_t GetObjectsAllocated() override;
- virtual void Clear() override;
+ void Clear() override;
bool IsDlMallocSpace() const override {
return true;
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 39ff2c3..26c6463 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -76,7 +76,7 @@
return this;
}
collector::ObjectBytePair Sweep(bool swap_bitmaps);
- virtual bool CanMoveObjects() const override {
+ bool CanMoveObjects() const override {
return false;
}
// Current address at which the space begins, which may vary as the space is filled.
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 2fe1f82..545e3d8 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -352,7 +352,7 @@
return mark_bitmap_.get();
}
- virtual bool IsDiscontinuousSpace() const override {
+ bool IsDiscontinuousSpace() const override {
return true;
}
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index caa8802..7cb678b 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -33,7 +33,7 @@
: HeapTask(NanoTime() + MsToNs(10)), task_processor_(task_processor), counter_(counter),
max_recursion_(max_recursion) {
}
- virtual void Run(Thread* self) override {
+ void Run(Thread* self) override {
if (max_recursion_ > 0) {
task_processor_->AddTask(self,
new RecursiveTask(task_processor_, counter_, max_recursion_ - 1));
@@ -52,7 +52,7 @@
WorkUntilDoneTask(TaskProcessor* task_processor, Atomic<bool>* done_running)
: task_processor_(task_processor), done_running_(done_running) {
}
- virtual void Run(Thread* self) override {
+ void Run(Thread* self) override {
task_processor_->RunAllTasks(self);
done_running_->store(true, std::memory_order_seq_cst);
}
@@ -105,7 +105,7 @@
TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
: HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
}
- virtual void Run(Thread* thread ATTRIBUTE_UNUSED) override {
+ void Run(Thread* thread ATTRIBUTE_UNUSED) override {
ASSERT_EQ(*counter_, expected_counter_);
++*counter_;
}
diff --git a/runtime/jni/java_vm_ext_test.cc b/runtime/jni/java_vm_ext_test.cc
index fe1c168..4049c6e 100644
--- a/runtime/jni/java_vm_ext_test.cc
+++ b/runtime/jni/java_vm_ext_test.cc
@@ -34,7 +34,7 @@
}
- virtual void TearDown() override {
+ void TearDown() override {
CommonRuntimeTest::TearDown();
}
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index a4b151a..3040b90 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -84,7 +84,7 @@
}
}
- virtual void TearDown() override {
+ void TearDown() override {
CleanUpJniEnv();
CommonCompilerTest::TearDown();
}
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 528740b..e9e7ca8 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -34,7 +34,7 @@
class DexCacheMethodHandlesTest : public DexCacheTest {
protected:
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonRuntimeTest::SetUpRuntimeOptions(options);
}
};
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 6e5786a..72eced2 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1574,7 +1574,7 @@
public:
MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
- virtual mirror::Object* IsMarked(mirror::Object* object) override
+ mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Monitor::Deflate(self_, object)) {
DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index e1e0e23..aaedb23 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -252,14 +252,14 @@
}
struct Callback : public ClassLoadCallback {
- virtual void ClassPreDefine(const char* descriptor,
- Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
- Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
- const DexFile& initial_dex_file,
- const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
- /*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
- /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
- override REQUIRES_SHARED(Locks::mutator_lock_) {
+ void ClassPreDefine(const char* descriptor,
+ Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
+ Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
+ const DexFile& initial_dex_file,
+ const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
+ /*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
+ /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const std::string& location = initial_dex_file.GetLocation();
std::string event =
std::string("PreDefine:") + descriptor + " <" +
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 04a7dfb..3099b23 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -699,7 +699,7 @@
class DoubleHiType final : public Cat2Type {
public:
std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- virtual bool IsDoubleHi() const override { return true; }
+ bool IsDoubleHi() const override { return true; }
static const DoubleHiType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -772,7 +772,7 @@
ConstantValue() >= std::numeric_limits<int16_t>::min() &&
ConstantValue() <= std::numeric_limits<int16_t>::max();
}
- virtual bool IsConstantTypes() const override { return true; }
+ bool IsConstantTypes() const override { return true; }
AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
@@ -993,7 +993,7 @@
CheckConstructorInvariants(this);
}
- virtual bool IsUninitializedThisReference() const override { return true; }
+ bool IsUninitializedThisReference() const override { return true; }
bool HasClassVirtual() const override { return true; }