Diffstat (limited to 'runtime')
-rw-r--r--  runtime/aot_class_linker.h | 4
-rw-r--r--  runtime/arch/arch_test.cc | 4
-rw-r--r--  runtime/arch/arm/context_arm.h | 30
-rw-r--r--  runtime/arch/arm/instruction_set_features_arm.h | 14
-rw-r--r--  runtime/arch/arm64/context_arm64.h | 30
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64.h | 12
-rw-r--r--  runtime/arch/mips/context_mips.h | 28
-rw-r--r--  runtime/arch/mips/instruction_set_features_mips.h | 12
-rw-r--r--  runtime/arch/mips64/context_mips64.h | 28
-rw-r--r--  runtime/arch/mips64/instruction_set_features_mips64.h | 12
-rw-r--r--  runtime/arch/stub_test.cc | 4
-rw-r--r--  runtime/arch/x86/context_x86.h | 30
-rw-r--r--  runtime/arch/x86/instruction_set_features_x86.h | 12
-rw-r--r--  runtime/arch/x86_64/context_x86_64.h | 30
-rw-r--r--  runtime/arch/x86_64/instruction_set_features_x86_64.h | 6
-rw-r--r--  runtime/art_field.h | 2
-rw-r--r--  runtime/art_method.h | 2
-rw-r--r--  runtime/base/mem_map_arena_pool.cc | 4
-rw-r--r--  runtime/base/mem_map_arena_pool.h | 14
-rw-r--r--  runtime/base/mutex.cc | 6
-rw-r--r--  runtime/base/mutex.h | 4
-rw-r--r--  runtime/cha.cc | 8
-rw-r--r--  runtime/class_linker.cc | 24
-rw-r--r--  runtime/class_linker_test.cc | 4
-rw-r--r--  runtime/common_runtime_test.h | 4
-rw-r--r--  runtime/compiler_filter.h | 2
-rw-r--r--  runtime/debugger.cc | 34
-rw-r--r--  runtime/debugger.h | 22
-rw-r--r--  runtime/dex2oat_environment_test.h | 6
-rw-r--r--  runtime/dexopt_test.h | 4
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 36
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc | 4
-rw-r--r--  runtime/fault_handler.h | 16
-rw-r--r--  runtime/gc/accounting/mod_union_table-inl.h | 2
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 4
-rw-r--r--  runtime/gc/accounting/mod_union_table.h | 32
-rw-r--r--  runtime/gc/accounting/mod_union_table_test.cc | 6
-rw-r--r--  runtime/gc/allocation_record.cc | 2
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 18
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 26
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc | 8
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 12
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 20
-rw-r--r--  runtime/gc/collector/partial_mark_sweep.h | 4
-rw-r--r--  runtime/gc/collector/semi_space.h | 18
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.h | 12
-rw-r--r--  runtime/gc/heap.cc | 20
-rw-r--r--  runtime/gc/heap_test.cc | 2
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 28
-rw-r--r--  runtime/gc/space/dlmalloc_space.h | 42
-rw-r--r--  runtime/gc/space/image_space.h | 6
-rw-r--r--  runtime/gc/space/image_space_test.cc | 6
-rw-r--r--  runtime/gc/space/large_object_space.cc | 14
-rw-r--r--  runtime/gc/space/large_object_space.h | 44
-rw-r--r--  runtime/gc/space/malloc_space.h | 2
-rw-r--r--  runtime/gc/space/memory_tool_malloc_space.h | 18
-rw-r--r--  runtime/gc/space/region_space.h | 28
-rw-r--r--  runtime/gc/space/rosalloc_space.h | 40
-rw-r--r--  runtime/gc/space/space.h | 12
-rw-r--r--  runtime/gc/space/zygote_space.h | 24
-rw-r--r--  runtime/gc/system_weak.h | 6
-rw-r--r--  runtime/gc/system_weak_test.cc | 8
-rw-r--r--  runtime/gc/task_processor_test.cc | 6
-rw-r--r--  runtime/gc/verification.cc | 2
-rw-r--r--  runtime/gc_root.h | 4
-rw-r--r--  runtime/handle_scope.h | 2
-rw-r--r--  runtime/hidden_api_test.cc | 2
-rw-r--r--  runtime/hprof/hprof.cc | 24
-rw-r--r--  runtime/instrumentation.cc | 10
-rw-r--r--  runtime/instrumentation_test.cc | 28
-rw-r--r--  runtime/intern_table_test.cc | 2
-rw-r--r--  runtime/java_frame_root_info.h | 4
-rw-r--r--  runtime/jit/jit.cc | 8
-rw-r--r--  runtime/jit/jit_code_cache.cc | 8
-rw-r--r--  runtime/jit/profile_saver.cc | 2
-rw-r--r--  runtime/jit/profiling_info_test.cc | 2
-rw-r--r--  runtime/jni/java_vm_ext_test.cc | 4
-rw-r--r--  runtime/jni/jni_internal_test.cc | 2
-rw-r--r--  runtime/mirror/class.h | 2
-rw-r--r--  runtime/mirror/dex_cache.h | 2
-rw-r--r--  runtime/mirror/dex_cache_test.cc | 2
-rw-r--r--  runtime/mirror/iftable.h | 2
-rw-r--r--  runtime/mirror/proxy.h | 2
-rw-r--r--  runtime/mirror/stack_trace_element.h | 2
-rw-r--r--  runtime/mirror/string.h | 2
-rw-r--r--  runtime/mirror/var_handle.cc | 26
-rw-r--r--  runtime/monitor.cc | 12
-rw-r--r--  runtime/monitor_objects_stack_visitor.h | 2
-rw-r--r--  runtime/monitor_test.cc | 2
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc | 2
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 2
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 4
-rw-r--r--  runtime/noop_compiler_callbacks.h | 10
-rw-r--r--  runtime/oat_file.cc | 22
-rw-r--r--  runtime/oat_file.h | 6
-rw-r--r--  runtime/proxy_test.cc | 2
-rw-r--r--  runtime/quick_exception_handler.cc | 12
-rw-r--r--  runtime/runtime.cc | 2
-rw-r--r--  runtime/runtime_callbacks_test.cc | 40
-rw-r--r--  runtime/stack.cc | 6
-rw-r--r--  runtime/thread.cc | 34
-rw-r--r--  runtime/thread_list.cc | 4
-rw-r--r--  runtime/trace.h | 24
-rw-r--r--  runtime/transaction.h | 2
-rw-r--r--  runtime/verifier/instruction_flags.h | 2
-rw-r--r--  runtime/verifier/reg_type.h | 280
-rw-r--r--  runtime/verifier/reg_type_test.cc | 2
107 files changed, 769 insertions, 769 deletions
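
Every hunk below makes the same two substitutions: the OVERRIDE macro becomes the C++11 `override` keyword and the FINAL macro becomes `final`, which explains the exactly balanced insertion and deletion counts. Assuming the usual pre-C++11 compatibility definitions in ART's base/macros.h (not shown in this diff), the macros being retired amount to:

// Presumed legacy definitions from base/macros.h. With C++11 as the
// minimum standard they are plain aliases for the keywords, so inlining
// them away is purely mechanical and changes no behavior.
#define OVERRIDE override
#define FINAL final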
diff --git a/runtime/aot_class_linker.h b/runtime/aot_class_linker.h
index 927b53302b..6a8133efc1 100644
--- a/runtime/aot_class_linker.h
+++ b/runtime/aot_class_linker.h
@@ -34,14 +34,14 @@ class AotClassLinker : public ClassLinker {
Handle<mirror::Class> klass,
verifier::HardFailLogMode log_level,
std::string* error_msg)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_);
bool InitializeClass(Thread *self,
Handle<mirror::Class> klass,
bool can_run_clinit,
bool can_init_parents)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_);
};
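
Spelled either way, the specifier keeps its compile-time check: a member marked `override` must actually override a base-class virtual, so signature drift becomes a hard error rather than a silent new overload. A minimal sketch with hypothetical classes (not taken from this diff):

#include <string>

struct ClassLinkerBase {
  virtual bool VerifyClass(std::string* error_msg) = 0;
  virtual ~ClassLinkerBase() = default;
};

struct AotLinker : ClassLinkerBase {
  // Matches the base signature exactly, so this compiles.
  bool VerifyClass(std::string* error_msg) override {
    return error_msg != nullptr;
  }
  // Uncommenting this fails to compile: marked 'override' but the const
  // mismatch means it overrides nothing.
  // bool VerifyClass(std::string* error_msg) const override;
};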
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index d4ceede07a..d4dbbf9541 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -46,7 +46,7 @@ namespace art {
class ArchTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use 64-bit ISA for runtime setup to make method size potentially larger
// than necessary (rather than smaller) during CreateCalleeSaveMethod
options->push_back(std::make_pair("imageinstructionset", "x86_64"));
@@ -55,7 +55,7 @@ class ArchTest : public CommonRuntimeTest {
// Do not do any of the finalization. We don't want to run any code, we don't need the heap
// prepared, it actually will be a problem with setting the instruction set to x86_64 in
// SetUpRuntimeOptions.
- void FinalizeSetup() OVERRIDE {
+ void FinalizeSetup() override {
ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
}
};
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index b9802967fe..845cdaa100 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -26,7 +26,7 @@
namespace art {
namespace arm {
-class ArmContext FINAL : public Context {
+class ArmContext final : public Context {
public:
ArmContext() {
Reset();
@@ -34,55 +34,55 @@ class ArmContext FINAL : public Context {
virtual ~ArmContext() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
SetGPR(PC, new_pc);
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(R0, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfSRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfSRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
private:
// Pointers to register locations, initialized to null or the specific registers below.
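
Marking a leaf class such as ArmContext `final` keeps the guarantee the old FINAL macro gave: no further derivation, which also lets the compiler devirtualize calls made through the concrete type. A small sketch with hypothetical types:

struct Context {
  virtual void Reset() = 0;
  virtual ~Context() = default;
};

struct LeafContext final : Context {
  void Reset() override {}
};

// Uncommenting this fails to compile: a 'final' class cannot be a base.
// struct SubContext : LeafContext {};

void Prepare(LeafContext& ctx) {
  ctx.Reset();  // No class can override Reset() further, so the compiler
                // may bind this call directly instead of via the vtable.
}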
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index f82534b511..d964148900 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -25,7 +25,7 @@ class ArmInstructionSetFeatures;
using ArmFeaturesUniquePtr = std::unique_ptr<const ArmInstructionSetFeatures>;
// Instruction set features relevant to the ARM architecture.
-class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
+class ArmInstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
static ArmFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -47,18 +47,18 @@ class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static ArmFeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+ bool HasAtLeast(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kArm;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
// Return a string of the form "div,lpae" or "none".
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Is the divide instruction feature enabled?
bool HasDivideInstruction() const {
@@ -82,7 +82,7 @@ class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "div", "lpae" adding these to a new ArmInstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
ArmInstructionSetFeatures(bool has_div,
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index e64cfb86ea..95dac90ac7 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -26,7 +26,7 @@
namespace art {
namespace arm64 {
-class Arm64Context FINAL : public Context {
+class Arm64Context final : public Context {
public:
Arm64Context() {
Reset();
@@ -34,56 +34,56 @@ class Arm64Context FINAL : public Context {
~Arm64Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_lr) OVERRIDE {
+ void SetPC(uintptr_t new_lr) override {
SetGPR(kPC, new_lr);
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(X0, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, arraysize(gprs_));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, arraysize(gprs_));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
// Note: PC isn't an available GPR (outside of internals), so don't allow retrieving the value.
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfXRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfDRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfDRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
static constexpr size_t kPC = kNumberOfXRegisters;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index af2d4c79f9..163a2d8eba 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -25,7 +25,7 @@ class Arm64InstructionSetFeatures;
using Arm64FeaturesUniquePtr = std::unique_ptr<const Arm64InstructionSetFeatures>;
// Instruction set features relevant to the ARM64 architecture.
-class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+class Arm64InstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
static Arm64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -47,16 +47,16 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static Arm64FeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kArm64;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
// Return a string of the form "a53" or "none".
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Generate code addressing Cortex-A53 erratum 835769?
bool NeedFixCortexA53_835769() const {
@@ -74,7 +74,7 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "a53" adding these to a new ArmInstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
Arm64InstructionSetFeatures(bool needs_a53_835769_fix, bool needs_a53_843419_fix)
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index 7e073b288a..960aea1fcd 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -33,53 +33,53 @@ class MipsContext : public Context {
}
virtual ~MipsContext() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
SetGPR(T9, new_pc);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(A0, new_arg0_value);
}
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 76bc639277..ab5bb3c101 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -28,7 +28,7 @@ class MipsInstructionSetFeatures;
using MipsFeaturesUniquePtr = std::unique_ptr<const MipsInstructionSetFeatures>;
// Instruction set features relevant to the MIPS architecture.
-class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
+class MipsInstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "r4000" and create InstructionSetFeatures.
static MipsFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -50,15 +50,15 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static MipsFeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kMips;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Is this an ISA revision greater than 2 opening up new opcodes.
bool IsMipsIsaRevGreaterThanEqual2() const {
@@ -87,7 +87,7 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
MipsInstructionSetFeatures(bool fpu_32bit, bool mips_isa_gte2, bool r6, bool msa)
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index b2a6138471..857abfd2b8 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -33,53 +33,53 @@ class Mips64Context : public Context {
}
virtual ~Mips64Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
SetGPR(T9, new_pc);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(A0, new_arg0_value);
}
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
index 27e544ed91..e204d9de83 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ b/runtime/arch/mips64/instruction_set_features_mips64.h
@@ -25,7 +25,7 @@ class Mips64InstructionSetFeatures;
using Mips64FeaturesUniquePtr = std::unique_ptr<const Mips64InstructionSetFeatures>;
// Instruction set features relevant to the MIPS64 architecture.
-class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+class Mips64InstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "r4000" and create InstructionSetFeatures.
static Mips64FeaturesUniquePtr FromVariant(const std::string& variant,
@@ -48,15 +48,15 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static Mips64FeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kMips64;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Does it have MSA (MIPS SIMD Architecture) support.
bool HasMsa() const {
@@ -69,7 +69,7 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "fpu32", "mips2" adding these to a new Mips64InstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
explicit Mips64InstructionSetFeatures(bool msa) : InstructionSetFeatures(), msa_(msa) {
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index b0c0e43e35..e8df90eccd 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -37,7 +37,7 @@ namespace art {
class StubTest : public CommonRuntimeTest {
protected:
// We need callee-save methods set up in the Runtime for exceptions.
- void SetUp() OVERRIDE {
+ void SetUp() override {
// Do the normal setup.
CommonRuntimeTest::SetUp();
@@ -54,7 +54,7 @@ class StubTest : public CommonRuntimeTest {
}
}
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use a smaller heap
for (std::pair<std::string, const void*>& pair : *options) {
if (pair.first.find("-Xmx") == 0) {
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index 0ebb22bd6d..5b438c3623 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -26,62 +26,62 @@
namespace art {
namespace x86 {
-class X86Context FINAL : public Context {
+class X86Context final : public Context {
public:
X86Context() {
Reset();
}
virtual ~X86Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(ESP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
eip_ = new_pc;
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(EAX, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
private:
// Pretend XMM registers are made of uint32_t pieces, because they are manipulated
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 57cf4b2741..acf13c491e 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -49,17 +49,17 @@ class X86InstructionSetFeatures : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static X86FeaturesUniquePtr FromAssembly(bool x86_64 = false);
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+ bool HasAtLeast(const InstructionSetFeatures* other) const override;
- virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ virtual InstructionSet GetInstructionSet() const override {
return InstructionSet::kX86;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
virtual ~X86InstructionSetFeatures() {}
@@ -71,7 +71,7 @@ class X86InstructionSetFeatures : public InstructionSetFeatures {
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
virtual std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
return AddFeaturesFromSplitString(features, false, error_msg);
}
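
One detail worth flagging in this file: the converted declaration `virtual InstructionSet GetInstructionSet() const override` keeps a leading `virtual` that the keyword makes redundant, since `override` can only apply to a virtual function. Idiomatic post-C++11 style drops it, as in this sketch with a hypothetical hierarchy:

struct Features {
  virtual ~Features() = default;
  virtual int GetInstructionSet() const { return 0; }
};

struct X86Features : Features {
  // Legal but redundant: 'override' already implies 'virtual'.
  virtual int GetInstructionSet() const override { return 86; }
};

struct X86_64Features : Features {
  // Equivalent and tidier.
  int GetInstructionSet() const override { return 64; }
};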
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index d242693f81..ab38614c98 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -26,62 +26,62 @@
namespace art {
namespace x86_64 {
-class X86_64Context FINAL : public Context {
+class X86_64Context final : public Context {
public:
X86_64Context() {
Reset();
}
virtual ~X86_64Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(RSP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
rip_ = new_pc;
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(RDI, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
private:
// Pointers to register locations. Values are initialized to null or the special registers below.
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
index e76490ba13..76258fa5d4 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64.h
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -25,7 +25,7 @@ class X86_64InstructionSetFeatures;
using X86_64FeaturesUniquePtr = std::unique_ptr<const X86_64InstructionSetFeatures>;
// Instruction set features relevant to the X86_64 architecture.
-class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
+class X86_64InstructionSetFeatures final : public X86InstructionSetFeatures {
public:
// Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
static X86_64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg) {
@@ -59,7 +59,7 @@ class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
return Convert(X86InstructionSetFeatures::FromAssembly(true));
}
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kX86_64;
}
@@ -69,7 +69,7 @@ class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
return X86InstructionSetFeatures::AddFeaturesFromSplitString(features, true, error_msg);
}
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 123595c6fe..5afd000b05 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -40,7 +40,7 @@ class Object;
class String;
} // namespace mirror
-class ArtField FINAL {
+class ArtField final {
public:
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index ce08cb0bea..48ddc6992d 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -66,7 +66,7 @@ using MethodDexCachePair = NativeDexCachePair<ArtMethod>;
using MethodDexCacheType = std::atomic<MethodDexCachePair>;
} // namespace mirror
-class ArtMethod FINAL {
+class ArtMethod final {
public:
// Should the class state be checked on sensitive operations?
DECLARE_RUNTIME_DEBUG_FLAG(kCheckDeclaringClassState);
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index a9fbafe7ab..851c23f1cb 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -31,11 +31,11 @@
namespace art {
-class MemMapArena FINAL : public Arena {
+class MemMapArena final : public Arena {
public:
MemMapArena(size_t size, bool low_4gb, const char* name);
virtual ~MemMapArena();
- void Release() OVERRIDE;
+ void Release() override;
private:
static MemMap Allocate(size_t size, bool low_4gb, const char* name);
diff --git a/runtime/base/mem_map_arena_pool.h b/runtime/base/mem_map_arena_pool.h
index 24e150e1e7..e98ef07ddb 100644
--- a/runtime/base/mem_map_arena_pool.h
+++ b/runtime/base/mem_map_arena_pool.h
@@ -21,17 +21,17 @@
namespace art {
-class MemMapArenaPool FINAL : public ArenaPool {
+class MemMapArenaPool final : public ArenaPool {
public:
explicit MemMapArenaPool(bool low_4gb = false, const char* name = "LinearAlloc");
virtual ~MemMapArenaPool();
- Arena* AllocArena(size_t size) OVERRIDE;
- void FreeArenaChain(Arena* first) OVERRIDE;
- size_t GetBytesAllocated() const OVERRIDE;
- void ReclaimMemory() OVERRIDE;
- void LockReclaimMemory() OVERRIDE;
+ Arena* AllocArena(size_t size) override;
+ void FreeArenaChain(Arena* first) override;
+ size_t GetBytesAllocated() const override;
+ void ReclaimMemory() override;
+ void LockReclaimMemory() override;
// Trim the maps in arenas by madvising, used by JIT to reduce memory usage.
- void TrimMaps() OVERRIDE;
+ void TrimMaps() override;
private:
const bool low_4gb_;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 044c4c2f78..28b29125cd 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -125,7 +125,7 @@ static void BackOff(uint32_t i) {
}
}
-class ScopedAllMutexesLock FINAL {
+class ScopedAllMutexesLock final {
public:
explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
for (uint32_t i = 0;
@@ -144,7 +144,7 @@ class ScopedAllMutexesLock FINAL {
const BaseMutex* const mutex_;
};
-class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
+class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
public:
explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
for (uint32_t i = 0;
@@ -166,7 +166,7 @@ class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
};
// Scoped class that generates events at the beginning and end of lock contention.
-class ScopedContentionRecorder FINAL : public ValueObject {
+class ScopedContentionRecorder final : public ValueObject {
public:
ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
: mutex_(kLogLockContentions ? mutex : nullptr),
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index fba209a0b6..d127d0f01f 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -297,7 +297,7 @@ class LOCKABLE Mutex : public BaseMutex {
// For negative capabilities in clang annotations.
const Mutex& operator!() const { return *this; }
- void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+ void WakeupToRespondToEmptyCheckpoint() override;
private:
#if ART_USE_FUTEXES
@@ -418,7 +418,7 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
// For negative capabilities in clang annotations.
const ReaderWriterMutex& operator!() const { return *this; }
- void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+ void WakeupToRespondToEmptyCheckpoint() override;
private:
#if ART_USE_FUTEXES
diff --git a/runtime/cha.cc b/runtime/cha.cc
index ce84e8ce2e..3ea920dff1 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -181,7 +181,7 @@ void ClassHierarchyAnalysis::ResetSingleImplementationInHierarchy(ObjPtr<mirror:
// headers, sets the should_deoptimize flag on stack to 1.
// TODO: also set the register value to 1 when should_deoptimize is allocated in
// a register.
-class CHAStackVisitor FINAL : public StackVisitor {
+class CHAStackVisitor final : public StackVisitor {
public:
CHAStackVisitor(Thread* thread_in,
Context* context,
@@ -190,7 +190,7 @@ class CHAStackVisitor FINAL : public StackVisitor {
method_headers_(method_headers) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
// Avoid types of methods that do not have an oat quick method header.
if (method == nullptr ||
@@ -245,13 +245,13 @@ class CHAStackVisitor FINAL : public StackVisitor {
DISALLOW_COPY_AND_ASSIGN(CHAStackVisitor);
};
-class CHACheckpoint FINAL : public Closure {
+class CHACheckpoint final : public Closure {
public:
explicit CHACheckpoint(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
: barrier_(0),
method_headers_(method_headers) {}
- void Run(Thread* thread) OVERRIDE {
+ void Run(Thread* thread) override {
// Note thread and self may not be equal if thread was already suspended at
// the point of the request.
Thread* self = Thread::Current();
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f80d34ca2f..65f05d9362 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -884,7 +884,7 @@ class SetInterpreterEntrypointArtMethodVisitor : public ArtMethodVisitor {
explicit SetInterpreterEntrypointArtMethodVisitor(PointerSize image_pointer_size)
: image_pointer_size_(image_pointer_size) {}
- void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild && !method->IsRuntimeMethod()) {
CHECK(method->GetDeclaringClass() != nullptr);
}
@@ -1390,7 +1390,7 @@ bool ClassLinker::OpenImageDexFiles(gc::space::ImageSpace* space,
// Helper class for ArtMethod checks when adding an image. Keeps all required functionality
// together and caches some intermediate results.
-class ImageSanityChecks FINAL {
+class ImageSanityChecks final {
public:
static void CheckObjects(gc::Heap* heap, ClassLinker* class_linker)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1951,7 +1951,7 @@ class VisitClassLoaderClassesVisitor : public ClassLoaderVisitor {
done_(false) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
if (!done_ && class_table != nullptr) {
DefiningClassLoaderFilterVisitor visitor(class_loader, visitor_);
@@ -1972,7 +1972,7 @@ class VisitClassLoaderClassesVisitor : public ClassLoaderVisitor {
ClassVisitor* visitor)
: defining_class_loader_(defining_class_loader), visitor_(visitor) { }
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (klass->GetClassLoader() != defining_class_loader_) {
return true;
}
@@ -2009,7 +2009,7 @@ void ClassLinker::VisitClasses(ClassVisitor* visitor) {
class GetClassesInToVector : public ClassVisitor {
public:
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE {
+ bool operator()(ObjPtr<mirror::Class> klass) override {
classes_.push_back(klass);
return true;
}
@@ -2021,7 +2021,7 @@ class GetClassInToObjectArray : public ClassVisitor {
explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr)
: arr_(arr), index_(0) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
++index_;
if (index_ <= arr_->GetLength()) {
arr_->Set(index_ - 1, klass);
@@ -3845,7 +3845,7 @@ class MoveClassTableToPreZygoteVisitor : public ClassLoaderVisitor {
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES(Locks::classlinker_classes_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
class_table->FreezeSnapshot();
@@ -3871,7 +3871,7 @@ class LookupClassesVisitor : public ClassLoaderVisitor {
result_(result) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
ObjPtr<mirror::Class> klass = class_table->Lookup(descriptor_, hash_);
// Add `klass` only if `class_loader` is its defining (not just initiating) class loader.
@@ -5563,7 +5563,7 @@ bool ClassLinker::LinkMethods(Thread* self,
// Comparator for name and signature of a method, used in finding overriding methods. Implementation
// avoids the use of handles, if it didn't then rather than compare dex files we could compare dex
// caches in the implementation below.
-class MethodNameAndSignatureComparator FINAL : public ValueObject {
+class MethodNameAndSignatureComparator final : public ValueObject {
public:
explicit MethodNameAndSignatureComparator(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) :
@@ -8555,7 +8555,7 @@ class CountClassesVisitor : public ClassLoaderVisitor {
CountClassesVisitor() : num_zygote_classes(0), num_non_zygote_classes(0) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
num_zygote_classes += class_table->NumZygoteClasses(class_loader);
@@ -8825,7 +8825,7 @@ class GetResolvedClassesVisitor : public ClassVisitor {
extra_stats_(),
last_extra_stats_(extra_stats_.end()) { }
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (!klass->IsProxyClass() &&
!klass->IsArrayClass() &&
klass->IsResolved() &&
@@ -8913,7 +8913,7 @@ class ClassLinker::FindVirtualMethodHolderVisitor : public ClassVisitor {
: method_(method),
pointer_size_(pointer_size) {}
- bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) override {
if (klass->GetVirtualMethodsSliceUnchecked(pointer_size_).Contains(method_)) {
holder_ = klass;
}
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index e40f1dbcdf..52ddd13ab7 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -442,7 +442,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
class TestRootVisitor : public SingleRootVisitor {
public:
- void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE {
+ void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) override {
EXPECT_TRUE(root != nullptr);
}
};
@@ -450,7 +450,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
class ClassLinkerMethodHandlesTest : public ClassLinkerTest {
protected:
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonRuntimeTest::SetUpRuntimeOptions(options);
}
};
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 234b66a862..bf17e644af 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -157,11 +157,11 @@ class CommonRuntimeTestBase : public TestType, public CommonRuntimeTestImpl {
virtual ~CommonRuntimeTestBase() {}
protected:
- virtual void SetUp() OVERRIDE {
+ virtual void SetUp() override {
CommonRuntimeTestImpl::SetUp();
}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
CommonRuntimeTestImpl::TearDown();
}
};
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 60975b04f7..012ebcbe1c 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -25,7 +25,7 @@
namespace art {
-class CompilerFilter FINAL {
+class CompilerFilter final {
public:
// Note: Order here matters. Later filter choices are considered "as good
// as" earlier filter choices.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e607b31e68..366b5ec5e9 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -138,7 +138,7 @@ static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
return os;
}
-class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
+class DebugInstrumentationListener final : public instrumentation::InstrumentationListener {
public:
DebugInstrumentationListener() {}
virtual ~DebugInstrumentationListener() {}
@@ -147,7 +147,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
@@ -176,7 +176,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
ArtMethod* method,
uint32_t dex_pc,
const JValue& return_value)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
@@ -195,7 +195,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method,
uint32_t dex_pc)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected method unwind event in debugger " << ArtMethod::PrettyMethod(method)
<< " " << dex_pc;
@@ -205,7 +205,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t new_dex_pc)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
// We also listen to kMethodExited instrumentation event and the current instruction is a
// RETURN so we know the MethodExited method is going to be called right after us. Like in
@@ -229,7 +229,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
ArtMethod* method,
uint32_t dex_pc,
ArtField* field)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostFieldAccessEvent(method, dex_pc, this_object.Get(), field);
}
@@ -239,19 +239,19 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
uint32_t dex_pc,
ArtField* field,
const JValue& field_value)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object.Get(), field, &field_value);
}
void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
Handle<mirror::Throwable> exception_object)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostException(exception_object.Get());
}
// We only care about branches in the Jit.
void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected branch event in debugger " << ArtMethod::PrettyMethod(method)
<< " " << dex_pc << ", " << dex_pc_offset;
}
@@ -262,20 +262,20 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
ArtMethod* method,
uint32_t dex_pc,
ArtMethod* target ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected invoke event in debugger " << ArtMethod::PrettyMethod(method)
<< " " << dex_pc;
}
// TODO Might be worth it to post ExceptionCatch event.
void ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
- Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) OVERRIDE {
+ Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) override {
LOG(ERROR) << "Unexpected exception handled event in debugger";
}
// TODO Might be worth it to implement this.
void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED,
- const ShadowFrame& frame ATTRIBUTE_UNUSED) OVERRIDE {
+ const ShadowFrame& frame ATTRIBUTE_UNUSED) override {
LOG(ERROR) << "Unexpected WatchedFramePop event in debugger";
}
@@ -1087,7 +1087,7 @@ class ClassListCreator : public ClassVisitor {
public:
explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
- bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (!c->IsPrimitive()) {
classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
}
@@ -2450,7 +2450,7 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram
expandBufAdd4BE(buf_, frame_count_);
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true; // The debugger can't do anything useful with a frame that has no Method*.
}
@@ -2608,7 +2608,7 @@ JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame
}
// Walks the stack until we find the frame with the given FrameId.
-class FindFrameVisitor FINAL : public StackVisitor {
+class FindFrameVisitor final : public StackVisitor {
public:
FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -3040,7 +3040,7 @@ class CatchLocationFinder : public StackVisitor {
throw_dex_pc_(dex::kDexNoIndex) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
DCHECK(method != nullptr);
if (method->IsRuntimeMethod()) {
@@ -3693,7 +3693,7 @@ class NeedsDeoptimizationVisitor : public StackVisitor {
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
needs_deoptimization_(false) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
// The visitor is meant to be used when handling exception from compiled code only.
CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: "
<< ArtMethod::PrettyMethod(GetMethod());
diff --git a/runtime/debugger.h b/runtime/debugger.h
index e1de991812..33444f829c 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -54,20 +54,20 @@ class StackVisitor;
class Thread;
struct DebuggerActiveMethodInspectionCallback : public MethodInspectionCallback {
- bool IsMethodBeingInspected(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsMethodSafeToJit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool MethodNeedsDebugVersion(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsMethodBeingInspected(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsMethodSafeToJit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool MethodNeedsDebugVersion(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
};
struct DebuggerDdmCallback : public DdmCallback {
void DdmPublishChunk(uint32_t type, const ArrayRef<const uint8_t>& data)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ override REQUIRES_SHARED(Locks::mutator_lock_);
};
struct InternalDebuggerControlCallback : public DebuggerControlCallback {
- void StartDebugger() OVERRIDE;
- void StopDebugger() OVERRIDE;
- bool IsDebuggerConfigured() OVERRIDE;
+ void StartDebugger() override;
+ void StopDebugger() override;
+ bool IsDebuggerConfigured() override;
};
/*
@@ -831,15 +831,15 @@ class Dbg {
class DbgThreadLifecycleCallback : public ThreadLifecycleCallback {
public:
- void ThreadStart(Thread* self) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- void ThreadDeath(Thread* self) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
+ void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
};
class DbgClassLoadCallback : public ClassLoadCallback {
public:
- void ClassLoad(Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void ClassLoad(Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
void ClassPrepare(Handle<mirror::Class> temp_klass,
- Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
};
static DbgThreadLifecycleCallback thread_lifecycle_callback_;
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 00a95cc7bd..0b99722652 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -42,7 +42,7 @@ namespace art {
// Test class that provides some helpers to set a test up for compilation using dex2oat.
class Dex2oatEnvironmentTest : public CommonRuntimeTest {
public:
- virtual void SetUp() OVERRIDE {
+ virtual void SetUp() override {
CommonRuntimeTest::SetUp();
const ArtDexFileLoader dex_file_loader;
@@ -106,7 +106,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
ASSERT_NE(multi1[1]->GetLocationChecksum(), multi2[1]->GetLocationChecksum());
}
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
// options->push_back(std::make_pair("-verbose:oat", nullptr));
// Set up the image location.
@@ -117,7 +117,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
callbacks_.reset();
}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
ClearDirectory(odex_dir_.c_str());
ASSERT_EQ(0, rmdir(odex_dir_.c_str()));
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 3203ee526b..b4e52ac49c 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -26,11 +26,11 @@ namespace art {
class DexoptTest : public Dex2oatEnvironmentTest {
public:
- virtual void SetUp() OVERRIDE;
+ virtual void SetUp() override;
virtual void PreRuntimeCreate();
- virtual void PostRuntimeCreate() OVERRIDE;
+ virtual void PostRuntimeCreate() override;
// Generate an oat file for the purposes of test.
// The oat file will be generated for dex_location in the given oat_location
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index aca169b924..fccfce4589 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -615,13 +615,13 @@ extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
}
// Visits arguments on the stack placing them into the shadow frame.
-class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
+class BuildQuickShadowFrameVisitor final : public QuickArgumentVisitor {
public:
BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
private:
ShadowFrame* const sf_;
@@ -707,7 +707,7 @@ static void HandleDeoptimization(JValue* result,
explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
// Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
// logic. Just always say we want to continue.
return true;
@@ -824,13 +824,13 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
// to jobjects.
-class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
+class BuildQuickArgumentVisitor final : public QuickArgumentVisitor {
public:
BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
private:
ScopedObjectAccessUnchecked* const soa_;
@@ -959,7 +959,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
// Visitor returning a reference argument at a given position in a Quick stack frame.
// NOTE: Only used for testing purposes.
-class GetQuickReferenceArgumentAtVisitor FINAL : public QuickArgumentVisitor {
+class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor {
public:
GetQuickReferenceArgumentAtVisitor(ArtMethod** sp,
const char* shorty,
@@ -972,7 +972,7 @@ class GetQuickReferenceArgumentAtVisitor FINAL : public QuickArgumentVisitor {
CHECK_LT(arg_pos, shorty_len) << "Argument position greater than the number of arguments";
}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
if (cur_pos_ == arg_pos_) {
Primitive::Type type = GetParamPrimitiveType();
CHECK_EQ(type, Primitive::kPrimNot) << "Argument at searched position is not a reference";
@@ -1014,7 +1014,7 @@ extern "C" StackReference<mirror::Object>* artQuickGetProxyReferenceArgumentAt(s
}
// Visitor returning all the reference arguments in a Quick stack frame.
-class GetQuickReferenceArgumentsVisitor FINAL : public QuickArgumentVisitor {
+class GetQuickReferenceArgumentsVisitor final : public QuickArgumentVisitor {
public:
GetQuickReferenceArgumentsVisitor(ArtMethod** sp,
bool is_static,
@@ -1022,7 +1022,7 @@ class GetQuickReferenceArgumentsVisitor FINAL : public QuickArgumentVisitor {
uint32_t shorty_len)
: QuickArgumentVisitor(sp, is_static, shorty, shorty_len) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
Primitive::Type type = GetParamPrimitiveType();
if (type == Primitive::kPrimNot) {
StackReference<mirror::Object>* ref_arg =
@@ -1059,13 +1059,13 @@ std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMetho
// Read object references held in arguments from quick frames and place them in JNI local references,
// so they don't get garbage collected.
-class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
+class RememberForGcArgumentVisitor final : public QuickArgumentVisitor {
public:
RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1957,7 +1957,7 @@ class ComputeNativeCallFrameSize {
uint32_t num_stack_entries_;
};
-class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
+class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
public:
explicit ComputeGenericJniFrameSize(bool critical_native)
: num_handle_scope_references_(0), critical_native_(critical_native) {}
@@ -2038,10 +2038,10 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
return sp8;
}
- uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
+ uintptr_t PushHandle(mirror::Object* /* ptr */) override;
// Add JNIEnv* and jobj/jclass before the shorty-derived elements.
- void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
+ void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -2117,7 +2117,7 @@ class FillNativeCall {
// Visits arguments on the stack placing them into a region lower down the stack for the benefit
// of transitioning into native code.
-class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
+class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
public:
BuildGenericJniFrameVisitor(Thread* self,
bool is_static,
@@ -2150,7 +2150,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
}
}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -2168,7 +2168,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
private:
// A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
- class FillJniCall FINAL : public FillNativeCall {
+ class FillJniCall final : public FillNativeCall {
public:
FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
HandleScope* handle_scope, bool critical_native)
@@ -2177,7 +2177,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
cur_entry_(0),
critical_native_(critical_native) {}
- uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ uintptr_t PushHandle(mirror::Object* ref) override REQUIRES_SHARED(Locks::mutator_lock_);
void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
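
The trampoline hunks above mark each one-off argument visitor final and its Visit() hook override. An illustrative reduction of that pattern, with hypothetical names:

    // Base class walks the arguments and calls a per-argument hook.
    class ArgVisitor {
     public:
      virtual ~ArgVisitor() {}
      void WalkAll(int arg_count) {
        for (int i = 0; i < arg_count; ++i) {
          Visit();
        }
      }
     protected:
      virtual void Visit() = 0;
    };

    class CountingVisitor final : public ArgVisitor {
     public:
      int visited() const { return visited_; }
     protected:
      // 'final' on the class lets the compiler devirtualize this call when
      // the static type is known to be CountingVisitor.
      void Visit() override { ++visited_; }
     private:
      int visited_ = 0;
    };
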
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 89694e351a..0f0fb69f4b 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -26,7 +26,7 @@ namespace art {
class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use 64-bit ISA for runtime setup to make method size potentially larger
// than necessary (rather than smaller) during CreateCalleeSaveMethod
options->push_back(std::make_pair("imageinstructionset", "x86_64"));
@@ -35,7 +35,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
// Do not do any of the finalization. We don't want to run any code, we don't need the heap
// prepared, it actually will be a problem with setting the instruction set to x86_64 in
// SetUpRuntimeOptions.
- void FinalizeSetup() OVERRIDE {
+ void FinalizeSetup() override {
ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
}
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 3e2664c7f9..02eeefe0a0 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -90,11 +90,11 @@ class FaultHandler {
DISALLOW_COPY_AND_ASSIGN(FaultHandler);
};
-class NullPointerHandler FINAL : public FaultHandler {
+class NullPointerHandler final : public FaultHandler {
public:
explicit NullPointerHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override;
static bool IsValidImplicitCheck(siginfo_t* siginfo) {
// Our implicit NPE checks always limit the range to a page.
@@ -108,31 +108,31 @@ class NullPointerHandler FINAL : public FaultHandler {
DISALLOW_COPY_AND_ASSIGN(NullPointerHandler);
};
-class SuspensionHandler FINAL : public FaultHandler {
+class SuspensionHandler final : public FaultHandler {
public:
explicit SuspensionHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override;
private:
DISALLOW_COPY_AND_ASSIGN(SuspensionHandler);
};
-class StackOverflowHandler FINAL : public FaultHandler {
+class StackOverflowHandler final : public FaultHandler {
public:
explicit StackOverflowHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override;
private:
DISALLOW_COPY_AND_ASSIGN(StackOverflowHandler);
};
-class JavaStackTraceHandler FINAL : public FaultHandler {
+class JavaStackTraceHandler final : public FaultHandler {
public:
explicit JavaStackTraceHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override NO_THREAD_SAFETY_ANALYSIS;
private:
DISALLOW_COPY_AND_ASSIGN(JavaStackTraceHandler);
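
The fault_handler.h hunks share one shape: an abstract Action() hook and a set of final concrete handlers. A sketch of that shape only; names and logic are illustrative, not the real fault dispatch:

    #include <signal.h>

    class Handler {
     public:
      virtual ~Handler() {}
      // Returns true if this handler recognized and handled the signal.
      virtual bool Action(int sig, siginfo_t* siginfo, void* context) = 0;
    };

    class NullCheckHandler final : public Handler {
     public:
      bool Action(int sig, siginfo_t* siginfo, void* context) override {
        (void)siginfo;
        (void)context;
        return sig == SIGSEGV;  // e.g. claim faults from implicit null checks
      }
    };
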
diff --git a/runtime/gc/accounting/mod_union_table-inl.h b/runtime/gc/accounting/mod_union_table-inl.h
index 3a09634c0b..f0a82e0c88 100644
--- a/runtime/gc/accounting/mod_union_table-inl.h
+++ b/runtime/gc/accounting/mod_union_table-inl.h
@@ -33,7 +33,7 @@ class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache {
space::ContinuousSpace* space)
: ModUnionTableReferenceCache(name, heap, space) {}
- bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE ALWAYS_INLINE {
+ bool ShouldAddReference(const mirror::Object* ref) const override ALWAYS_INLINE {
return !space_->HasAddress(ref);
}
};
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 0dd05cd6f0..40dc6e146a 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -329,8 +329,8 @@ class ModUnionCheckReferences {
class EmptyMarkObjectVisitor : public MarkObjectVisitor {
public:
- mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {return obj;}
- void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {}
+ mirror::Object* MarkObject(mirror::Object* obj) override {return obj;}
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {}
};
void ModUnionTable::FilterCards() {
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 7a3c06a281..ec6f144fd9 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -125,33 +125,33 @@ class ModUnionTableReferenceCache : public ModUnionTable {
virtual ~ModUnionTableReferenceCache() {}
// Clear and store cards for a space.
- void ProcessCards() OVERRIDE;
+ void ProcessCards() override;
// Update table based on cleared cards and mark all references to the other spaces.
- void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+ void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
- virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+ virtual void VisitObjects(ObjectCallback callback, void* arg) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
- void Verify() OVERRIDE
+ void Verify() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
- virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+ virtual bool ContainsCardFor(uintptr_t addr) override;
- virtual void Dump(std::ostream& os) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ virtual void Dump(std::ostream& os) override REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void SetCards() OVERRIDE;
+ virtual void SetCards() override;
- virtual void ClearTable() OVERRIDE;
+ virtual void ClearTable() override;
protected:
// Cleared card array, used to update the mod-union table.
@@ -172,27 +172,27 @@ class ModUnionTableCardCache : public ModUnionTable {
virtual ~ModUnionTableCardCache() {}
// Clear and store cards for a space.
- virtual void ProcessCards() OVERRIDE;
+ virtual void ProcessCards() override;
// Mark all references to the alloc space(s).
- virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+ virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+ virtual void VisitObjects(ObjectCallback callback, void* arg) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Nothing to verify.
- virtual void Verify() OVERRIDE {}
+ virtual void Verify() override {}
- virtual void Dump(std::ostream& os) OVERRIDE;
+ virtual void Dump(std::ostream& os) override;
- virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+ virtual bool ContainsCardFor(uintptr_t addr) override;
- virtual void SetCards() OVERRIDE;
+ virtual void SetCards() override;
- virtual void ClearTable() OVERRIDE;
+ virtual void ClearTable() override;
protected:
// Cleared card bitmap, used to update the mod-union table.
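
Editorial note on style, not part of the change: 'override' already implies the function is virtual, so the 'virtual' keyword retained on many declarations above is legal but redundant. A two-class illustration:

    struct Table {
      virtual ~Table() {}
      virtual void ProcessCards() = 0;
    };
    struct CachedTable : Table {
      void ProcessCards() override {}          // implicitly virtual
      // virtual void ProcessCards() override;  // equivalent, just noisier
    };
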
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index d59ff71676..5aa55506a5 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -98,12 +98,12 @@ class CollectVisitedVisitor : public MarkObjectVisitor {
public:
explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update ATTRIBUTE_UNUSED) OVERRIDE
+ bool do_atomic_update ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(ref != nullptr);
MarkObject(ref->AsMirrorPtr());
}
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* obj) override
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
out_->insert(obj);
@@ -122,7 +122,7 @@ class ModUnionTableRefCacheToSpace : public ModUnionTableReferenceCache {
space::ContinuousSpace* target_space)
: ModUnionTableReferenceCache(name, heap, space), target_space_(target_space) {}
- bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE {
+ bool ShouldAddReference(const mirror::Object* ref) const override {
return target_space_->HasAddress(ref);
}
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index a1d198652e..b9c1dc61b6 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -196,7 +196,7 @@ class AllocRecordStackVisitor : public StackVisitor {
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
- bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
if (trace_->GetDepth() >= max_depth_) {
return false;
}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c7a5f79cb2..f73ecf1c49 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -229,7 +229,7 @@ class ConcurrentCopying::ActivateReadBarrierEntrypointsCheckpoint : public Closu
explicit ActivateReadBarrierEntrypointsCheckpoint(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {}
- void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -250,7 +250,7 @@ class ConcurrentCopying::ActivateReadBarrierEntrypointsCallback : public Closure
explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a race with ThreadList::Register().
CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
@@ -393,7 +393,7 @@ class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor
: concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
}
- virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ virtual void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -467,7 +467,7 @@ class ConcurrentCopying::FlipCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ virtual void Run(Thread* thread) override REQUIRES(Locks::mutator_lock_) {
ConcurrentCopying* cc = concurrent_copying_;
TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
// Note: self is not necessarily equal to thread since thread may be suspended.
@@ -1072,7 +1072,7 @@ class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -1096,7 +1096,7 @@ class ConcurrentCopying::DisableMarkingCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a race with ThreadList::Register().
CHECK(concurrent_copying_->is_marking_);
@@ -1291,7 +1291,7 @@ class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor
}
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(root != nullptr);
operator()(root);
}
@@ -1457,7 +1457,7 @@ class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
disable_weak_ref_access_(disable_weak_ref_access) {
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -1727,7 +1727,7 @@ class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a deadlock b/31500969.
CHECK(concurrent_copying_->weak_ref_access_enabled_);
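
The concurrent_copying.cc hunks route per-thread work through Closure subclasses whose Run(Thread*) executes at a checkpoint. A reduced sketch, assuming only that every thread reaching the checkpoint runs the closure once (so shared state must be atomic); names are hypothetical:

    #include <atomic>

    struct Thread {};  // stand-in for art::Thread

    class Closure {
     public:
      virtual ~Closure() {}
      virtual void Run(Thread* thread) = 0;
    };

    class CountThreadsCheckpoint final : public Closure {
     public:
      void Run(Thread* /*thread*/) override {
        threads_seen_.fetch_add(1, std::memory_order_relaxed);
      }
      int threads_seen() const { return threads_seen_.load(); }
     private:
      std::atomic<int> threads_seen_{0};
    };
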
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 0ebe6f0c25..a956d3807a 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -71,7 +71,7 @@ class ConcurrentCopying : public GarbageCollector {
bool measure_read_barrier_slow_path = false);
~ConcurrentCopying();
- virtual void RunPhases() OVERRIDE
+ virtual void RunPhases() override
REQUIRES(!immune_gray_stack_lock_,
!mark_stack_lock_,
!rb_slow_path_histogram_lock_,
@@ -87,15 +87,15 @@ class ConcurrentCopying : public GarbageCollector {
void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return (kEnableGenerationalConcurrentCopyingCollection && young_gen_)
? kGcTypeSticky
: kGcTypePartial;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ virtual CollectorType GetCollectorType() const override {
return kCollectorTypeCC;
}
- virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
+ virtual void RevokeAllThreadLocalBuffers() override;
void SetRegionSpace(space::RegionSpace* region_space) {
DCHECK(region_space != nullptr);
region_space_ = region_space;
@@ -144,7 +144,7 @@ class ConcurrentCopying : public GarbageCollector {
void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
+ virtual mirror::Object* IsMarked(mirror::Object* from_ref) override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -167,7 +167,7 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
template<bool kGrayImmuneObject>
void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
@@ -175,12 +175,12 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
accounting::ObjectStack* GetAllocationStack();
accounting::ObjectStack* GetLiveStack();
- virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ virtual void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -205,20 +205,20 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES(!mark_stack_lock_);
void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
- ObjPtr<mirror::Reference> reference) OVERRIDE
+ ObjPtr<mirror::Reference> reference) override
REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
- virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* from_ref) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_);
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
@@ -293,7 +293,7 @@ class ConcurrentCopying : public GarbageCollector {
mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
+ void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
// Set the read barrier mark entrypoints to non-null.
void ActivateReadBarrierEntrypoints();
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 145bd0208d..677e3f3a05 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -167,19 +167,19 @@ class DummySpace : public space::ContinuousSpace {
end,
/*limit*/end) {}
- space::SpaceType GetType() const OVERRIDE {
+ space::SpaceType GetType() const override {
return space::kSpaceTypeMallocSpace;
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return nullptr;
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return nullptr;
}
};
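
DummySpace above instantiates an otherwise-abstract space by overriding the pure virtuals with trivial bodies; the nullptr bitmaps presumably work because the immune-spaces logic under test never dereferences them. A minimal sketch of that test-double idiom with illustrative names:

    class SpaceLike {
     public:
      virtual ~SpaceLike() {}
      virtual bool CanMoveObjects() const = 0;
      virtual const void* GetLiveBitmap() const = 0;
    };

    class TestSpace final : public SpaceLike {
     public:
      bool CanMoveObjects() const override { return false; }
      const void* GetLiveBitmap() const override { return nullptr; }
    };
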
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 58a75ee189..840a4b03dc 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -578,7 +578,7 @@ class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor {
public:
explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
- void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+ void VisitRoot(mirror::Object* root, const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
}
@@ -607,7 +607,7 @@ class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
public:
explicit VerifyRootVisitor(std::ostream& os) : os_(os) {}
- void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+ void VisitRoot(mirror::Object* root, const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// See if the root is on any space bitmap.
auto* heap = Runtime::Current()->GetHeap();
@@ -1110,7 +1110,7 @@ class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor {
explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
virtual mirror::Object* IsMarked(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
mark_sweep_->VerifyIsLive(obj);
return obj;
@@ -1144,7 +1144,7 @@ class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
@@ -1154,14 +1154,14 @@ class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
}
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
ScopedTrace trace("Marking thread roots");
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* const self = Thread::Current();
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index af2bb973c9..012e17932f 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -57,7 +57,7 @@ class MarkSweep : public GarbageCollector {
~MarkSweep() {}
- virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
+ virtual void RunPhases() override REQUIRES(!mark_stack_lock_);
void InitializePhase();
void MarkingPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
@@ -72,11 +72,11 @@ class MarkSweep : public GarbageCollector {
return is_concurrent_;
}
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return kGcTypeFull;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ virtual CollectorType GetCollectorType() const override {
return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
}
@@ -188,24 +188,24 @@ class MarkSweep : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
- const RootInfo& info) OVERRIDE
+ const RootInfo& info) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Marks an object.
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* obj) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -216,7 +216,7 @@ class MarkSweep : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -231,7 +231,7 @@ class MarkSweep : public GarbageCollector {
protected:
// Returns object if the object is marked in the heap bitmap, otherwise null.
- virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ virtual mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void MarkObjectNonNull(mirror::Object* obj,
@@ -279,7 +279,7 @@ class MarkSweep : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
virtual void ProcessMarkStack()
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 8b0d3ddf42..308699bf7e 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -26,7 +26,7 @@ namespace collector {
class PartialMarkSweep : public MarkSweep {
public:
// Virtual as overridden by StickyMarkSweep.
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return kGcTypePartial;
}
@@ -37,7 +37,7 @@ class PartialMarkSweep : public MarkSweep {
// Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
// collections, i.e. the Zygote space. Also mark this space as immune. Virtual as overridden by
// StickyMarkSweep.
- virtual void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ virtual void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index d1d45c8df6..49cd02e99a 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -63,7 +63,7 @@ class SemiSpace : public GarbageCollector {
~SemiSpace() {}
- virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+ virtual void RunPhases() override NO_THREAD_SAFETY_ANALYSIS;
virtual void InitializePhase();
virtual void MarkingPhase() REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
@@ -72,10 +72,10 @@ class SemiSpace : public GarbageCollector {
virtual void FinishPhase() REQUIRES(Locks::mutator_lock_);
void MarkReachableObjects()
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return kGcTypePartial;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ virtual CollectorType GetCollectorType() const override {
return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
}
@@ -106,11 +106,11 @@ class SemiSpace : public GarbageCollector {
void MarkObjectIfNotInToSpace(CompressedReferenceType* obj_ptr)
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* root) override
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void ScanObject(mirror::Object* obj)
@@ -145,11 +145,11 @@ class SemiSpace : public GarbageCollector {
void SweepSystemWeaks()
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info) OVERRIDE
+ const RootInfo& info) override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
@@ -162,12 +162,12 @@ class SemiSpace : public GarbageCollector {
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
- virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ virtual mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 45f912f63a..f92a103b13 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -24,9 +24,9 @@ namespace art {
namespace gc {
namespace collector {
-class StickyMarkSweep FINAL : public PartialMarkSweep {
+class StickyMarkSweep final : public PartialMarkSweep {
public:
- GcType GetGcType() const OVERRIDE {
+ GcType GetGcType() const override {
return kGcTypeSticky;
}
@@ -34,7 +34,7 @@ class StickyMarkSweep FINAL : public PartialMarkSweep {
~StickyMarkSweep() {}
virtual void MarkConcurrentRoots(VisitRootFlags flags)
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -42,15 +42,15 @@ class StickyMarkSweep FINAL : public PartialMarkSweep {
protected:
// Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the
// alloc space will be marked as immune.
- void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
void MarkReachableObjects()
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void Sweep(bool swap_bitmaps)
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bf06cf9758..16fd78630d 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1327,7 +1327,7 @@ class TrimIndirectReferenceTableClosure : public Closure {
public:
explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
thread->GetJniEnv()->TrimLocals();
// If thread is a running mutator, then act on behalf of the trim thread.
// See the code in ThreadList::RunCheckpoint.
@@ -2213,7 +2213,7 @@ void Heap::ChangeCollector(CollectorType collector_type) {
}
// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
-class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
+class ZygoteCompactingCollector final : public collector::SemiSpace {
public:
ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
: SemiSpace(heap, false, "zygote collector"),
@@ -2769,7 +2769,7 @@ class RootMatchesObjectVisitor : public SingleRootVisitor {
explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
void VisitRoot(mirror::Object* root, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == obj_) {
LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
}
@@ -2826,7 +2826,7 @@ class VerifyReferenceVisitor : public SingleRootVisitor {
root->AsMirrorPtr(), RootInfo(kRootVMInternal));
}
- virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
+ virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == nullptr) {
LOG(ERROR) << "Root is null with info " << root_info.GetType();
@@ -3259,10 +3259,10 @@ void Heap::ProcessCards(TimingLogger* timings,
}
struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
+ virtual mirror::Object* MarkObject(mirror::Object* obj) override {
return obj;
}
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
}
};
@@ -3633,7 +3633,7 @@ class Heap::ConcurrentGCTask : public HeapTask {
public:
ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
: HeapTask(target_time), cause_(cause), force_full_(force_full) {}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->ConcurrentGC(self, cause_, force_full_);
heap->ClearConcurrentGCRequest();
@@ -3691,7 +3691,7 @@ class Heap::CollectorTransitionTask : public HeapTask {
public:
explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->DoPendingCollectorTransition();
heap->ClearPendingCollectorTransition(self);
@@ -3733,7 +3733,7 @@ void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint
class Heap::HeapTrimTask : public HeapTask {
public:
explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->Trim(self);
heap->ClearPendingTrim(self);
@@ -4176,7 +4176,7 @@ void Heap::VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, si
class Heap::TriggerPostForkCCGcTask : public HeapTask {
public:
explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
// Trigger a GC, if not already done. The first GC after fork, whenever
// it takes place, will adjust the thresholds to normal levels.
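
The heap.cc hunks show several HeapTask subclasses whose Run(Thread*) fires when a target time is reached. A reduced sketch of that pattern, with hypothetical names and the scheduling machinery elided:

    #include <cstdint>

    struct Thread {};  // stand-in for art::Thread

    class Task {
     public:
      explicit Task(uint64_t target_time_ns) : target_time_ns_(target_time_ns) {}
      virtual ~Task() {}
      virtual void Run(Thread* self) = 0;
      uint64_t target_time_ns() const { return target_time_ns_; }
     private:
      const uint64_t target_time_ns_;
    };

    class TrimTask final : public Task {
     public:
      using Task::Task;
      void Run(Thread* /*self*/) override {
        // Would trim the heap here; body elided in this sketch.
      }
    };
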
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 8720a3e014..7cbad3b523 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -29,7 +29,7 @@ namespace gc {
class HeapTest : public CommonRuntimeTest {
public:
- void SetUp() OVERRIDE {
+ void SetUp() override {
MemMap::Init();
std::string error_msg;
// Reserve the preferred address to force the heap to use another one for testing.
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 9b315584fb..02e84b509e 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -35,11 +35,11 @@ namespace space {
// A bump pointer space allocates by incrementing a pointer; it doesn't provide a free
// implementation, as it's intended to be evacuated.
-class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
+class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeBumpPointerSpace;
}
@@ -51,27 +51,27 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_);
+ override REQUIRES(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(size_t num_bytes);
mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES_SHARED(Locks::mutator_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
// No-ops unless we support free lists.
- size_t Free(Thread*, mirror::Object*) OVERRIDE {
+ size_t Free(Thread*, mirror::Object*) override {
return 0;
}
- size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+ size_t FreeList(Thread*, size_t, mirror::Object**) override {
return 0;
}
@@ -94,16 +94,16 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return GetMemMap()->Size();
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return nullptr;
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return nullptr;
}
// Reset the space to empty.
- void Clear() OVERRIDE REQUIRES(!block_lock_);
+ void Clear() override REQUIRES(!block_lock_);
void Dump(std::ostream& os) const;
@@ -122,7 +122,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return Begin() == End();
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return true;
}
@@ -141,7 +141,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// Allocate a new TLAB, returns false if the allocation failed.
bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
- BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
+ BumpPointerSpace* AsBumpPointerSpace() override {
return this;
}
@@ -151,7 +151,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!block_lock_);
- accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;
// Record objects / bytes freed.
void RecordFree(int32_t objects, int32_t bytes) {
@@ -159,7 +159,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
bytes_allocated_.fetch_sub(bytes, std::memory_order_seq_cst);
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
// Object alignment within the space.
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 66537d5dac..09f3970408 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -53,36 +53,36 @@ class DlMallocSpace : public MallocSpace {
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
+ override REQUIRES(!lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_) {
+ override REQUIRES(!lock_) {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return AllocationSizeNonvirtual(obj, usable_size);
}
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ virtual size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
return num_bytes;
}
// DlMallocSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
@@ -103,23 +103,23 @@ class DlMallocSpace : public MallocSpace {
return mspace_;
}
- size_t Trim() OVERRIDE;
+ size_t Trim() override;
// Perform an mspace_inspect_all, which calls back for each allocation chunk. The chunk may not be
// in use, indicated by num_bytes equaling zero.
- void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+ void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
// Returns the number of bytes that the space has currently obtained from the system. This is
// greater or equal to the amount of live data in the space.
- size_t GetFootprint() OVERRIDE;
+ size_t GetFootprint() override;
// Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
- size_t GetFootprintLimit() OVERRIDE;
+ size_t GetFootprintLimit() override;
// Set the maximum number of bytes that the heap is allowed to obtain from the system via
// MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
// allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
- void SetFootprintLimit(size_t limit) OVERRIDE;
+ void SetFootprintLimit(size_t limit) override;
MallocSpace* CreateInstance(MemMap&& mem_map,
const std::string& name,
@@ -128,22 +128,22 @@ class DlMallocSpace : public MallocSpace {
uint8_t* end,
uint8_t* limit,
size_t growth_limit,
- bool can_move_objects) OVERRIDE;
+ bool can_move_objects) override;
- uint64_t GetBytesAllocated() OVERRIDE;
- uint64_t GetObjectsAllocated() OVERRIDE;
+ uint64_t GetBytesAllocated() override;
+ uint64_t GetObjectsAllocated() override;
- virtual void Clear() OVERRIDE;
+ virtual void Clear() override;
- bool IsDlMallocSpace() const OVERRIDE {
+ bool IsDlMallocSpace() const override {
return true;
}
- DlMallocSpace* AsDlMallocSpace() OVERRIDE {
+ DlMallocSpace* AsDlMallocSpace() override {
return this;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
@@ -165,7 +165,7 @@ class DlMallocSpace : public MallocSpace {
REQUIRES(lock_);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
- size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE {
+ size_t /*maximum_size*/, bool /*low_memory_mode*/) override {
return CreateMspace(base, morecore_start, initial_size);
}
static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);
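
The repeated "Virtual to allow MemoryToolMallocSpace to intercept" comments above describe one pattern: keep the slow path virtual so a checking subclass can wrap it, while a nonvirtual fast path stays statically bound. A minimal sketch with illustrative names:

    #include <cstdlib>

    class Allocator {
     public:
      virtual ~Allocator() {}
      // Kept virtual so a checking subclass can intercept.
      virtual void* Allocate(std::size_t n) { return AllocateNonvirtual(n); }
      // Fast path: statically bound, no vtable dispatch.
      void* AllocateNonvirtual(std::size_t n) { return std::malloc(n); }
    };

    class CheckedAllocator final : public Allocator {
     public:
      void* Allocate(std::size_t n) override {
        // Add red zones / bookkeeping here, then defer to the base class.
        return Allocator::Allocate(n);
      }
    };
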
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 20bce66957..93cf947218 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -86,11 +86,11 @@ class ImageSpace : public MemMapSpace {
return image_location_;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
// ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
// special cases to test against.
return live_bitmap_.get();
@@ -102,7 +102,7 @@ class ImageSpace : public MemMapSpace {
void Sweep(bool /* swap_bitmaps */, size_t* /* freed_objects */, size_t* /* freed_bytes */) {
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index a1ffa067d0..d93385de3a 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -113,7 +113,7 @@ TEST_F(DexoptTest, ValidateOatFile) {
template <bool kImage, bool kRelocate, bool kPatchoat, bool kImageDex2oat>
class ImageSpaceLoadingTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
if (kImage) {
options->emplace_back(android::base::StringPrintf("-Ximage:%s", GetCoreArtLocation().c_str()),
nullptr);
@@ -152,7 +152,7 @@ TEST_F(ImageSpaceNoRelocateNoDex2oatNoPatchoatTest, Test) {
class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false, true> {
protected:
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
const char* android_data = getenv("ANDROID_DATA");
CHECK(android_data != nullptr);
old_android_data_ = android_data;
@@ -172,7 +172,7 @@ class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false,
ImageSpaceLoadingTest<false, true, false, true>::SetUpRuntimeOptions(options);
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
int result = unlink(bad_dalvik_cache_.c_str());
CHECK_EQ(result, 0) << strerror(errno);
result = rmdir(bad_android_data_.c_str());
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 76ea9fda29..09d02518a3 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -39,12 +39,12 @@ namespace art {
namespace gc {
namespace space {
-class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+class MemoryToolLargeObjectMapSpace final : public LargeObjectMapSpace {
public:
explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
}
- ~MemoryToolLargeObjectMapSpace() OVERRIDE {
+ ~MemoryToolLargeObjectMapSpace() override {
// Historical note: We were deleting large objects to keep Valgrind happy if there were
// any large objects such as Dex cache arrays which aren't freed since they are held live
// by the class linker.
@@ -52,7 +52,7 @@ class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE {
+ override {
mirror::Object* obj =
LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
@@ -68,21 +68,21 @@ class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
return object_without_rdz;
}
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
}
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE {
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override {
return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
}
- size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
+ size_t Free(Thread* self, mirror::Object* obj) override {
mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
return LargeObjectMapSpace::Free(self, object_with_rdz);
}
- bool Contains(const mirror::Object* obj) const OVERRIDE {
+ bool Contains(const mirror::Object* obj) const override {
return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
}
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index b69bd91162..39ff2c3e43 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -41,7 +41,7 @@ enum class LargeObjectSpaceType {
// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
public:
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeLargeObjectSpace;
}
void SwapBitmaps();
@@ -49,10 +49,10 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
virtual ~LargeObjectSpace() {}
- uint64_t GetBytesAllocated() OVERRIDE {
+ uint64_t GetBytesAllocated() override {
return num_bytes_allocated_;
}
- uint64_t GetObjectsAllocated() OVERRIDE {
+ uint64_t GetObjectsAllocated() override {
return num_objects_allocated_;
}
uint64_t GetTotalBytesAllocated() const {
@@ -61,22 +61,22 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
uint64_t GetTotalObjectsAllocated() const {
return total_objects_allocated_;
}
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
// LargeObjectSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
- bool IsAllocSpace() const OVERRIDE {
+ bool IsAllocSpace() const override {
return true;
}
- AllocSpace* AsAllocSpace() OVERRIDE {
+ AllocSpace* AsAllocSpace() override {
return this;
}
collector::ObjectBytePair Sweep(bool swap_bitmaps);
- virtual bool CanMoveObjects() const OVERRIDE {
+ virtual bool CanMoveObjects() const override {
return false;
}
// Current address at which the space begins, which may vary as the space is filled.
@@ -96,7 +96,7 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return Begin() <= byte_obj && byte_obj < End();
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if the large object is a zygote large object. Potentially slow.
@@ -140,11 +140,11 @@ class LargeObjectMapSpace : public LargeObjectSpace {
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
REQUIRES(!lock_);
size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
- void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
- std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+ std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
protected:
struct LargeObject {
@@ -154,8 +154,8 @@ class LargeObjectMapSpace : public LargeObjectSpace {
explicit LargeObjectMapSpace(const std::string& name);
virtual ~LargeObjectMapSpace() {}
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
// Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -164,22 +164,22 @@ class LargeObjectMapSpace : public LargeObjectSpace {
};
// A continuous large object space with a free-list to handle holes.
-class FreeListSpace FINAL : public LargeObjectSpace {
+class FreeListSpace final : public LargeObjectSpace {
public:
static constexpr size_t kAlignment = kPageSize;
virtual ~FreeListSpace();
static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
- size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
- void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+ override REQUIRES(!lock_);
+ size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
void Dump(std::ostream& os) const REQUIRES(!lock_);
- std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+ std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
protected:
FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
@@ -198,8 +198,8 @@ class FreeListSpace FINAL : public LargeObjectSpace {
}
// Removes header from the free blocks set by finding the corresponding iterator and erasing it.
void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
class SortByPrevFree {
public:
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index e4a6f158ec..6bf2d71c7c 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -133,7 +133,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return can_move_objects_;
}
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index 32bd204354..33bddfa4c8 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -29,28 +29,28 @@ template <typename BaseMallocSpaceType,
size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
-class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
+class MemoryToolMallocSpace final : public BaseMallocSpaceType {
public:
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE;
+ override;
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_);
+ override REQUIRES(Locks::mutator_lock_);
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES_SHARED(Locks::mutator_lock_);
- void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
+ void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) override {}
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override;
template <typename... Params>
MemoryToolMallocSpace(MemMap&& mem_map, size_t initial_size, Params... params);
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 8ad26baff1..0bf4f38a4b 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -39,7 +39,7 @@ namespace space {
static constexpr bool kCyclicRegionAllocation = true;
// A space that consists of equal-sized regions.
-class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
+class RegionSpace final : public ContinuousMemMapAllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
@@ -49,7 +49,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
kEvacModeForceAll,
};
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeRegionSpace;
}
@@ -65,14 +65,14 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
/* out */ size_t* bytes_allocated,
/* out */ size_t* usable_size,
/* out */ size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!region_lock_);
+ override REQUIRES(!region_lock_);
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self,
size_t num_bytes,
/* out */ size_t* bytes_allocated,
/* out */ size_t* usable_size,
/* out */ size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+ override REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
// The main allocation routine.
template<bool kForEvac>
ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes,
@@ -90,29 +90,29 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
- size_t Free(Thread*, mirror::Object*) OVERRIDE {
+ size_t Free(Thread*, mirror::Object*) override {
UNIMPLEMENTED(FATAL);
return 0;
}
- size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+ size_t FreeList(Thread*, size_t, mirror::Object**) override {
UNIMPLEMENTED(FATAL);
return 0;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return mark_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return mark_bitmap_.get();
}
- void Clear() OVERRIDE REQUIRES(!region_lock_);
+ void Clear() override REQUIRES(!region_lock_);
// Remove read and write memory protection from the whole region space,
// i.e. make memory pages backing the region area not readable and not
@@ -188,7 +188,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return num_regions_;
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return true;
}
@@ -197,7 +197,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return byte_obj >= Begin() && byte_obj < Limit();
}
- RegionSpace* AsRegionSpace() OVERRIDE {
+ RegionSpace* AsRegionSpace() override {
return this;
}
@@ -212,10 +212,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
WalkInternal<true /* kToSpaceOnly */>(visitor);
}
- accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return nullptr;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
// Object alignment within the space.
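RegionSpace stubs Free and FreeList with UNIMPLEMENTED(FATAL) because a region-based space reclaims memory one region at a time during evacuation, never per object. A rough, non-ART bump-pointer region showing why a per-object Free has nothing to do:

    #include <cstddef>
    #include <cstdint>

    struct Region {
      uint8_t* begin;
      uint8_t* top;   // bump pointer: allocation only moves forward
      uint8_t* end;

      void* Alloc(size_t bytes) {
        if (static_cast<size_t>(end - top) < bytes) return nullptr;
        void* result = top;
        top += bytes;
        return result;
      }
      // The only way memory comes back: the collector evacuates live
      // objects elsewhere and resets the whole region at once.
      void Reclaim() { top = begin; }
    };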
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index c630826f48..5162a064d1 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -52,24 +52,24 @@ class RosAllocSpace : public MallocSpace {
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
+ override REQUIRES(!lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ override REQUIRES(Locks::mutator_lock_) {
return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return AllocationSizeNonvirtual<true>(obj, usable_size);
}
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -93,7 +93,7 @@ class RosAllocSpace : public MallocSpace {
// run without allocating a new run.
ALWAYS_INLINE mirror::Object* AllocThreadLocal(Thread* self, size_t num_bytes,
size_t* bytes_allocated);
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
return MaxBytesBulkAllocatedForNonvirtual(num_bytes);
}
ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);
@@ -107,13 +107,13 @@ class RosAllocSpace : public MallocSpace {
return rosalloc_;
}
- size_t Trim() OVERRIDE;
- void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
- size_t GetFootprint() OVERRIDE;
- size_t GetFootprintLimit() OVERRIDE;
- void SetFootprintLimit(size_t limit) OVERRIDE;
+ size_t Trim() override;
+ void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
+ size_t GetFootprint() override;
+ size_t GetFootprintLimit() override;
+ void SetFootprintLimit(size_t limit) override;
- void Clear() OVERRIDE;
+ void Clear() override;
MallocSpace* CreateInstance(MemMap&& mem_map,
const std::string& name,
@@ -122,10 +122,10 @@ class RosAllocSpace : public MallocSpace {
uint8_t* end,
uint8_t* limit,
size_t growth_limit,
- bool can_move_objects) OVERRIDE;
+ bool can_move_objects) override;
- uint64_t GetBytesAllocated() OVERRIDE;
- uint64_t GetObjectsAllocated() OVERRIDE;
+ uint64_t GetBytesAllocated() override;
+ uint64_t GetObjectsAllocated() override;
size_t RevokeThreadLocalBuffers(Thread* thread);
size_t RevokeAllThreadLocalBuffers();
@@ -135,11 +135,11 @@ class RosAllocSpace : public MallocSpace {
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
- bool IsRosAllocSpace() const OVERRIDE {
+ bool IsRosAllocSpace() const override {
return true;
}
- RosAllocSpace* AsRosAllocSpace() OVERRIDE {
+ RosAllocSpace* AsRosAllocSpace() override {
return this;
}
@@ -149,7 +149,7 @@ class RosAllocSpace : public MallocSpace {
virtual ~RosAllocSpace();
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE {
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override {
rosalloc_->LogFragmentationAllocFailure(os, failed_alloc_bytes);
}
@@ -174,7 +174,7 @@ class RosAllocSpace : public MallocSpace {
size_t* usable_size, size_t* bytes_tl_bulk_allocated);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
- size_t maximum_size, bool low_memory_mode) OVERRIDE {
+ size_t maximum_size, bool low_memory_mode) override {
return CreateRosAlloc(
base, morecore_start, initial_size, maximum_size, low_memory_mode, kRunningOnMemoryTool);
}
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 4e173a86f1..2fe1f82547 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -352,7 +352,7 @@ class DiscontinuousSpace : public Space {
return mark_bitmap_.get();
}
- virtual bool IsDiscontinuousSpace() const OVERRIDE {
+ virtual bool IsDiscontinuousSpace() const override {
return true;
}
@@ -409,14 +409,14 @@ class MemMapSpace : public ContinuousSpace {
// Used by the heap compaction interface to enable copying from one type of alloc space to another.
class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
public:
- bool IsAllocSpace() const OVERRIDE {
+ bool IsAllocSpace() const override {
return true;
}
- AllocSpace* AsAllocSpace() OVERRIDE {
+ AllocSpace* AsAllocSpace() override {
return this;
}
- bool IsContinuousMemMapAllocSpace() const OVERRIDE {
+ bool IsContinuousMemMapAllocSpace() const override {
return true;
}
ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
@@ -435,11 +435,11 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
// Clear the space back to an empty space.
virtual void Clear() = 0;
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return mark_bitmap_.get();
}
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 200c79f00c..1f73577a3a 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -27,7 +27,7 @@ namespace gc {
namespace space {
// A zygote space is a space which you cannot allocate into or free from.
-class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
+class ZygoteSpace final : public ContinuousMemMapAllocSpace {
public:
// Returns the remaining storage in the out_map field.
static ZygoteSpace* Create(const std::string& name,
@@ -38,28 +38,28 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
void Dump(std::ostream& os) const;
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeZygoteSpace;
}
- ZygoteSpace* AsZygoteSpace() OVERRIDE {
+ ZygoteSpace* AsZygoteSpace() override {
return this;
}
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE;
+ size_t Free(Thread* self, mirror::Object* ptr) override;
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
// ZygoteSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
@@ -71,13 +71,13 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
return objects_allocated_.load(std::memory_order_seq_cst);
}
- void Clear() OVERRIDE;
+ void Clear() override;
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 60105f4e4f..ef85b3942f 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -45,7 +45,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
}
virtual ~SystemWeakHolder() {}
- void Allow() OVERRIDE
+ void Allow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
CHECK(!kUseReadBarrier);
@@ -54,7 +54,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
new_weak_condition_.Broadcast(Thread::Current());
}
- void Disallow() OVERRIDE
+ void Disallow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
CHECK(!kUseReadBarrier);
@@ -62,7 +62,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
allow_new_system_weak_ = false;
}
- void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE
+ void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) override
REQUIRES(!allow_disallow_lock_) {
MutexLock mu(Thread::Current(), allow_disallow_lock_);
new_weak_condition_.Broadcast(Thread::Current());
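The Allow()/Disallow()/Broadcast() trio above gates access to system weaks while the GC processes them (and, per the CHECKs, is only exercised when read barriers are off). A rough standard-library analogue of the protocol, structure only, not ART's Mutex/ConditionVariable API:

    #include <condition_variable>
    #include <mutex>

    class WeakAccessGate {
     public:
      void Disallow() {                    // GC: block new weak accesses
        std::lock_guard<std::mutex> lock(mu_);
        allowed_ = false;
      }
      void Allow() {                       // GC done: release waiters
        std::lock_guard<std::mutex> lock(mu_);
        allowed_ = true;
        cv_.notify_all();                  // the Broadcast() step
      }
      void WaitUntilAllowed() {            // mutators: wait out the sweep
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return allowed_; });
      }
     private:
      std::mutex mu_;
      std::condition_variable cv_;
      bool allowed_ = true;
    };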
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index 897ab01251..07725b9a56 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -44,7 +44,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
disallow_count_(0),
sweep_count_(0) {}
- void Allow() OVERRIDE
+ void Allow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Allow();
@@ -52,7 +52,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
allow_count_++;
}
- void Disallow() OVERRIDE
+ void Disallow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Disallow();
@@ -60,7 +60,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
disallow_count_++;
}
- void Broadcast(bool broadcast_for_checkpoint) OVERRIDE
+ void Broadcast(bool broadcast_for_checkpoint) override
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Broadcast(broadcast_for_checkpoint);
@@ -70,7 +70,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
}
}
- void Sweep(IsMarkedVisitor* visitor) OVERRIDE
+ void Sweep(IsMarkedVisitor* visitor) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
MutexLock mu(Thread::Current(), allow_disallow_lock_);
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 38581ce807..caa8802823 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -33,7 +33,7 @@ class RecursiveTask : public HeapTask {
: HeapTask(NanoTime() + MsToNs(10)), task_processor_(task_processor), counter_(counter),
max_recursion_(max_recursion) {
}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
if (max_recursion_ > 0) {
task_processor_->AddTask(self,
new RecursiveTask(task_processor_, counter_, max_recursion_ - 1));
@@ -52,7 +52,7 @@ class WorkUntilDoneTask : public SelfDeletingTask {
WorkUntilDoneTask(TaskProcessor* task_processor, Atomic<bool>* done_running)
: task_processor_(task_processor), done_running_(done_running) {
}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
task_processor_->RunAllTasks(self);
done_running_->store(true, std::memory_order_seq_cst);
}
@@ -105,7 +105,7 @@ class TestOrderTask : public HeapTask {
TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
: HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
}
- virtual void Run(Thread* thread ATTRIBUTE_UNUSED) OVERRIDE {
+ virtual void Run(Thread* thread ATTRIBUTE_UNUSED) override {
ASSERT_EQ(*counter_, expected_counter_);
++*counter_;
}
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index d6a2fa0cb5..5d234eaac3 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -198,7 +198,7 @@ class Verification::CollectRootVisitor : public SingleRootVisitor {
CollectRootVisitor(ObjectSet* visited, WorkQueue* work) : visited_(visited), work_(work) {}
void VisitRoot(mirror::Object* obj, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (obj != nullptr && visited_->insert(obj).second) {
std::ostringstream oss;
oss << info.ToString() << " = " << obj << "(" << obj->PrettyTypeOf() << ")";
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 986e28ec79..0bd43f95c0 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -133,7 +133,7 @@ class RootVisitor {
// critical.
class SingleRootVisitor : public RootVisitor {
private:
- void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(*roots[i], info);
@@ -141,7 +141,7 @@ class SingleRootVisitor : public RootVisitor {
}
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info) OVERRIDE
+ const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(roots[i]->AsMirrorPtr(), info);
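SingleRootVisitor is a fan-out adapter: both bulk VisitRoots() overloads loop and delegate to one per-root hook, so subclasses implement only VisitRoot(). The same shape reduced to illustrative types:

    #include <cstddef>

    struct BulkVisitor {
      virtual ~BulkVisitor() {}
      virtual void VisitAll(int** items, size_t count) = 0;
    };

    // Subclasses override only VisitOne(); the bulk entry point becomes a
    // loop, matching SingleRootVisitor's private VisitRoots() overloads.
    struct SingleItemVisitor : BulkVisitor {
      void VisitAll(int** items, size_t count) override {
        for (size_t i = 0; i < count; ++i) {
          VisitOne(*items[i]);
        }
      }
      virtual void VisitOne(int item) = 0;
    };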
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 28a230291d..9eaf1ec71a 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -259,7 +259,7 @@ class PACKED(4) FixedSizeHandleScope : public HandleScope {
// Scoped handle storage of a fixed size that is stack allocated.
template<size_t kNumReferences>
-class PACKED(4) StackHandleScope FINAL : public FixedSizeHandleScope<kNumReferences> {
+class PACKED(4) StackHandleScope final : public FixedSizeHandleScope<kNumReferences> {
public:
explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
ALWAYS_INLINE ~StackHandleScope();
diff --git a/runtime/hidden_api_test.cc b/runtime/hidden_api_test.cc
index a41d28492d..4c7efe666f 100644
--- a/runtime/hidden_api_test.cc
+++ b/runtime/hidden_api_test.cc
@@ -27,7 +27,7 @@ using hiddenapi::GetActionFromAccessFlags;
class HiddenApiTest : public CommonRuntimeTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
// Do the normal setup.
CommonRuntimeTest::SetUp();
self_ = Thread::Current();
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 3f44928e3a..e8a47d1087 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -303,7 +303,7 @@ class EndianOutputBuffered : public EndianOutput {
}
virtual ~EndianOutputBuffered() {}
- void UpdateU4(size_t offset, uint32_t new_value) OVERRIDE {
+ void UpdateU4(size_t offset, uint32_t new_value) override {
DCHECK_LE(offset, length_ - 4);
buffer_[offset + 0] = static_cast<uint8_t>((new_value >> 24) & 0xFF);
buffer_[offset + 1] = static_cast<uint8_t>((new_value >> 16) & 0xFF);
@@ -312,12 +312,12 @@ class EndianOutputBuffered : public EndianOutput {
}
protected:
- void HandleU1List(const uint8_t* values, size_t count) OVERRIDE {
+ void HandleU1List(const uint8_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
buffer_.insert(buffer_.end(), values, values + count);
}
- void HandleU1AsU2List(const uint8_t* values, size_t count) OVERRIDE {
+ void HandleU1AsU2List(const uint8_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
// All 8-bits are grouped in 2 to make 16-bit block like Java Char
if (count & 1) {
@@ -330,7 +330,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleU2List(const uint16_t* values, size_t count) OVERRIDE {
+ void HandleU2List(const uint16_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
for (size_t i = 0; i < count; ++i) {
uint16_t value = *values;
@@ -340,7 +340,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleU4List(const uint32_t* values, size_t count) OVERRIDE {
+ void HandleU4List(const uint32_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
for (size_t i = 0; i < count; ++i) {
uint32_t value = *values;
@@ -352,7 +352,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleU8List(const uint64_t* values, size_t count) OVERRIDE {
+ void HandleU8List(const uint64_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
for (size_t i = 0; i < count; ++i) {
uint64_t value = *values;
@@ -368,7 +368,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleEndRecord() OVERRIDE {
+ void HandleEndRecord() override {
DCHECK_EQ(buffer_.size(), length_);
if (kIsDebugBuild && started_) {
uint32_t stored_length =
@@ -388,7 +388,7 @@ class EndianOutputBuffered : public EndianOutput {
std::vector<uint8_t> buffer_;
};
-class FileEndianOutput FINAL : public EndianOutputBuffered {
+class FileEndianOutput final : public EndianOutputBuffered {
public:
FileEndianOutput(File* fp, size_t reserved_size)
: EndianOutputBuffered(reserved_size), fp_(fp), errors_(false) {
@@ -402,7 +402,7 @@ class FileEndianOutput FINAL : public EndianOutputBuffered {
}
protected:
- void HandleFlush(const uint8_t* buffer, size_t length) OVERRIDE {
+ void HandleFlush(const uint8_t* buffer, size_t length) override {
if (!errors_) {
errors_ = !fp_->WriteFully(buffer, length);
}
@@ -413,14 +413,14 @@ class FileEndianOutput FINAL : public EndianOutputBuffered {
bool errors_;
};
-class VectorEndianOuputput FINAL : public EndianOutputBuffered {
+class VectorEndianOuputput final : public EndianOutputBuffered {
public:
VectorEndianOuputput(std::vector<uint8_t>& data, size_t reserved_size)
: EndianOutputBuffered(reserved_size), full_data_(data) {}
~VectorEndianOuputput() {}
protected:
- void HandleFlush(const uint8_t* buf, size_t length) OVERRIDE {
+ void HandleFlush(const uint8_t* buf, size_t length) override {
size_t old_size = full_data_.size();
full_data_.resize(old_size + length);
memcpy(full_data_.data() + old_size, buf, length);
@@ -604,7 +604,7 @@ class Hprof : public SingleRootVisitor {
}
void VisitRoot(mirror::Object* obj, const RootInfo& root_info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ override REQUIRES_SHARED(Locks::mutator_lock_);
void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag,
uint32_t thread_serial);
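EndianOutputBuffered serializes every multi-byte value most-significant byte first (the HPROF format is big-endian regardless of host byte order), which is why UpdateU4 and the HandleU*List methods shift and mask manually. The same encoding as a standalone helper:

    #include <cstdint>
    #include <vector>

    // Append a 32-bit value in big-endian order, as UpdateU4 and
    // HandleU4List do above.
    void AppendU4BigEndian(std::vector<uint8_t>& buf, uint32_t value) {
      buf.push_back(static_cast<uint8_t>((value >> 24) & 0xFF));
      buf.push_back(static_cast<uint8_t>((value >> 16) & 0xFF));
      buf.push_back(static_cast<uint8_t>((value >> 8) & 0xFF));
      buf.push_back(static_cast<uint8_t>(value & 0xFF));
    }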
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4196e19383..b42433cad3 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -85,7 +85,7 @@ class InstallStubsClassVisitor : public ClassVisitor {
explicit InstallStubsClassVisitor(Instrumentation* instrumentation)
: instrumentation_(instrumentation) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
instrumentation_->InstallStubsForClass(klass.Ptr());
return true; // we visit all classes.
}
@@ -264,7 +264,7 @@ void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
// existing instrumentation frames.
static void InstrumentationInstallStack(Thread* thread, void* arg)
REQUIRES_SHARED(Locks::mutator_lock_) {
- struct InstallStackVisitor FINAL : public StackVisitor {
+ struct InstallStackVisitor final : public StackVisitor {
InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
: StackVisitor(thread_in, context, kInstrumentationStackWalk),
instrumentation_stack_(thread_in->GetInstrumentationStack()),
@@ -273,7 +273,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
last_return_pc_(0) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m == nullptr) {
if (kVerboseInstrumentation) {
@@ -429,7 +429,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
REQUIRES(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- struct RestoreStackVisitor FINAL : public StackVisitor {
+ struct RestoreStackVisitor final : public StackVisitor {
RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
: StackVisitor(thread_in, nullptr, kInstrumentationStackWalk),
@@ -439,7 +439,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
instrumentation_stack_(thread_in->GetInstrumentationStack()),
frames_removed_(0) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (instrumentation_stack_->size() == 0) {
return false; // Stop.
}
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 8ac26afe9f..9146245895 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -36,7 +36,7 @@
namespace art {
namespace instrumentation {
-class TestInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
+class TestInstrumentationListener final : public instrumentation::InstrumentationListener {
public:
TestInstrumentationListener()
: received_method_enter_event(false),
@@ -59,7 +59,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_enter_event = true;
}
@@ -68,7 +68,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
Handle<mirror::Object> return_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_exit_object_event = true;
}
@@ -77,7 +77,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const JValue& return_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_exit_event = true;
}
@@ -85,7 +85,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_unwind_event = true;
}
@@ -93,7 +93,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t new_dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_dex_pc_moved_event = true;
}
@@ -102,7 +102,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_read_event = true;
}
@@ -112,7 +112,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED,
Handle<mirror::Object> field_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_written_object_event = true;
}
@@ -122,19 +122,19 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED,
const JValue& field_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_written_event = true;
}
void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_exception_thrown_event = true;
}
void ExceptionHandled(Thread* self ATTRIBUTE_UNUSED,
Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_exception_handled_event = true;
}
@@ -142,7 +142,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_branch_event = true;
}
@@ -151,12 +151,12 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* caller ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtMethod* callee ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_invoke_virtual_or_interface_event = true;
}
void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED, const ShadowFrame& frame ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_watched_frame_pop = true;
}
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index b56c48d78c..8b4fe44c15 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -86,7 +86,7 @@ TEST_F(InternTableTest, CrossHash) {
class TestPredicate : public IsMarkedVisitor {
public:
- mirror::Object* IsMarked(mirror::Object* s) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ mirror::Object* IsMarked(mirror::Object* s) override REQUIRES_SHARED(Locks::mutator_lock_) {
bool erased = false;
for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
if (*it == s) {
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
index 25ac6e2a31..452a76b89a 100644
--- a/runtime/java_frame_root_info.h
+++ b/runtime/java_frame_root_info.h
@@ -27,12 +27,12 @@ namespace art {
class StackVisitor;
-class JavaFrameRootInfo FINAL : public RootInfo {
+class JavaFrameRootInfo final : public RootInfo {
public:
JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
: RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
}
- void Describe(std::ostream& os) const OVERRIDE
+ void Describe(std::ostream& os) const override
REQUIRES_SHARED(Locks::mutator_lock_);
size_t GetVReg() const {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ed449b5433..a6bc029828 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -399,7 +399,7 @@ void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
struct CollectClasses : public ClassVisitor {
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
classes_.push_back(klass.Ptr());
return true;
}
@@ -576,7 +576,7 @@ void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
memory_use_.AddValue(bytes);
}
-class JitCompileTask FINAL : public Task {
+class JitCompileTask final : public Task {
public:
enum TaskKind {
kAllocateProfile,
@@ -596,7 +596,7 @@ class JitCompileTask FINAL : public Task {
soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
ScopedObjectAccess soa(self);
if (kind_ == kCompile) {
Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
@@ -611,7 +611,7 @@ class JitCompileTask FINAL : public Task {
ProfileSaver::NotifyJitActivity();
}
- void Finalize() OVERRIDE {
+ void Finalize() override {
delete this;
}
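JitCompileTask::Finalize() uses the self-deleting task idiom (task_processor_test.cc earlier in this patch names it directly: SelfDeletingTask): the pool invokes Finalize() after Run(), letting heap-allocated tasks reclaim themselves. A minimal sketch:

    struct Task {
      virtual ~Task() {}
      virtual void Run() = 0;
      virtual void Finalize() {}  // default: the submitter owns the task
    };

    struct SelfDeletingTask : Task {
      void Run() override { /* do the work */ }
      void Finalize() override { delete this; }  // reclaim ourselves
    };

    // How a pool worker drives it; no member access after Finalize().
    void Execute(Task* task) {
      task->Run();
      task->Finalize();
    }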
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index d9c7900577..33adc18673 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1088,14 +1088,14 @@ size_t JitCodeCache::ReserveData(Thread* self,
}
}
-class MarkCodeVisitor FINAL : public StackVisitor {
+class MarkCodeVisitor final : public StackVisitor {
public:
MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
code_cache_(code_cache_in),
bitmap_(code_cache_->GetLiveBitmap()) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
if (method_header == nullptr) {
return true;
@@ -1113,12 +1113,12 @@ class MarkCodeVisitor FINAL : public StackVisitor {
CodeCacheBitmap* const bitmap_;
};
-class MarkCodeClosure FINAL : public Closure {
+class MarkCodeClosure final : public Closure {
public:
MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
: code_cache_(code_cache), barrier_(barrier) {}
- void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK(thread == Thread::Current() || thread->IsSuspended());
MarkCodeVisitor visitor(thread, code_cache_);
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 6ccda8b0bb..d9ef922390 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -255,7 +255,7 @@ class GetClassLoadersVisitor : public ClassLoaderVisitor {
class_loaders_(class_loaders) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
class_loaders_->push_back(hs_->NewHandle(class_loader));
}
diff --git a/runtime/jit/profiling_info_test.cc b/runtime/jit/profiling_info_test.cc
index 8424610cf8..f695c8fd9b 100644
--- a/runtime/jit/profiling_info_test.cc
+++ b/runtime/jit/profiling_info_test.cc
@@ -40,7 +40,7 @@ static constexpr size_t kMaxMethodIds = 65535;
class ProfileCompilationInfoTest : public CommonRuntimeTest {
public:
- void PostRuntimeCreate() OVERRIDE {
+ void PostRuntimeCreate() override {
allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
}
diff --git a/runtime/jni/java_vm_ext_test.cc b/runtime/jni/java_vm_ext_test.cc
index 74e4a30905..fe1c168db2 100644
--- a/runtime/jni/java_vm_ext_test.cc
+++ b/runtime/jni/java_vm_ext_test.cc
@@ -34,7 +34,7 @@ class JavaVmExtTest : public CommonRuntimeTest {
}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
CommonRuntimeTest::TearDown();
}
@@ -137,7 +137,7 @@ TEST_F(JavaVmExtTest, DetachCurrentThread) {
class JavaVmExtStackTraceTest : public JavaVmExtTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
options->emplace_back("-XX:GlobalRefAllocStackTraceLimit=50000", nullptr);
}
};
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index a25049e681..a4b151a7e7 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -84,7 +84,7 @@ class JniInternalTest : public CommonCompilerTest {
}
}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
CleanUpJniEnv();
CommonCompilerTest::TearDown();
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index c3e167c306..811ee515d3 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -63,7 +63,7 @@ using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
// C++ mirror of java.lang.Class
-class MANAGED Class FINAL : public Object {
+class MANAGED Class final : public Object {
public:
// A magic value for reference_instance_offsets_. Ignore the bits and walk the super chain when
// this is the value.
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 87f4f0ab7b..ba91e4fd58 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -141,7 +141,7 @@ using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
// C++ mirror of java.lang.DexCache.
-class MANAGED DexCache FINAL : public Object {
+class MANAGED DexCache final : public Object {
public:
// Size of java.lang.DexCache.class.
static uint32_t ClassSize(PointerSize pointer_size);
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 7a70cae1ef..528740bea9 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -34,7 +34,7 @@ class DexCacheTest : public CommonRuntimeTest {};
class DexCacheMethodHandlesTest : public DexCacheTest {
protected:
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonRuntimeTest::SetUpRuntimeOptions(options);
}
};
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index d72c7866c5..9e3c9af86d 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -23,7 +23,7 @@
namespace art {
namespace mirror {
-class MANAGED IfTable FINAL : public ObjectArray<Object> {
+class MANAGED IfTable final : public ObjectArray<Object> {
public:
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
diff --git a/runtime/mirror/proxy.h b/runtime/mirror/proxy.h
index db511d6425..7775de35d2 100644
--- a/runtime/mirror/proxy.h
+++ b/runtime/mirror/proxy.h
@@ -26,7 +26,7 @@ struct ProxyOffsets;
namespace mirror {
// C++ mirror of java.lang.reflect.Proxy.
-class MANAGED Proxy FINAL : public Object {
+class MANAGED Proxy final : public Object {
private:
HeapReference<Object> h_;
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 55a2ef0b49..37ac57587f 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -27,7 +27,7 @@ struct StackTraceElementOffsets;
namespace mirror {
// C++ mirror of java.lang.StackTraceElement
-class MANAGED StackTraceElement FINAL : public Object {
+class MANAGED StackTraceElement final : public Object {
public:
String* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 0e2fc903b5..d08717ca82 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -40,7 +40,7 @@ enum class StringCompressionFlag : uint32_t {
};
// C++ mirror of java.lang.String
-class MANAGED String FINAL : public Object {
+class MANAGED String final : public Object {
public:
// Size of java.lang.String.class.
static uint32_t ClassSize(PointerSize pointer_size);
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index 56c953b816..864e1eab73 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -353,7 +353,7 @@ inline void StoreResult(ObjPtr<Object> value, JValue* result)
//
template <typename T>
-class JValueByteSwapper FINAL {
+class JValueByteSwapper final {
public:
static void ByteSwap(JValue* value);
static void MaybeByteSwap(bool byte_swap, JValue* value) {
@@ -392,7 +392,7 @@ class AtomicGetAccessor : public Object::Accessor<T> {
public:
explicit AtomicGetAccessor(JValue* result) : result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
StoreResult(atom->load(MO), result_);
}
@@ -406,7 +406,7 @@ class AtomicSetAccessor : public Object::Accessor<T> {
public:
explicit AtomicSetAccessor(T new_value) : new_value_(new_value) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
atom->store(new_value_, MO);
}
@@ -431,7 +431,7 @@ class AtomicStrongCompareAndSetAccessor : public Object::Accessor<T> {
AtomicStrongCompareAndSetAccessor(T expected_value, T desired_value, JValue* result)
: expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
bool success = atom->compare_exchange_strong(expected_value_, desired_value_, MOS, MOF);
StoreResult(success ? JNI_TRUE : JNI_FALSE, result_);
@@ -453,7 +453,7 @@ class AtomicStrongCompareAndExchangeAccessor : public Object::Accessor<T> {
AtomicStrongCompareAndExchangeAccessor(T expected_value, T desired_value, JValue* result)
: expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
atom->compare_exchange_strong(expected_value_, desired_value_, MOS, MOF);
StoreResult(expected_value_, result_);
@@ -475,7 +475,7 @@ class AtomicWeakCompareAndSetAccessor : public Object::Accessor<T> {
AtomicWeakCompareAndSetAccessor(T expected_value, T desired_value, JValue* result)
: expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
bool success = atom->compare_exchange_weak(expected_value_, desired_value_, MOS, MOF);
StoreResult(success ? JNI_TRUE : JNI_FALSE, result_);
@@ -496,7 +496,7 @@ class AtomicGetAndSetAccessor : public Object::Accessor<T> {
public:
AtomicGetAndSetAccessor(T new_value, JValue* result) : new_value_(new_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->exchange(new_value_, MO);
StoreResult(old_value, result_);
@@ -540,7 +540,7 @@ class AtomicGetAndAddAccessor : public Object::Accessor<T> {
public:
AtomicGetAndAddAccessor(T addend, JValue* result) : addend_(addend), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
constexpr bool kIsFloatingPoint = std::is_floating_point<T>::value;
T old_value = AtomicGetAndAddOperator<T, kIsFloatingPoint, MO>::Apply(addr, addend_);
StoreResult(old_value, result_);
@@ -562,7 +562,7 @@ class AtomicGetAndAddWithByteSwapAccessor : public Object::Accessor<T> {
public:
AtomicGetAndAddWithByteSwapAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* const atom = reinterpret_cast<std::atomic<T>*>(addr);
T current_value = atom->load(std::memory_order_relaxed);
T sum;
@@ -591,7 +591,7 @@ class AtomicGetAndBitwiseOrAccessor : public Object::Accessor<T> {
public:
AtomicGetAndBitwiseOrAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->fetch_or(value_, MO);
StoreResult(old_value, result_);
@@ -610,7 +610,7 @@ class AtomicGetAndBitwiseAndAccessor : public Object::Accessor<T> {
public:
AtomicGetAndBitwiseAndAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->fetch_and(value_, MO);
StoreResult(old_value, result_);
@@ -630,7 +630,7 @@ class AtomicGetAndBitwiseXorAccessor : public Object::Accessor<T> {
public:
AtomicGetAndBitwiseXorAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->fetch_xor(value_, MO);
StoreResult(old_value, result_);
@@ -679,7 +679,7 @@ class TypeAdaptorAccessor : public Object::Accessor<T> {
explicit TypeAdaptorAccessor(Object::Accessor<U>* inner_accessor)
: inner_accessor_(inner_accessor) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
static_assert(sizeof(T) == sizeof(U), "bad conversion");
inner_accessor_->Access(reinterpret_cast<U*>(addr));
}
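Each accessor above views a raw field address as a std::atomic<T> and runs one atomic operation against it, parameterized by the success/failure memory orders MOS and MOF. The compare-and-set case as a standalone reduction, assuming sequentially consistent orders (the reinterpret_cast mirrors the source):

    #include <atomic>

    template <typename T>
    bool StrongCompareAndSet(T* addr, T expected, T desired) {
      std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
      // Returns true on success; on failure compare_exchange_strong writes
      // the current value back into 'expected', which the compare-and-
      // exchange accessor variant above then reports to the caller.
      return atom->compare_exchange_strong(expected, desired,
                                           std::memory_order_seq_cst,
                                           std::memory_order_seq_cst);
    }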
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index d47bc0d12e..6e5786a6c3 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -184,7 +184,7 @@ bool Monitor::Install(Thread* self) {
if (locking_method_ != nullptr && UNLIKELY(locking_method_->IsProxyMethod())) {
// Grab another frame. Proxy methods are not helpful for lock profiling. This should be rare
// enough that it's OK to walk the stack twice.
- struct NextMethodVisitor FINAL : public StackVisitor {
+ struct NextMethodVisitor final : public StackVisitor {
explicit NextMethodVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread,
nullptr,
@@ -193,7 +193,7 @@ bool Monitor::Install(Thread* self) {
count_(0),
method_(nullptr),
dex_pc_(0) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
// Continue if this is a runtime method.
@@ -271,7 +271,7 @@ void Monitor::SetObject(mirror::Object* object) {
// Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
-struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
+struct NthCallerWithDexPcVisitor final : public StackVisitor {
explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -279,7 +279,7 @@ struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
dex_pc_(0),
current_frame_number_(0),
wanted_frame_number_(frame) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m == nullptr || m->IsRuntimeMethod()) {
// Runtime method, upcall, or resolution issue. Skip.
@@ -514,7 +514,7 @@ void Monitor::Lock(Thread* self) {
if (should_dump_stacks) {
// Very long contention. Dump stacks.
struct CollectStackTrace : public Closure {
- void Run(art::Thread* thread) OVERRIDE
+ void Run(art::Thread* thread) override
REQUIRES_SHARED(art::Locks::mutator_lock_) {
thread->DumpJavaStack(oss);
}
@@ -1574,7 +1574,7 @@ class MonitorDeflateVisitor : public IsMarkedVisitor {
public:
MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
- virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ virtual mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Monitor::Deflate(self_, object)) {
DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
diff --git a/runtime/monitor_objects_stack_visitor.h b/runtime/monitor_objects_stack_visitor.h
index 5c962c3b26..c943402126 100644
--- a/runtime/monitor_objects_stack_visitor.h
+++ b/runtime/monitor_objects_stack_visitor.h
@@ -54,7 +54,7 @@ class MonitorObjectsStackVisitor : public StackVisitor {
kEndStackWalk,
};
- bool VisitFrame() FINAL REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() final REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
return true;
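Here FINAL lowers to a method-level `final` rather than the class-level one seen elsewhere in the patch: MonitorObjectsStackVisitor seals VisitFrame() while remaining derivable, so subclasses customize through other virtuals. The shape, with illustrative names:

    struct StackWalkerBase {
      virtual ~StackWalkerBase() {}
      virtual bool VisitFrame() = 0;
    };

    struct TemplateWalker : StackWalkerBase {
      bool VisitFrame() final {      // subclasses cannot re-override this
        return HandleFrame();        // ...they implement the hook instead
      }
      virtual bool HandleFrame() = 0;
    };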
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index bff8d7678c..c88748ffb8 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -34,7 +34,7 @@ namespace art {
class MonitorTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use a smaller heap
SetUpRuntimeOptionsForFillHeap(options);
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 7ac4086362..6f98a6d381 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -207,7 +207,7 @@ static void VMDebug_printLoadedClasses(JNIEnv* env, jclass, jint flags) {
public:
explicit DumpClassVisitor(int dump_flags) : flags_(dump_flags) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
klass->DumpClass(LOG_STREAM(ERROR), flags_);
return true;
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 9b3fd16ac0..0e619407e5 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -332,7 +332,7 @@ class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::String> string = root->AsString();
table_->operator[](string->ToModifiedUtf8()) = string;
}
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 5b47eaca86..72dae4791c 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -120,9 +120,9 @@ static void DoCollectNonDebuggableCallback(Thread* thread, void* data)
: StackVisitor(t, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
class_set_(class_set) {}
- ~NonDebuggableStacksVisitor() OVERRIDE {}
+ ~NonDebuggableStacksVisitor() override {}
- bool VisitFrame() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true;
}
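The ~NonDebuggableStacksVisitor() override above is a less common use of the specifier: override is legal on a destructor because a derived destructor overrides a virtual base destructor. A hedged sketch with hypothetical names:

class BaseVisitor {
 public:
  virtual ~BaseVisitor() {}
};

class DerivedVisitor final : public BaseVisitor {
 public:
  // Legal: ~DerivedVisitor overrides the virtual ~BaseVisitor. Should
  // ~BaseVisitor ever lose its 'virtual', this line becomes a compile error
  // instead of deletion through BaseVisitor* silently becoming undefined.
  ~DerivedVisitor() override {}
};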
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index 9c777cc277..496a6f3d09 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -21,22 +21,22 @@
namespace art {
-class NoopCompilerCallbacks FINAL : public CompilerCallbacks {
+class NoopCompilerCallbacks final : public CompilerCallbacks {
public:
NoopCompilerCallbacks() : CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp) {}
~NoopCompilerCallbacks() {}
- void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {
+ void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {
}
- void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
+ void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
// This is only used by compilers which need to be able to run without relocation even when it
// would normally be enabled. For example, the patchoat executable and dex2oat --image both need
// to disable relocation, since both deal with writing out the images directly.
- bool IsRelocationPossible() OVERRIDE { return false; }
+ bool IsRelocationPossible() override { return false; }
- verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return nullptr; }
+ verifier::VerifierDeps* GetVerifierDeps() const override { return nullptr; }
private:
DISALLOW_COPY_AND_ASSIGN(NoopCompilerCallbacks);
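On no-op stubs like these, override is more than style: if a signature in the CompilerCallbacks interface drifts (a parameter added, a const dropped), every stub marked override becomes a hard compile error instead of silently turning into a fresh, never-called virtual. A sketch of that failure mode, with hypothetical names:

class CallbacksBase {  // hypothetical stand-in for CompilerCallbacks
 public:
  virtual ~CallbacksBase() {}
  virtual bool IsRelocationPossible() { return true; }
};

class NoopCallbacks final : public CallbacksBase {
 public:
  // Matches the base signature exactly, so this compiles.
  bool IsRelocationPossible() override { return false; }

  // Would NOT compile if uncommented: no base virtual has this const
  // signature, so 'override' turns the mismatch into an error.
  // bool IsRelocationPossible() const override { return false; }
};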
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index c7daef8310..4780aea003 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -889,7 +889,7 @@ bool OatFileBase::Setup(int zip_fd, const char* abs_dex_location, std::string* e
// OatFile via dlopen //
////////////////////////
-class DlOpenOatFile FINAL : public OatFileBase {
+class DlOpenOatFile final : public OatFileBase {
public:
DlOpenOatFile(const std::string& filename, bool executable)
: OatFileBase(filename, executable),
@@ -911,7 +911,7 @@ class DlOpenOatFile FINAL : public OatFileBase {
protected:
const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
const uint8_t* ptr =
reinterpret_cast<const uint8_t*>(dlsym(dlopen_handle_, symbol_name.c_str()));
if (ptr == nullptr) {
@@ -920,21 +920,21 @@ class DlOpenOatFile FINAL : public OatFileBase {
return ptr;
}
- void PreLoad() OVERRIDE;
+ void PreLoad() override;
bool Load(const std::string& elf_filename,
uint8_t* oat_file_begin,
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) OVERRIDE;
+ std::string* error_msg) override;
bool Load(int, uint8_t*, bool, bool, bool, std::string*) {
return false;
}
// Ask the linker where it mmaped the file and notify our mmap wrapper of the regions.
- void PreSetup(const std::string& elf_filename) OVERRIDE;
+ void PreSetup(const std::string& elf_filename) override;
private:
bool Dlopen(const std::string& elf_filename,
@@ -1156,7 +1156,7 @@ void DlOpenOatFile::PreSetup(const std::string& elf_filename) {
// OatFile via our own ElfFile implementation //
////////////////////////////////////////////////
-class ElfOatFile FINAL : public OatFileBase {
+class ElfOatFile final : public OatFileBase {
public:
ElfOatFile(const std::string& filename, bool executable) : OatFileBase(filename, executable) {}
@@ -1179,7 +1179,7 @@ class ElfOatFile FINAL : public OatFileBase {
protected:
const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
const uint8_t* ptr = elf_file_->FindDynamicSymbolAddress(symbol_name);
if (ptr == nullptr) {
*error_msg = "(Internal implementation could not find symbol)";
@@ -1187,7 +1187,7 @@ class ElfOatFile FINAL : public OatFileBase {
return ptr;
}
- void PreLoad() OVERRIDE {
+ void PreLoad() override {
}
bool Load(const std::string& elf_filename,
@@ -1195,16 +1195,16 @@ class ElfOatFile FINAL : public OatFileBase {
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) OVERRIDE;
+ std::string* error_msg) override;
bool Load(int oat_fd,
uint8_t* oat_file_begin, // Override where the file is loaded to if not null
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) OVERRIDE;
+ std::string* error_msg) override;
- void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) OVERRIDE {
+ void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) override {
}
private:
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 4ed26facf7..21e214408d 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -146,7 +146,7 @@ class OatFile {
const OatHeader& GetOatHeader() const;
- class OatMethod FINAL {
+ class OatMethod final {
public:
void LinkMethod(ArtMethod* method) const;
@@ -201,7 +201,7 @@ class OatFile {
friend class OatClass;
};
- class OatClass FINAL {
+ class OatClass final {
public:
ClassStatus GetStatus() const {
return status_;
@@ -444,7 +444,7 @@ class OatFile {
// support forward declarations of inner classes, and we want to
// forward-declare OatDexFile so that we can store an opaque pointer to an
// OatDexFile in DexFile.
-class OatDexFile FINAL {
+class OatDexFile final {
public:
// Opens the DexFile referred to by this OatDexFile from within the containing OatFile.
std::unique_ptr<const DexFile> OpenDexFile(std::string* error_msg) const;
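For value-like classes such as OatMethod, OatClass and OatDexFile, final marks them as non-extension points: deriving from them is rejected at compile time (and for polymorphic types final also enables devirtualization). A minimal sketch with a hypothetical class:

class Handle final {  // hypothetical; mirrors how OatDexFile uses 'final'
 public:
  explicit Handle(int id) : id_(id) {}
  int Id() const { return id_; }
 private:
  int id_;
};

// Would NOT compile if uncommented: 'Handle' is marked final.
// class ExtendedHandle : public Handle {};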
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 36dea60367..f1e485b951 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -30,7 +30,7 @@ namespace proxy_test {
class ProxyTest : public CommonRuntimeTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
// The creation of a Proxy class uses WellKnownClasses. These are not normally initialized by
// CommonRuntimeTest so we need to do that now.
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 7f5717f736..7b92151c66 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -58,7 +58,7 @@ QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimizatio
full_fragment_done_(false) {}
// Finds catch handler.
-class CatchBlockStackVisitor FINAL : public StackVisitor {
+class CatchBlockStackVisitor final : public StackVisitor {
public:
CatchBlockStackVisitor(Thread* self,
Context* context,
@@ -72,7 +72,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
skip_frames_(skip_frames) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
if (method == nullptr) {
@@ -350,7 +350,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
}
// Prepares deoptimization.
-class DeoptimizeStackVisitor FINAL : public StackVisitor {
+class DeoptimizeStackVisitor final : public StackVisitor {
public:
DeoptimizeStackVisitor(Thread* self,
Context* context,
@@ -399,7 +399,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
}
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
ArtMethod* method = GetMethod();
if (method == nullptr || single_frame_done_) {
@@ -667,14 +667,14 @@ void QuickExceptionHandler::DoLongJump(bool smash_caller_saves) {
}
// Prints out methods with their type of frame.
-class DumpFramesWithTypeStackVisitor FINAL : public StackVisitor {
+class DumpFramesWithTypeStackVisitor final : public StackVisitor {
public:
explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
show_details_(show_details) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
if (show_details_) {
LOG(INFO) << "|> pc = " << std::hex << GetCurrentQuickFramePc();
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 30d45871ff..243150759b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -2638,7 +2638,7 @@ class UpdateEntryPointsClassVisitor : public ClassVisitor {
explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
: instrumentation_(instrumentation) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& m : klass->GetMethods(pointer_size)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index ed0472f414..e1e0e23dac 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -50,7 +50,7 @@ namespace art {
class RuntimeCallbacksTest : public CommonRuntimeTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
Thread* self = Thread::Current();
@@ -60,7 +60,7 @@ class RuntimeCallbacksTest : public CommonRuntimeTest {
AddListener();
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
{
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -101,10 +101,10 @@ class ThreadLifecycleCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest
}
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddThreadLifecycleCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveThreadLifecycleCallback(&cb_);
}
@@ -117,7 +117,7 @@ class ThreadLifecycleCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest
};
struct Callback : public ThreadLifecycleCallback {
- void ThreadStart(Thread* self) OVERRIDE {
+ void ThreadStart(Thread* self) override {
if (state == CallbackState::kBase) {
state = CallbackState::kStarted;
stored_self = self;
@@ -126,7 +126,7 @@ class ThreadLifecycleCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest
}
}
- void ThreadDeath(Thread* self) OVERRIDE {
+ void ThreadDeath(Thread* self) override {
if (state == CallbackState::kStarted && self == stored_self) {
state = CallbackState::kDied;
} else {
@@ -219,10 +219,10 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttac
class ClassLoadCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddClassLoadCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveClassLoadCallback(&cb_);
}
@@ -259,7 +259,7 @@ class ClassLoadCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
/*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
/*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
const std::string& location = initial_dex_file.GetLocation();
std::string event =
std::string("PreDefine:") + descriptor + " <" +
@@ -267,14 +267,14 @@ class ClassLoadCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
data.push_back(event);
}
- void ClassLoad(Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void ClassLoad(Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string tmp;
std::string event = std::string("Load:") + klass->GetDescriptor(&tmp);
data.push_back(event);
}
void ClassPrepare(Handle<mirror::Class> temp_klass,
- Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string tmp, tmp2;
std::string event = std::string("Prepare:") + klass->GetDescriptor(&tmp)
+ "[" + temp_klass->GetDescriptor(&tmp2) + "]";
@@ -319,15 +319,15 @@ TEST_F(ClassLoadCallbackRuntimeCallbacksTest, ClassLoadCallback) {
class RuntimeSigQuitCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddRuntimeSigQuitCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimeSigQuitCallback(&cb_);
}
struct Callback : public RuntimeSigQuitCallback {
- void SigQuit() OVERRIDE {
+ void SigQuit() override {
++sigquit_count;
}
@@ -362,20 +362,20 @@ TEST_F(RuntimeSigQuitCallbackRuntimeCallbacksTest, SigQuit) {
class RuntimePhaseCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&cb_);
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
// Bypass RuntimeCallbacksTest::TearDown, as the runtime is already gone.
CommonRuntimeTest::TearDown();
}
struct Callback : public RuntimePhaseCallback {
- void NextRuntimePhase(RuntimePhaseCallback::RuntimePhase p) OVERRIDE {
+ void NextRuntimePhase(RuntimePhaseCallback::RuntimePhase p) override {
if (p == RuntimePhaseCallback::RuntimePhase::kInitialAgents) {
if (start_seen > 0 || init_seen > 0 || death_seen > 0) {
LOG(FATAL) << "Unexpected order";
@@ -434,10 +434,10 @@ TEST_F(RuntimePhaseCallbackRuntimeCallbacksTest, Phases) {
class MonitorWaitCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddMonitorCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveMonitorCallback(&cb_);
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index ce99fb9591..eb9c661d18 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -461,7 +461,7 @@ size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
: StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}
- bool VisitFrame() OVERRIDE {
+ bool VisitFrame() override {
frames++;
return true;
}
@@ -487,7 +487,7 @@ bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next
next_dex_pc_(0) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (found_frame_) {
ArtMethod* method = GetMethod();
if (method != nullptr && !method->IsRuntimeMethod()) {
@@ -520,7 +520,7 @@ void StackVisitor::DescribeStack(Thread* thread) {
explicit DescribeStackVisitor(Thread* thread_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
return true;
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index df7f19d118..8a637a250d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1486,7 +1486,7 @@ class BarrierClosure : public Closure {
public:
explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
wrapped_->Run(self);
barrier_.Pass(self);
}
@@ -1844,7 +1844,7 @@ struct StackDumpVisitor : public MonitorObjectsStackVisitor {
static constexpr size_t kMaxRepetition = 3u;
VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
ObjPtr<mirror::Class> c = m->GetDeclaringClass();
@@ -1883,24 +1883,24 @@ struct StackDumpVisitor : public MonitorObjectsStackVisitor {
return VisitMethodResult::kContinueMethod;
}
- VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+ VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
return VisitMethodResult::kContinueMethod;
}
void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
PrintObject(obj, " - waiting on ", ThreadList::kInvalidThreadId);
}
void VisitSleepingObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
PrintObject(obj, " - sleeping on ", ThreadList::kInvalidThreadId);
}
void VisitBlockedOnObject(mirror::Object* obj,
ThreadState state,
uint32_t owner_tid)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
const char* msg;
switch (state) {
@@ -1919,7 +1919,7 @@ struct StackDumpVisitor : public MonitorObjectsStackVisitor {
PrintObject(obj, msg, owner_tid);
}
void VisitLockedObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
PrintObject(obj, " - locked ", ThreadList::kInvalidThreadId);
}
@@ -2216,7 +2216,7 @@ class MonitorExitVisitor : public SingleRootVisitor {
// NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ override NO_THREAD_SAFETY_ANALYSIS {
if (self_->HoldsLock(entered_monitor)) {
LOG(WARNING) << "Calling MonitorExit on object "
<< entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")"
@@ -2845,7 +2845,7 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
protected:
VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(
soaa_, m, GetDexPc(/* abort on error */ false));
@@ -2856,7 +2856,7 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
return VisitMethodResult::kContinueMethod;
}
- VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+ VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
lock_objects_.push_back({});
lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_);
@@ -2866,24 +2866,24 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
}
void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
}
void VisitSleepingObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
}
void VisitBlockedOnObject(mirror::Object* obj,
ThreadState state ATTRIBUTE_UNUSED,
uint32_t owner_tid ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
}
void VisitLockedObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj));
}
@@ -3450,7 +3450,7 @@ Context* Thread::GetLongJumpContext() {
// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
-struct CurrentMethodVisitor FINAL : public StackVisitor {
+struct CurrentMethodVisitor final : public StackVisitor {
CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread,
@@ -3461,7 +3461,7 @@ struct CurrentMethodVisitor FINAL : public StackVisitor {
method_(nullptr),
dex_pc_(0),
abort_on_error_(abort_on_error) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
// Continue if this is a runtime method.
@@ -3857,7 +3857,7 @@ void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
class VerifyRootVisitor : public SingleRootVisitor {
public:
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
VerifyObject(root);
}
};
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 922202418e..cddc275839 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -199,7 +199,7 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack)
static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;
// A closure used by Thread::Dump.
-class DumpCheckpoint FINAL : public Closure {
+class DumpCheckpoint final : public Closure {
public:
DumpCheckpoint(std::ostream* os, bool dump_native_stack)
: os_(os),
@@ -211,7 +211,7 @@ class DumpCheckpoint FINAL : public Closure {
}
}
- void Run(Thread* thread) OVERRIDE {
+ void Run(Thread* thread) override {
// Note thread and self may not be equal if thread was already suspended at the point of the
// request.
Thread* self = Thread::Current();
diff --git a/runtime/trace.h b/runtime/trace.h
index 1fae250d77..5d9649320a 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -102,7 +102,7 @@ enum TraceAction {
// Class for recording event traces. Trace data is either collected
// synchronously during execution (TracingMode::kMethodTracingActive),
// or by a separate sampling thread (TracingMode::kSampleProfilingActive).
-class Trace FINAL : public instrumentation::InstrumentationListener {
+class Trace final : public instrumentation::InstrumentationListener {
public:
enum TraceFlag {
kTraceCountAllocs = 1,
@@ -181,57 +181,57 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
ArtMethod* method,
uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void MethodExited(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc,
const JValue& return_value)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void MethodUnwind(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void DexPcMoved(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t new_dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void FieldRead(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc,
ArtField* field)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void FieldWritten(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc,
ArtField* field,
const JValue& field_value)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void ExceptionThrown(Thread* thread,
Handle<mirror::Throwable> exception_object)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void Branch(Thread* thread,
ArtMethod* method,
uint32_t dex_pc,
int32_t dex_pc_offset)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void InvokeVirtualOrInterface(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
- REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) override;
// Reuse an old stack trace if it exists, otherwise allocate a new one.
static std::vector<ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
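Throughout the listener declarations above, the specifier consistently lands at the very end of each declaration, after the thread-safety clauses, exactly where the old OVERRIDE macro sat. A hedged sketch of that ordering, assuming REQUIRES_SHARED(...) expands to clang's requires_shared_capability attribute as in ART's macro headers:

// Hypothetical capability standing in for Locks::mutator_lock_.
class __attribute__((capability("mutex"))) FakeMutatorLock {};
FakeMutatorLock fake_mutator_lock;

class ListenerBase {
 public:
  virtual ~ListenerBase() {}
  virtual void MethodEntered()
      __attribute__((requires_shared_capability(fake_mutator_lock))) = 0;
};

class TraceLike final : public ListenerBase {
 public:
  // Ordering mirrors the declarations above: the thread-safety attribute
  // first, then 'override' closing the declaration.
  void MethodEntered()
      __attribute__((requires_shared_capability(fake_mutator_lock))) override {}
};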
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 7adf140218..de6edd2ff3 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -39,7 +39,7 @@ class String;
} // namespace mirror
class InternTable;
-class Transaction FINAL {
+class Transaction final {
public:
static constexpr const char* kAbortExceptionDescriptor = "dalvik.system.TransactionAbortError";
static constexpr const char* kAbortExceptionSignature = "Ldalvik/system/TransactionAbortError;";
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
index e67067cdde..e5e71a4d07 100644
--- a/runtime/verifier/instruction_flags.h
+++ b/runtime/verifier/instruction_flags.h
@@ -25,7 +25,7 @@
namespace art {
namespace verifier {
-class InstructionFlags FINAL {
+class InstructionFlags final {
public:
InstructionFlags() : flags_(0) {}
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 29da376091..04a7dfba66 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -378,11 +378,11 @@ class RegType {
};
// Bottom type.
-class ConflictType FINAL : public RegType {
+class ConflictType final : public RegType {
public:
- bool IsConflict() const OVERRIDE { return true; }
+ bool IsConflict() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
// Get the singleton Conflict instance.
static const ConflictType* GetInstance() PURE;
@@ -396,7 +396,7 @@ class ConflictType FINAL : public RegType {
// Destroy the singleton instance.
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kConflict;
}
@@ -414,11 +414,11 @@ class ConflictType FINAL : public RegType {
// A variant of the bottom type used to specify an undefined value in the
// incoming registers.
// Merging with UndefinedType yields ConflictType which is the true bottom.
-class UndefinedType FINAL : public RegType {
+class UndefinedType final : public RegType {
public:
- bool IsUndefined() const OVERRIDE { return true; }
+ bool IsUndefined() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
// Get the singleton Undefined instance.
static const UndefinedType* GetInstance() PURE;
@@ -432,7 +432,7 @@ class UndefinedType FINAL : public RegType {
// Destroy the singleton instance.
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -453,7 +453,7 @@ class PrimitiveType : public RegType {
const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
};
class Cat1Type : public PrimitiveType {
@@ -462,10 +462,10 @@ class Cat1Type : public PrimitiveType {
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
};
-class IntegerType FINAL : public Cat1Type {
+class IntegerType final : public Cat1Type {
public:
- bool IsInteger() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsInteger() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const IntegerType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -473,7 +473,7 @@ class IntegerType FINAL : public Cat1Type {
static const IntegerType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kInteger;
}
@@ -487,10 +487,10 @@ class IntegerType FINAL : public Cat1Type {
static const IntegerType* instance_;
};
-class BooleanType FINAL : public Cat1Type {
+class BooleanType final : public Cat1Type {
public:
- bool IsBoolean() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsBoolean() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const BooleanType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -498,7 +498,7 @@ class BooleanType FINAL : public Cat1Type {
static const BooleanType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kBoolean;
}
@@ -513,10 +513,10 @@ class BooleanType FINAL : public Cat1Type {
static const BooleanType* instance_;
};
-class ByteType FINAL : public Cat1Type {
+class ByteType final : public Cat1Type {
public:
- bool IsByte() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsByte() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const ByteType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -524,7 +524,7 @@ class ByteType FINAL : public Cat1Type {
static const ByteType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kByte;
}
@@ -538,10 +538,10 @@ class ByteType FINAL : public Cat1Type {
static const ByteType* instance_;
};
-class ShortType FINAL : public Cat1Type {
+class ShortType final : public Cat1Type {
public:
- bool IsShort() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsShort() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const ShortType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -549,7 +549,7 @@ class ShortType FINAL : public Cat1Type {
static const ShortType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kShort;
}
@@ -562,10 +562,10 @@ class ShortType FINAL : public Cat1Type {
static const ShortType* instance_;
};
-class CharType FINAL : public Cat1Type {
+class CharType final : public Cat1Type {
public:
- bool IsChar() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsChar() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const CharType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -573,7 +573,7 @@ class CharType FINAL : public Cat1Type {
static const CharType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kChar;
}
@@ -587,10 +587,10 @@ class CharType FINAL : public Cat1Type {
static const CharType* instance_;
};
-class FloatType FINAL : public Cat1Type {
+class FloatType final : public Cat1Type {
public:
- bool IsFloat() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsFloat() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const FloatType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -598,7 +598,7 @@ class FloatType FINAL : public Cat1Type {
static const FloatType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kFloat;
}
@@ -619,11 +619,11 @@ class Cat2Type : public PrimitiveType {
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
};
-class LongLoType FINAL : public Cat2Type {
+class LongLoType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsLongLo() const OVERRIDE { return true; }
- bool IsLong() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsLongLo() const override { return true; }
+ bool IsLong() const override { return true; }
static const LongLoType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -631,7 +631,7 @@ class LongLoType FINAL : public Cat2Type {
static const LongLoType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kLongLo;
}
@@ -645,10 +645,10 @@ class LongLoType FINAL : public Cat2Type {
static const LongLoType* instance_;
};
-class LongHiType FINAL : public Cat2Type {
+class LongHiType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsLongHi() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsLongHi() const override { return true; }
static const LongHiType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -656,7 +656,7 @@ class LongHiType FINAL : public Cat2Type {
static const LongHiType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -670,11 +670,11 @@ class LongHiType FINAL : public Cat2Type {
static const LongHiType* instance_;
};
-class DoubleLoType FINAL : public Cat2Type {
+class DoubleLoType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsDoubleLo() const OVERRIDE { return true; }
- bool IsDouble() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsDoubleLo() const override { return true; }
+ bool IsDouble() const override { return true; }
static const DoubleLoType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -682,7 +682,7 @@ class DoubleLoType FINAL : public Cat2Type {
static const DoubleLoType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kDoubleLo;
}
@@ -696,10 +696,10 @@ class DoubleLoType FINAL : public Cat2Type {
static const DoubleLoType* instance_;
};
-class DoubleHiType FINAL : public Cat2Type {
+class DoubleHiType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- virtual bool IsDoubleHi() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ virtual bool IsDoubleHi() const override { return true; }
static const DoubleHiType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -707,7 +707,7 @@ class DoubleHiType FINAL : public Cat2Type {
static const DoubleHiType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -751,30 +751,30 @@ class ConstantType : public RegType {
}
}
- bool IsZero() const OVERRIDE {
+ bool IsZero() const override {
return IsPreciseConstant() && ConstantValue() == 0;
}
- bool IsOne() const OVERRIDE {
+ bool IsOne() const override {
return IsPreciseConstant() && ConstantValue() == 1;
}
- bool IsConstantChar() const OVERRIDE {
+ bool IsConstantChar() const override {
return IsConstant() && ConstantValue() >= 0 &&
ConstantValue() <= std::numeric_limits<uint16_t>::max();
}
- bool IsConstantByte() const OVERRIDE {
+ bool IsConstantByte() const override {
return IsConstant() &&
ConstantValue() >= std::numeric_limits<int8_t>::min() &&
ConstantValue() <= std::numeric_limits<int8_t>::max();
}
- bool IsConstantShort() const OVERRIDE {
+ bool IsConstantShort() const override {
return IsConstant() &&
ConstantValue() >= std::numeric_limits<int16_t>::min() &&
ConstantValue() <= std::numeric_limits<int16_t>::max();
}
- virtual bool IsConstantTypes() const OVERRIDE { return true; }
+ virtual bool IsConstantTypes() const override { return true; }
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -782,7 +782,7 @@ class ConstantType : public RegType {
const uint32_t constant_;
};
-class PreciseConstType FINAL : public ConstantType {
+class PreciseConstType final : public ConstantType {
public:
PreciseConstType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -790,94 +790,94 @@ class PreciseConstType FINAL : public ConstantType {
CheckConstructorInvariants(this);
}
- bool IsPreciseConstant() const OVERRIDE { return true; }
+ bool IsPreciseConstant() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class PreciseConstLoType FINAL : public ConstantType {
+class PreciseConstLoType final : public ConstantType {
public:
PreciseConstLoType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsPreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsPreciseConstantLo() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class PreciseConstHiType FINAL : public ConstantType {
+class PreciseConstHiType final : public ConstantType {
public:
PreciseConstHiType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsPreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsPreciseConstantHi() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class ImpreciseConstType FINAL : public ConstantType {
+class ImpreciseConstType final : public ConstantType {
public:
ImpreciseConstType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsImpreciseConstant() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsImpreciseConstant() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class ImpreciseConstLoType FINAL : public ConstantType {
+class ImpreciseConstLoType final : public ConstantType {
public:
ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsImpreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsImpreciseConstantLo() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class ImpreciseConstHiType FINAL : public ConstantType {
+class ImpreciseConstHiType final : public ConstantType {
public:
ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsImpreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsImpreciseConstantHi() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
// Special "null" type that captures the semantics of null / bottom.
-class NullType FINAL : public RegType {
+class NullType final : public RegType {
public:
- bool IsNull() const OVERRIDE {
+ bool IsNull() const override {
return true;
}
@@ -892,15 +892,15 @@ class NullType FINAL : public RegType {
static void Destroy();
- std::string Dump() const OVERRIDE {
+ std::string Dump() const override {
return "null";
}
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
- bool IsConstantTypes() const OVERRIDE {
+ bool IsConstantTypes() const override {
return true;
}
@@ -925,15 +925,15 @@ class UninitializedType : public RegType {
uint16_t cache_id)
: RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
- bool IsUninitializedTypes() const OVERRIDE;
- bool IsNonZeroReferenceTypes() const OVERRIDE;
+ bool IsUninitializedTypes() const override;
+ bool IsNonZeroReferenceTypes() const override;
uint32_t GetAllocationPc() const {
DCHECK(IsUninitializedTypes());
return allocation_pc_;
}
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
@@ -942,7 +942,7 @@ class UninitializedType : public RegType {
};
// Similar to ReferenceType but not yet having been passed to a constructor.
-class UninitializedReferenceType FINAL : public UninitializedType {
+class UninitializedReferenceType final : public UninitializedType {
public:
UninitializedReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
@@ -953,16 +953,16 @@ class UninitializedReferenceType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- bool IsUninitializedReference() const OVERRIDE { return true; }
+ bool IsUninitializedReference() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
};
// Similar to UnresolvedReferenceType but not yet having been passed to a
// constructor.
-class UnresolvedUninitializedRefType FINAL : public UninitializedType {
+class UnresolvedUninitializedRefType final : public UninitializedType {
public:
UnresolvedUninitializedRefType(const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
@@ -971,19 +971,19 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedAndUninitializedReference() const OVERRIDE { return true; }
+ bool IsUnresolvedAndUninitializedReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
// Similar to UninitializedReferenceType but special case for the this argument
// of a constructor.
-class UninitializedThisReferenceType FINAL : public UninitializedType {
+class UninitializedThisReferenceType final : public UninitializedType {
public:
UninitializedThisReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
@@ -993,17 +993,17 @@ class UninitializedThisReferenceType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- virtual bool IsUninitializedThisReference() const OVERRIDE { return true; }
+ virtual bool IsUninitializedThisReference() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
-class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
+class UnresolvedUninitializedThisRefType final : public UninitializedType {
public:
UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
uint16_t cache_id)
@@ -1012,19 +1012,19 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedAndUninitializedThisReference() const OVERRIDE { return true; }
+ bool IsUnresolvedAndUninitializedThisReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
// A type of register holding a reference to an Object of type GetClass or a
// sub-class.
-class ReferenceType FINAL : public RegType {
+class ReferenceType final : public RegType {
public:
ReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
@@ -1033,15 +1033,15 @@ class ReferenceType FINAL : public RegType {
CheckConstructorInvariants(this);
}
- bool IsReference() const OVERRIDE { return true; }
+ bool IsReference() const override { return true; }
- bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
+ bool IsNonZeroReferenceTypes() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
};
@@ -1049,22 +1049,22 @@ class ReferenceType FINAL : public RegType {
// A type of register holding a reference to an Object of type GetClass and
// only an object of that type.
-class PreciseReferenceType FINAL : public RegType {
+class PreciseReferenceType final : public RegType {
public:
PreciseReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsPreciseReference() const OVERRIDE { return true; }
+ bool IsPreciseReference() const override { return true; }
- bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
+ bool IsNonZeroReferenceTypes() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
};
@@ -1076,9 +1076,9 @@ class UnresolvedType : public RegType {
REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(nullptr, descriptor, cache_id) {}
- bool IsNonZeroReferenceTypes() const OVERRIDE;
+ bool IsNonZeroReferenceTypes() const override;
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
};
@@ -1086,7 +1086,7 @@ class UnresolvedType : public RegType {
// Similar to ReferenceType except the Class couldn't be loaded. Assignability
// and other tests made of this type must be conservative.
-class UnresolvedReferenceType FINAL : public UnresolvedType {
+class UnresolvedReferenceType final : public UnresolvedType {
public:
UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -1094,18 +1094,18 @@ class UnresolvedReferenceType FINAL : public UnresolvedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedReference() const OVERRIDE { return true; }
+ bool IsUnresolvedReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
// Type representing the super-class of an unresolved type.
-class UnresolvedSuperClass FINAL : public UnresolvedType {
+class UnresolvedSuperClass final : public UnresolvedType {
public:
UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache,
uint16_t cache_id)
@@ -1116,19 +1116,19 @@ class UnresolvedSuperClass FINAL : public UnresolvedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedSuperClass() const OVERRIDE { return true; }
+ bool IsUnresolvedSuperClass() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
uint16_t GetUnresolvedSuperClassChildId() const {
DCHECK(IsUnresolvedSuperClass());
return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
}
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
const uint16_t unresolved_child_id_;
const RegTypeCache* const reg_type_cache_;
@@ -1136,7 +1136,7 @@ class UnresolvedSuperClass FINAL : public UnresolvedType {
// A merge of unresolved (and resolved) types. If the types were resolved this may be
// Conflict or another known ReferenceType.
-class UnresolvedMergedType FINAL : public UnresolvedType {
+class UnresolvedMergedType final : public UnresolvedType {
public:
// Note: the constructor will copy the unresolved BitVector, not use it directly.
UnresolvedMergedType(const RegType& resolved,
@@ -1154,17 +1154,17 @@ class UnresolvedMergedType FINAL : public UnresolvedType {
return unresolved_types_;
}
- bool IsUnresolvedMergedReference() const OVERRIDE { return true; }
+ bool IsUnresolvedMergedReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- bool IsArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsObjectArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsArrayTypes() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsObjectArrayTypes() const override REQUIRES_SHARED(Locks::mutator_lock_);
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
const RegTypeCache* const reg_type_cache_;
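The reg_type.h hunks all exercise a single idiom: RegType declares a family of virtual predicates defaulting to false, and each concrete final type flips only its own predicate to true. Where 'virtual' survives next to 'override' (IsDoubleHi, IsConstantTypes, IsUninitializedThisReference above), it is redundant but harmless, since override already implies a virtual function. A stripped-down, hypothetical sketch of the hierarchy:

#include <string>

class RegTypeLike {  // hypothetical stand-in for verifier::RegType
 public:
  virtual ~RegTypeLike() {}
  // Predicates default to false; each concrete type overrides only its own.
  virtual bool IsInteger() const { return false; }
  virtual bool IsDoubleHi() const { return false; }
  virtual std::string Dump() const = 0;
};

class IntegerTypeLike final : public RegTypeLike {
 public:
  bool IsInteger() const override { return true; }
  std::string Dump() const override { return "Integer"; }
};

class DoubleHiTypeLike final : public RegTypeLike {
 public:
  // 'virtual' here is redundant once 'override' is present; it mirrors the
  // declarations above that kept both specifiers.
  virtual bool IsDoubleHi() const override { return true; }
  std::string Dump() const override { return "Double (high half)"; }
};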
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 15a38f3fd7..0430d205af 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -1042,7 +1042,7 @@ TEST_F(RegTypeTest, ConstPrecision) {
class RegTypeOOMTest : public RegTypeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
SetUpRuntimeOptionsForFillHeap(options);
// We must not appear to be a compiler, or we'll abort on the host.