-rw-r--r--  adbconnection/adbconnection.h | 6
-rw-r--r--  compiler/common_compiler_test.h | 6
-rw-r--r--  compiler/compiled_method.h | 2
-rw-r--r--  compiler/debug/dwarf/debug_abbrev_writer.h | 2
-rw-r--r--  compiler/debug/dwarf/debug_info_entry_writer.h | 2
-rw-r--r--  compiler/debug/dwarf/debug_line_opcode_writer.h | 2
-rw-r--r--  compiler/dex/quick_compiler_callbacks.h | 20
-rw-r--r--  compiler/driver/compiler_driver.cc | 22
-rw-r--r--  compiler/driver/compiler_driver_test.cc | 6
-rw-r--r--  compiler/driver/compiler_options.h | 2
-rw-r--r--  compiler/jni/jni_compiler_test.cc | 4
-rw-r--r--  compiler/jni/quick/arm/calling_convention_arm.h | 56
-rw-r--r--  compiler/jni/quick/arm64/calling_convention_arm64.h | 54
-rw-r--r--  compiler/jni/quick/mips/calling_convention_mips.h | 56
-rw-r--r--  compiler/jni/quick/mips64/calling_convention_mips64.h | 54
-rw-r--r--  compiler/jni/quick/x86/calling_convention_x86.h | 54
-rw-r--r--  compiler/jni/quick/x86_64/calling_convention_x86_64.h | 54
-rw-r--r--  compiler/linker/buffered_output_stream.h | 10
-rw-r--r--  compiler/linker/elf_builder.h | 18
-rw-r--r--  compiler/linker/error_delaying_output_stream.h | 8
-rw-r--r--  compiler/linker/file_output_stream.h | 10
-rw-r--r--  compiler/linker/output_stream_test.cc | 8
-rw-r--r--  compiler/linker/vector_output_stream.h | 10
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc | 32
-rw-r--r--  compiler/optimizing/bounds_check_elimination.h | 2
-rw-r--r--  compiler/optimizing/cha_guard_optimization.cc | 4
-rw-r--r--  compiler/optimizing/cha_guard_optimization.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 52
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 118
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 52
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h | 116
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 60
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 110
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 60
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 108
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 66
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 108
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 66
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 106
-rw-r--r--  compiler/optimizing/code_sinking.h | 2
-rw-r--r--  compiler/optimizing/codegen_test_utils.h | 10
-rw-r--r--  compiler/optimizing/constant_folding.cc | 46
-rw-r--r--  compiler/optimizing/constant_folding.h | 2
-rw-r--r--  compiler/optimizing/constructor_fence_redundancy_elimination.cc | 36
-rw-r--r--  compiler/optimizing/constructor_fence_redundancy_elimination.h | 2
-rw-r--r--  compiler/optimizing/dead_code_elimination.h | 2
-rw-r--r--  compiler/optimizing/emit_swap_mips_test.cc | 4
-rw-r--r--  compiler/optimizing/graph_checker.h | 48
-rw-r--r--  compiler/optimizing/graph_visualizer.cc | 84
-rw-r--r--  compiler/optimizing/gvn.h | 2
-rw-r--r--  compiler/optimizing/induction_var_analysis.h | 2
-rw-r--r--  compiler/optimizing/inliner.h | 2
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 76
-rw-r--r--  compiler/optimizing/instruction_simplifier.h | 2
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm.cc | 20
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm.h | 2
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.cc | 26
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.h | 2
-rw-r--r--  compiler/optimizing/instruction_simplifier_mips.cc | 4
-rw-r--r--  compiler/optimizing/instruction_simplifier_mips.h | 2
-rw-r--r--  compiler/optimizing/intrinsics.h | 2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 12
-rw-r--r--  compiler/optimizing/intrinsics_arm64.h | 8
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc | 12
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.h | 8
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc | 4
-rw-r--r--  compiler/optimizing/intrinsics_mips.h | 8
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc | 4
-rw-r--r--  compiler/optimizing/intrinsics_mips64.h | 8
-rw-r--r--  compiler/optimizing/intrinsics_utils.h | 4
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc | 4
-rw-r--r--  compiler/optimizing/intrinsics_x86.h | 8
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 4
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.h | 8
-rw-r--r--  compiler/optimizing/licm.h | 2
-rw-r--r--  compiler/optimizing/load_store_analysis.h | 22
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 36
-rw-r--r--  compiler/optimizing/load_store_elimination.h | 2
-rw-r--r--  compiler/optimizing/loop_analysis.cc | 12
-rw-r--r--  compiler/optimizing/loop_optimization.h | 2
-rw-r--r--  compiler/optimizing/nodes.h | 900
-rw-r--r--  compiler/optimizing/nodes_mips.h | 14
-rw-r--r--  compiler/optimizing/nodes_shared.h | 36
-rw-r--r--  compiler/optimizing/nodes_vector.h | 124
-rw-r--r--  compiler/optimizing/nodes_x86.h | 12
-rw-r--r--  compiler/optimizing/optimizing_cfi_test.cc | 2
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 22
-rw-r--r--  compiler/optimizing/parallel_move_resolver.h | 4
-rw-r--r--  compiler/optimizing/parallel_move_test.cc | 18
-rw-r--r--  compiler/optimizing/pc_relative_fixups_mips.cc | 8
-rw-r--r--  compiler/optimizing/pc_relative_fixups_mips.h | 2
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.cc | 38
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.h | 2
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.h | 24
-rw-r--r--  compiler/optimizing/pretty_printer.h | 14
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc | 40
-rw-r--r--  compiler/optimizing/reference_type_propagation.h | 2
-rw-r--r--  compiler/optimizing/register_allocator_graph_color.h | 4
-rw-r--r--  compiler/optimizing/register_allocator_linear_scan.h | 6
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 2
-rw-r--r--  compiler/optimizing/scheduler.h | 10
-rw-r--r--  compiler/optimizing/scheduler_arm.h | 6
-rw-r--r--  compiler/optimizing/scheduler_arm64.h | 8
-rw-r--r--  compiler/optimizing/select_generator.h | 2
-rw-r--r--  compiler/optimizing/sharpening.h | 2
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h | 2
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis_test.cc | 2
-rw-r--r--  compiler/optimizing/ssa_phi_elimination.h | 4
-rw-r--r--  compiler/optimizing/ssa_test.cc | 8
-rw-r--r--  compiler/optimizing/x86_memory_gen.cc | 2
-rw-r--r--  compiler/optimizing/x86_memory_gen.h | 2
-rw-r--r--  compiler/utils/arm/assembler_arm_vixl.h | 16
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.h | 94
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h | 12
-rw-r--r--  compiler/utils/arm64/jni_macro_assembler_arm64.h | 96
-rw-r--r--  compiler/utils/assembler.h | 4
-rw-r--r--  compiler/utils/assembler_test.h | 4
-rw-r--r--  compiler/utils/jni_macro_assembler.h | 10
-rw-r--r--  compiler/utils/jni_macro_assembler_test.h | 4
-rw-r--r--  compiler/utils/mips/assembler_mips.h | 102
-rw-r--r--  compiler/utils/mips/assembler_mips32r5_test.cc | 24
-rw-r--r--  compiler/utils/mips/assembler_mips32r6_test.cc | 26
-rw-r--r--  compiler/utils/mips/assembler_mips_test.cc | 18
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h | 100
-rw-r--r--  compiler/utils/mips64/assembler_mips64_test.cc | 26
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 6
-rw-r--r--  compiler/utils/x86/assembler_x86_test.cc | 18
-rw-r--r--  compiler/utils/x86/jni_macro_assembler_x86.cc | 4
-rw-r--r--  compiler/utils/x86/jni_macro_assembler_x86.h | 94
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 6
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc | 24
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.cc | 4
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.h | 92
-rw-r--r--  compiler/verifier_deps_test.cc | 8
-rw-r--r--  dex2oat/dex2oat.cc | 2
-rw-r--r--  dex2oat/dex2oat_image_test.cc | 2
-rw-r--r--  dex2oat/dex2oat_test.cc | 10
-rw-r--r--  dex2oat/linker/arm/relative_patcher_arm_base.h | 8
-rw-r--r--  dex2oat/linker/arm/relative_patcher_thumb2.h | 12
-rw-r--r--  dex2oat/linker/arm64/relative_patcher_arm64.h | 18
-rw-r--r--  dex2oat/linker/elf_writer_quick.cc | 36
-rw-r--r--  dex2oat/linker/image_test.h | 4
-rw-r--r--  dex2oat/linker/image_writer.cc | 16
-rw-r--r--  dex2oat/linker/image_writer.h | 2
-rw-r--r--  dex2oat/linker/mips/relative_patcher_mips.h | 16
-rw-r--r--  dex2oat/linker/mips64/relative_patcher_mips64.h | 16
-rw-r--r--  dex2oat/linker/multi_oat_relative_patcher.h | 6
-rw-r--r--  dex2oat/linker/multi_oat_relative_patcher_test.cc | 10
-rw-r--r--  dex2oat/linker/oat_writer.cc | 36
-rw-r--r--  dex2oat/linker/relative_patcher.cc | 14
-rw-r--r--  dex2oat/linker/relative_patcher_test.h | 10
-rw-r--r--  dex2oat/linker/x86/relative_patcher_x86.h | 6
-rw-r--r--  dex2oat/linker/x86/relative_patcher_x86_base.h | 10
-rw-r--r--  dex2oat/linker/x86_64/relative_patcher_x86_64.h | 6
-rw-r--r--  dexlayout/compact_dex_writer.h | 20
-rw-r--r--  dexlayout/dex_container.h | 8
-rw-r--r--  dexlayout/dex_ir.h | 38
-rw-r--r--  dexlayout/dex_ir_builder.cc | 2
-rw-r--r--  dexlayout/dex_writer.h | 6
-rw-r--r--  dexoptanalyzer/dexoptanalyzer.cc | 2
-rw-r--r--  disassembler/disassembler_arm.cc | 14
-rw-r--r--  disassembler/disassembler_arm.h | 6
-rw-r--r--  disassembler/disassembler_arm64.h | 14
-rw-r--r--  disassembler/disassembler_mips.h | 6
-rw-r--r--  disassembler/disassembler_x86.h | 6
-rw-r--r--  imgdiag/imgdiag.cc | 12
-rw-r--r--  imgdiag/imgdiag_test.cc | 2
-rw-r--r--  libartbase/base/allocator.cc | 4
-rw-r--r--  libartbase/base/arena_bit_vector.cc | 2
-rw-r--r--  libartbase/base/bit_memory_region.h | 2
-rw-r--r--  libartbase/base/common_art_test.h | 4
-rw-r--r--  libartbase/base/dumpable.h | 2
-rw-r--r--  libartbase/base/indenter.h | 4
-rw-r--r--  libartbase/base/leb128.h | 2
-rw-r--r--  libartbase/base/macros.h | 3
-rw-r--r--  libartbase/base/malloc_arena_pool.cc | 2
-rw-r--r--  libartbase/base/malloc_arena_pool.h | 14
-rw-r--r--  libartbase/base/memory_region.h | 2
-rw-r--r--  libartbase/base/unix_file/fd_file.h | 12
-rw-r--r--  libdexfile/dex/art_dex_file_loader.cc | 10
-rw-r--r--  libdexfile/dex/art_dex_file_loader.h | 4
-rw-r--r--  libdexfile/dex/art_dex_file_loader_test.cc | 2
-rw-r--r--  libdexfile/dex/compact_dex_file.h | 12
-rw-r--r--  libdexfile/dex/dex_file_loader.cc | 10
-rw-r--r--  libdexfile/dex/dex_instruction.h | 12
-rw-r--r--  libdexfile/dex/standard_dex_file.h | 10
-rw-r--r--  libprofile/profile/profile_compilation_info_test.cc | 2
-rw-r--r--  oatdump/oatdump.cc | 12
-rw-r--r--  openjdkjvmti/deopt_manager.h | 6
-rw-r--r--  openjdkjvmti/events-inl.h | 2
-rw-r--r--  openjdkjvmti/events.cc | 50
-rw-r--r--  openjdkjvmti/object_tagging.h | 10
-rw-r--r--  openjdkjvmti/ti_class.cc | 8
-rw-r--r--  openjdkjvmti/ti_dump.cc | 2
-rw-r--r--  openjdkjvmti/ti_heap.cc | 10
-rw-r--r--  openjdkjvmti/ti_method.cc | 14
-rw-r--r--  openjdkjvmti/ti_phase.cc | 2
-rw-r--r--  openjdkjvmti/ti_redefine.cc | 4
-rw-r--r--  openjdkjvmti/ti_search.cc | 2
-rw-r--r--  openjdkjvmti/ti_stack.cc | 16
-rw-r--r--  openjdkjvmti/ti_thread.cc | 4
-rw-r--r--  openjdkjvmti/transform.cc | 4
-rw-r--r--  patchoat/patchoat.cc | 8
-rw-r--r--  profman/profile_assistant_test.cc | 2
-rw-r--r--  profman/profman.cc | 2
-rw-r--r--  runtime/aot_class_linker.h | 4
-rw-r--r--  runtime/arch/arch_test.cc | 4
-rw-r--r--  runtime/arch/arm/context_arm.h | 30
-rw-r--r--  runtime/arch/arm/instruction_set_features_arm.h | 14
-rw-r--r--  runtime/arch/arm64/context_arm64.h | 30
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64.h | 12
-rw-r--r--  runtime/arch/mips/context_mips.h | 28
-rw-r--r--  runtime/arch/mips/instruction_set_features_mips.h | 12
-rw-r--r--  runtime/arch/mips64/context_mips64.h | 28
-rw-r--r--  runtime/arch/mips64/instruction_set_features_mips64.h | 12
-rw-r--r--  runtime/arch/stub_test.cc | 4
-rw-r--r--  runtime/arch/x86/context_x86.h | 30
-rw-r--r--  runtime/arch/x86/instruction_set_features_x86.h | 12
-rw-r--r--  runtime/arch/x86_64/context_x86_64.h | 30
-rw-r--r--  runtime/arch/x86_64/instruction_set_features_x86_64.h | 6
-rw-r--r--  runtime/art_field.h | 2
-rw-r--r--  runtime/art_method.h | 2
-rw-r--r--  runtime/base/mem_map_arena_pool.cc | 4
-rw-r--r--  runtime/base/mem_map_arena_pool.h | 14
-rw-r--r--  runtime/base/mutex.cc | 6
-rw-r--r--  runtime/base/mutex.h | 4
-rw-r--r--  runtime/cha.cc | 8
-rw-r--r--  runtime/class_linker.cc | 24
-rw-r--r--  runtime/class_linker_test.cc | 4
-rw-r--r--  runtime/common_runtime_test.h | 4
-rw-r--r--  runtime/compiler_filter.h | 2
-rw-r--r--  runtime/debugger.cc | 34
-rw-r--r--  runtime/debugger.h | 22
-rw-r--r--  runtime/dex2oat_environment_test.h | 6
-rw-r--r--  runtime/dexopt_test.h | 4
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 36
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc | 4
-rw-r--r--  runtime/fault_handler.h | 16
-rw-r--r--  runtime/gc/accounting/mod_union_table-inl.h | 2
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 4
-rw-r--r--  runtime/gc/accounting/mod_union_table.h | 32
-rw-r--r--  runtime/gc/accounting/mod_union_table_test.cc | 6
-rw-r--r--  runtime/gc/allocation_record.cc | 2
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 18
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 26
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc | 8
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 12
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 20
-rw-r--r--  runtime/gc/collector/partial_mark_sweep.h | 4
-rw-r--r--  runtime/gc/collector/semi_space.h | 18
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.h | 12
-rw-r--r--  runtime/gc/heap.cc | 20
-rw-r--r--  runtime/gc/heap_test.cc | 2
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 28
-rw-r--r--  runtime/gc/space/dlmalloc_space.h | 42
-rw-r--r--  runtime/gc/space/image_space.h | 6
-rw-r--r--  runtime/gc/space/image_space_test.cc | 6
-rw-r--r--  runtime/gc/space/large_object_space.cc | 14
-rw-r--r--  runtime/gc/space/large_object_space.h | 44
-rw-r--r--  runtime/gc/space/malloc_space.h | 2
-rw-r--r--  runtime/gc/space/memory_tool_malloc_space.h | 18
-rw-r--r--  runtime/gc/space/region_space.h | 28
-rw-r--r--  runtime/gc/space/rosalloc_space.h | 40
-rw-r--r--  runtime/gc/space/space.h | 12
-rw-r--r--  runtime/gc/space/zygote_space.h | 24
-rw-r--r--  runtime/gc/system_weak.h | 6
-rw-r--r--  runtime/gc/system_weak_test.cc | 8
-rw-r--r--  runtime/gc/task_processor_test.cc | 6
-rw-r--r--  runtime/gc/verification.cc | 2
-rw-r--r--  runtime/gc_root.h | 4
-rw-r--r--  runtime/handle_scope.h | 2
-rw-r--r--  runtime/hidden_api_test.cc | 2
-rw-r--r--  runtime/hprof/hprof.cc | 24
-rw-r--r--  runtime/instrumentation.cc | 10
-rw-r--r--  runtime/instrumentation_test.cc | 28
-rw-r--r--  runtime/intern_table_test.cc | 2
-rw-r--r--  runtime/java_frame_root_info.h | 4
-rw-r--r--  runtime/jit/jit.cc | 8
-rw-r--r--  runtime/jit/jit_code_cache.cc | 8
-rw-r--r--  runtime/jit/profile_saver.cc | 2
-rw-r--r--  runtime/jit/profiling_info_test.cc | 2
-rw-r--r--  runtime/jni/java_vm_ext_test.cc | 4
-rw-r--r--  runtime/jni/jni_internal_test.cc | 2
-rw-r--r--  runtime/mirror/class.h | 2
-rw-r--r--  runtime/mirror/dex_cache.h | 2
-rw-r--r--  runtime/mirror/dex_cache_test.cc | 2
-rw-r--r--  runtime/mirror/iftable.h | 2
-rw-r--r--  runtime/mirror/proxy.h | 2
-rw-r--r--  runtime/mirror/stack_trace_element.h | 2
-rw-r--r--  runtime/mirror/string.h | 2
-rw-r--r--  runtime/mirror/var_handle.cc | 26
-rw-r--r--  runtime/monitor.cc | 12
-rw-r--r--  runtime/monitor_objects_stack_visitor.h | 2
-rw-r--r--  runtime/monitor_test.cc | 2
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc | 2
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 2
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 4
-rw-r--r--  runtime/noop_compiler_callbacks.h | 10
-rw-r--r--  runtime/oat_file.cc | 22
-rw-r--r--  runtime/oat_file.h | 6
-rw-r--r--  runtime/proxy_test.cc | 2
-rw-r--r--  runtime/quick_exception_handler.cc | 12
-rw-r--r--  runtime/runtime.cc | 2
-rw-r--r--  runtime/runtime_callbacks_test.cc | 40
-rw-r--r--  runtime/stack.cc | 6
-rw-r--r--  runtime/thread.cc | 34
-rw-r--r--  runtime/thread_list.cc | 4
-rw-r--r--  runtime/trace.h | 24
-rw-r--r--  runtime/transaction.h | 2
-rw-r--r--  runtime/verifier/instruction_flags.h | 2
-rw-r--r--  runtime/verifier/reg_type.h | 280
-rw-r--r--  runtime/verifier/reg_type_test.cc | 2
-rw-r--r--  simulator/code_simulator_arm64.h | 8
-rw-r--r--  test/167-visit-locks/visit_locks.cc | 2
-rw-r--r--  test/1945-proxy-method-arguments/get_args.cc | 4
-rw-r--r--  test/203-multi-checkpoint/multi_checkpoint.cc | 4
-rw-r--r--  test/305-other-fault-handler/fault_handler.cc | 4
-rw-r--r--  test/616-cha-unloading/cha_unload.cc | 2
-rw-r--r--  test/906-iterate-heap/iterate_heap.cc | 6
-rw-r--r--  test/913-heaps/heaps.cc | 12
-rw-r--r--  test/common/stack_inspect.cc | 2
-rw-r--r--  test/ti-agent/ti_macros.h | 2
-rw-r--r--  tools/art_verifier/art_verifier.cc | 10
-rw-r--r--  tools/dexanalyze/dexanalyze_experiments.h | 12
-rw-r--r--  tools/dexanalyze/dexanalyze_strings.h | 4
-rw-r--r--  tools/hiddenapi/hiddenapi.cc | 8
-rw-r--r--  tools/tracefast-plugin/tracefast.cc | 30
-rw-r--r--  tools/veridex/flow_analysis.h | 8
328 files changed, 3243 insertions, 3250 deletions
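
Note: the diff below applies one mechanical pattern across all 328 files: ART's OVERRIDE and FINAL macros are replaced with the standard C++11 keywords override and final. The 3-line change to libartbase/base/macros.h in the stat above is presumably where the macro definitions themselves are removed. A minimal before/after sketch of the pattern, assuming the macros were plain aliases for the keywords; the visitor class here is hypothetical, chosen to mirror the shapes seen throughout the diff:

// Assumed pre-change definitions in libartbase/base/macros.h:
//   #define OVERRIDE override
//   #define FINAL final
//
// Before: macro spellings, a holdover from when pre-C++11 toolchains
// had to be supported.
class ExampleVisitorOld FINAL : public ClassVisitor {
 public:
  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE;  // expands to 'override'
};
//
// After: the keywords used directly. Since the macros expand to exactly
// these tokens, the change is purely textual and behavior-preserving.
class ExampleVisitorNew final : public ClassVisitor {
 public:
  bool operator()(ObjPtr<mirror::Class> klass) override;
};
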
diff --git a/adbconnection/adbconnection.h b/adbconnection/adbconnection.h
index 04e39bf4ff..c51f981f40 100644
--- a/adbconnection/adbconnection.h
+++ b/adbconnection/adbconnection.h
@@ -46,12 +46,12 @@ struct AdbConnectionDebuggerController : public art::DebuggerControlCallback {
: connection_(connection) {}
// Begin running the debugger.
- void StartDebugger() OVERRIDE;
+ void StartDebugger() override;
// The debugger should begin shutting down since the runtime is ending.
- void StopDebugger() OVERRIDE;
+ void StopDebugger() override;
- bool IsDebuggerConfigured() OVERRIDE;
+ bool IsDebuggerConfigured() override;
private:
AdbConnectionState* connection_;
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 366489c58f..e6d1564621 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -57,9 +57,9 @@ class CommonCompilerTest : public CommonRuntimeTest {
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
- void SetUp() OVERRIDE;
+ void SetUp() override;
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE;
+ void SetUpRuntimeOptions(RuntimeOptions* options) override;
Compiler::Kind GetCompilerKind() const;
void SetCompilerKind(Compiler::Kind compiler_kind);
@@ -73,7 +73,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
return CompilerFilter::kDefaultCompilerFilter;
}
- void TearDown() OVERRIDE;
+ void TearDown() override;
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index f88028034d..864ce585cf 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -104,7 +104,7 @@ class CompiledCode {
uint32_t packed_fields_;
};
-class CompiledMethod FINAL : public CompiledCode {
+class CompiledMethod final : public CompiledCode {
public:
// Constructs a CompiledMethod.
// Note: Consider using the static allocation methods below that will allocate the CompiledMethod
diff --git a/compiler/debug/dwarf/debug_abbrev_writer.h b/compiler/debug/dwarf/debug_abbrev_writer.h
index cccca255c1..63a049b2cc 100644
--- a/compiler/debug/dwarf/debug_abbrev_writer.h
+++ b/compiler/debug/dwarf/debug_abbrev_writer.h
@@ -37,7 +37,7 @@ namespace dwarf {
// determines all the attributes and their format.
// It is possible to think of them as type definitions.
template <typename Vector = std::vector<uint8_t>>
-class DebugAbbrevWriter FINAL : private Writer<Vector> {
+class DebugAbbrevWriter final : private Writer<Vector> {
static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
public:
diff --git a/compiler/debug/dwarf/debug_info_entry_writer.h b/compiler/debug/dwarf/debug_info_entry_writer.h
index 89d16f2b2a..b198178d5d 100644
--- a/compiler/debug/dwarf/debug_info_entry_writer.h
+++ b/compiler/debug/dwarf/debug_info_entry_writer.h
@@ -42,7 +42,7 @@ namespace dwarf {
* EndTag();
*/
template <typename Vector = std::vector<uint8_t>>
-class DebugInfoEntryWriter FINAL : private Writer<Vector> {
+class DebugInfoEntryWriter final : private Writer<Vector> {
static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
public:
diff --git a/compiler/debug/dwarf/debug_line_opcode_writer.h b/compiler/debug/dwarf/debug_line_opcode_writer.h
index b4a4d63f01..bb4e87ff7e 100644
--- a/compiler/debug/dwarf/debug_line_opcode_writer.h
+++ b/compiler/debug/dwarf/debug_line_opcode_writer.h
@@ -31,7 +31,7 @@ namespace dwarf {
// * Keep track of current state and convert absolute values to deltas.
// * Divide by header-defined factors as appropriate.
template<typename Vector = std::vector<uint8_t>>
-class DebugLineOpCodeWriter FINAL : private Writer<Vector> {
+class DebugLineOpCodeWriter final : private Writer<Vector> {
static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
public:
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 8a07e9c12c..b7117bd223 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -26,7 +26,7 @@ class CompilerDriver;
class DexFile;
class VerificationResults;
-class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
+class QuickCompilerCallbacks final : public CompilerCallbacks {
public:
explicit QuickCompilerCallbacks(CompilerCallbacks::CallbackMode mode)
: CompilerCallbacks(mode), dex_files_(nullptr) {}
@@ -34,20 +34,20 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
~QuickCompilerCallbacks() { }
void MethodVerified(verifier::MethodVerifier* verifier)
- REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) override;
- void ClassRejected(ClassReference ref) OVERRIDE;
+ void ClassRejected(ClassReference ref) override;
// We are running in an environment where we can call patchoat safely so we should.
- bool IsRelocationPossible() OVERRIDE {
+ bool IsRelocationPossible() override {
return true;
}
- verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE {
+ verifier::VerifierDeps* GetVerifierDeps() const override {
return verifier_deps_.get();
}
- void SetVerifierDeps(verifier::VerifierDeps* deps) OVERRIDE {
+ void SetVerifierDeps(verifier::VerifierDeps* deps) override {
verifier_deps_.reset(deps);
}
@@ -55,18 +55,18 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
verification_results_ = verification_results;
}
- ClassStatus GetPreviousClassState(ClassReference ref) OVERRIDE;
+ ClassStatus GetPreviousClassState(ClassReference ref) override;
void SetDoesClassUnloading(bool does_class_unloading, CompilerDriver* compiler_driver)
- OVERRIDE {
+ override {
does_class_unloading_ = does_class_unloading;
compiler_driver_ = compiler_driver;
DCHECK(!does_class_unloading || compiler_driver_ != nullptr);
}
- void UpdateClassState(ClassReference ref, ClassStatus state) OVERRIDE;
+ void UpdateClassState(ClassReference ref, ClassStatus state) override;
- bool CanUseOatStatusForVerification(mirror::Class* klass) OVERRIDE
+ bool CanUseOatStatusForVerification(mirror::Class* klass) override
REQUIRES_SHARED(Locks::mutator_lock_);
void SetDexFiles(const std::vector<const DexFile*>* dex_files) {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6eca304223..fd7a35f16a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -971,7 +971,7 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
public:
ResolveCatchBlockExceptionsClassVisitor() : classes_() {}
- virtual bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ virtual bool operator()(ObjPtr<mirror::Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
classes_.push_back(c);
return true;
}
@@ -1034,7 +1034,7 @@ class RecordImageClassesVisitor : public ClassVisitor {
explicit RecordImageClassesVisitor(HashSet<std::string>* image_classes)
: image_classes_(image_classes) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
image_classes_->insert(klass->GetDescriptor(&temp));
return true;
@@ -1210,7 +1210,7 @@ class ClinitImageUpdate {
: data_(data),
hs_(hs) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
StringPiece name(klass->GetDescriptor(&temp));
if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
@@ -1475,7 +1475,7 @@ class ParallelCompilationManager {
end_(end),
fn_(fn) {}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
while (true) {
const size_t index = manager_->NextIndex();
if (UNLIKELY(index >= end_)) {
@@ -1486,7 +1486,7 @@ class ParallelCompilationManager {
}
}
- void Finalize() OVERRIDE {
+ void Finalize() override {
delete this;
}
@@ -1568,7 +1568,7 @@ class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
: manager_(manager) {}
- void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+ void Visit(size_t class_def_index) override REQUIRES(!Locks::mutator_lock_) {
ScopedTrace trace(__FUNCTION__);
Thread* const self = Thread::Current();
jobject jclass_loader = manager_->GetClassLoader();
@@ -1667,7 +1667,7 @@ class ResolveTypeVisitor : public CompilationVisitor {
public:
explicit ResolveTypeVisitor(const ParallelCompilationManager* manager) : manager_(manager) {
}
- void Visit(size_t type_idx) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+ void Visit(size_t type_idx) override REQUIRES(!Locks::mutator_lock_) {
// Class derived values are more complicated, they require the linker and loader.
ScopedObjectAccess soa(Thread::Current());
ClassLinker* class_linker = manager_->GetClassLinker();
@@ -1888,7 +1888,7 @@ class VerifyClassVisitor : public CompilationVisitor {
VerifyClassVisitor(const ParallelCompilationManager* manager, verifier::HardFailLogMode log_level)
: manager_(manager), log_level_(log_level) {}
- virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) override {
ScopedTrace trace(__FUNCTION__);
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *manager_->GetDexFile();
@@ -2020,7 +2020,7 @@ class SetVerifiedClassVisitor : public CompilationVisitor {
public:
explicit SetVerifiedClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) override {
ScopedTrace trace(__FUNCTION__);
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *manager_->GetDexFile();
@@ -2085,7 +2085,7 @@ class InitializeClassVisitor : public CompilationVisitor {
public:
explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- void Visit(size_t class_def_index) OVERRIDE {
+ void Visit(size_t class_def_index) override {
ScopedTrace trace(__FUNCTION__);
jobject jclass_loader = manager_->GetClassLoader();
const DexFile& dex_file = *manager_->GetDexFile();
@@ -2470,7 +2470,7 @@ class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor
explicit InitializeArrayClassesAndCreateConflictTablesVisitor(VariableSizedHandleScope& hs)
: hs_(hs) {}
- virtual bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE
+ virtual bool operator()(ObjPtr<mirror::Class> klass) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
return true;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 2eeb4399db..fe1568da83 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -186,7 +186,7 @@ TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
class CompilerDriverProfileTest : public CompilerDriverTest {
protected:
- ProfileCompilationInfo* GetProfileCompilationInfo() OVERRIDE {
+ ProfileCompilationInfo* GetProfileCompilationInfo() override {
ScopedObjectAccess soa(Thread::Current());
std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
@@ -200,7 +200,7 @@ class CompilerDriverProfileTest : public CompilerDriverTest {
return &profile_info_;
}
- CompilerFilter::Filter GetCompilerFilter() const OVERRIDE {
+ CompilerFilter::Filter GetCompilerFilter() const override {
// Use a profile based filter.
return CompilerFilter::kSpeedProfile;
}
@@ -278,7 +278,7 @@ TEST_F(CompilerDriverProfileTest, ProfileGuidedCompilation) {
// which will be used for OatClass.
class CompilerDriverVerifyTest : public CompilerDriverTest {
protected:
- CompilerFilter::Filter GetCompilerFilter() const OVERRIDE {
+ CompilerFilter::Filter GetCompilerFilter() const override {
return CompilerFilter::kVerify;
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 601c9140dd..34aceba1c4 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -43,7 +43,7 @@ class DexFile;
enum class InstructionSet;
class InstructionSetFeatures;
-class CompilerOptions FINAL {
+class CompilerOptions final {
public:
// Guide heuristics to determine whether to compile method if profile data not available.
static const size_t kDefaultHugeMethodThreshold = 10000;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 3cb4a652ad..92b9543c27 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -221,12 +221,12 @@ struct jni_remove_extra_parameters : public remove_extra_parameters_helper<T, fn
class JniCompilerTest : public CommonCompilerTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonCompilerTest::SetUp();
check_generic_jni_ = false;
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
android::ResetNativeLoader();
CommonCompilerTest::TearDown();
}
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index 249f20225d..b327898483 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -25,24 +25,24 @@ namespace arm {
constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
-class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class ArmManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
is_synchronized,
shorty,
PointerSize::k32) {}
- ~ArmManagedRuntimeCallingConvention() OVERRIDE {}
+ ~ArmManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
ManagedRegisterEntrySpills entry_spills_;
@@ -50,37 +50,37 @@ class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCon
DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
};
-class ArmJniCallingConvention FINAL : public JniCallingConvention {
+class ArmJniCallingConvention final : public JniCallingConvention {
public:
ArmJniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~ArmJniCallingConvention() OVERRIDE {}
+ ~ArmJniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- void Next() OVERRIDE; // Override default behavior for AAPCS
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ void Next() override; // Override default behavior for AAPCS
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// AAPCS mandates return values are extended.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return false;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
// Padding to ensure longs and doubles are not split in AAPCS
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 56189427b6..ed0ddeb1b2 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -25,24 +25,24 @@ namespace arm64 {
constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
-class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class Arm64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
is_synchronized,
shorty,
PointerSize::k64) {}
- ~Arm64ManagedRuntimeCallingConvention() OVERRIDE {}
+ ~Arm64ManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
ManagedRegisterEntrySpills entry_spills_;
@@ -50,36 +50,36 @@ class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingC
DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention);
};
-class Arm64JniCallingConvention FINAL : public JniCallingConvention {
+class Arm64JniCallingConvention final : public JniCallingConvention {
public:
Arm64JniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~Arm64JniCallingConvention() OVERRIDE {}
+ ~Arm64JniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// aarch64 calling convention leaves upper bits undefined.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return true;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
DISALLOW_COPY_AND_ASSIGN(Arm64JniCallingConvention);
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index ad3f118bad..165fc6056e 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -27,24 +27,24 @@ constexpr size_t kFramePointerSize = 4;
static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k32),
"Invalid frame pointer size");
-class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class MipsManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
is_synchronized,
shorty,
PointerSize::k32) {}
- ~MipsManagedRuntimeCallingConvention() OVERRIDE {}
+ ~MipsManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
ManagedRegisterEntrySpills entry_spills_;
@@ -52,37 +52,37 @@ class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCo
DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention);
};
-class MipsJniCallingConvention FINAL : public JniCallingConvention {
+class MipsJniCallingConvention final : public JniCallingConvention {
public:
MipsJniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~MipsJniCallingConvention() OVERRIDE {}
+ ~MipsJniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- void Next() OVERRIDE; // Override default behavior for o32.
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ void Next() override; // Override default behavior for o32.
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// Mips does not need to extend small return types.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return false;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
// Padding to ensure longs and doubles are not split in o32.
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
index faedaeff6c..d87f73a1ea 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.h
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -27,24 +27,24 @@ constexpr size_t kFramePointerSize = 8;
static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k64),
"Invalid frame pointer size");
-class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class Mips64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
is_synchronized,
shorty,
PointerSize::k64) {}
- ~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
+ ~Mips64ManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
ManagedRegisterEntrySpills entry_spills_;
@@ -52,36 +52,36 @@ class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCalling
DISALLOW_COPY_AND_ASSIGN(Mips64ManagedRuntimeCallingConvention);
};
-class Mips64JniCallingConvention FINAL : public JniCallingConvention {
+class Mips64JniCallingConvention final : public JniCallingConvention {
public:
Mips64JniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~Mips64JniCallingConvention() OVERRIDE {}
+ ~Mips64JniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// Mips64 does not need to extend small return types.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return false;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
DISALLOW_COPY_AND_ASSIGN(Mips64JniCallingConvention);
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index be83cdaad0..d0c6198e77 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -25,7 +25,7 @@ namespace x86 {
constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
-class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class X86ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
@@ -33,17 +33,17 @@ class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCon
shorty,
PointerSize::k32),
gpr_arg_count_(0) {}
- ~X86ManagedRuntimeCallingConvention() OVERRIDE {}
+ ~X86ManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
int gpr_arg_count_;
@@ -53,36 +53,36 @@ class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCon
};
// Implements the x86 cdecl calling convention.
-class X86JniCallingConvention FINAL : public JniCallingConvention {
+class X86JniCallingConvention final : public JniCallingConvention {
public:
X86JniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~X86JniCallingConvention() OVERRIDE {}
+ ~X86JniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// x86 needs to extend small return types.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return true;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
DISALLOW_COPY_AND_ASSIGN(X86JniCallingConvention);
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index cdba334d81..dfab41b154 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -23,59 +23,59 @@
namespace art {
namespace x86_64 {
-class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class X86_64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
is_synchronized,
shorty,
PointerSize::k64) {}
- ~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
+ ~X86_64ManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(X86_64ManagedRuntimeCallingConvention);
};
-class X86_64JniCallingConvention FINAL : public JniCallingConvention {
+class X86_64JniCallingConvention final : public JniCallingConvention {
public:
X86_64JniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~X86_64JniCallingConvention() OVERRIDE {}
+ ~X86_64JniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// x86-64 needs to extend small return types.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return true;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
DISALLOW_COPY_AND_ASSIGN(X86_64JniCallingConvention);
diff --git a/compiler/linker/buffered_output_stream.h b/compiler/linker/buffered_output_stream.h
index 512409cb2f..cb1c44ba23 100644
--- a/compiler/linker/buffered_output_stream.h
+++ b/compiler/linker/buffered_output_stream.h
@@ -26,17 +26,17 @@
namespace art {
namespace linker {
-class BufferedOutputStream FINAL : public OutputStream {
+class BufferedOutputStream final : public OutputStream {
public:
explicit BufferedOutputStream(std::unique_ptr<OutputStream> out);
- ~BufferedOutputStream() OVERRIDE;
+ ~BufferedOutputStream() override;
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE;
+ bool WriteFully(const void* buffer, size_t byte_count) override;
- off_t Seek(off_t offset, Whence whence) OVERRIDE;
+ off_t Seek(off_t offset, Whence whence) override;
- bool Flush() OVERRIDE;
+ bool Flush() override;
private:
static const size_t kBufferSize = 8 * KB;
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 974c590a65..81ecc175b5 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -75,7 +75,7 @@ namespace linker {
// The debug sections are written last for easier stripping.
//
template <typename ElfTypes>
-class ElfBuilder FINAL {
+class ElfBuilder final {
public:
static constexpr size_t kMaxProgramHeaders = 16;
// SHA-1 digest. Not using SHA_DIGEST_LENGTH from openssl/sha.h to avoid
@@ -173,21 +173,21 @@ class ElfBuilder FINAL {
// This function always succeeds to simplify code.
// Use builder's Good() to check the actual status.
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+ bool WriteFully(const void* buffer, size_t byte_count) override {
CHECK(owner_->current_section_ == this);
return owner_->stream_.WriteFully(buffer, byte_count);
}
// This function always succeeds to simplify code.
// Use builder's Good() to check the actual status.
- off_t Seek(off_t offset, Whence whence) OVERRIDE {
+ off_t Seek(off_t offset, Whence whence) override {
// Forward the seek as-is and trust the caller to use it reasonably.
return owner_->stream_.Seek(offset, whence);
}
// This function flushes the output and returns whether it succeeded.
// If there was a previous failure, this does nothing and returns false, i.e. failed.
- bool Flush() OVERRIDE {
+ bool Flush() override {
return owner_->stream_.Flush();
}
@@ -271,7 +271,7 @@ class ElfBuilder FINAL {
};
// Writer of .dynstr section.
- class CachedStringSection FINAL : public CachedSection {
+ class CachedStringSection final : public CachedSection {
public:
CachedStringSection(ElfBuilder<ElfTypes>* owner,
const std::string& name,
@@ -295,7 +295,7 @@ class ElfBuilder FINAL {
};
// Writer of .strtab and .shstrtab sections.
- class StringSection FINAL : public Section {
+ class StringSection final : public Section {
public:
StringSection(ElfBuilder<ElfTypes>* owner,
const std::string& name,
@@ -338,7 +338,7 @@ class ElfBuilder FINAL {
};
// Writer of .dynsym and .symtab sections.
- class SymbolSection FINAL : public Section {
+ class SymbolSection final : public Section {
public:
SymbolSection(ElfBuilder<ElfTypes>* owner,
const std::string& name,
@@ -410,7 +410,7 @@ class ElfBuilder FINAL {
std::vector<Elf_Sym> syms_; // Buffered/cached content of the whole section.
};
- class AbiflagsSection FINAL : public Section {
+ class AbiflagsSection final : public Section {
public:
// Section with Mips abiflag info.
static constexpr uint8_t MIPS_AFL_REG_NONE = 0; // no registers
@@ -480,7 +480,7 @@ class ElfBuilder FINAL {
} abiflags_;
};
- class BuildIdSection FINAL : public Section {
+ class BuildIdSection final : public Section {
public:
BuildIdSection(ElfBuilder<ElfTypes>* owner,
const std::string& name,
diff --git a/compiler/linker/error_delaying_output_stream.h b/compiler/linker/error_delaying_output_stream.h
index 659f1dc093..cadd71c3f0 100644
--- a/compiler/linker/error_delaying_output_stream.h
+++ b/compiler/linker/error_delaying_output_stream.h
@@ -27,7 +27,7 @@ namespace art {
namespace linker {
// OutputStream wrapper that delays reporting an error until Flush().
-class ErrorDelayingOutputStream FINAL : public OutputStream {
+class ErrorDelayingOutputStream final : public OutputStream {
public:
explicit ErrorDelayingOutputStream(OutputStream* output)
: OutputStream(output->GetLocation()),
@@ -37,7 +37,7 @@ class ErrorDelayingOutputStream FINAL : public OutputStream {
// This function always reports success, to simplify calling code.
// Use Good() to check the actual status of the output stream.
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+ bool WriteFully(const void* buffer, size_t byte_count) override {
if (output_good_) {
if (!output_->WriteFully(buffer, byte_count)) {
PLOG(ERROR) << "Failed to write " << byte_count
@@ -51,7 +51,7 @@ class ErrorDelayingOutputStream FINAL : public OutputStream {
// This function always reports success, to simplify calling code.
// Use Good() to check the actual status of the output stream.
- off_t Seek(off_t offset, Whence whence) OVERRIDE {
+ off_t Seek(off_t offset, Whence whence) override {
// We keep a shadow copy of the offset so that we return
// the expected value even if the output stream failed.
off_t new_offset;
@@ -81,7 +81,7 @@ class ErrorDelayingOutputStream FINAL : public OutputStream {
// Flush the output and return whether all operations have succeeded.
// Do nothing if we already have a pending error.
- bool Flush() OVERRIDE {
+ bool Flush() override {
if (output_good_) {
output_good_ = output_->Flush();
}
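Editor's note: the wrapper above is the canonical "delay errors until Flush()" stream: writes and seeks always claim success, failures latch into output_good_, and the caller learns about them once, at flush time. A self-contained sketch of that pattern under assumed names (DelayedErrorSink, BackendWrite are stand-ins, not ART declarations):

#include <cstddef>

// Failures latch into good_ instead of being reported per call.
class DelayedErrorSink final {
 public:
  bool Write(const void* data, size_t size) {
    if (good_) {
      good_ = BackendWrite(data, size);  // record, but do not report, failure
    }
    return true;  // always "succeeds" so call sites stay unconditional
  }

  bool Flush() { return good_; }  // errors surface here, once

 private:
  bool BackendWrite(const void* /*data*/, size_t /*size*/) { return true; }
  bool good_ = true;
};
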
diff --git a/compiler/linker/file_output_stream.h b/compiler/linker/file_output_stream.h
index deb051fca4..1417132981 100644
--- a/compiler/linker/file_output_stream.h
+++ b/compiler/linker/file_output_stream.h
@@ -24,17 +24,17 @@
namespace art {
namespace linker {
-class FileOutputStream FINAL : public OutputStream {
+class FileOutputStream final : public OutputStream {
public:
explicit FileOutputStream(File* file);
- ~FileOutputStream() OVERRIDE {}
+ ~FileOutputStream() override {}
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE;
+ bool WriteFully(const void* buffer, size_t byte_count) override;
- off_t Seek(off_t offset, Whence whence) OVERRIDE;
+ off_t Seek(off_t offset, Whence whence) override;
- bool Flush() OVERRIDE;
+ bool Flush() override;
private:
File* const file_;
diff --git a/compiler/linker/output_stream_test.cc b/compiler/linker/output_stream_test.cc
index f93ea7a709..bcb129c2da 100644
--- a/compiler/linker/output_stream_test.cc
+++ b/compiler/linker/output_stream_test.cc
@@ -106,20 +106,20 @@ TEST_F(OutputStreamTest, BufferedFlush) {
CheckingOutputStream()
: OutputStream("dummy"),
flush_called(false) { }
- ~CheckingOutputStream() OVERRIDE {}
+ ~CheckingOutputStream() override {}
bool WriteFully(const void* buffer ATTRIBUTE_UNUSED,
- size_t byte_count ATTRIBUTE_UNUSED) OVERRIDE {
+ size_t byte_count ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
- off_t Seek(off_t offset ATTRIBUTE_UNUSED, Whence whence ATTRIBUTE_UNUSED) OVERRIDE {
+ off_t Seek(off_t offset ATTRIBUTE_UNUSED, Whence whence ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
- bool Flush() OVERRIDE {
+ bool Flush() override {
flush_called = true;
return true;
}
diff --git a/compiler/linker/vector_output_stream.h b/compiler/linker/vector_output_stream.h
index 92caf596ab..0d34da6cba 100644
--- a/compiler/linker/vector_output_stream.h
+++ b/compiler/linker/vector_output_stream.h
@@ -26,13 +26,13 @@
namespace art {
namespace linker {
-class VectorOutputStream FINAL : public OutputStream {
+class VectorOutputStream final : public OutputStream {
public:
VectorOutputStream(const std::string& location, std::vector<uint8_t>* vector);
- ~VectorOutputStream() OVERRIDE {}
+ ~VectorOutputStream() override {}
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+ bool WriteFully(const void* buffer, size_t byte_count) override {
if (static_cast<size_t>(offset_) == vector_->size()) {
const uint8_t* start = reinterpret_cast<const uint8_t*>(buffer);
vector_->insert(vector_->end(), &start[0], &start[byte_count]);
@@ -46,9 +46,9 @@ class VectorOutputStream FINAL : public OutputStream {
return true;
}
- off_t Seek(off_t offset, Whence whence) OVERRIDE;
+ off_t Seek(off_t offset, Whence whence) override;
- bool Flush() OVERRIDE {
+ bool Flush() override {
return true;
}
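Editor's note: WriteFully above takes a fast append path when the write cursor sits at the end of the backing vector; the overwrite path falls outside this hunk. A standalone sketch of that logic with hypothetical names (WriteAt is not an ART function, and the overwrite branch is an assumption about the elided code):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

bool WriteAt(std::vector<uint8_t>* vec, size_t* offset,
             const void* buffer, size_t byte_count) {
  const uint8_t* start = static_cast<const uint8_t*>(buffer);
  if (*offset == vec->size()) {
    // Append path, as in the hunk above.
    vec->insert(vec->end(), start, start + byte_count);
  } else {
    // Assumed overwrite path: grow if needed, then copy in place.
    vec->resize(std::max(vec->size(), *offset + byte_count));
    std::memcpy(vec->data() + *offset, start, byte_count);
  }
  *offset += byte_count;
  return true;
}
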
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index dfefa524bf..1c3660c0a7 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -388,10 +388,10 @@ class MonotonicValueRange : public ValueRange {
return induction_variable_->GetBlock();
}
- MonotonicValueRange* AsMonotonicValueRange() OVERRIDE { return this; }
+ MonotonicValueRange* AsMonotonicValueRange() override { return this; }
// Returns whether it's certain that this value range fits in other_range.
- bool FitsIn(ValueRange* other_range) const OVERRIDE {
+ bool FitsIn(ValueRange* other_range) const override {
if (other_range == nullptr) {
return true;
}
@@ -402,7 +402,7 @@ class MonotonicValueRange : public ValueRange {
// Try to narrow this MonotonicValueRange given another range.
// Ideally it will return a normal ValueRange. But due to
// possible overflow/underflow, that may not be possible.
- ValueRange* Narrow(ValueRange* range) OVERRIDE {
+ ValueRange* Narrow(ValueRange* range) override {
if (range == nullptr) {
return this;
}
@@ -530,7 +530,7 @@ class BCEVisitor : public HGraphVisitor {
induction_range_(induction_analysis),
next_(nullptr) {}
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
DCHECK(!IsAddedBlock(block));
first_index_bounds_check_map_.clear();
// Visit phis and instructions using a safe iterator. The iteration protects
@@ -820,7 +820,7 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
+ void VisitBoundsCheck(HBoundsCheck* bounds_check) override {
HBasicBlock* block = bounds_check->GetBlock();
HInstruction* index = bounds_check->InputAt(0);
HInstruction* array_length = bounds_check->InputAt(1);
@@ -945,7 +945,7 @@ class BCEVisitor : public HGraphVisitor {
return true;
}
- void VisitPhi(HPhi* phi) OVERRIDE {
+ void VisitPhi(HPhi* phi) override {
if (phi->IsLoopHeaderPhi()
&& (phi->GetType() == DataType::Type::kInt32)
&& HasSameInputAtBackEdges(phi)) {
@@ -992,14 +992,14 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitIf(HIf* instruction) OVERRIDE {
+ void VisitIf(HIf* instruction) override {
if (instruction->InputAt(0)->IsCondition()) {
HCondition* cond = instruction->InputAt(0)->AsCondition();
HandleIf(instruction, cond->GetLeft(), cond->GetRight(), cond->GetCondition());
}
}
- void VisitAdd(HAdd* add) OVERRIDE {
+ void VisitAdd(HAdd* add) override {
HInstruction* right = add->GetRight();
if (right->IsIntConstant()) {
ValueRange* left_range = LookupValueRange(add->GetLeft(), add->GetBlock());
@@ -1013,7 +1013,7 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitSub(HSub* sub) OVERRIDE {
+ void VisitSub(HSub* sub) override {
HInstruction* left = sub->GetLeft();
HInstruction* right = sub->GetRight();
if (right->IsIntConstant()) {
@@ -1115,19 +1115,19 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitDiv(HDiv* div) OVERRIDE {
+ void VisitDiv(HDiv* div) override {
FindAndHandlePartialArrayLength(div);
}
- void VisitShr(HShr* shr) OVERRIDE {
+ void VisitShr(HShr* shr) override {
FindAndHandlePartialArrayLength(shr);
}
- void VisitUShr(HUShr* ushr) OVERRIDE {
+ void VisitUShr(HUShr* ushr) override {
FindAndHandlePartialArrayLength(ushr);
}
- void VisitAnd(HAnd* instruction) OVERRIDE {
+ void VisitAnd(HAnd* instruction) override {
if (instruction->GetRight()->IsIntConstant()) {
int32_t constant = instruction->GetRight()->AsIntConstant()->GetValue();
if (constant > 0) {
@@ -1142,7 +1142,7 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitRem(HRem* instruction) OVERRIDE {
+ void VisitRem(HRem* instruction) override {
HInstruction* left = instruction->GetLeft();
HInstruction* right = instruction->GetRight();
@@ -1202,7 +1202,7 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitNewArray(HNewArray* new_array) OVERRIDE {
+ void VisitNewArray(HNewArray* new_array) override {
HInstruction* len = new_array->GetLength();
if (!len->IsIntConstant()) {
HInstruction* left;
@@ -1240,7 +1240,7 @@ class BCEVisitor : public HGraphVisitor {
* has occurred (see AddCompareWithDeoptimization()), since in those cases it would be
* unsafe to hoist array references across their deoptimization instruction inside a loop.
*/
- void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
+ void VisitArrayGet(HArrayGet* array_get) override {
if (!has_dom_based_dynamic_bce_ && array_get->IsInLoop()) {
HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation();
if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) &&
diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h
index 92ab7984c8..ef08877daa 100644
--- a/compiler/optimizing/bounds_check_elimination.h
+++ b/compiler/optimizing/bounds_check_elimination.h
@@ -34,7 +34,7 @@ class BoundsCheckElimination : public HOptimization {
side_effects_(side_effects),
induction_analysis_(induction_analysis) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kBoundsCheckEliminationPassName = "BCE";
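Editor's note: for context on what BCE's Run() drives, the ValueRange::FitsIn hook seen earlier treats a null other_range as "no constraint". A toy model, far simpler than ART's class (Range and its fields are assumed names, and instruction-relative bounds are dropped):

#include <cstdint>

struct Range {
  int32_t lower;
  int32_t upper;

  bool FitsIn(const Range* other_range) const {
    if (other_range == nullptr) {
      return true;  // no constraint to satisfy, as in the hunk above
    }
    return other_range->lower <= lower && upper <= other_range->upper;
  }
};
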
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index bdc395b52d..c6232ef661 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -44,9 +44,9 @@ class CHAGuardVisitor : HGraphVisitor {
GetGraph()->SetNumberOfCHAGuards(0);
}
- void VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) OVERRIDE;
+ void VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) override;
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+ void VisitBasicBlock(HBasicBlock* block) override;
private:
void RemoveGuard(HShouldDeoptimizeFlag* flag);
diff --git a/compiler/optimizing/cha_guard_optimization.h b/compiler/optimizing/cha_guard_optimization.h
index d2c5a344b7..440d51a969 100644
--- a/compiler/optimizing/cha_guard_optimization.h
+++ b/compiler/optimizing/cha_guard_optimization.h
@@ -30,7 +30,7 @@ class CHAGuardOptimization : public HOptimization {
const char* name = kCHAGuardOptimizationPassName)
: HOptimization(graph, name) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kCHAGuardOptimizationPassName = "cha_guard_optimization";
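Editor's note: both passes above share the same shape, an HOptimization subclass whose sole entry point is Run(), now spelled with override. A compilable skeleton with stub stand-ins for the ART types (HGraph, HOptimization, and MyPass below are simplified assumptions, not ART declarations):

struct HGraph {};

class HOptimization {
 public:
  HOptimization(HGraph* graph, const char* name) : graph_(graph), name_(name) {}
  virtual ~HOptimization() {}
  virtual bool Run() = 0;  // returns whether the pass changed the graph
 protected:
  HGraph* const graph_;
  const char* const name_;
};

// A pass skeleton in the style of CHAGuardOptimization above.
class MyPass final : public HOptimization {
 public:
  explicit MyPass(HGraph* graph) : HOptimization(graph, kMyPassName) {}
  bool Run() override { return false; }  // no-op body; real passes mutate graph_
  static constexpr const char* kMyPassName = "my_pass";
};
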
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a460f77132..d56f7aaca1 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -247,7 +247,7 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : SlowPathCodeARM64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -273,9 +273,9 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
@@ -285,16 +285,16 @@ class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : SlowPathCodeARM64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
arm64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
@@ -308,7 +308,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -349,7 +349,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM64"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathARM64"; }
private:
// The class this slow path will load.
@@ -363,7 +363,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
explicit LoadStringSlowPathARM64(HLoadString* instruction)
: SlowPathCodeARM64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -384,7 +384,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
@@ -394,7 +394,7 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -408,9 +408,9 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
@@ -421,7 +421,7 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCodeARM64(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
@@ -445,7 +445,7 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
return successor_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM64"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathARM64"; }
private:
// If not null, the block to branch to after the suspend check.
@@ -462,7 +462,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal)
: SlowPathCodeARM64(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(instruction_->IsCheckCast()
@@ -503,8 +503,8 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathARM64"; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -517,7 +517,7 @@ class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction)
: SlowPathCodeARM64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -529,7 +529,7 @@ class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
@@ -539,7 +539,7 @@ class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit ArraySetSlowPathARM64(HInstruction* instruction) : SlowPathCodeARM64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -570,7 +570,7 @@ class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM64"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64);
@@ -628,7 +628,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
DataType::Type type = DataType::Type::kReference;
@@ -754,7 +754,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathARM64"; }
+ const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathARM64"; }
private:
Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -794,7 +794,7 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DataType::Type type = DataType::Type::kReference;
DCHECK(locations->CanCall());
@@ -831,7 +831,7 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARM64"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARM64"; }
private:
const Location out_;
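Editor's note: every slow path in this file follows one template: override EmitNativeCode() to emit the out-of-line code, GetDescription() for diagnostics, and IsFatal() when control never returns to the fast path. A compressed, self-contained model (CodeGenerator, SlowPath, and DivZeroSlowPath are stub assumptions; ART's SlowPathCode also carries labels and live-register bookkeeping):

class CodeGenerator {};

class SlowPath {
 public:
  virtual ~SlowPath() {}
  virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
  virtual const char* GetDescription() const = 0;
  virtual bool IsFatal() const { return false; }
};

class DivZeroSlowPath final : public SlowPath {
 public:
  void EmitNativeCode(CodeGenerator* codegen) override {
    // Real code binds the entry label and calls the kQuickThrowDivZero
    // runtime entry point here.
    static_cast<void>(codegen);
  }
  bool IsFatal() const override { return true; }  // the throw never returns
  const char* GetDescription() const override { return "DivZeroSlowPath"; }
};
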
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 4f6a44fe4d..2e7a20b553 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -125,8 +125,8 @@ class SlowPathCodeARM64 : public SlowPathCode {
vixl::aarch64::Label* GetEntryLabel() { return &entry_label_; }
vixl::aarch64::Label* GetExitLabel() { return &exit_label_; }
- void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
- void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+ void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
+ void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
private:
vixl::aarch64::Label entry_label_;
@@ -216,11 +216,11 @@ class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConvention
InvokeDexCallingConventionVisitorARM64() {}
virtual ~InvokeDexCallingConventionVisitorARM64() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type return_type) const OVERRIDE {
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type return_type) const override {
return calling_convention.GetReturnLocation(return_type);
}
- Location GetMethodLocation() const OVERRIDE;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConvention calling_convention;
@@ -232,22 +232,22 @@ class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
public:
FieldAccessCallingConventionARM64() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return helpers::LocationFrom(vixl::aarch64::x1);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return helpers::LocationFrom(vixl::aarch64::x0);
}
- Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return helpers::LocationFrom(vixl::aarch64::x0);
}
Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
- bool is_instance) const OVERRIDE {
+ bool is_instance) const override {
return is_instance
? helpers::LocationFrom(vixl::aarch64::x2)
: helpers::LocationFrom(vixl::aarch64::x1);
}
- Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return helpers::LocationFrom(vixl::aarch64::d0);
}
@@ -260,7 +260,7 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
@@ -268,7 +268,7 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
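Editor's note: the DECLARE_VISIT_INSTRUCTION X-macro above is where most of this file's OVERRIDE tokens lived: one override declaration is stamped out per concrete instruction (ART's macro also takes a super parameter, unused in this expansion). A trimmed model, with HAdd/HSub as assumed stand-ins and the definitions living in a .cc file, as in ART:

struct HAdd {};
struct HSub {};

class Visitor {
 public:
  virtual ~Visitor() {}
  virtual void VisitAdd(HAdd* instr) { static_cast<void>(instr); }
  virtual void VisitSub(HSub* instr) { static_cast<void>(instr); }
};

#define DECLARE_VISIT_INSTRUCTION(name) \
  void Visit##name(H##name* instr) override;

class ConcreteVisitor final : public Visitor {
 public:
  DECLARE_VISIT_INSTRUCTION(Add)  // expands to: void VisitAdd(HAdd* instr) override;
  DECLARE_VISIT_INSTRUCTION(Sub)
};

#undef DECLARE_VISIT_INSTRUCTION
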
@@ -360,7 +360,7 @@ class LocationsBuilderARM64 : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
@@ -368,7 +368,7 @@ class LocationsBuilderARM64 : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -393,11 +393,11 @@ class ParallelMoveResolverARM64 : public ParallelMoveResolverNoSwap {
: ParallelMoveResolverNoSwap(allocator), codegen_(codegen), vixl_temps_() {}
protected:
- void PrepareForEmitNativeCode() OVERRIDE;
- void FinishEmitNativeCode() OVERRIDE;
- Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE;
- void FreeScratchLocation(Location loc) OVERRIDE;
- void EmitMove(size_t index) OVERRIDE;
+ void PrepareForEmitNativeCode() override;
+ void FinishEmitNativeCode() override;
+ Location AllocateScratchLocationFor(Location::Kind kind) override;
+ void FreeScratchLocation(Location loc) override;
+ void EmitMove(size_t index) override;
private:
Arm64Assembler* GetAssembler() const;
@@ -418,39 +418,39 @@ class CodeGeneratorARM64 : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorARM64() {}
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
vixl::aarch64::CPURegList GetFramePreservedCoreRegisters() const;
vixl::aarch64::CPURegList GetFramePreservedFPRegisters() const;
- void Bind(HBasicBlock* block) OVERRIDE;
+ void Bind(HBasicBlock* block) override;
vixl::aarch64::Label* GetLabelOf(HBasicBlock* block) {
block = FirstNonEmptyBlock(block);
return &(block_labels_[block->GetBlockId()]);
}
- size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const override {
return kArm64WordSize;
}
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+ size_t GetFloatingPointSpillSlotSize() const override {
return GetGraph()->HasSIMD()
? 2 * kArm64WordSize // 16 bytes == 2 arm64 words for each spill
: 1 * kArm64WordSize; // 8 bytes == 1 arm64 word for each spill
}
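Editor's note: a quick worked check of the two branches above, assuming kArm64WordSize is 8 bytes (SIMD graphs spill 128-bit Q registers, scalar code spills 64-bit D registers):

#include <cstddef>

constexpr size_t kArm64WordSize = 8;  // assumed value, per the comment above

constexpr size_t SpillSlotSize(bool has_simd) {
  return has_simd ? 2 * kArm64WordSize   // 128-bit Q register per SIMD spill
                  : 1 * kArm64WordSize;  // 64-bit D register per scalar spill
}

static_assert(SpillSlotSize(true) == 16, "SIMD spills take 16-byte slots");
static_assert(SpillSlotSize(false) == 8, "scalar FP spills take 8-byte slots");
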
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
vixl::aarch64::Label* block_entry_label = GetLabelOf(block);
DCHECK(block_entry_label->IsBound());
return block_entry_label->GetLocation();
}
- HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
- HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
- Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
- const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
+ HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+ Arm64Assembler* GetAssembler() override { return &assembler_; }
+ const Arm64Assembler& GetAssembler() const override { return assembler_; }
vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
// Emit a write barrier.
@@ -462,12 +462,12 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Register allocation.
- void SetupBlockedRegisters() const OVERRIDE;
+ void SetupBlockedRegisters() const override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
// The number of registers that can be allocated. The register allocator may
// decide to reserve and not use a few of them.
@@ -479,35 +479,35 @@ class CodeGeneratorARM64 : public CodeGenerator {
static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfFPRegisters;
static constexpr int kNumberOfAllocatableRegisterPairs = 0;
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kArm64;
}
const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const;
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_.resize(GetGraph()->GetBlocks().size());
}
// We want to use the STP and LDP instructions to spill and restore registers for slow paths.
// These instructions can only encode offsets that are multiples of the register size accessed.
- uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return vixl::aarch64::kXRegSizeInBytes; }
+ uint32_t GetPreferredSlotsAlignment() const override { return vixl::aarch64::kXRegSizeInBytes; }
JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
return jump_tables_.back().get();
}
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void Finalize(CodeAllocator* allocator) override;
// Code generation helpers.
void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant);
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void MoveConstant(Location destination, int32_t value) override;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
void Load(DataType::Type type,
vixl::aarch64::CPURegister dst,
@@ -529,7 +529,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -537,35 +537,35 @@ class CodeGeneratorARM64 : public CodeGenerator {
HInstruction* instruction,
SlowPathCode* slow_path);
- ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+ ParallelMoveResolverARM64* GetMoveResolver() override { return &move_resolver_; }
- bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override {
return false;
}
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
- DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+ DataType::Type type ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL);
}
@@ -652,13 +652,13 @@ class CodeGeneratorARM64 : public CodeGenerator {
void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_reference);
void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
- bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+ bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
void EmitThunkCode(const linker::LinkerPatch& patch,
/*out*/ ArenaVector<uint8_t>* code,
- /*out*/ std::string* debug_name) OVERRIDE;
+ /*out*/ std::string* debug_name) override;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Generate a GC root reference load:
//
@@ -765,10 +765,10 @@ class CodeGeneratorARM64 : public CodeGenerator {
// artReadBarrierForRootSlow.
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
- void GenerateNop() OVERRIDE;
+ void GenerateNop() override;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
private:
// Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 8c5eafd0bb..3580975c62 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -383,7 +383,7 @@ class NullCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
explicit NullCheckSlowPathARMVIXL(HNullCheck* instruction) : SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -397,9 +397,9 @@ class NullCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARMVIXL);
@@ -410,16 +410,16 @@ class DivZeroCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
explicit DivZeroCheckSlowPathARMVIXL(HDivZeroCheck* instruction)
: SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
arm_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARMVIXL);
@@ -430,7 +430,7 @@ class SuspendCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
SuspendCheckSlowPathARMVIXL(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCodeARMVIXL(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
arm_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
@@ -451,7 +451,7 @@ class SuspendCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
return successor_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathARMVIXL"; }
private:
// If not null, the block to branch to after the suspend check.
@@ -468,7 +468,7 @@ class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
explicit BoundsCheckSlowPathARMVIXL(HBoundsCheck* instruction)
: SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
@@ -495,9 +495,9 @@ class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARMVIXL);
@@ -511,7 +511,7 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -549,7 +549,7 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathARMVIXL"; }
private:
// The class this slow path will load.
@@ -563,7 +563,7 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
explicit LoadStringSlowPathARMVIXL(HLoadString* instruction)
: SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
@@ -585,7 +585,7 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARMVIXL);
@@ -596,7 +596,7 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
TypeCheckSlowPathARMVIXL(HInstruction* instruction, bool is_fatal)
: SlowPathCodeARMVIXL(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -640,9 +640,9 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathARMVIXL"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -655,7 +655,7 @@ class DeoptimizationSlowPathARMVIXL : public SlowPathCodeARMVIXL {
explicit DeoptimizationSlowPathARMVIXL(HDeoptimize* instruction)
: SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -668,7 +668,7 @@ class DeoptimizationSlowPathARMVIXL : public SlowPathCodeARMVIXL {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARMVIXL);
@@ -678,7 +678,7 @@ class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
explicit ArraySetSlowPathARMVIXL(HInstruction* instruction) : SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -709,7 +709,7 @@ class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARMVIXL);
@@ -744,7 +744,7 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
vixl32::Register reg_out = RegisterFrom(out_);
@@ -868,7 +868,7 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierForHeapReferenceSlowPathARMVIXL";
}
@@ -910,7 +910,7 @@ class ReadBarrierForRootSlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
vixl32::Register reg_out = RegisterFrom(out_);
DCHECK(locations->CanCall());
@@ -936,7 +936,7 @@ class ReadBarrierForRootSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARMVIXL"; }
private:
const Location out_;
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index cb131a7ac1..33502d4f68 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -178,9 +178,9 @@ class InvokeDexCallingConventionVisitorARMVIXL : public InvokeDexCallingConventi
InvokeDexCallingConventionVisitorARMVIXL() {}
virtual ~InvokeDexCallingConventionVisitorARMVIXL() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type type) const OVERRIDE;
- Location GetMethodLocation() const OVERRIDE;
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type type) const override;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConventionARMVIXL calling_convention;
@@ -193,25 +193,25 @@ class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention
public:
FieldAccessCallingConventionARMVIXL() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return helpers::LocationFrom(vixl::aarch32::r1);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return helpers::LocationFrom(vixl::aarch32::r0);
}
- Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type) const override {
return DataType::Is64BitType(type)
? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1)
: helpers::LocationFrom(vixl::aarch32::r0);
}
- Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+ Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
return DataType::Is64BitType(type)
? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
: (is_instance
? helpers::LocationFrom(vixl::aarch32::r2)
: helpers::LocationFrom(vixl::aarch32::r1));
}
- Location GetFpuLocation(DataType::Type type) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type) const override {
return DataType::Is64BitType(type)
? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1)
: helpers::LocationFrom(vixl::aarch32::s0);
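Editor's note: the AArch32 convention above splits on width: 64-bit values travel in register pairs (r0/r1 for returns, r2/r3 for set-values), narrower values in a single register. A toy model of the return case (Reg, Loc, and ReturnLocation are illustrative only; ART expresses this through Location and helpers::LocationFrom):

enum class Reg { r0, r1, r2, r3 };

struct Loc {
  Reg low;
  Reg high;      // meaningful only when is_pair is true
  bool is_pair;
};

Loc ReturnLocation(bool is_64bit) {
  return is_64bit ? Loc{Reg::r0, Reg::r1, /*is_pair=*/true}
                  : Loc{Reg::r0, Reg::r0, /*is_pair=*/false};
}
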
@@ -229,8 +229,8 @@ class SlowPathCodeARMVIXL : public SlowPathCode {
vixl::aarch32::Label* GetEntryLabel() { return &entry_label_; }
vixl::aarch32::Label* GetExitLabel() { return &exit_label_; }
- void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
- void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+ void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
+ void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
private:
vixl::aarch32::Label entry_label_;
@@ -244,10 +244,10 @@ class ParallelMoveResolverARMVIXL : public ParallelMoveResolverWithSwap {
ParallelMoveResolverARMVIXL(ArenaAllocator* allocator, CodeGeneratorARMVIXL* codegen)
: ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
- void EmitMove(size_t index) OVERRIDE;
- void EmitSwap(size_t index) OVERRIDE;
- void SpillScratch(int reg) OVERRIDE;
- void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) override;
+ void EmitSwap(size_t index) override;
+ void SpillScratch(int reg) override;
+ void RestoreScratch(int reg) override;
ArmVIXLAssembler* GetAssembler() const;
@@ -266,7 +266,7 @@ class LocationsBuilderARMVIXL : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
@@ -274,7 +274,7 @@ class LocationsBuilderARMVIXL : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -304,7 +304,7 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
@@ -312,7 +312,7 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -432,48 +432,48 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorARMVIXL() {}
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
- void Bind(HBasicBlock* block) OVERRIDE;
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
+ void Bind(HBasicBlock* block) override;
+ void MoveConstant(Location destination, int32_t value) override;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
- size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const override {
return static_cast<size_t>(kArmPointerSize);
}
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return vixl::aarch32::kRegSizeInBytes; }
+ size_t GetFloatingPointSpillSlotSize() const override { return vixl::aarch32::kRegSizeInBytes; }
- HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+ HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
- HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+ HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
- ArmVIXLAssembler* GetAssembler() OVERRIDE { return &assembler_; }
+ ArmVIXLAssembler* GetAssembler() override { return &assembler_; }
- const ArmVIXLAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+ const ArmVIXLAssembler& GetAssembler() const override { return assembler_; }
ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
vixl::aarch32::Label* block_entry_label = GetLabelOf(block);
DCHECK(block_entry_label->IsBound());
return block_entry_label->GetLocation();
}
void FixJumpTables();
- void SetupBlockedRegisters() const OVERRIDE;
+ void SetupBlockedRegisters() const override;
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
- ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
- InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kThumb2; }
+ ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
+ InstructionSet GetInstructionSet() const override { return InstructionSet::kThumb2; }
const ArmInstructionSetFeatures& GetInstructionSetFeatures() const;
@@ -495,7 +495,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -519,42 +519,42 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
vixl32::Label* GetFinalLabel(HInstruction* instruction, vixl32::Label* final_label);
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_.resize(GetGraph()->GetBlocks().size());
}
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void Finalize(CodeAllocator* allocator) override;
- bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+ bool NeedsTwoRegisters(DataType::Type type) const override {
return type == DataType::Type::kFloat64 || type == DataType::Type::kInt64;
}
- void ComputeSpillMask() OVERRIDE;
+ void ComputeSpillMask() override;
vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
- void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+ void MoveFromReturnRegister(Location trg, DataType::Type type) override;
// The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
// whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
@@ -604,13 +604,13 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_reference);
void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
- bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+ bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
void EmitThunkCode(const linker::LinkerPatch& patch,
/*out*/ ArenaVector<uint8_t>* code,
- /*out*/ std::string* debug_name) OVERRIDE;
+ /*out*/ std::string* debug_name) override;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Generate a GC root reference load:
//
@@ -722,10 +722,10 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
// artReadBarrierForRootSlow.
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
- void GenerateNop() OVERRIDE;
+ void GenerateNop() override;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr));
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index aed334b024..d74a7a760f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -176,7 +176,7 @@ class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
public:
explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : SlowPathCodeMIPS(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
@@ -201,9 +201,9 @@ class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
@@ -213,16 +213,16 @@ class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
public:
explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : SlowPathCodeMIPS(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
@@ -236,7 +236,7 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -280,7 +280,7 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathMIPS"; }
private:
// The class this slow path will load.
@@ -294,7 +294,7 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
explicit LoadStringSlowPathMIPS(HLoadString* instruction)
: SlowPathCodeMIPS(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
@@ -318,7 +318,7 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
@@ -328,7 +328,7 @@ class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
public:
explicit NullCheckSlowPathMIPS(HNullCheck* instr) : SlowPathCodeMIPS(instr) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -342,9 +342,9 @@ class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
@@ -355,7 +355,7 @@ class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCodeMIPS(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
@@ -375,7 +375,7 @@ class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
return &return_label_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS"; }
HBasicBlock* GetSuccessor() const {
return successor_;
@@ -396,7 +396,7 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
explicit TypeCheckSlowPathMIPS(HInstruction* instruction, bool is_fatal)
: SlowPathCodeMIPS(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
@@ -435,9 +435,9 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathMIPS"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -450,7 +450,7 @@ class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
: SlowPathCodeMIPS(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -462,7 +462,7 @@ class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
@@ -472,7 +472,7 @@ class ArraySetSlowPathMIPS : public SlowPathCodeMIPS {
public:
explicit ArraySetSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -503,7 +503,7 @@ class ArraySetSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS);
@@ -533,9 +533,9 @@ class ReadBarrierMarkSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }
+ const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register ref_reg = ref_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -627,11 +627,11 @@ class ReadBarrierMarkAndUpdateFieldSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS";
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register ref_reg = ref_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -798,7 +798,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
@@ -922,7 +922,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }
+ const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }
private:
Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -965,7 +965,7 @@ class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -995,7 +995,7 @@ class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS"; }
private:
const Location out_;
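
Every class touched in this file follows the same slow-path shape: a constructor capturing the instruction, an EmitNativeCode body, and small IsFatal/GetDescription accessors. A hedged sketch of that shape with simplified standalone types (the real SlowPathCodeMIPS also carries entry/exit labels and register-save logic):

#include <iostream>

struct CodeGenerator {};  // stand-in for the real codegen type

class SlowPathCode {
 public:
  virtual ~SlowPathCode() {}
  virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
  virtual bool IsFatal() const { return false; }
  virtual const char* GetDescription() const = 0;
};

class NullCheckSlowPath : public SlowPathCode {
 public:
  void EmitNativeCode(CodeGenerator* codegen) override {
    // The real path binds an entry label and calls the runtime
    // (kQuickThrowNullPointer); here we just trace.
    (void)codegen;
    std::cout << "emit: " << GetDescription() << "\n";
  }
  // A throwing null check never returns to the fast path.
  bool IsFatal() const override { return true; }
  const char* GetDescription() const override { return "NullCheckSlowPath"; }
};

int main() {
  CodeGenerator cg;
  NullCheckSlowPath path;
  SlowPathCode* sp = &path;
  sp->EmitNativeCode(&cg);
  return 0;
}
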
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 4830ac9bc6..bf9589331b 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -81,9 +81,9 @@ class InvokeDexCallingConventionVisitorMIPS : public InvokeDexCallingConventionV
InvokeDexCallingConventionVisitorMIPS() {}
virtual ~InvokeDexCallingConventionVisitorMIPS() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type type) const OVERRIDE;
- Location GetMethodLocation() const OVERRIDE;
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type type) const override;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConvention calling_convention;
@@ -110,23 +110,23 @@ class FieldAccessCallingConventionMIPS : public FieldAccessCallingConvention {
public:
FieldAccessCallingConventionMIPS() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return Location::RegisterLocation(A1);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return Location::RegisterLocation(A0);
}
- Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type) const override {
return DataType::Is64BitType(type)
? Location::RegisterPairLocation(V0, V1)
: Location::RegisterLocation(V0);
}
- Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+ Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
return DataType::Is64BitType(type)
? Location::RegisterPairLocation(A2, A3)
: (is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1));
}
- Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::FpuRegisterLocation(F0);
}
@@ -139,10 +139,10 @@ class ParallelMoveResolverMIPS : public ParallelMoveResolverWithSwap {
ParallelMoveResolverMIPS(ArenaAllocator* allocator, CodeGeneratorMIPS* codegen)
: ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
- void EmitMove(size_t index) OVERRIDE;
- void EmitSwap(size_t index) OVERRIDE;
- void SpillScratch(int reg) OVERRIDE;
- void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) override;
+ void EmitSwap(size_t index) override;
+ void SpillScratch(int reg) override;
+ void RestoreScratch(int reg) override;
void Exchange(int index1, int index2, bool double_slot);
void ExchangeQuadSlots(int index1, int index2);
@@ -176,14 +176,14 @@ class LocationsBuilderMIPS : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -210,14 +210,14 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -374,35 +374,35 @@ class CodeGeneratorMIPS : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorMIPS() {}
- void ComputeSpillMask() OVERRIDE;
- bool HasAllocatedCalleeSaveRegisters() const OVERRIDE;
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
+ void ComputeSpillMask() override;
+ bool HasAllocatedCalleeSaveRegisters() const override;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
- void Bind(HBasicBlock* block) OVERRIDE;
+ void Bind(HBasicBlock* block) override;
void MoveConstant(Location location, HConstant* c);
- size_t GetWordSize() const OVERRIDE { return kMipsWordSize; }
+ size_t GetWordSize() const override { return kMipsWordSize; }
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+ size_t GetFloatingPointSpillSlotSize() const override {
return GetGraph()->HasSIMD()
? 2 * kMipsDoublewordSize // 16 bytes for each spill.
: 1 * kMipsDoublewordSize; // 8 bytes for each spill.
}
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
return assembler_.GetLabelLocation(GetLabelOf(block));
}
- HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
- HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
- MipsAssembler* GetAssembler() OVERRIDE { return &assembler_; }
- const MipsAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+ HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+ MipsAssembler* GetAssembler() override { return &assembler_; }
+ const MipsAssembler& GetAssembler() const override { return assembler_; }
// Emit linker patches.
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
@@ -493,20 +493,20 @@ class CodeGeneratorMIPS : public CodeGenerator {
// Register allocation.
- void SetupBlockedRegisters() const OVERRIDE;
+ void SetupBlockedRegisters() const override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
void ClobberRA() {
clobbered_ra_ = true;
}
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
- InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips; }
+ InstructionSet GetInstructionSet() const override { return InstructionSet::kMips; }
const MipsInstructionSetFeatures& GetInstructionSetFeatures() const;
@@ -514,25 +514,25 @@ class CodeGeneratorMIPS : public CodeGenerator {
return CommonGetLabelOf<MipsLabel>(block_labels_, block);
}
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_ = CommonInitializeLabels<MipsLabel>();
}
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void Finalize(CodeAllocator* allocator) override;
// Code generation helpers.
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveConstant(Location destination, int32_t value) override;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
// Generate code to invoke a runtime entry point.
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -543,41 +543,41 @@ class CodeGeneratorMIPS : public CodeGenerator {
void GenerateInvokeRuntime(int32_t entry_point_offset, bool direct);
- ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+ ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
- bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+ bool NeedsTwoRegisters(DataType::Type type) const override {
return type == DataType::Type::kInt64;
}
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
- DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+ DataType::Type type ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
}
- void GenerateNop() OVERRIDE;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateNop() override;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
// The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
// whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
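
The Visit##name declarations changed above are generated by an X-macro over the instruction list. A compact sketch of that technique, using made-up instruction names rather than the real FOR_EACH_CONCRETE_INSTRUCTION list:

#include <iostream>

struct HAdd {};
struct HMul {};

// The instruction list is defined once...
#define FOR_EACH_INSTRUCTION(M) \
  M(Add)                        \
  M(Mul)

class Visitor {
 public:
  virtual ~Visitor() {}
  // ...and expanded into one virtual per instruction.
#define DECLARE_VISIT(name) virtual void Visit##name(H##name*) {}
  FOR_EACH_INSTRUCTION(DECLARE_VISIT)
#undef DECLARE_VISIT
};

class PrintingVisitor : public Visitor {
 public:
#define DEFINE_VISIT(name) \
  void Visit##name(H##name*) override { std::cout << #name << "\n"; }
  FOR_EACH_INSTRUCTION(DEFINE_VISIT)
#undef DEFINE_VISIT
};

int main() {
  PrintingVisitor v;
  HAdd add;
  v.VisitAdd(&add);  // prints "Add"
  return 0;
}
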
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 72318e98b0..7c89808d54 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -128,7 +128,7 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : SlowPathCodeMIPS64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
@@ -153,9 +153,9 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
@@ -166,16 +166,16 @@ class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction)
: SlowPathCodeMIPS64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
mips64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
@@ -189,7 +189,7 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -233,7 +233,7 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathMIPS64"; }
private:
// The class this slow path will load.
@@ -247,7 +247,7 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
explicit LoadStringSlowPathMIPS64(HLoadString* instruction)
: SlowPathCodeMIPS64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
@@ -274,7 +274,7 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
@@ -284,7 +284,7 @@ class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : SlowPathCodeMIPS64(instr) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -298,9 +298,9 @@ class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
@@ -311,7 +311,7 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCodeMIPS64(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
@@ -331,7 +331,7 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
return &return_label_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS64"; }
HBasicBlock* GetSuccessor() const {
return successor_;
@@ -352,7 +352,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
explicit TypeCheckSlowPathMIPS64(HInstruction* instruction, bool is_fatal)
: SlowPathCodeMIPS64(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
uint32_t dex_pc = instruction_->GetDexPc();
@@ -392,9 +392,9 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathMIPS64"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -407,7 +407,7 @@ class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
: SlowPathCodeMIPS64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -419,7 +419,7 @@ class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
@@ -429,7 +429,7 @@ class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
explicit ArraySetSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -460,7 +460,7 @@ class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS64);
@@ -490,9 +490,9 @@ class ReadBarrierMarkSlowPathMIPS64 : public SlowPathCodeMIPS64 {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }
+ const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS64"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
DCHECK(locations->CanCall());
@@ -583,11 +583,11 @@ class ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 : public SlowPathCodeMIPS64 {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS64";
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
DCHECK(locations->CanCall());
@@ -744,7 +744,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
DataType::Type type = DataType::Type::kReference;
@@ -864,7 +864,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierForHeapReferenceSlowPathMIPS64";
}
@@ -909,7 +909,7 @@ class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DataType::Type type = DataType::Type::kReference;
GpuRegister reg_out = out_.AsRegister<GpuRegister>();
@@ -938,7 +938,7 @@ class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS64"; }
private:
const Location out_;
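
Each EmitNativeCode body in this file narrows the CodeGenerator* it receives via down_cast. A simplified stand-in for art::down_cast (assumption: the real one in base/casts.h also statically checks the inheritance relationship), showing the debug-checked static_cast idiom:

#include <cassert>

template <typename To, typename From>
To down_cast(From* from) {
  // In debug builds, verify the dynamic type actually matches;
  // in release builds this is a plain static_cast.
  assert(from == nullptr || dynamic_cast<To>(from) != nullptr);
  return static_cast<To>(from);
}

struct CodeGenerator { virtual ~CodeGenerator() {} };
struct CodeGeneratorMIPS64 : CodeGenerator { int spills = 0; };

int main() {
  CodeGeneratorMIPS64 cg;
  CodeGenerator* base = &cg;
  // Each slow path receives the base pointer and narrows it like this:
  CodeGeneratorMIPS64* mips64 = down_cast<CodeGeneratorMIPS64*>(base);
  mips64->spills++;
  return mips64->spills == 1 ? 0 : 1;
}
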
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index fc0908b2cb..ddc154d40f 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -79,9 +79,9 @@ class InvokeDexCallingConventionVisitorMIPS64 : public InvokeDexCallingConventio
InvokeDexCallingConventionVisitorMIPS64() {}
virtual ~InvokeDexCallingConventionVisitorMIPS64() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type type) const OVERRIDE;
- Location GetMethodLocation() const OVERRIDE;
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type type) const override;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConvention calling_convention;
@@ -108,22 +108,22 @@ class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
public:
FieldAccessCallingConventionMIPS64() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return Location::RegisterLocation(A1);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return Location::RegisterLocation(A0);
}
- Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::RegisterLocation(V0);
}
Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
- bool is_instance) const OVERRIDE {
+ bool is_instance) const override {
return is_instance
? Location::RegisterLocation(A2)
: Location::RegisterLocation(A1);
}
- Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::FpuRegisterLocation(F0);
}
@@ -136,10 +136,10 @@ class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
: ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
- void EmitMove(size_t index) OVERRIDE;
- void EmitSwap(size_t index) OVERRIDE;
- void SpillScratch(int reg) OVERRIDE;
- void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) override;
+ void EmitSwap(size_t index) override;
+ void SpillScratch(int reg) override;
+ void RestoreScratch(int reg) override;
void Exchange(int index1, int index2, bool double_slot);
void ExchangeQuadSlots(int index1, int index2);
@@ -173,14 +173,14 @@ class LocationsBuilderMIPS64 : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -207,14 +207,14 @@ class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -356,31 +356,31 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorMIPS64() {}
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
- void Bind(HBasicBlock* block) OVERRIDE;
+ void Bind(HBasicBlock* block) override;
- size_t GetWordSize() const OVERRIDE { return kMips64DoublewordSize; }
+ size_t GetWordSize() const override { return kMips64DoublewordSize; }
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+ size_t GetFloatingPointSpillSlotSize() const override {
return GetGraph()->HasSIMD()
? 2 * kMips64DoublewordSize // 16 bytes for each spill.
: 1 * kMips64DoublewordSize; // 8 bytes for each spill.
}
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
return assembler_.GetLabelLocation(GetLabelOf(block));
}
- HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
- HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
- Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
- const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
+ HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+ Mips64Assembler* GetAssembler() override { return &assembler_; }
+ const Mips64Assembler& GetAssembler() const override { return assembler_; }
// Emit linker patches.
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
@@ -471,17 +471,17 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
// Register allocation.
- void SetupBlockedRegisters() const OVERRIDE;
+ void SetupBlockedRegisters() const override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
- InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips64; }
+ InstructionSet GetInstructionSet() const override { return InstructionSet::kMips64; }
const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const;
@@ -489,22 +489,22 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
return CommonGetLabelOf<Mips64Label>(block_labels_, block);
}
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_ = CommonInitializeLabels<Mips64Label>();
}
// We prefer aligned loads and stores (less code), so spill and restore registers in slow paths
// at aligned locations.
- uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return kMips64DoublewordSize; }
+ uint32_t GetPreferredSlotsAlignment() const override { return kMips64DoublewordSize; }
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void Finalize(CodeAllocator* allocator) override;
// Code generation helpers.
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveConstant(Location destination, int32_t value) override;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
void SwapLocations(Location loc1, Location loc2, DataType::Type type);
@@ -513,7 +513,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -523,39 +523,39 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
void GenerateInvokeRuntime(int32_t entry_point_offset);
- ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+ ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
- bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; }
+ bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override { return false; }
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
- DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+ DataType::Type type ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
}
- void GenerateNop() OVERRIDE;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateNop() override;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
// The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
// whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
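
Both MIPS headers size floating-point spill slots by whether the graph uses SIMD; a minimal sketch of that calculation, mirroring GetFloatingPointSpillSlotSize() with the 8-byte doubleword constant used by the MIPS backends:

#include <cstddef>
#include <iostream>

constexpr size_t kDoublewordSize = 8;  // bytes, as in the MIPS64 backend

// Vector registers need a 16-byte slot; plain FP registers an 8-byte slot.
size_t FloatingPointSpillSlotSize(bool graph_has_simd) {
  return graph_has_simd ? 2 * kDoublewordSize   // 16 bytes per spill
                        : 1 * kDoublewordSize;  // 8 bytes per spill
}

int main() {
  std::cout << FloatingPointSpillSlotSize(false) << "\n";  // 8
  std::cout << FloatingPointSpillSlotSize(true) << "\n";   // 16
  return 0;
}
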
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index df00ec7d30..6a27081dab 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -72,7 +72,7 @@ class NullCheckSlowPathX86 : public SlowPathCode {
public:
explicit NullCheckSlowPathX86(HNullCheck* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -86,9 +86,9 @@ class NullCheckSlowPathX86 : public SlowPathCode {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
@@ -98,16 +98,16 @@ class DivZeroCheckSlowPathX86 : public SlowPathCode {
public:
explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
x86_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86);
@@ -118,7 +118,7 @@ class DivRemMinusOneSlowPathX86 : public SlowPathCode {
DivRemMinusOneSlowPathX86(HInstruction* instruction, Register reg, bool is_div)
: SlowPathCode(instruction), reg_(reg), is_div_(is_div) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
__ Bind(GetEntryLabel());
if (is_div_) {
__ negl(reg_);
@@ -128,7 +128,7 @@ class DivRemMinusOneSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "DivRemMinusOneSlowPathX86"; }
+ const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86"; }
private:
Register reg_;
@@ -140,7 +140,7 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
public:
explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
@@ -187,9 +187,9 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
@@ -200,7 +200,7 @@ class SuspendCheckSlowPathX86 : public SlowPathCode {
SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCode(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
@@ -224,7 +224,7 @@ class SuspendCheckSlowPathX86 : public SlowPathCode {
return successor_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathX86"; }
private:
HBasicBlock* const successor_;
@@ -237,7 +237,7 @@ class LoadStringSlowPathX86 : public SlowPathCode {
public:
explicit LoadStringSlowPathX86(HLoadString* instruction): SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -256,7 +256,7 @@ class LoadStringSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
@@ -270,7 +270,7 @@ class LoadClassSlowPathX86 : public SlowPathCode {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -308,7 +308,7 @@ class LoadClassSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathX86"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathX86"; }
private:
// The class this slow path will load.
@@ -322,7 +322,7 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
TypeCheckSlowPathX86(HInstruction* instruction, bool is_fatal)
: SlowPathCode(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -375,8 +375,8 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathX86"; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -389,7 +389,7 @@ class DeoptimizationSlowPathX86 : public SlowPathCode {
explicit DeoptimizationSlowPathX86(HDeoptimize* instruction)
: SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -402,7 +402,7 @@ class DeoptimizationSlowPathX86 : public SlowPathCode {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86);
@@ -412,7 +412,7 @@ class ArraySetSlowPathX86 : public SlowPathCode {
public:
explicit ArraySetSlowPathX86(HInstruction* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -443,7 +443,7 @@ class ArraySetSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
@@ -471,9 +471,9 @@ class ReadBarrierMarkSlowPathX86 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86"; }
+ const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register ref_reg = ref_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -558,9 +558,9 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
+ const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register ref_reg = ref_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -724,7 +724,7 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
@@ -843,7 +843,7 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathX86"; }
+ const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathX86"; }
private:
Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -883,7 +883,7 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -909,7 +909,7 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86"; }
private:
const Location out_;
@@ -8100,7 +8100,7 @@ class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenera
HX86ComputeBaseMethodAddress* base_method_address_;
private:
- void Process(const MemoryRegion& region, int pos) OVERRIDE {
+ void Process(const MemoryRegion& region, int pos) override {
// Patch the correct offset for the instruction. The place to patch is the
// last 4 bytes of the instruction.
// The value to patch is the distance from the offset in the constant area
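
The RIPFixup::Process override above rewrites the trailing four displacement bytes of an already-emitted x86 instruction. A standalone sketch of that patching step, under the assumption stated in the comment (the displacement is the final little-endian int32 of the instruction; a little-endian host is also assumed):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Patch the last four bytes of the instruction ending at 'end_offset'
// with a new 32-bit displacement, little-endian as x86 expects.
void PatchDisplacement(std::vector<uint8_t>& code, size_t end_offset,
                       int32_t displacement) {
  std::memcpy(code.data() + end_offset - 4, &displacement, 4);  // assumes LE host
}

int main() {
  // 8D 80 00 00 00 00: leal 0(%eax), %eax with a placeholder disp32.
  std::vector<uint8_t> code = {0x8D, 0x80, 0x00, 0x00, 0x00, 0x00};
  PatchDisplacement(code, code.size(), 0x1234);
  for (uint8_t b : code) std::cout << std::hex << +b << " ";
  std::cout << "\n";  // 8d 80 34 12 0 0
  return 0;
}
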
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index cb58e920ea..615477171b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -83,9 +83,9 @@ class InvokeDexCallingConventionVisitorX86 : public InvokeDexCallingConventionVi
InvokeDexCallingConventionVisitorX86() {}
virtual ~InvokeDexCallingConventionVisitorX86() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type type) const OVERRIDE;
- Location GetMethodLocation() const OVERRIDE;
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type type) const override;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConvention calling_convention;
@@ -97,18 +97,18 @@ class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention {
public:
FieldAccessCallingConventionX86() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return Location::RegisterLocation(ECX);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return Location::RegisterLocation(EAX);
}
- Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type) const override {
return DataType::Is64BitType(type)
? Location::RegisterPairLocation(EAX, EDX)
: Location::RegisterLocation(EAX);
}
- Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+ Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
return DataType::Is64BitType(type)
? (is_instance
? Location::RegisterPairLocation(EDX, EBX)
@@ -117,7 +117,7 @@ class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention {
? Location::RegisterLocation(EDX)
: Location::RegisterLocation(ECX));
}
- Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::FpuRegisterLocation(XMM0);
}
@@ -130,10 +130,10 @@ class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap {
ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
: ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
- void EmitMove(size_t index) OVERRIDE;
- void EmitSwap(size_t index) OVERRIDE;
- void SpillScratch(int reg) OVERRIDE;
- void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) override;
+ void EmitSwap(size_t index) override;
+ void SpillScratch(int reg) override;
+ void RestoreScratch(int reg) override;
X86Assembler* GetAssembler() const;
@@ -155,14 +155,14 @@ class LocationsBuilderX86 : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -186,14 +186,14 @@ class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -320,23 +320,23 @@ class CodeGeneratorX86 : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorX86() {}
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
- void Bind(HBasicBlock* block) OVERRIDE;
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
+ void Bind(HBasicBlock* block) override;
+ void MoveConstant(Location destination, int32_t value) override;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
// Generate code to invoke a runtime entry point.
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -346,46 +346,46 @@ class CodeGeneratorX86 : public CodeGenerator {
void GenerateInvokeRuntime(int32_t entry_point_offset);
- size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const override {
return kX86WordSize;
}
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+ size_t GetFloatingPointSpillSlotSize() const override {
return GetGraph()->HasSIMD()
? 4 * kX86WordSize // 16 bytes == 4 words for each spill
: 2 * kX86WordSize; // 8 bytes == 2 words for each spill
}
- HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ HGraphVisitor* GetLocationBuilder() override {
return &location_builder_;
}
- HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+ HGraphVisitor* GetInstructionVisitor() override {
return &instruction_visitor_;
}
- X86Assembler* GetAssembler() OVERRIDE {
+ X86Assembler* GetAssembler() override {
return &assembler_;
}
- const X86Assembler& GetAssembler() const OVERRIDE {
+ const X86Assembler& GetAssembler() const override {
return assembler_;
}
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
return GetLabelOf(block)->Position();
}
- void SetupBlockedRegisters() const OVERRIDE;
+ void SetupBlockedRegisters() const override;
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
- ParallelMoveResolverX86* GetMoveResolver() OVERRIDE {
+ ParallelMoveResolverX86* GetMoveResolver() override {
return &move_resolver_;
}
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kX86;
}
@@ -399,25 +399,25 @@ class CodeGeneratorX86 : public CodeGenerator {
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
// Generate a call to a static or direct method.
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
// Generate a call to a virtual method.
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t intrinsic_data);
@@ -442,16 +442,16 @@ class CodeGeneratorX86 : public CodeGenerator {
dex::TypeIndex type_index,
Handle<mirror::Class> handle);
- void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+ void MoveFromReturnRegister(Location trg, DataType::Type type) override;
// Emit linker patches.
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
void PatchJitRootUse(uint8_t* code,
const uint8_t* roots_data,
const PatchInfo<Label>& info,
uint64_t index_in_table) const;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Emit a write barrier.
void MarkGCCard(Register temp,
@@ -466,15 +466,15 @@ class CodeGeneratorX86 : public CodeGenerator {
return CommonGetLabelOf<Label>(block_labels_, block);
}
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_ = CommonInitializeLabels<Label>();
}
- bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+ bool NeedsTwoRegisters(DataType::Type type) const override {
return type == DataType::Type::kInt64;
}
- bool ShouldSplitLongMoves() const OVERRIDE { return true; }
+ bool ShouldSplitLongMoves() const override { return true; }
Label* GetFrameEntryLabel() { return &frame_entry_label_; }
@@ -513,7 +513,7 @@ class CodeGeneratorX86 : public CodeGenerator {
Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void Finalize(CodeAllocator* allocator) override;
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
@@ -609,9 +609,9 @@ class CodeGeneratorX86 : public CodeGenerator {
}
}
- void GenerateNop() OVERRIDE;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateNop() override;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
// When we don't know the proper offset for the value, we use kDummy32BitOffset.
// The correct value will be inserted when processing Assembler fixups.
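(A quick sanity check of the spill sizing in `GetFloatingPointSpillSlotSize` above, under the stated assumption that `kX86WordSize` is 4 bytes: graphs with SIMD spill full 16-byte XMM contents, i.e. 4 x86 words, while non-SIMD graphs spill 8-byte doubles, i.e. 2 words.)

```cpp
#include <cstddef>
#include <iostream>

constexpr size_t kX86WordSize = 4;  // assumption: 4-byte words on x86-32

constexpr size_t FpSpillSlotSize(bool has_simd) {
  return has_simd ? 4 * kX86WordSize   // 16 bytes per XMM spill
                  : 2 * kX86WordSize;  // 8 bytes per double spill
}

int main() {
  std::cout << FpSpillSlotSize(true) << '\n';   // 16
  std::cout << FpSpillSlotSize(false) << '\n';  // 8
}
```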
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ae2a000d07..489652b85b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -71,7 +71,7 @@ class NullCheckSlowPathX86_64 : public SlowPathCode {
public:
explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -85,9 +85,9 @@ class NullCheckSlowPathX86_64 : public SlowPathCode {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86_64"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
@@ -97,16 +97,16 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCode {
public:
explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
x86_64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86_64"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86_64);
@@ -117,7 +117,7 @@ class DivRemMinusOneSlowPathX86_64 : public SlowPathCode {
DivRemMinusOneSlowPathX86_64(HInstruction* at, Register reg, DataType::Type type, bool is_div)
: SlowPathCode(at), cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
__ Bind(GetEntryLabel());
if (type_ == DataType::Type::kInt32) {
if (is_div_) {
@@ -137,7 +137,7 @@ class DivRemMinusOneSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "DivRemMinusOneSlowPathX86_64"; }
+ const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86_64"; }
private:
const CpuRegister cpu_reg_;
@@ -151,7 +151,7 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode {
SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCode(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
@@ -175,7 +175,7 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode {
return successor_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86_64"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathX86_64"; }
private:
HBasicBlock* const successor_;
@@ -189,7 +189,7 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction)
: SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
@@ -236,9 +236,9 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86_64"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
@@ -252,7 +252,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -291,7 +291,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathX86_64"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathX86_64"; }
private:
// The class this slow path will load.
@@ -304,7 +304,7 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
public:
explicit LoadStringSlowPathX86_64(HLoadString* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -326,7 +326,7 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86_64"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
@@ -337,7 +337,7 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal)
: SlowPathCode(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
@@ -385,9 +385,9 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86_64"; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathX86_64"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -400,7 +400,7 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCode {
explicit DeoptimizationSlowPathX86_64(HDeoptimize* instruction)
: SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -413,7 +413,7 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCode {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64);
@@ -423,7 +423,7 @@ class ArraySetSlowPathX86_64 : public SlowPathCode {
public:
explicit ArraySetSlowPathX86_64(HInstruction* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -454,7 +454,7 @@ class ArraySetSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86_64"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64);
@@ -482,9 +482,9 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86_64"; }
+ const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86_64"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
Register ref_reg = ref_cpu_reg.AsRegister();
@@ -573,11 +573,11 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86_64 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierMarkAndUpdateFieldSlowPathX86_64";
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
Register ref_reg = ref_cpu_reg.AsRegister();
@@ -745,7 +745,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
CpuRegister reg_out = out_.AsRegister<CpuRegister>();
@@ -864,7 +864,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierForHeapReferenceSlowPathX86_64";
}
@@ -906,7 +906,7 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
@@ -931,7 +931,7 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86_64"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86_64"; }
private:
const Location out_;
@@ -7395,7 +7395,7 @@ class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenera
CodeGeneratorX86_64* codegen_;
private:
- void Process(const MemoryRegion& region, int pos) OVERRIDE {
+ void Process(const MemoryRegion& region, int pos) override {
// Patch the correct offset for the instruction. We use the address of the
// 'next' instruction, which is 'pos' (patch the 4 bytes before).
int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
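(The `RIPFixup::Process` comment above describes RIP-relative patching: `pos` is the offset of the *next* instruction, so the 4 displacement bytes to rewrite sit at `pos - 4`, and the displacement is measured from `pos`. A hand-rolled sketch of that arithmetic — the real `AssemblerFixup`/`MemoryRegion` types are not reproduced here, and a plain byte vector stands in for the code region:)

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Rewrites the 4 bytes before 'pos' with the RIP-relative distance from
// 'pos' to a slot in the constant area, mirroring the fixup described above.
void PatchRipDisplacement(std::vector<uint8_t>& code,
                          int pos,
                          int32_t constant_area_start,
                          int32_t offset_into_constant_area) {
  int32_t target = constant_area_start + offset_into_constant_area;
  int32_t displacement = target - pos;  // measured from the next instruction
  std::memcpy(code.data() + pos - 4, &displacement, sizeof(displacement));
}

int main() {
  std::vector<uint8_t> code(32, 0x90);  // NOP-filled buffer
  PatchRipDisplacement(code, /*pos=*/8, /*constant_area_start=*/24,
                       /*offset_into_constant_area=*/4);
  int32_t disp;
  std::memcpy(&disp, code.data() + 4, sizeof(disp));
  std::cout << disp << '\n';  // (24 + 4) - 8 = 20
}
```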
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 5ba7f9cb71..f77a5c84b4 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -83,22 +83,22 @@ class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention {
public:
FieldAccessCallingConventionX86_64() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return Location::RegisterLocation(RSI);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return Location::RegisterLocation(RDI);
}
- Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::RegisterLocation(RAX);
}
Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED, bool is_instance)
- const OVERRIDE {
+ const override {
return is_instance
? Location::RegisterLocation(RDX)
: Location::RegisterLocation(RSI);
}
- Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::FpuRegisterLocation(XMM0);
}
@@ -112,9 +112,9 @@ class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventio
InvokeDexCallingConventionVisitorX86_64() {}
virtual ~InvokeDexCallingConventionVisitorX86_64() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type type) const OVERRIDE;
- Location GetMethodLocation() const OVERRIDE;
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type type) const override;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConvention calling_convention;
@@ -129,10 +129,10 @@ class ParallelMoveResolverX86_64 : public ParallelMoveResolverWithSwap {
ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
: ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
- void EmitMove(size_t index) OVERRIDE;
- void EmitSwap(size_t index) OVERRIDE;
- void SpillScratch(int reg) OVERRIDE;
- void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) override;
+ void EmitSwap(size_t index) override;
+ void SpillScratch(int reg) override;
+ void RestoreScratch(int reg) override;
X86_64Assembler* GetAssembler() const;
@@ -157,14 +157,14 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -188,14 +188,14 @@ class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -300,23 +300,23 @@ class CodeGeneratorX86_64 : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorX86_64() {}
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
- void Bind(HBasicBlock* block) OVERRIDE;
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
+ void Bind(HBasicBlock* block) override;
+ void MoveConstant(Location destination, int32_t value) override;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
// Generate code to invoke a runtime entry point.
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -326,46 +326,46 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void GenerateInvokeRuntime(int32_t entry_point_offset);
- size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const override {
return kX86_64WordSize;
}
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+ size_t GetFloatingPointSpillSlotSize() const override {
return GetGraph()->HasSIMD()
? 2 * kX86_64WordSize // 16 bytes == 2 x86_64 words for each spill
: 1 * kX86_64WordSize; // 8 bytes == 1 x86_64 word for each spill

}
- HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ HGraphVisitor* GetLocationBuilder() override {
return &location_builder_;
}
- HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+ HGraphVisitor* GetInstructionVisitor() override {
return &instruction_visitor_;
}
- X86_64Assembler* GetAssembler() OVERRIDE {
+ X86_64Assembler* GetAssembler() override {
return &assembler_;
}
- const X86_64Assembler& GetAssembler() const OVERRIDE {
+ const X86_64Assembler& GetAssembler() const override {
return assembler_;
}
- ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
+ ParallelMoveResolverX86_64* GetMoveResolver() override {
return &move_resolver_;
}
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
return GetLabelOf(block)->Position();
}
- void SetupBlockedRegisters() const OVERRIDE;
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void SetupBlockedRegisters() const override;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
+ void Finalize(CodeAllocator* allocator) override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kX86_64;
}
@@ -387,34 +387,34 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return CommonGetLabelOf<Label>(block_labels_, block);
}
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_ = CommonInitializeLabels<Label>();
}
- bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override {
return false;
}
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void RecordBootImageIntrinsicPatch(uint32_t intrinsic_data);
void RecordBootImageRelRoPatch(uint32_t boot_image_offset);
@@ -434,14 +434,14 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference);
void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
void PatchJitRootUse(uint8_t* code,
const uint8_t* roots_data,
const PatchInfo<Label>& info,
uint64_t index_in_table) const;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
@@ -565,7 +565,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
void Store64BitValueToStack(Location dest, int64_t value);
- void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+ void MoveFromReturnRegister(Location trg, DataType::Type type) override;
// Assign a 64 bit constant to an address.
void MoveInt64ToAddress(const Address& addr_low,
@@ -585,9 +585,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
}
}
- void GenerateNop() OVERRIDE;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateNop() override;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
// When we don't know the proper offset for the value, we use kDummy32BitOffset.
// We will fix this up in the linker later to have the right value.
diff --git a/compiler/optimizing/code_sinking.h b/compiler/optimizing/code_sinking.h
index 5db0b6dcc5..8eb3a520c3 100644
--- a/compiler/optimizing/code_sinking.h
+++ b/compiler/optimizing/code_sinking.h
@@ -33,7 +33,7 @@ class CodeSinking : public HOptimization {
const char* name = kCodeSinkingPassName)
: HOptimization(graph, name, stats) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kCodeSinkingPassName = "code_sinking";
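(This file and the several pass headers below all share one shape: an `HOptimization` subclass whose `bool Run()` override reports whether it changed the graph. A simplified stand-in — the real `HOptimization` also carries the graph, stats, and allocator — showing the pattern the `OVERRIDE` → `override` rewrite touches:)

```cpp
#include <iostream>

// Simplified stand-in for HOptimization.
class Optimization {
 public:
  explicit Optimization(const char* name) : name_(name) {}
  virtual ~Optimization() = default;
  virtual bool Run() = 0;  // returns true if the pass changed anything
  const char* GetName() const { return name_; }
 private:
  const char* const name_;
};

class CodeSinkingPass : public Optimization {
 public:
  CodeSinkingPass() : Optimization(kPassName) {}
  bool Run() override {
    // A real pass would walk the graph here; this stub reports "no change".
    return false;
  }
  static constexpr const char* kPassName = "code_sinking";
};

int main() {
  CodeSinkingPass pass;
  std::cout << pass.GetName() << " changed: "
            << std::boolalpha << pass.Run() << '\n';
}
```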
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 8c062f03b7..0289e9c4a7 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -101,7 +101,7 @@ class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL {
AddAllocatedRegister(Location::RegisterLocation(arm::R7));
}
- void SetupBlockedRegisters() const OVERRIDE {
+ void SetupBlockedRegisters() const override {
arm::CodeGeneratorARMVIXL::SetupBlockedRegisters();
blocked_core_registers_[arm::R4] = true;
blocked_core_registers_[arm::R6] = false;
@@ -109,7 +109,7 @@ class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL {
}
void MaybeGenerateMarkingRegisterCheck(int code ATTRIBUTE_UNUSED,
- Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE {
+ Location temp_loc ATTRIBUTE_UNUSED) override {
// When turned on, the marking register checks in
// CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck expect the

// Thread Register and the Marking Register to be set to
@@ -141,7 +141,7 @@ class TestCodeGeneratorARM64 : public arm64::CodeGeneratorARM64 {
: arm64::CodeGeneratorARM64(graph, compiler_options) {}
void MaybeGenerateMarkingRegisterCheck(int codem ATTRIBUTE_UNUSED,
- Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE {
+ Location temp_loc ATTRIBUTE_UNUSED) override {
// When turned on, the marking register checks in
// CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck expect the
// Thread Register and the Marking Register to be set to
@@ -161,7 +161,7 @@ class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 {
AddAllocatedRegister(Location::RegisterLocation(x86::EDI));
}
- void SetupBlockedRegisters() const OVERRIDE {
+ void SetupBlockedRegisters() const override {
x86::CodeGeneratorX86::SetupBlockedRegisters();
// ebx is a callee-save register in C, but caller-save for ART.
blocked_core_registers_[x86::EBX] = true;
@@ -183,7 +183,7 @@ class InternalCodeAllocator : public CodeAllocator {
}
size_t GetSize() const { return size_; }
- ArrayRef<const uint8_t> GetMemory() const OVERRIDE {
+ ArrayRef<const uint8_t> GetMemory() const override {
return ArrayRef<const uint8_t>(memory_.get(), size_);
}
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index bb78c2357e..09e7cabfa4 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -26,13 +26,13 @@ class HConstantFoldingVisitor : public HGraphDelegateVisitor {
: HGraphDelegateVisitor(graph) {}
private:
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+ void VisitBasicBlock(HBasicBlock* block) override;
- void VisitUnaryOperation(HUnaryOperation* inst) OVERRIDE;
- void VisitBinaryOperation(HBinaryOperation* inst) OVERRIDE;
+ void VisitUnaryOperation(HUnaryOperation* inst) override;
+ void VisitBinaryOperation(HBinaryOperation* inst) override;
- void VisitTypeConversion(HTypeConversion* inst) OVERRIDE;
- void VisitDivZeroCheck(HDivZeroCheck* inst) OVERRIDE;
+ void VisitTypeConversion(HTypeConversion* inst) override;
+ void VisitDivZeroCheck(HDivZeroCheck* inst) override;
DISALLOW_COPY_AND_ASSIGN(HConstantFoldingVisitor);
};
@@ -47,24 +47,24 @@ class InstructionWithAbsorbingInputSimplifier : public HGraphVisitor {
private:
void VisitShift(HBinaryOperation* shift);
- void VisitEqual(HEqual* instruction) OVERRIDE;
- void VisitNotEqual(HNotEqual* instruction) OVERRIDE;
-
- void VisitAbove(HAbove* instruction) OVERRIDE;
- void VisitAboveOrEqual(HAboveOrEqual* instruction) OVERRIDE;
- void VisitBelow(HBelow* instruction) OVERRIDE;
- void VisitBelowOrEqual(HBelowOrEqual* instruction) OVERRIDE;
-
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitCompare(HCompare* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitRem(HRem* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitSub(HSub* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
- void VisitXor(HXor* instruction) OVERRIDE;
+ void VisitEqual(HEqual* instruction) override;
+ void VisitNotEqual(HNotEqual* instruction) override;
+
+ void VisitAbove(HAbove* instruction) override;
+ void VisitAboveOrEqual(HAboveOrEqual* instruction) override;
+ void VisitBelow(HBelow* instruction) override;
+ void VisitBelowOrEqual(HBelowOrEqual* instruction) override;
+
+ void VisitAnd(HAnd* instruction) override;
+ void VisitCompare(HCompare* instruction) override;
+ void VisitMul(HMul* instruction) override;
+ void VisitOr(HOr* instruction) override;
+ void VisitRem(HRem* instruction) override;
+ void VisitShl(HShl* instruction) override;
+ void VisitShr(HShr* instruction) override;
+ void VisitSub(HSub* instruction) override;
+ void VisitUShr(HUShr* instruction) override;
+ void VisitXor(HXor* instruction) override;
};
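(The visitor hooks declared above — `VisitAnd`, `VisitMul`, `VisitOr`, and so on — implement the "absorbing input" idea: when one operand is the absorbing element of the operation, the whole expression folds to a constant regardless of the other operand. A tiny illustration, not ART's IR:)

```cpp
#include <cassert>
#include <optional>

enum class Op { kAnd, kMul, kOr };

// Folds op(x, constant_operand) when the constant absorbs x.
std::optional<int> FoldIfAbsorbing(Op op, int constant_operand) {
  switch (op) {
    case Op::kAnd:
    case Op::kMul:
      if (constant_operand == 0) return 0;    // x & 0 == 0, x * 0 == 0
      break;
    case Op::kOr:
      if (constant_operand == -1) return -1;  // x | ~0 == ~0
      break;
  }
  return std::nullopt;  // not foldable by this rule alone
}

int main() {
  assert(FoldIfAbsorbing(Op::kMul, 0) == 0);
  assert(FoldIfAbsorbing(Op::kOr, -1) == -1);
  assert(!FoldIfAbsorbing(Op::kAnd, 5).has_value());
}
```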
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index f4dbc805c4..72bd95b3cb 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -41,7 +41,7 @@ class HConstantFolding : public HOptimization {
public:
HConstantFolding(HGraph* graph, const char* name) : HOptimization(graph, name) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kConstantFoldingPassName = "constant_folding";
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 54bff22e98..3cb8bf2f47 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -34,7 +34,7 @@ class CFREVisitor : public HGraphVisitor {
candidate_fence_targets_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
stats_(stats) {}
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
// Visit all instructions in block.
HGraphVisitor::VisitBasicBlock(block);
@@ -43,7 +43,7 @@ class CFREVisitor : public HGraphVisitor {
MergeCandidateFences();
}
- void VisitConstructorFence(HConstructorFence* constructor_fence) OVERRIDE {
+ void VisitConstructorFence(HConstructorFence* constructor_fence) override {
candidate_fences_.push_back(constructor_fence);
for (size_t input_idx = 0; input_idx < constructor_fence->InputCount(); ++input_idx) {
@@ -51,29 +51,29 @@ class CFREVisitor : public HGraphVisitor {
}
}
- void VisitBoundType(HBoundType* bound_type) OVERRIDE {
+ void VisitBoundType(HBoundType* bound_type) override {
VisitAlias(bound_type);
}
- void VisitNullCheck(HNullCheck* null_check) OVERRIDE {
+ void VisitNullCheck(HNullCheck* null_check) override {
VisitAlias(null_check);
}
- void VisitSelect(HSelect* select) OVERRIDE {
+ void VisitSelect(HSelect* select) override {
VisitAlias(select);
}
- void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+ void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
HInstruction* value = instruction->InputAt(1);
VisitSetLocation(instruction, value);
}
- void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+ void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
HInstruction* value = instruction->InputAt(1);
VisitSetLocation(instruction, value);
}
- void VisitArraySet(HArraySet* instruction) OVERRIDE {
+ void VisitArraySet(HArraySet* instruction) override {
HInstruction* value = instruction->InputAt(2);
VisitSetLocation(instruction, value);
}
@@ -83,46 +83,46 @@ class CFREVisitor : public HGraphVisitor {
MergeCandidateFences();
}
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+ void VisitInvokeInterface(HInvokeInterface* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
+ void VisitInvokeUnresolved(HInvokeUnresolved* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ void VisitInvokePolymorphic(HInvokePolymorphic* invoke) override {
HandleInvoke(invoke);
}
- void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
+ void VisitClinitCheck(HClinitCheck* clinit) override {
HandleInvoke(clinit);
}
- void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE {
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE {
+ void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE {
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE {
+ void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
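(The CFRE visitor above accumulates candidate constructor fences and conservatively flushes them at any instruction that might publish the object — note how every `VisitInvoke*` hook funnels into `HandleInvoke`, which merges the candidates. A schematic of that flush-on-publication pattern, with plain strings standing in for `HInstruction*` and the merge reduced to a counter:)

```cpp
#include <iostream>
#include <string>
#include <vector>

class FenceMerger {
 public:
  void OnConstructorFence(const std::string& fence) {
    candidates_.push_back(fence);
  }
  void OnInvoke() { Merge(); }  // conservative: any call may publish
  void Merge() {
    // One trailing fence covers the whole group, so the rest can go.
    if (candidates_.size() > 1) {
      std::cout << "merged " << candidates_.size() << " fences into 1\n";
    }
    candidates_.clear();
  }
 private:
  std::vector<std::string> candidates_;
};

int main() {
  FenceMerger merger;
  merger.OnConstructorFence("fence A");
  merger.OnConstructorFence("fence B");
  merger.OnInvoke();  // prints: merged 2 fences into 1
}
```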
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.h b/compiler/optimizing/constructor_fence_redundancy_elimination.h
index 367d9f21a0..014b342258 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.h
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.h
@@ -52,7 +52,7 @@ class ConstructorFenceRedundancyElimination : public HOptimization {
const char* name = kCFREPassName)
: HOptimization(graph, name, stats) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kCFREPassName = "constructor_fence_redundancy_elimination";
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 90caa53764..799721acf2 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -32,7 +32,7 @@ class HDeadCodeElimination : public HOptimization {
HDeadCodeElimination(HGraph* graph, OptimizingCompilerStats* stats, const char* name)
: HOptimization(graph, name, stats) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kDeadCodeEliminationPassName = "dead_code_elimination";
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
index 293c1ab3f3..63a370a47b 100644
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -27,7 +27,7 @@ namespace art {
class EmitSwapMipsTest : public OptimizingUnitTest {
public:
- void SetUp() OVERRIDE {
+ void SetUp() override {
instruction_set_ = InstructionSet::kMips;
instruction_set_features_ = MipsInstructionSetFeatures::FromCppDefines();
OptimizingUnitTest::SetUp();
@@ -46,7 +46,7 @@ class EmitSwapMipsTest : public OptimizingUnitTest {
GetAssemblyHeader()));
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
test_helper_.reset();
codegen_.reset();
graph_ = nullptr;
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 3a2bb7a00c..d085609197 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -44,30 +44,30 @@ class GraphChecker : public HGraphDelegateVisitor {
// and return value pass along the observed graph sizes.
size_t Run(bool pass_change = true, size_t last_size = 0);
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
-
- void VisitInstruction(HInstruction* instruction) OVERRIDE;
- void VisitPhi(HPhi* phi) OVERRIDE;
-
- void VisitBinaryOperation(HBinaryOperation* op) OVERRIDE;
- void VisitBooleanNot(HBooleanNot* instruction) OVERRIDE;
- void VisitBoundType(HBoundType* instruction) OVERRIDE;
- void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
- void VisitCheckCast(HCheckCast* check) OVERRIDE;
- void VisitCondition(HCondition* op) OVERRIDE;
- void VisitConstant(HConstant* instruction) OVERRIDE;
- void VisitDeoptimize(HDeoptimize* instruction) OVERRIDE;
- void VisitIf(HIf* instruction) OVERRIDE;
- void VisitInstanceOf(HInstanceOf* check) OVERRIDE;
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
- void VisitLoadException(HLoadException* load) OVERRIDE;
- void VisitNeg(HNeg* instruction) OVERRIDE;
- void VisitPackedSwitch(HPackedSwitch* instruction) OVERRIDE;
- void VisitReturn(HReturn* ret) OVERRIDE;
- void VisitReturnVoid(HReturnVoid* ret) OVERRIDE;
- void VisitSelect(HSelect* instruction) OVERRIDE;
- void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
+ void VisitBasicBlock(HBasicBlock* block) override;
+
+ void VisitInstruction(HInstruction* instruction) override;
+ void VisitPhi(HPhi* phi) override;
+
+ void VisitBinaryOperation(HBinaryOperation* op) override;
+ void VisitBooleanNot(HBooleanNot* instruction) override;
+ void VisitBoundType(HBoundType* instruction) override;
+ void VisitBoundsCheck(HBoundsCheck* check) override;
+ void VisitCheckCast(HCheckCast* check) override;
+ void VisitCondition(HCondition* op) override;
+ void VisitConstant(HConstant* instruction) override;
+ void VisitDeoptimize(HDeoptimize* instruction) override;
+ void VisitIf(HIf* instruction) override;
+ void VisitInstanceOf(HInstanceOf* check) override;
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
+ void VisitLoadException(HLoadException* load) override;
+ void VisitNeg(HNeg* instruction) override;
+ void VisitPackedSwitch(HPackedSwitch* instruction) override;
+ void VisitReturn(HReturn* ret) override;
+ void VisitReturnVoid(HReturnVoid* ret) override;
+ void VisitSelect(HSelect* instruction) override;
+ void VisitTryBoundary(HTryBoundary* try_boundary) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
void CheckTypeCheckBitstringInput(HTypeCheckInstruction* check,
size_t input_pos,
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index d65ad40565..31db8c205f 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -333,7 +333,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
return output_;
}
- void VisitParallelMove(HParallelMove* instruction) OVERRIDE {
+ void VisitParallelMove(HParallelMove* instruction) override {
StartAttributeStream("liveness") << instruction->GetLifetimePosition();
StringList moves;
for (size_t i = 0, e = instruction->NumMoves(); i < e; ++i) {
@@ -346,36 +346,36 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("moves") << moves;
}
- void VisitIntConstant(HIntConstant* instruction) OVERRIDE {
+ void VisitIntConstant(HIntConstant* instruction) override {
StartAttributeStream() << instruction->GetValue();
}
- void VisitLongConstant(HLongConstant* instruction) OVERRIDE {
+ void VisitLongConstant(HLongConstant* instruction) override {
StartAttributeStream() << instruction->GetValue();
}
- void VisitFloatConstant(HFloatConstant* instruction) OVERRIDE {
+ void VisitFloatConstant(HFloatConstant* instruction) override {
StartAttributeStream() << instruction->GetValue();
}
- void VisitDoubleConstant(HDoubleConstant* instruction) OVERRIDE {
+ void VisitDoubleConstant(HDoubleConstant* instruction) override {
StartAttributeStream() << instruction->GetValue();
}
- void VisitPhi(HPhi* phi) OVERRIDE {
+ void VisitPhi(HPhi* phi) override {
StartAttributeStream("reg") << phi->GetRegNumber();
StartAttributeStream("is_catch_phi") << std::boolalpha << phi->IsCatchPhi() << std::noboolalpha;
}
- void VisitMemoryBarrier(HMemoryBarrier* barrier) OVERRIDE {
+ void VisitMemoryBarrier(HMemoryBarrier* barrier) override {
StartAttributeStream("kind") << barrier->GetBarrierKind();
}
- void VisitMonitorOperation(HMonitorOperation* monitor) OVERRIDE {
+ void VisitMonitorOperation(HMonitorOperation* monitor) override {
StartAttributeStream("kind") << (monitor->IsEnter() ? "enter" : "exit");
}
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+ void VisitLoadClass(HLoadClass* load_class) override {
StartAttributeStream("load_kind") << load_class->GetLoadKind();
const char* descriptor = load_class->GetDexFile().GetTypeDescriptor(
load_class->GetDexFile().GetTypeId(load_class->GetTypeIndex()));
@@ -386,19 +386,19 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
<< load_class->NeedsAccessCheck() << std::noboolalpha;
}
- void VisitLoadMethodHandle(HLoadMethodHandle* load_method_handle) OVERRIDE {
+ void VisitLoadMethodHandle(HLoadMethodHandle* load_method_handle) override {
StartAttributeStream("load_kind") << "RuntimeCall";
StartAttributeStream("method_handle_index") << load_method_handle->GetMethodHandleIndex();
}
- void VisitLoadMethodType(HLoadMethodType* load_method_type) OVERRIDE {
+ void VisitLoadMethodType(HLoadMethodType* load_method_type) override {
StartAttributeStream("load_kind") << "RuntimeCall";
const DexFile& dex_file = load_method_type->GetDexFile();
const DexFile::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
StartAttributeStream("method_type") << dex_file.GetProtoSignature(proto_id);
}
- void VisitLoadString(HLoadString* load_string) OVERRIDE {
+ void VisitLoadString(HLoadString* load_string) override {
StartAttributeStream("load_kind") << load_string->GetLoadKind();
}
@@ -413,15 +413,15 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
}
}
- void VisitCheckCast(HCheckCast* check_cast) OVERRIDE {
+ void VisitCheckCast(HCheckCast* check_cast) override {
HandleTypeCheckInstruction(check_cast);
}
- void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE {
+ void VisitInstanceOf(HInstanceOf* instance_of) override {
HandleTypeCheckInstruction(instance_of);
}
- void VisitArrayLength(HArrayLength* array_length) OVERRIDE {
+ void VisitArrayLength(HArrayLength* array_length) override {
StartAttributeStream("is_string_length") << std::boolalpha
<< array_length->IsStringLength() << std::noboolalpha;
if (array_length->IsEmittedAtUseSite()) {
@@ -429,31 +429,31 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
}
}
- void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
+ void VisitBoundsCheck(HBoundsCheck* bounds_check) override {
StartAttributeStream("is_string_char_at") << std::boolalpha
<< bounds_check->IsStringCharAt() << std::noboolalpha;
}
- void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
+ void VisitArrayGet(HArrayGet* array_get) override {
StartAttributeStream("is_string_char_at") << std::boolalpha
<< array_get->IsStringCharAt() << std::noboolalpha;
}
- void VisitArraySet(HArraySet* array_set) OVERRIDE {
+ void VisitArraySet(HArraySet* array_set) override {
StartAttributeStream("value_can_be_null") << std::boolalpha
<< array_set->GetValueCanBeNull() << std::noboolalpha;
StartAttributeStream("needs_type_check") << std::boolalpha
<< array_set->NeedsTypeCheck() << std::noboolalpha;
}
- void VisitCompare(HCompare* compare) OVERRIDE {
+ void VisitCompare(HCompare* compare) override {
ComparisonBias bias = compare->GetBias();
StartAttributeStream("bias") << (bias == ComparisonBias::kGtBias
? "gt"
: (bias == ComparisonBias::kLtBias ? "lt" : "none"));
}
- void VisitInvoke(HInvoke* invoke) OVERRIDE {
+ void VisitInvoke(HInvoke* invoke) override {
StartAttributeStream("dex_file_index") << invoke->GetDexMethodIndex();
ArtMethod* method = invoke->GetResolvedMethod();
// We don't print signatures, which conflict with c1visualizer format.
@@ -470,12 +470,12 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
<< std::noboolalpha;
}
- void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
+ void VisitInvokeUnresolved(HInvokeUnresolved* invoke) override {
VisitInvoke(invoke);
StartAttributeStream("invoke_type") << invoke->GetInvokeType();
}
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
VisitInvoke(invoke);
StartAttributeStream("method_load_kind") << invoke->GetMethodLoadKind();
StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
@@ -484,96 +484,96 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
}
}
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
VisitInvoke(invoke);
StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
}
- void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ void VisitInvokePolymorphic(HInvokePolymorphic* invoke) override {
VisitInvoke(invoke);
StartAttributeStream("invoke_type") << "InvokePolymorphic";
}
- void VisitInstanceFieldGet(HInstanceFieldGet* iget) OVERRIDE {
+ void VisitInstanceFieldGet(HInstanceFieldGet* iget) override {
StartAttributeStream("field_name") <<
iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(),
/* with type */ false);
StartAttributeStream("field_type") << iget->GetFieldType();
}
- void VisitInstanceFieldSet(HInstanceFieldSet* iset) OVERRIDE {
+ void VisitInstanceFieldSet(HInstanceFieldSet* iset) override {
StartAttributeStream("field_name") <<
iset->GetFieldInfo().GetDexFile().PrettyField(iset->GetFieldInfo().GetFieldIndex(),
/* with type */ false);
StartAttributeStream("field_type") << iset->GetFieldType();
}
- void VisitStaticFieldGet(HStaticFieldGet* sget) OVERRIDE {
+ void VisitStaticFieldGet(HStaticFieldGet* sget) override {
StartAttributeStream("field_name") <<
sget->GetFieldInfo().GetDexFile().PrettyField(sget->GetFieldInfo().GetFieldIndex(),
/* with type */ false);
StartAttributeStream("field_type") << sget->GetFieldType();
}
- void VisitStaticFieldSet(HStaticFieldSet* sset) OVERRIDE {
+ void VisitStaticFieldSet(HStaticFieldSet* sset) override {
StartAttributeStream("field_name") <<
sset->GetFieldInfo().GetDexFile().PrettyField(sset->GetFieldInfo().GetFieldIndex(),
/* with type */ false);
StartAttributeStream("field_type") << sset->GetFieldType();
}
- void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) OVERRIDE {
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) override {
StartAttributeStream("field_type") << field_access->GetFieldType();
}
- void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) OVERRIDE {
+ void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) override {
StartAttributeStream("field_type") << field_access->GetFieldType();
}
- void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) OVERRIDE {
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) override {
StartAttributeStream("field_type") << field_access->GetFieldType();
}
- void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) OVERRIDE {
+ void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) override {
StartAttributeStream("field_type") << field_access->GetFieldType();
}
- void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE {
+ void VisitTryBoundary(HTryBoundary* try_boundary) override {
StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
}
- void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE {
+ void VisitDeoptimize(HDeoptimize* deoptimize) override {
StartAttributeStream("kind") << deoptimize->GetKind();
}
- void VisitVecOperation(HVecOperation* vec_operation) OVERRIDE {
+ void VisitVecOperation(HVecOperation* vec_operation) override {
StartAttributeStream("packed_type") << vec_operation->GetPackedType();
}
- void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) OVERRIDE {
+ void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) override {
StartAttributeStream("alignment") << vec_mem_operation->GetAlignment().ToString();
}
- void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
+ void VisitVecHalvingAdd(HVecHalvingAdd* hadd) override {
VisitVecBinaryOperation(hadd);
StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha;
}
- void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE {
+ void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) override {
VisitVecOperation(instruction);
StartAttributeStream("kind") << instruction->GetOpKind();
}
#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
- void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) OVERRIDE {
+ void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) override {
StartAttributeStream("kind") << instruction->GetOpKind();
}
- void VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) OVERRIDE {
+ void VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) override {
StartAttributeStream("kind") << instruction->GetOpKind();
}
- void VisitDataProcWithShifterOp(HDataProcWithShifterOp* instruction) OVERRIDE {
+ void VisitDataProcWithShifterOp(HDataProcWithShifterOp* instruction) override {
StartAttributeStream("kind") << instruction->GetInstrKind() << "+" << instruction->GetOpKind();
if (HDataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())) {
StartAttributeStream("shift") << instruction->GetShiftAmount();
@@ -814,7 +814,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
Flush();
}
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
StartTag("block");
PrintProperty("name", "B", block->GetBlockId());
if (block->GetLifetimeStart() != kNoLifetime) {
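For context on the mechanical OVERRIDE -> override rename running through this patch: a minimal standalone sketch (not ART code; all names below are invented for illustration) of what the C++11 override specifier buys over a macro that may expand to nothing. With override, the compiler rejects any declaration that does not actually override a base-class virtual, instead of silently introducing a new overload.

#include <cstdio>

struct Visitor {
  virtual void VisitBlock(int id) { std::printf("base %d\n", id); }
  virtual ~Visitor() = default;
};

struct PrintingVisitor : Visitor {
  // OK: the signature matches the base-class virtual exactly.
  void VisitBlock(int id) override { std::printf("derived %d\n", id); }

  // Uncommenting the next line is a hard compile error with `override`,
  // but would silently declare an unrelated overload if OVERRIDE were a no-op macro:
  // void VisitBlock(long id) override;
};

int main() {
  PrintingVisitor v;
  Visitor* base = &v;
  base->VisitBlock(1);  // virtual dispatch reaches the derived override
  return 0;
}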
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index 75cfff2140..bbf2265e98 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -31,7 +31,7 @@ class GVNOptimization : public HOptimization {
const char* pass_name = kGlobalValueNumberingPassName)
: HOptimization(graph, pass_name), side_effects_(side_effects) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kGlobalValueNumberingPassName = "GVN";
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 89fed2ec64..a48aa90059 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -37,7 +37,7 @@ class HInductionVarAnalysis : public HOptimization {
public:
explicit HInductionVarAnalysis(HGraph* graph, const char* name = kInductionPassName);
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kInductionPassName = "induction_var_analysis";
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 2fdf6a1306..6fd0c204b2 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -60,7 +60,7 @@ class HInliner : public HOptimization {
handles_(handles),
inline_stats_(nullptr) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kInlinerPassName = "inliner";
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index f493b66cfd..2757f7b719 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -66,44 +66,44 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
bool TryCombineVecMultiplyAccumulate(HVecMul* mul);
void VisitShift(HBinaryOperation* shift);
- void VisitEqual(HEqual* equal) OVERRIDE;
- void VisitNotEqual(HNotEqual* equal) OVERRIDE;
- void VisitBooleanNot(HBooleanNot* bool_not) OVERRIDE;
- void VisitInstanceFieldSet(HInstanceFieldSet* equal) OVERRIDE;
- void VisitStaticFieldSet(HStaticFieldSet* equal) OVERRIDE;
- void VisitArraySet(HArraySet* equal) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
- void VisitNullCheck(HNullCheck* instruction) OVERRIDE;
- void VisitArrayLength(HArrayLength* instruction) OVERRIDE;
- void VisitCheckCast(HCheckCast* instruction) OVERRIDE;
- void VisitAbs(HAbs* instruction) OVERRIDE;
- void VisitAdd(HAdd* instruction) OVERRIDE;
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitCondition(HCondition* instruction) OVERRIDE;
- void VisitGreaterThan(HGreaterThan* condition) OVERRIDE;
- void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) OVERRIDE;
- void VisitLessThan(HLessThan* condition) OVERRIDE;
- void VisitLessThanOrEqual(HLessThanOrEqual* condition) OVERRIDE;
- void VisitBelow(HBelow* condition) OVERRIDE;
- void VisitBelowOrEqual(HBelowOrEqual* condition) OVERRIDE;
- void VisitAbove(HAbove* condition) OVERRIDE;
- void VisitAboveOrEqual(HAboveOrEqual* condition) OVERRIDE;
- void VisitDiv(HDiv* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitNeg(HNeg* instruction) OVERRIDE;
- void VisitNot(HNot* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitSub(HSub* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
- void VisitXor(HXor* instruction) OVERRIDE;
- void VisitSelect(HSelect* select) OVERRIDE;
- void VisitIf(HIf* instruction) OVERRIDE;
- void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
- void VisitInvoke(HInvoke* invoke) OVERRIDE;
- void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE;
- void VisitVecMul(HVecMul* instruction) OVERRIDE;
+ void VisitEqual(HEqual* equal) override;
+ void VisitNotEqual(HNotEqual* equal) override;
+ void VisitBooleanNot(HBooleanNot* bool_not) override;
+ void VisitInstanceFieldSet(HInstanceFieldSet* equal) override;
+ void VisitStaticFieldSet(HStaticFieldSet* equal) override;
+ void VisitArraySet(HArraySet* equal) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
+ void VisitNullCheck(HNullCheck* instruction) override;
+ void VisitArrayLength(HArrayLength* instruction) override;
+ void VisitCheckCast(HCheckCast* instruction) override;
+ void VisitAbs(HAbs* instruction) override;
+ void VisitAdd(HAdd* instruction) override;
+ void VisitAnd(HAnd* instruction) override;
+ void VisitCondition(HCondition* instruction) override;
+ void VisitGreaterThan(HGreaterThan* condition) override;
+ void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) override;
+ void VisitLessThan(HLessThan* condition) override;
+ void VisitLessThanOrEqual(HLessThanOrEqual* condition) override;
+ void VisitBelow(HBelow* condition) override;
+ void VisitBelowOrEqual(HBelowOrEqual* condition) override;
+ void VisitAbove(HAbove* condition) override;
+ void VisitAboveOrEqual(HAboveOrEqual* condition) override;
+ void VisitDiv(HDiv* instruction) override;
+ void VisitMul(HMul* instruction) override;
+ void VisitNeg(HNeg* instruction) override;
+ void VisitNot(HNot* instruction) override;
+ void VisitOr(HOr* instruction) override;
+ void VisitShl(HShl* instruction) override;
+ void VisitShr(HShr* instruction) override;
+ void VisitSub(HSub* instruction) override;
+ void VisitUShr(HUShr* instruction) override;
+ void VisitXor(HXor* instruction) override;
+ void VisitSelect(HSelect* select) override;
+ void VisitIf(HIf* instruction) override;
+ void VisitInstanceOf(HInstanceOf* instruction) override;
+ void VisitInvoke(HInvoke* invoke) override;
+ void VisitDeoptimize(HDeoptimize* deoptimize) override;
+ void VisitVecMul(HVecMul* instruction) override;
bool CanEnsureNotNullAt(HInstruction* instr, HInstruction* at) const;
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index 2d134e0067..982a24a6f0 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -46,7 +46,7 @@ class InstructionSimplifier : public HOptimization {
static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
- bool Run() OVERRIDE;
+ bool Run() override;
private:
CodeGenerator* codegen_;
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 37fcdb9d5c..24fbb6cb4c 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -56,7 +56,7 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
* (2) Since statements can be removed in a "forward" fashion,
* the visitor should test if each statement is still there.
*/
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
// TODO: fragile iteration, provide more robust iterators?
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
@@ -66,15 +66,15 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
}
}
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
+ void VisitAnd(HAnd* instruction) override;
+ void VisitArrayGet(HArrayGet* instruction) override;
+ void VisitArraySet(HArraySet* instruction) override;
+ void VisitMul(HMul* instruction) override;
+ void VisitOr(HOr* instruction) override;
+ void VisitShl(HShl* instruction) override;
+ void VisitShr(HShr* instruction) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
+ void VisitUShr(HUShr* instruction) override;
OptimizingCompilerStats* stats_;
};
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index f1a16efc61..fca9341d59 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -30,7 +30,7 @@ class InstructionSimplifierArm : public HOptimization {
static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
- bool Run() OVERRIDE;
+ bool Run() override;
};
} // namespace arm
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index e0a627994d..b536cb4dc4 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -58,7 +58,7 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
* (2) Since statements can be removed in a "forward" fashion,
* the visitor should test if each statement is still there.
*/
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
// TODO: fragile iteration, provide more robust iterators?
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
@@ -69,18 +69,18 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
}
// HInstruction visitors, sorted alphabetically.
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
- void VisitXor(HXor* instruction) OVERRIDE;
- void VisitVecLoad(HVecLoad* instruction) OVERRIDE;
- void VisitVecStore(HVecStore* instruction) OVERRIDE;
+ void VisitAnd(HAnd* instruction) override;
+ void VisitArrayGet(HArrayGet* instruction) override;
+ void VisitArraySet(HArraySet* instruction) override;
+ void VisitMul(HMul* instruction) override;
+ void VisitOr(HOr* instruction) override;
+ void VisitShl(HShl* instruction) override;
+ void VisitShr(HShr* instruction) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
+ void VisitUShr(HUShr* instruction) override;
+ void VisitXor(HXor* instruction) override;
+ void VisitVecLoad(HVecLoad* instruction) override;
+ void VisitVecStore(HVecStore* instruction) override;
OptimizingCompilerStats* stats_;
};
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 8659c1f5f4..8d93c01ebf 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -30,7 +30,7 @@ class InstructionSimplifierArm64 : public HOptimization {
static constexpr const char* kInstructionSimplifierArm64PassName = "instruction_simplifier_arm64";
- bool Run() OVERRIDE;
+ bool Run() override;
};
} // namespace arm64
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
index 3bdf90f652..5d0c63b76b 100644
--- a/compiler/optimizing/instruction_simplifier_mips.cc
+++ b/compiler/optimizing/instruction_simplifier_mips.cc
@@ -39,8 +39,8 @@ class InstructionSimplifierMipsVisitor : public HGraphVisitor {
bool TryExtractArrayAccessIndex(HInstruction* access,
HInstruction* index,
DataType::Type packed_type);
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
+ void VisitArrayGet(HArrayGet* instruction) override;
+ void VisitArraySet(HArraySet* instruction) override;
OptimizingCompilerStats* stats_;
CodeGeneratorMIPS* codegen_;
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
index 94ef73d425..b431334811 100644
--- a/compiler/optimizing/instruction_simplifier_mips.h
+++ b/compiler/optimizing/instruction_simplifier_mips.h
@@ -35,7 +35,7 @@ class InstructionSimplifierMips : public HOptimization {
static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
- bool Run() OVERRIDE;
+ bool Run() override;
private:
CodeGeneratorMIPS* codegen_;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 993648f765..06e2fbb355 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -42,7 +42,7 @@ class IntrinsicsRecognizer : public HOptimization {
const char* name = kIntrinsicsRecognizerPassName)
: HOptimization(graph, name, stats) {}
- bool Run() OVERRIDE;
+ bool Run() override;
// Static helper that recognizes intrinsic call. Returns true on success.
// If it fails due to invoke type mismatch, wrong_invoke_type is set.
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index a657b5818f..1abfcb022b 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -112,7 +112,7 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
explicit IntrinsicSlowPathARM64(HInvoke* invoke)
: SlowPathCodeARM64(invoke), invoke_(invoke) { }
- void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen_in) override {
CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
__ Bind(GetEntryLabel());
@@ -145,7 +145,7 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathARM64"; }
+ const char* GetDescription() const override { return "IntrinsicSlowPathARM64"; }
private:
// The instruction where this slow path is happening.
@@ -163,7 +163,7 @@ class ReadBarrierSystemArrayCopySlowPathARM64 : public SlowPathCodeARM64 {
DCHECK(kUseBakerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen_in) override {
CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
@@ -216,7 +216,7 @@ class ReadBarrierSystemArrayCopySlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathARM64"; }
+ const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathARM64"; }
private:
Location tmp_;
@@ -1006,9 +1006,9 @@ class BakerReadBarrierCasSlowPathARM64 : public SlowPathCodeARM64 {
explicit BakerReadBarrierCasSlowPathARM64(HInvoke* invoke)
: SlowPathCodeARM64(invoke) {}
- const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARM64"; }
+ const char* GetDescription() const override { return "BakerReadBarrierCasSlowPathARM64"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
Arm64Assembler* assembler = arm64_codegen->GetAssembler();
MacroAssembler* masm = assembler->GetVIXLAssembler();
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 033a644f34..9c46efddec 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -37,7 +37,7 @@ namespace arm64 {
class CodeGeneratorARM64;
-class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderARM64 final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
: allocator_(allocator), codegen_(codegen) {}
@@ -45,7 +45,7 @@ class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -63,14 +63,14 @@ class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
};
-class IntrinsicCodeGeneratorARM64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorARM64 final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorARM64(CodeGeneratorARM64* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
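The Visit ## Name declarations above are stamped out with an X-macro: intrinsics_list.h supplies one list macro, and each class expands it with its own per-entry macro. A minimal standalone sketch of the pattern, with a made-up MY_LIST and a single macro parameter standing in for ART's much longer INTRINSICS_LIST and its extra arguments:

#include <cstdio>

// One list macro; each entry is passed to a caller-supplied macro V.
#define MY_LIST(V) \
  V(FloatAbs)      \
  V(MathSqrt)

struct Visitor {
  // Generate one Visit##Name method per list entry. In the real code these
  // are declarations marked `override`; here they are defined inline.
#define DECLARE_VISIT(Name) \
  virtual void Visit##Name() { std::printf(#Name "\n"); }
  MY_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
  virtual ~Visitor() = default;
};

int main() {
  Visitor v;
  v.VisitFloatAbs();   // generated by the macro expansion
  v.VisitMathSqrt();
  return 0;
}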
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 74a779d9e2..1127fb8191 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -85,7 +85,7 @@ class IntrinsicSlowPathARMVIXL : public SlowPathCodeARMVIXL {
return calling_convention_visitor.GetMethodLocation();
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
ArmVIXLAssembler* assembler = down_cast<ArmVIXLAssembler*>(codegen->GetAssembler());
__ Bind(GetEntryLabel());
@@ -111,7 +111,7 @@ class IntrinsicSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; }
+ const char* GetDescription() const override { return "IntrinsicSlowPath"; }
private:
// The instruction where this slow path is happening.
@@ -173,7 +173,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK(kUseBakerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
LocationSummary* locations = instruction_->GetLocations();
@@ -233,7 +233,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierSystemArrayCopySlowPathARMVIXL";
}
@@ -969,9 +969,9 @@ class BakerReadBarrierCasSlowPathARMVIXL : public SlowPathCodeARMVIXL {
explicit BakerReadBarrierCasSlowPathARMVIXL(HInvoke* invoke)
: SlowPathCodeARMVIXL(invoke) {}
- const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "BakerReadBarrierCasSlowPathARMVIXL"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
__ Bind(GetEntryLabel());
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 9c02d0a4ad..1fea776f0d 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -27,14 +27,14 @@ namespace arm {
class ArmVIXLAssembler;
class CodeGeneratorARMVIXL;
-class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderARMVIXL final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen);
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -54,14 +54,14 @@ class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARMVIXL);
};
-class IntrinsicCodeGeneratorARMVIXL FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorARMVIXL final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorARMVIXL(CodeGeneratorARMVIXL* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 01d9f962f2..771714bf41 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -108,7 +108,7 @@ class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS {
public:
explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : SlowPathCodeMIPS(invoke), invoke_(invoke) { }
- void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen_in) override {
CodeGeneratorMIPS* codegen = down_cast<CodeGeneratorMIPS*>(codegen_in);
__ Bind(GetEntryLabel());
@@ -137,7 +137,7 @@ class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS"; }
+ const char* GetDescription() const override { return "IntrinsicSlowPathMIPS"; }
private:
// The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index 1c1ba40132..08d4e82139 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -30,14 +30,14 @@ namespace mips {
class CodeGeneratorMIPS;
class MipsAssembler;
-class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderMIPS final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen);
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
};
-class IntrinsicCodeGeneratorMIPS FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorMIPS final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorMIPS(CodeGeneratorMIPS* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 0bd69c6ec8..4a1bd5b7b2 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -97,7 +97,7 @@ class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
explicit IntrinsicSlowPathMIPS64(HInvoke* invoke)
: SlowPathCodeMIPS64(invoke), invoke_(invoke) { }
- void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen_in) override {
CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);
__ Bind(GetEntryLabel());
@@ -126,7 +126,7 @@ class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "IntrinsicSlowPathMIPS64"; }
private:
// The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 748b0b02b2..ca8bc8f55a 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -30,14 +30,14 @@ namespace mips64 {
class CodeGeneratorMIPS64;
class Mips64Assembler;
-class IntrinsicLocationsBuilderMIPS64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderMIPS64 final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen);
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderMIPS64 FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
};
-class IntrinsicCodeGeneratorMIPS64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorMIPS64 final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorMIPS64(CodeGeneratorMIPS64* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h
index 8c69d9b643..41947f1ccd 100644
--- a/compiler/optimizing/intrinsics_utils.h
+++ b/compiler/optimizing/intrinsics_utils.h
@@ -47,7 +47,7 @@ class IntrinsicSlowPath : public SlowPathCode {
return calling_convention_visitor.GetMethodLocation();
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
Assembler* assembler = codegen->GetAssembler();
assembler->Bind(GetEntryLabel());
@@ -73,7 +73,7 @@ class IntrinsicSlowPath : public SlowPathCode {
assembler->Jump(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; }
+ const char* GetDescription() const override { return "IntrinsicSlowPath"; }
private:
// The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 5c7be54037..d33c0c344e 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -82,7 +82,7 @@ class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode {
DCHECK(kUseBakerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
@@ -160,7 +160,7 @@ class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86"; }
+ const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86);
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index e3555e78fc..ae150dad43 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -30,14 +30,14 @@ namespace x86 {
class CodeGeneratorX86;
class X86Assembler;
-class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderX86 final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen);
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
};
-class IntrinsicCodeGeneratorX86 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorX86 final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorX86(CodeGeneratorX86* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index b5afe931ff..ae889744ad 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -80,7 +80,7 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode {
DCHECK(kUseBakerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
@@ -118,7 +118,7 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86_64"; }
+ const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86_64);
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index 5cb601edfe..199cfede1a 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -30,14 +30,14 @@ namespace x86_64 {
class CodeGeneratorX86_64;
class X86_64Assembler;
-class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderX86_64 final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderX86_64(CodeGeneratorX86_64* codegen);
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64);
};
-class IntrinsicCodeGeneratorX86_64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorX86_64 final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorX86_64(CodeGeneratorX86_64* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
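Alongside OVERRIDE -> override, the intrinsic builder and codegen classes above drop the FINAL macro for the C++11 final specifier. A standalone sketch (invented names, not ART code) of what final on a class provides: further derivation is rejected at compile time, and the compiler may devirtualize calls made through a pointer of the exact type, since no subclass can override anything.

struct IntrinsicVisitorBase {
  virtual int Visit() { return 0; }
  virtual ~IntrinsicVisitorBase() = default;
};

struct ConcreteVisitor final : IntrinsicVisitorBase {
  int Visit() override { return 42; }
};

// struct Broken : ConcreteVisitor {};  // error: cannot derive from a 'final' class

int main() {
  ConcreteVisitor v;
  // Through a ConcreteVisitor*, Visit() can be called directly (devirtualized),
  // because 'final' guarantees no subclass exists to override it.
  return v.Visit() == 42 ? 0 : 1;
}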
diff --git a/compiler/optimizing/licm.h b/compiler/optimizing/licm.h
index f72d195ab2..9cafddb05a 100644
--- a/compiler/optimizing/licm.h
+++ b/compiler/optimizing/licm.h
@@ -33,7 +33,7 @@ class LICM : public HOptimization {
: HOptimization(graph, name, stats),
side_effects_(side_effects) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kLoopInvariantCodeMotionPassName = "licm";
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 769a3f1b59..08d9309a3e 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -492,12 +492,12 @@ class HeapLocationCollector : public HGraphVisitor {
HeapLocation::kDeclaringClassDefIndexForArrays);
}
- void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+ void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override {
VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
CreateReferenceInfoForReferenceType(instruction);
}
- void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+ void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
HeapLocation* location = VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
has_heap_stores_ = true;
if (location->GetReferenceInfo()->IsSingleton()) {
@@ -523,12 +523,12 @@ class HeapLocationCollector : public HGraphVisitor {
}
}
- void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+ void VisitStaticFieldGet(HStaticFieldGet* instruction) override {
VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
CreateReferenceInfoForReferenceType(instruction);
}
- void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+ void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
has_heap_stores_ = true;
}
@@ -536,7 +536,7 @@ class HeapLocationCollector : public HGraphVisitor {
// We intentionally don't collect HUnresolvedInstanceField/HUnresolvedStaticField accesses
// since we cannot accurately track the fields.
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+ void VisitArrayGet(HArrayGet* instruction) override {
HInstruction* array = instruction->InputAt(0);
HInstruction* index = instruction->InputAt(1);
DataType::Type type = instruction->GetType();
@@ -544,7 +544,7 @@ class HeapLocationCollector : public HGraphVisitor {
CreateReferenceInfoForReferenceType(instruction);
}
- void VisitArraySet(HArraySet* instruction) OVERRIDE {
+ void VisitArraySet(HArraySet* instruction) override {
HInstruction* array = instruction->InputAt(0);
HInstruction* index = instruction->InputAt(1);
DataType::Type type = instruction->GetComponentType();
@@ -552,7 +552,7 @@ class HeapLocationCollector : public HGraphVisitor {
has_heap_stores_ = true;
}
- void VisitVecLoad(HVecLoad* instruction) OVERRIDE {
+ void VisitVecLoad(HVecLoad* instruction) override {
HInstruction* array = instruction->InputAt(0);
HInstruction* index = instruction->InputAt(1);
DataType::Type type = instruction->GetPackedType();
@@ -560,7 +560,7 @@ class HeapLocationCollector : public HGraphVisitor {
CreateReferenceInfoForReferenceType(instruction);
}
- void VisitVecStore(HVecStore* instruction) OVERRIDE {
+ void VisitVecStore(HVecStore* instruction) override {
HInstruction* array = instruction->InputAt(0);
HInstruction* index = instruction->InputAt(1);
DataType::Type type = instruction->GetPackedType();
@@ -568,7 +568,7 @@ class HeapLocationCollector : public HGraphVisitor {
has_heap_stores_ = true;
}
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
// Any new-instance or new-array cannot alias with references that
// pre-exist the new-instance/new-array. We append entries into
// ref_info_array_ which keeps track of the order of creation
@@ -580,7 +580,7 @@ class HeapLocationCollector : public HGraphVisitor {
CreateReferenceInfoForReferenceType(instruction);
}
- void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE {
+ void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) override {
has_monitor_operations_ = true;
}
@@ -605,7 +605,7 @@ class LoadStoreAnalysis : public HOptimization {
return heap_location_collector_;
}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kLoadStoreAnalysisPassName = "load_store_analysis";
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 28ac94273c..7f71745a43 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -107,7 +107,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)) {
}
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
// Populate the heap_values array for this block.
// TODO: try to reuse the heap_values array from one predecessor if possible.
if (block->IsLoopHeader()) {
@@ -656,13 +656,13 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
- void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+ void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override {
HInstruction* object = instruction->InputAt(0);
const FieldInfo& field = instruction->GetFieldInfo();
VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(object, &field));
}
- void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+ void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
HInstruction* object = instruction->InputAt(0);
const FieldInfo& field = instruction->GetFieldInfo();
HInstruction* value = instruction->InputAt(1);
@@ -670,24 +670,24 @@ class LSEVisitor : public HGraphDelegateVisitor {
VisitSetLocation(instruction, idx, value);
}
- void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+ void VisitStaticFieldGet(HStaticFieldGet* instruction) override {
HInstruction* cls = instruction->InputAt(0);
const FieldInfo& field = instruction->GetFieldInfo();
VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(cls, &field));
}
- void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+ void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
HInstruction* cls = instruction->InputAt(0);
const FieldInfo& field = instruction->GetFieldInfo();
size_t idx = heap_location_collector_.GetFieldHeapLocation(cls, &field);
VisitSetLocation(instruction, idx, instruction->InputAt(1));
}
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+ void VisitArrayGet(HArrayGet* instruction) override {
VisitGetLocation(instruction, heap_location_collector_.GetArrayHeapLocation(instruction));
}
- void VisitArraySet(HArraySet* instruction) OVERRIDE {
+ void VisitArraySet(HArraySet* instruction) override {
size_t idx = heap_location_collector_.GetArrayHeapLocation(instruction);
VisitSetLocation(instruction, idx, instruction->InputAt(2));
}
@@ -743,15 +743,15 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
- void VisitReturn(HReturn* instruction) OVERRIDE {
+ void VisitReturn(HReturn* instruction) override {
HandleExit(instruction->GetBlock());
}
- void VisitReturnVoid(HReturnVoid* return_void) OVERRIDE {
+ void VisitReturnVoid(HReturnVoid* return_void) override {
HandleExit(return_void->GetBlock());
}
- void VisitThrow(HThrow* throw_instruction) OVERRIDE {
+ void VisitThrow(HThrow* throw_instruction) override {
HandleExit(throw_instruction->GetBlock());
}
@@ -777,35 +777,35 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
- void VisitInvoke(HInvoke* invoke) OVERRIDE {
+ void VisitInvoke(HInvoke* invoke) override {
HandleInvoke(invoke);
}
- void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
+ void VisitClinitCheck(HClinitCheck* clinit) override {
HandleInvoke(clinit);
}
- void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE {
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE {
+ void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE {
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE {
+ void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
+ void VisitNewInstance(HNewInstance* new_instance) override {
ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_instance);
if (ref_info == nullptr) {
// new_instance isn't used for field accesses. No need to process it.
@@ -829,7 +829,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
- void VisitNewArray(HNewArray* new_array) OVERRIDE {
+ void VisitNewArray(HNewArray* new_array) override {
ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_array);
if (ref_info == nullptr) {
// new_array isn't used for array accesses. No need to process it.
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
index 408386bd82..f7ba41a1af 100644
--- a/compiler/optimizing/load_store_elimination.h
+++ b/compiler/optimizing/load_store_elimination.h
@@ -35,7 +35,7 @@ class LoadStoreElimination : public HOptimization {
side_effects_(side_effects),
lsa_(lsa) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination";
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index d355cedb35..2ae3683ffa 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -87,14 +87,14 @@ class ArchDefaultLoopHelper : public ArchNoOptsLoopHelper {
// Maximum number of instructions to be created as a result of full unrolling.
static constexpr uint32_t kScalarHeuristicFullyUnrolledMaxInstrThreshold = 35;
- bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+ bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const override {
return analysis_info->HasLongTypeInstructions() ||
IsLoopTooBig(analysis_info,
kScalarHeuristicMaxBodySizeInstr,
kScalarHeuristicMaxBodySizeBlocks);
}
- uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const OVERRIDE {
+ uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const override {
int64_t trip_count = analysis_info->GetTripCount();
// Unroll only loops with known trip count.
if (trip_count == LoopAnalysisInfo::kUnknownTripCount) {
@@ -108,9 +108,9 @@ class ArchDefaultLoopHelper : public ArchNoOptsLoopHelper {
return desired_unrolling_factor;
}
- bool IsLoopPeelingEnabled() const OVERRIDE { return true; }
+ bool IsLoopPeelingEnabled() const override { return true; }
- bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+ bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const override {
int64_t trip_count = analysis_info->GetTripCount();
// We assume that trip count is known.
DCHECK_NE(trip_count, LoopAnalysisInfo::kUnknownTripCount);
@@ -144,7 +144,7 @@ class Arm64LoopHelper : public ArchDefaultLoopHelper {
// Loop's maximum basic block count. Loops with higher count will not be peeled/unrolled.
static constexpr uint32_t kArm64ScalarHeuristicMaxBodySizeBlocks = 8;
- bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const OVERRIDE {
+ bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const override {
return IsLoopTooBig(loop_analysis_info,
kArm64ScalarHeuristicMaxBodySizeInstr,
kArm64ScalarHeuristicMaxBodySizeBlocks);
@@ -153,7 +153,7 @@ class Arm64LoopHelper : public ArchDefaultLoopHelper {
uint32_t GetSIMDUnrollingFactor(HBasicBlock* block,
int64_t trip_count,
uint32_t max_peel,
- uint32_t vector_length) const OVERRIDE {
+ uint32_t vector_length) const override {
// Don't unroll with insufficient iterations.
// TODO: Unroll loops with unknown trip count.
DCHECK_NE(vector_length, 0u);
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 644b740ed4..2b202fda75 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -43,7 +43,7 @@ class HLoopOptimization : public HOptimization {
OptimizingCompilerStats* stats,
const char* name = kLoopOptimizationPassName);
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kLoopOptimizationPassName = "loop_optimization";
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d88b0364f5..748e21f510 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1529,12 +1529,12 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
private: \
H##type& operator=(const H##type&) = delete; \
public: \
- const char* DebugName() const OVERRIDE { return #type; } \
- HInstruction* Clone(ArenaAllocator* arena) const OVERRIDE { \
+ const char* DebugName() const override { return #type; } \
+ HInstruction* Clone(ArenaAllocator* arena) const override { \
DCHECK(IsClonable()); \
return new (arena) H##type(*this->As##type()); \
} \
- void Accept(HGraphVisitor* visitor) OVERRIDE
+ void Accept(HGraphVisitor* visitor) override
#define DECLARE_ABSTRACT_INSTRUCTION(type) \
private: \
@@ -2595,7 +2595,7 @@ class HBackwardInstructionIterator : public ValueObject {
class HVariableInputSizeInstruction : public HInstruction {
public:
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
}
@@ -2645,7 +2645,7 @@ class HExpression : public HInstruction {
virtual ~HExpression() {}
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
}
@@ -2667,7 +2667,7 @@ class HExpression<0> : public HInstruction {
virtual ~HExpression() {}
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
return ArrayRef<HUserRecord<HInstruction*>>();
}
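GetInputRecords above combines both specifiers as override final: the method overrides a base-class virtual and simultaneously forbids any further override, while the class itself remains derivable. A minimal sketch under invented names:

struct Node {
  virtual int InputCount() const { return 0; }
  virtual ~Node() = default;
};

struct Expression : Node {
  // Overrides Node::InputCount and seals this one virtual; subclasses of
  // Expression are still allowed, they just cannot override InputCount.
  int InputCount() const override final { return 1; }
};

struct Add : Expression {
  // int InputCount() const override { return 2; }  // error: overriding a 'final' function
};

int main() {
  Add add;
  return add.InputCount() == 1 ? 0 : 1;  // resolves to Expression's final override
}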
@@ -2680,13 +2680,13 @@ class HExpression<0> : public HInstruction {
// Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
// instruction that branches to the exit block.
-class HReturnVoid FINAL : public HExpression<0> {
+class HReturnVoid final : public HExpression<0> {
public:
explicit HReturnVoid(uint32_t dex_pc = kNoDexPc)
: HExpression(kReturnVoid, SideEffects::None(), dex_pc) {
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
DECLARE_INSTRUCTION(ReturnVoid);
@@ -2696,14 +2696,14 @@ class HReturnVoid FINAL : public HExpression<0> {
// Represents dex's RETURN opcodes. A HReturn is a control flow
// instruction that branches to the exit block.
-class HReturn FINAL : public HExpression<1> {
+class HReturn final : public HExpression<1> {
public:
explicit HReturn(HInstruction* value, uint32_t dex_pc = kNoDexPc)
: HExpression(kReturn, SideEffects::None(), dex_pc) {
SetRawInputAt(0, value);
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
DECLARE_INSTRUCTION(Return);
@@ -2711,7 +2711,7 @@ class HReturn FINAL : public HExpression<1> {
DEFAULT_COPY_CONSTRUCTOR(Return);
};
-class HPhi FINAL : public HVariableInputSizeInstruction {
+class HPhi final : public HVariableInputSizeInstruction {
public:
HPhi(ArenaAllocator* allocator,
uint32_t reg_number,
@@ -2735,7 +2735,7 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
SetPackedFlag<kFlagCanBeNull>(true);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
// Returns a type equivalent to the given `type`, but that a `HPhi` can hold.
static DataType::Type ToPhiType(DataType::Type type) {
@@ -2755,7 +2755,7 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
SetPackedField<TypeField>(new_type);
}
- bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+ bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
uint32_t GetRegNumber() const { return reg_number_; }
@@ -2813,13 +2813,13 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
// The exit instruction is the only instruction of the exit block.
// Instructions aborting the method (HThrow and HReturn) must branch to the
// exit block.
-class HExit FINAL : public HExpression<0> {
+class HExit final : public HExpression<0> {
public:
explicit HExit(uint32_t dex_pc = kNoDexPc)
: HExpression(kExit, SideEffects::None(), dex_pc) {
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
DECLARE_INSTRUCTION(Exit);
@@ -2828,14 +2828,14 @@ class HExit FINAL : public HExpression<0> {
};
// Jumps from one block to another.
-class HGoto FINAL : public HExpression<0> {
+class HGoto final : public HExpression<0> {
public:
explicit HGoto(uint32_t dex_pc = kNoDexPc)
: HExpression(kGoto, SideEffects::None(), dex_pc) {
}
- bool IsClonable() const OVERRIDE { return true; }
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool IsControlFlow() const override { return true; }
HBasicBlock* GetSuccessor() const {
return GetBlock()->GetSingleSuccessor();
@@ -2853,7 +2853,7 @@ class HConstant : public HExpression<0> {
: HExpression(kind, type, SideEffects::None(), dex_pc) {
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
// Is this constant -1 in the arithmetic sense?
virtual bool IsMinusOne() const { return false; }
@@ -2872,15 +2872,15 @@ class HConstant : public HExpression<0> {
DEFAULT_COPY_CONSTRUCTOR(Constant);
};
-class HNullConstant FINAL : public HConstant {
+class HNullConstant final : public HConstant {
public:
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- uint64_t GetValueAsUint64() const OVERRIDE { return 0; }
+ uint64_t GetValueAsUint64() const override { return 0; }
- size_t ComputeHashCode() const OVERRIDE { return 0; }
+ size_t ComputeHashCode() const override { return 0; }
// The null constant representation is a 0-bit pattern.
virtual bool IsZeroBitPattern() const { return true; }
@@ -2900,25 +2900,25 @@ class HNullConstant FINAL : public HConstant {
// Constants of the type int. Those can be from Dex instructions, or
// synthesized (for example with the if-eqz instruction).
-class HIntConstant FINAL : public HConstant {
+class HIntConstant final : public HConstant {
public:
int32_t GetValue() const { return value_; }
- uint64_t GetValueAsUint64() const OVERRIDE {
+ uint64_t GetValueAsUint64() const override {
return static_cast<uint64_t>(static_cast<uint32_t>(value_));
}
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsIntConstant()) << other->DebugName();
return other->AsIntConstant()->value_ == value_;
}
- size_t ComputeHashCode() const OVERRIDE { return GetValue(); }
+ size_t ComputeHashCode() const override { return GetValue(); }
- bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
- bool IsArithmeticZero() const OVERRIDE { return GetValue() == 0; }
- bool IsZeroBitPattern() const OVERRIDE { return GetValue() == 0; }
- bool IsOne() const OVERRIDE { return GetValue() == 1; }
+ bool IsMinusOne() const override { return GetValue() == -1; }
+ bool IsArithmeticZero() const override { return GetValue() == 0; }
+ bool IsZeroBitPattern() const override { return GetValue() == 0; }
+ bool IsOne() const override { return GetValue() == 1; }
// Integer constants are used to encode Boolean values as well,
// where 1 means true and 0 means false.
@@ -2946,23 +2946,23 @@ class HIntConstant FINAL : public HConstant {
ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast);
};
-class HLongConstant FINAL : public HConstant {
+class HLongConstant final : public HConstant {
public:
int64_t GetValue() const { return value_; }
- uint64_t GetValueAsUint64() const OVERRIDE { return value_; }
+ uint64_t GetValueAsUint64() const override { return value_; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsLongConstant()) << other->DebugName();
return other->AsLongConstant()->value_ == value_;
}
- size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+ size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
- bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
- bool IsArithmeticZero() const OVERRIDE { return GetValue() == 0; }
- bool IsZeroBitPattern() const OVERRIDE { return GetValue() == 0; }
- bool IsOne() const OVERRIDE { return GetValue() == 1; }
+ bool IsMinusOne() const override { return GetValue() == -1; }
+ bool IsArithmeticZero() const override { return GetValue() == 0; }
+ bool IsZeroBitPattern() const override { return GetValue() == 0; }
+ bool IsOne() const override { return GetValue() == 1; }
DECLARE_INSTRUCTION(LongConstant);
@@ -2980,25 +2980,25 @@ class HLongConstant FINAL : public HConstant {
friend class HGraph;
};
-class HFloatConstant FINAL : public HConstant {
+class HFloatConstant final : public HConstant {
public:
float GetValue() const { return value_; }
- uint64_t GetValueAsUint64() const OVERRIDE {
+ uint64_t GetValueAsUint64() const override {
return static_cast<uint64_t>(bit_cast<uint32_t, float>(value_));
}
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsFloatConstant()) << other->DebugName();
return other->AsFloatConstant()->GetValueAsUint64() == GetValueAsUint64();
}
- size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+ size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
- bool IsMinusOne() const OVERRIDE {
+ bool IsMinusOne() const override {
return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>((-1.0f));
}
- bool IsArithmeticZero() const OVERRIDE {
+ bool IsArithmeticZero() const override {
return std::fpclassify(value_) == FP_ZERO;
}
bool IsArithmeticPositiveZero() const {
@@ -3007,10 +3007,10 @@ class HFloatConstant FINAL : public HConstant {
bool IsArithmeticNegativeZero() const {
return IsArithmeticZero() && std::signbit(value_);
}
- bool IsZeroBitPattern() const OVERRIDE {
+ bool IsZeroBitPattern() const override {
return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(0.0f);
}
- bool IsOne() const OVERRIDE {
+ bool IsOne() const override {
return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(1.0f);
}
bool IsNaN() const {
@@ -3039,23 +3039,23 @@ class HFloatConstant FINAL : public HConstant {
friend class HGraph;
};
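
HFloatConstant above deliberately separates bit-pattern equality from arithmetic equality: InstructionDataEquals() and IsZeroBitPattern() compare raw bits via bit_cast, while IsArithmeticZero() asks std::fpclassify, so -0.0f counts as an arithmetic zero but not as the zero bit pattern (and two NaN constants are "equal" only if their payloads match). A standalone illustration, using memcpy as a stand-in for bit_cast:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Portable stand-in for ART's bit_cast<uint32_t, float>.
static uint32_t FloatBits(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return bits;
}

int main() {
  const float pos_zero = 0.0f;
  const float neg_zero = -0.0f;
  // Both are arithmetic zeros (IsArithmeticZero() would say true for each)...
  assert(std::fpclassify(pos_zero) == FP_ZERO);
  assert(std::fpclassify(neg_zero) == FP_ZERO);
  // ...but only +0.0f has the zero bit pattern checked by IsZeroBitPattern().
  assert(FloatBits(pos_zero) == 0u);
  assert(FloatBits(neg_zero) == 0x80000000u);
  return 0;
}
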
-class HDoubleConstant FINAL : public HConstant {
+class HDoubleConstant final : public HConstant {
public:
double GetValue() const { return value_; }
- uint64_t GetValueAsUint64() const OVERRIDE { return bit_cast<uint64_t, double>(value_); }
+ uint64_t GetValueAsUint64() const override { return bit_cast<uint64_t, double>(value_); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsDoubleConstant()) << other->DebugName();
return other->AsDoubleConstant()->GetValueAsUint64() == GetValueAsUint64();
}
- size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+ size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
- bool IsMinusOne() const OVERRIDE {
+ bool IsMinusOne() const override {
return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((-1.0));
}
- bool IsArithmeticZero() const OVERRIDE {
+ bool IsArithmeticZero() const override {
return std::fpclassify(value_) == FP_ZERO;
}
bool IsArithmeticPositiveZero() const {
@@ -3064,10 +3064,10 @@ class HDoubleConstant FINAL : public HConstant {
bool IsArithmeticNegativeZero() const {
return IsArithmeticZero() && std::signbit(value_);
}
- bool IsZeroBitPattern() const OVERRIDE {
+ bool IsZeroBitPattern() const override {
return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((0.0));
}
- bool IsOne() const OVERRIDE {
+ bool IsOne() const override {
return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>(1.0);
}
bool IsNaN() const {
@@ -3098,15 +3098,15 @@ class HDoubleConstant FINAL : public HConstant {
// Conditional branch. A block ending with an HIf instruction must have
// two successors.
-class HIf FINAL : public HExpression<1> {
+class HIf final : public HExpression<1> {
public:
explicit HIf(HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HExpression(kIf, SideEffects::None(), dex_pc) {
SetRawInputAt(0, input);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool IsControlFlow() const override { return true; }
HBasicBlock* IfTrueSuccessor() const {
return GetBlock()->GetSuccessors()[0];
@@ -3128,7 +3128,7 @@ class HIf FINAL : public HExpression<1> {
// non-exceptional control flow.
// The normal-flow successor is stored at index zero; exception handlers are
// stored at higher indices, in no particular order.
-class HTryBoundary FINAL : public HExpression<0> {
+class HTryBoundary final : public HExpression<0> {
public:
enum class BoundaryKind {
kEntry,
@@ -3141,7 +3141,7 @@ class HTryBoundary FINAL : public HExpression<0> {
SetPackedField<BoundaryKindField>(kind);
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
// Returns the block's non-exceptional successor (index zero).
HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessors()[0]; }
@@ -3187,7 +3187,7 @@ class HTryBoundary FINAL : public HExpression<0> {
};
// Deoptimize to the interpreter upon checking a condition.
-class HDeoptimize FINAL : public HVariableInputSizeInstruction {
+class HDeoptimize final : public HVariableInputSizeInstruction {
public:
// Use this constructor when the `HDeoptimize` acts as a barrier across which
// no code can move.
@@ -3207,7 +3207,7 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
SetRawInputAt(0, cond);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
// Use this constructor when the `HDeoptimize` guards an instruction, and any user
// that relies on the deoptimization to pass should have its input be the `HDeoptimize`
@@ -3233,15 +3233,15 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
SetRawInputAt(1, guard);
}
- bool CanBeMoved() const OVERRIDE { return GetPackedFlag<kFieldCanBeMoved>(); }
+ bool CanBeMoved() const override { return GetPackedFlag<kFieldCanBeMoved>(); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
return (other->CanBeMoved() == CanBeMoved()) && (other->AsDeoptimize()->GetKind() == GetKind());
}
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
DeoptimizationKind GetDeoptimizationKind() const { return GetPackedField<DeoptimizeKindField>(); }
@@ -3281,7 +3281,7 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
// if it is true, deoptimization is started.
// It occupies a 4-byte slot on the stack.
// TODO: allocate a register for this flag.
-class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction {
+class HShouldDeoptimizeFlag final : public HVariableInputSizeInstruction {
public:
// CHA guards are only optimized in a separate pass and have no side effects
// with regard to other passes.
@@ -3299,7 +3299,7 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction {
// further guard elimination/motion, since a guard might have been used to
// justify the elimination of another guard. Therefore, we pretend this guard
// cannot be moved, to keep other optimizations from trying to move it.
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
DECLARE_INSTRUCTION(ShouldDeoptimizeFlag);
@@ -3310,7 +3310,7 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction {
// Represents the ArtMethod that was passed as the first argument to
// the method. It is used by instructions that depend on it, like
// instructions that work with the dex cache.
-class HCurrentMethod FINAL : public HExpression<0> {
+class HCurrentMethod final : public HExpression<0> {
public:
explicit HCurrentMethod(DataType::Type type, uint32_t dex_pc = kNoDexPc)
: HExpression(kCurrentMethod, type, SideEffects::None(), dex_pc) {
@@ -3324,7 +3324,7 @@ class HCurrentMethod FINAL : public HExpression<0> {
// Fetches an ArtMethod from the virtual table or the interface method table
// of a class.
-class HClassTableGet FINAL : public HExpression<1> {
+class HClassTableGet final : public HExpression<1> {
public:
enum class TableKind {
kVTable,
@@ -3342,9 +3342,9 @@ class HClassTableGet FINAL : public HExpression<1> {
SetRawInputAt(0, cls);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other) const override {
return other->AsClassTableGet()->GetIndex() == index_ &&
other->AsClassTableGet()->GetPackedFields() == GetPackedFields();
}
@@ -3373,7 +3373,7 @@ class HClassTableGet FINAL : public HExpression<1> {
// PackedSwitch (jump table). A block ending with a PackedSwitch instruction will
// have one successor for each entry in the switch table, and the final successor
// will be the block containing the next Dex opcode.
-class HPackedSwitch FINAL : public HExpression<1> {
+class HPackedSwitch final : public HExpression<1> {
public:
HPackedSwitch(int32_t start_value,
uint32_t num_entries,
@@ -3385,9 +3385,9 @@ class HPackedSwitch FINAL : public HExpression<1> {
SetRawInputAt(0, input);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
int32_t GetStartValue() const { return start_value_; }
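
Per the comment introducing HPackedSwitch, the ending block's successor list is effectively a dense jump table: entry i handles case value start_value + i, and the last successor is the fall-through block for the next Dex opcode. A small sketch of that lookup (hypothetical helper; the layout is as described above):

#include <cstddef>
#include <cstdint>
#include <vector>

struct BlockStub { int id; };

// successors[0..num_entries-1] are the cases; successors[num_entries] is the
// fall-through (default) block.
BlockStub* ResolvePackedSwitch(int32_t value,
                               int32_t start_value,
                               const std::vector<BlockStub*>& successors) {
  const int64_t num_entries = static_cast<int64_t>(successors.size()) - 1;
  const int64_t index = static_cast<int64_t>(value) - start_value;  // no overflow
  if (index >= 0 && index < num_entries) {
    return successors[static_cast<size_t>(index)];  // matching case
  }
  return successors.back();  // fall-through block
}

int main() {
  BlockStub a{0}, b{1}, def{2};
  std::vector<BlockStub*> succ = {&a, &b, &def};  // start_value 10, two cases
  return ResolvePackedSwitch(11, 10, succ) == &b ? 0 : 1;
}
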
@@ -3418,13 +3418,13 @@ class HUnaryOperation : public HExpression<1> {
}
// All of the UnaryOperation instructions are clonable.
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
HInstruction* GetInput() const { return InputAt(0); }
DataType::Type GetResultType() const { return GetType(); }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
@@ -3459,7 +3459,7 @@ class HBinaryOperation : public HExpression<2> {
}
// All of the BinaryOperation instructions are clonable.
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
HInstruction* GetLeft() const { return InputAt(0); }
HInstruction* GetRight() const { return InputAt(1); }
@@ -3499,8 +3499,8 @@ class HBinaryOperation : public HExpression<2> {
}
}
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
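
CanBeMoved(), ComputeHashCode(), and InstructionDataEquals() are the hooks that common-subexpression passes build on: two movable instructions with the same opcode, the same inputs, and equal instruction data can share one node. A deliberately simplified sketch of that idea (a toy, not ART's actual GVN; hash-collision handling and side-effect tracking are elided):

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>
#include <utility>

// Toy stand-in; a real HInstruction also carries a type, side effects, etc.
struct ToyInsn {
  std::string opcode;
  int lhs_id;
  int rhs_id;
  bool can_be_moved;
  size_t Hash() const {  // mirrors ComputeHashCode()
    return std::hash<std::string>()(opcode) ^
           (static_cast<size_t>(lhs_id) * 31) ^ static_cast<size_t>(rhs_id);
  }
  bool Equals(const ToyInsn& o) const {  // mirrors InstructionDataEquals()
    return opcode == o.opcode && lhs_id == o.lhs_id && rhs_id == o.rhs_id;
  }
};

// Returns the id of an equivalent earlier instruction, or -1 if none.
int TryValueNumber(const ToyInsn& insn, int id,
                   std::unordered_map<size_t, std::pair<ToyInsn, int>>* table) {
  if (!insn.can_be_moved) return -1;  // mirrors CanBeMoved() == false
  auto it = table->find(insn.Hash());
  if (it != table->end() && it->second.first.Equals(insn)) {
    return it->second.second;  // duplicate: reuse the earlier node
  }
  table->emplace(insn.Hash(), std::make_pair(insn, id));
  return -1;
}

int main() {
  std::unordered_map<size_t, std::pair<ToyInsn, int>> table;
  ToyInsn add{"Add", 1, 2, true};
  TryValueNumber(add, /*id=*/10, &table);                       // first sighting
  return TryValueNumber(add, /*id=*/11, &table) == 10 ? 0 : 1;  // folded
}
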
@@ -3581,7 +3581,7 @@ class HCondition : public HBinaryOperation {
ComparisonBias GetBias() const { return GetPackedField<ComparisonBiasField>(); }
void SetBias(ComparisonBias bias) { SetPackedField<ComparisonBiasField>(bias); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
return GetPackedFields() == other->AsCondition()->GetPackedFields();
}
@@ -3638,42 +3638,42 @@ class HCondition : public HBinaryOperation {
};
// Instruction to check whether two inputs are equal to each other.
-class HEqual FINAL : public HCondition {
+class HEqual final : public HCondition {
public:
HEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kEqual, first, second, dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
- HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HNullConstant* y ATTRIBUTE_UNUSED) const override {
return MakeConstantCondition(true, GetDexPc());
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, an HCompare instruction has
// been merged into this HEqual instruction; evaluate it as
// `Compare(x, y) == 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0),
GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(Equal);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondEQ;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondNE;
}
@@ -3684,42 +3684,42 @@ class HEqual FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x == y; }
};
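
The long/float/double Evaluate overloads above fold a merged HCompare by reducing the pair to a three-way result and then testing that result against zero. Restated compactly, under the assumption that Compare returns -1/0/1 (consistent with the HCompare documentation further down in this file):

#include <cassert>
#include <cstdint>

// Three-way comparison as assumed for the merged HCompare.
template <typename T>
int Compare(T x, T y) {
  return x == y ? 0 : (x > y ? 1 : -1);
}

// HEqual::Evaluate(HLongConstant*, HLongConstant*) effectively computes:
bool FoldLongEqual(int64_t x, int64_t y) {
  return Compare(x, y) == 0;  // Compute(Compare(x, y), 0), Compute being ==
}

int main() {
  assert(FoldLongEqual(3, 3));
  assert(!FoldLongEqual(3, 4));
  return 0;
}
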
-class HNotEqual FINAL : public HCondition {
+class HNotEqual final : public HCondition {
public:
HNotEqual(HInstruction* first, HInstruction* second,
uint32_t dex_pc = kNoDexPc)
: HCondition(kNotEqual, first, second, dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
- HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HNullConstant* y ATTRIBUTE_UNUSED) const override {
return MakeConstantCondition(false, GetDexPc());
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, an HCompare instruction has
// been merged into this HNotEqual instruction; evaluate it as
// `Compare(x, y) != 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(NotEqual);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondNE;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondEQ;
}
@@ -3730,36 +3730,36 @@ class HNotEqual FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x != y; }
};
-class HLessThan FINAL : public HCondition {
+class HLessThan final : public HCondition {
public:
HLessThan(HInstruction* first, HInstruction* second,
uint32_t dex_pc = kNoDexPc)
: HCondition(kLessThan, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, an HCompare instruction has
// been merged into this HLessThan instruction; evaluate it as
// `Compare(x, y) < 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(LessThan);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondLT;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondGE;
}
@@ -3770,36 +3770,36 @@ class HLessThan FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x < y; }
};
-class HLessThanOrEqual FINAL : public HCondition {
+class HLessThanOrEqual final : public HCondition {
public:
HLessThanOrEqual(HInstruction* first, HInstruction* second,
uint32_t dex_pc = kNoDexPc)
: HCondition(kLessThanOrEqual, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, an HCompare instruction has
// been merged into this HLessThanOrEqual instruction; evaluate it as
// `Compare(x, y) <= 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(LessThanOrEqual);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondLE;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondGT;
}
@@ -3810,35 +3810,35 @@ class HLessThanOrEqual FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x <= y; }
};
-class HGreaterThan FINAL : public HCondition {
+class HGreaterThan final : public HCondition {
public:
HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kGreaterThan, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, an HCompare instruction has
// been merged into this HGreaterThan instruction; evaluate it as
// `Compare(x, y) > 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(GreaterThan);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondGT;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondLE;
}
@@ -3849,35 +3849,35 @@ class HGreaterThan FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x > y; }
};
-class HGreaterThanOrEqual FINAL : public HCondition {
+class HGreaterThanOrEqual final : public HCondition {
public:
HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kGreaterThanOrEqual, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, an HCompare instruction has
// been merged into this HGreaterThanOrEqual instruction; evaluate it as
// `Compare(x, y) >= 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(GreaterThanOrEqual);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondGE;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondLT;
}
@@ -3888,36 +3888,36 @@ class HGreaterThanOrEqual FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x >= y; }
};
-class HBelow FINAL : public HCondition {
+class HBelow final : public HCondition {
public:
HBelow(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kBelow, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
DECLARE_INSTRUCTION(Below);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondB;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondAE;
}
@@ -3930,36 +3930,36 @@ class HBelow FINAL : public HCondition {
}
};
-class HBelowOrEqual FINAL : public HCondition {
+class HBelowOrEqual final : public HCondition {
public:
HBelowOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kBelowOrEqual, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
DECLARE_INSTRUCTION(BelowOrEqual);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondBE;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondA;
}
@@ -3972,36 +3972,36 @@ class HBelowOrEqual FINAL : public HCondition {
}
};
-class HAbove FINAL : public HCondition {
+class HAbove final : public HCondition {
public:
HAbove(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kAbove, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
DECLARE_INSTRUCTION(Above);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondA;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondBE;
}
@@ -4014,36 +4014,36 @@ class HAbove FINAL : public HCondition {
}
};
-class HAboveOrEqual FINAL : public HCondition {
+class HAboveOrEqual final : public HCondition {
public:
HAboveOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kAboveOrEqual, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
DECLARE_INSTRUCTION(AboveOrEqual);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondAE;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondB;
}
@@ -4058,7 +4058,7 @@ class HAboveOrEqual FINAL : public HCondition {
// Instruction to check how two inputs compare to each other.
// Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1.
-class HCompare FINAL : public HBinaryOperation {
+class HCompare final : public HBinaryOperation {
public:
// Note that `comparison_type` is the type of comparison performed
// between the comparison's inputs, not the type of the instantiated
@@ -4090,7 +4090,7 @@ class HCompare FINAL : public HBinaryOperation {
return std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compute(x, y);
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
// Note that there is no "cmp-int" Dex instruction, so we shouldn't
// reach this code path when processing a freshly built HIR
// graph. However, HCompare integer instructions can be synthesized
@@ -4098,17 +4098,17 @@ class HCompare FINAL : public HBinaryOperation {
// IntegerSignum intrinsics, so we have to handle this case.
return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
return GetPackedFields() == other->AsCompare()->GetPackedFields();
}
@@ -4147,7 +4147,7 @@ class HCompare FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Compare);
};
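
The ComputeFP helper quoted in this hunk — std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compute(x, y) — is what gives HCompare its Dex cmpg/cmpl flavor: with a NaN operand, the gt-biased form yields 1 and the lt-biased form -1. A standalone restatement (gt_bias is a hypothetical parameter standing in for IsGtBias()):

#include <cassert>
#include <cmath>
#include <limits>

// Mirrors HCompare's documented result: 0 if x == y, 1 if x > y, -1 if x < y.
template <typename T>
int Compute(T x, T y) {
  return x == y ? 0 : (x > y ? 1 : -1);
}

// gt_bias == true models cmpg-*; gt_bias == false models cmpl-*.
template <typename T>
int ComputeFP(T x, T y, bool gt_bias) {
  return std::isunordered(x, y) ? (gt_bias ? 1 : -1) : Compute(x, y);
}

int main() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  assert(ComputeFP(nan, 1.0, /*gt_bias=*/true) == 1);    // NaN compares "greater"
  assert(ComputeFP(nan, 1.0, /*gt_bias=*/false) == -1);  // NaN compares "less"
  assert(ComputeFP(2.0, 1.0, /*gt_bias=*/true) == 1);
  return 0;
}
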
-class HNewInstance FINAL : public HExpression<1> {
+class HNewInstance final : public HExpression<1> {
public:
HNewInstance(HInstruction* cls,
uint32_t dex_pc,
@@ -4166,16 +4166,16 @@ class HNewInstance FINAL : public HExpression<1> {
SetRawInputAt(0, cls);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
// Calls the runtime, so it needs an environment.
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
// Can throw an error when out of memory, or if the class is not instantiable/accessible.
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
bool NeedsChecks() const {
return entrypoint_ == kQuickAllocObjectWithChecks;
@@ -4183,7 +4183,7 @@ class HNewInstance FINAL : public HExpression<1> {
bool IsFinalizable() const { return GetPackedFlag<kFlagFinalizable>(); }
- bool CanBeNull() const OVERRIDE { return false; }
+ bool CanBeNull() const override { return false; }
QuickEntrypointEnum GetEntrypoint() const { return entrypoint_; }
@@ -4237,7 +4237,7 @@ enum IntrinsicExceptions {
class HInvoke : public HVariableInputSizeInstruction {
public:
- bool NeedsEnvironment() const OVERRIDE;
+ bool NeedsEnvironment() const override;
void SetArgumentAt(size_t index, HInstruction* argument) {
SetRawInputAt(index, argument);
@@ -4270,15 +4270,15 @@ class HInvoke : public HVariableInputSizeInstruction {
void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); }
- bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); }
+ bool CanThrow() const override { return GetPackedFlag<kFlagCanThrow>(); }
void SetAlwaysThrows(bool always_throws) { SetPackedFlag<kFlagAlwaysThrows>(always_throws); }
- bool AlwaysThrows() const OVERRIDE { return GetPackedFlag<kFlagAlwaysThrows>(); }
+ bool AlwaysThrows() const override { return GetPackedFlag<kFlagAlwaysThrows>(); }
- bool CanBeMoved() const OVERRIDE { return IsIntrinsic() && !DoesAnyWrite(); }
+ bool CanBeMoved() const override { return IsIntrinsic() && !DoesAnyWrite(); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
return intrinsic_ != Intrinsics::kNone && intrinsic_ == other->AsInvoke()->intrinsic_;
}
@@ -4344,7 +4344,7 @@ class HInvoke : public HVariableInputSizeInstruction {
uint32_t intrinsic_optimizations_;
};
-class HInvokeUnresolved FINAL : public HInvoke {
+class HInvokeUnresolved final : public HInvoke {
public:
HInvokeUnresolved(ArenaAllocator* allocator,
uint32_t number_of_arguments,
@@ -4363,7 +4363,7 @@ class HInvokeUnresolved FINAL : public HInvoke {
invoke_type) {
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
DECLARE_INSTRUCTION(InvokeUnresolved);
@@ -4371,7 +4371,7 @@ class HInvokeUnresolved FINAL : public HInvoke {
DEFAULT_COPY_CONSTRUCTOR(InvokeUnresolved);
};
-class HInvokePolymorphic FINAL : public HInvoke {
+class HInvokePolymorphic final : public HInvoke {
public:
HInvokePolymorphic(ArenaAllocator* allocator,
uint32_t number_of_arguments,
@@ -4389,7 +4389,7 @@ class HInvokePolymorphic FINAL : public HInvoke {
kVirtual) {
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
DECLARE_INSTRUCTION(InvokePolymorphic);
@@ -4397,7 +4397,7 @@ class HInvokePolymorphic FINAL : public HInvoke {
DEFAULT_COPY_CONSTRUCTOR(InvokePolymorphic);
};
-class HInvokeCustom FINAL : public HInvoke {
+class HInvokeCustom final : public HInvoke {
public:
HInvokeCustom(ArenaAllocator* allocator,
uint32_t number_of_arguments,
@@ -4418,7 +4418,7 @@ class HInvokeCustom FINAL : public HInvoke {
uint32_t GetCallSiteIndex() const { return call_site_index_; }
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
DECLARE_INSTRUCTION(InvokeCustom);
@@ -4429,7 +4429,7 @@ class HInvokeCustom FINAL : public HInvoke {
uint32_t call_site_index_;
};
-class HInvokeStaticOrDirect FINAL : public HInvoke {
+class HInvokeStaticOrDirect final : public HInvoke {
public:
// Requirements of this method call regarding the class
// initialization (clinit) check of its declaring class.
@@ -4518,7 +4518,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
void SetDispatchInfo(const DispatchInfo& dispatch_info) {
bool had_current_method_input = HasCurrentMethodInput();
@@ -4548,7 +4548,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
}
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
ArrayRef<HUserRecord<HInstruction*>> input_records = HInvoke::GetInputRecords();
if (kIsDebugBuild && IsStaticWithExplicitClinitCheck()) {
DCHECK(!input_records.empty());
@@ -4566,13 +4566,13 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
return input_records;
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
// We access the method via the dex cache, so we can't do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
return false;
}
- bool CanBeNull() const OVERRIDE {
+ bool CanBeNull() const override {
return GetType() == DataType::Type::kReference && !IsStringInit();
}
@@ -4587,7 +4587,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
- bool NeedsDexCacheOfDeclaringClass() const OVERRIDE;
+ bool NeedsDexCacheOfDeclaringClass() const override;
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; }
bool HasPcRelativeMethodLoadKind() const {
@@ -4688,7 +4688,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs);
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs);
-class HInvokeVirtual FINAL : public HInvoke {
+class HInvokeVirtual final : public HInvoke {
public:
HInvokeVirtual(ArenaAllocator* allocator,
uint32_t number_of_arguments,
@@ -4709,9 +4709,9 @@ class HInvokeVirtual FINAL : public HInvoke {
vtable_index_(vtable_index) {
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool CanBeNull() const OVERRIDE {
+ bool CanBeNull() const override {
switch (GetIntrinsic()) {
case Intrinsics::kThreadCurrentThread:
case Intrinsics::kStringBufferAppend:
@@ -4724,7 +4724,7 @@ class HInvokeVirtual FINAL : public HInvoke {
}
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
// TODO: Add implicit null checks in intrinsics.
return (obj == InputAt(0)) && !IsIntrinsic();
}
@@ -4741,7 +4741,7 @@ class HInvokeVirtual FINAL : public HInvoke {
const uint32_t vtable_index_;
};
-class HInvokeInterface FINAL : public HInvoke {
+class HInvokeInterface final : public HInvoke {
public:
HInvokeInterface(ArenaAllocator* allocator,
uint32_t number_of_arguments,
@@ -4762,14 +4762,14 @@ class HInvokeInterface FINAL : public HInvoke {
imt_index_(imt_index) {
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
// TODO: Add implicit null checks in intrinsics.
return (obj == InputAt(0)) && !IsIntrinsic();
}
- bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+ bool NeedsDexCacheOfDeclaringClass() const override {
// The assembly stub currently needs it.
return true;
}
@@ -4786,7 +4786,7 @@ class HInvokeInterface FINAL : public HInvoke {
const uint32_t imt_index_;
};
-class HNeg FINAL : public HUnaryOperation {
+class HNeg final : public HUnaryOperation {
public:
HNeg(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HUnaryOperation(kNeg, result_type, input, dex_pc) {
@@ -4795,16 +4795,16 @@ class HNeg FINAL : public HUnaryOperation {
template <typename T> static T Compute(T x) { return -x; }
- HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x) const override {
return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x) const override {
return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x) const override {
return GetBlock()->GetGraph()->GetFloatConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(Compute(x->GetValue()), GetDexPc());
}
@@ -4814,7 +4814,7 @@ class HNeg FINAL : public HUnaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Neg);
};
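
Each Evaluate overload above hands the folded value to GetBlock()->GetGraph()->Get*Constant(...) instead of allocating a fresh node. The point of that indirection is constant interning: equal constants map to a single graph-owned node, so later passes can compare them by identity. A simplified sketch of such a cache (hypothetical; the real HGraph bookkeeping is more involved):

#include <cstdint>
#include <map>
#include <memory>
#include <vector>

class ToyGraph {
 public:
  struct IntConst { int32_t value; };

  // Returns the unique node for `value`, creating it on first use.
  IntConst* GetIntConstant(int32_t value) {
    auto it = cached_int_constants_.find(value);
    if (it != cached_int_constants_.end()) return it->second;
    owned_.push_back(std::unique_ptr<IntConst>(new IntConst{value}));
    IntConst* node = owned_.back().get();
    cached_int_constants_[value] = node;
    return node;
  }

 private:
  std::map<int32_t, IntConst*> cached_int_constants_;
  std::vector<std::unique_ptr<IntConst>> owned_;
};

int main() {
  ToyGraph g;
  // Two folds of the same value yield the same node, enabling identity checks.
  return g.GetIntConstant(7) == g.GetIntConstant(7) ? 0 : 1;
}
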
-class HNewArray FINAL : public HExpression<2> {
+class HNewArray final : public HExpression<2> {
public:
HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc)
: HExpression(kNewArray, DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) {
@@ -4822,15 +4822,15 @@ class HNewArray FINAL : public HExpression<2> {
SetRawInputAt(1, length);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
// Calls the runtime, so it needs an environment.
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
// May throw NegativeArraySizeException, OutOfMemoryError, etc.
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
- bool CanBeNull() const OVERRIDE { return false; }
+ bool CanBeNull() const override { return false; }
HLoadClass* GetLoadClass() const {
DCHECK(InputAt(0)->IsLoadClass());
@@ -4847,7 +4847,7 @@ class HNewArray FINAL : public HExpression<2> {
DEFAULT_COPY_CONSTRUCTOR(NewArray);
};
-class HAdd FINAL : public HBinaryOperation {
+class HAdd final : public HBinaryOperation {
public:
HAdd(DataType::Type result_type,
HInstruction* left,
@@ -4856,23 +4856,23 @@ class HAdd FINAL : public HBinaryOperation {
: HBinaryOperation(kAdd, result_type, left, right, SideEffects::None(), dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
template <typename T> static T Compute(T x, T y) { return x + y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
@@ -4883,7 +4883,7 @@ class HAdd FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Add);
};
-class HSub FINAL : public HBinaryOperation {
+class HSub final : public HBinaryOperation {
public:
HSub(DataType::Type result_type,
HInstruction* left,
@@ -4894,19 +4894,19 @@ class HSub FINAL : public HBinaryOperation {
template <typename T> static T Compute(T x, T y) { return x - y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
@@ -4917,7 +4917,7 @@ class HSub FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Sub);
};
-class HMul FINAL : public HBinaryOperation {
+class HMul final : public HBinaryOperation {
public:
HMul(DataType::Type result_type,
HInstruction* left,
@@ -4926,23 +4926,23 @@ class HMul FINAL : public HBinaryOperation {
: HBinaryOperation(kMul, result_type, left, right, SideEffects::None(), dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
template <typename T> static T Compute(T x, T y) { return x * y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
@@ -4953,7 +4953,7 @@ class HMul FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Mul);
};
-class HDiv FINAL : public HBinaryOperation {
+class HDiv final : public HBinaryOperation {
public:
HDiv(DataType::Type result_type,
HInstruction* left,
@@ -4978,19 +4978,19 @@ class HDiv FINAL : public HBinaryOperation {
return x / y;
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
@@ -5001,7 +5001,7 @@ class HDiv FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Div);
};
-class HRem FINAL : public HBinaryOperation {
+class HRem final : public HBinaryOperation {
public:
HRem(DataType::Type result_type,
HInstruction* left,
@@ -5026,19 +5026,19 @@ class HRem FINAL : public HBinaryOperation {
return std::fmod(x, y);
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
@@ -5049,7 +5049,7 @@ class HRem FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Rem);
};
-class HMin FINAL : public HBinaryOperation {
+class HMin final : public HBinaryOperation {
public:
HMin(DataType::Type result_type,
HInstruction* left,
@@ -5057,26 +5057,26 @@ class HMin FINAL : public HBinaryOperation {
uint32_t dex_pc)
: HBinaryOperation(kMin, result_type, left, right, SideEffects::None(), dex_pc) {}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
// Evaluation for integral values.
template <typename T> static T ComputeIntegral(T x, T y) {
return (x <= y) ? x : y;
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
// TODO: Evaluation for floating-point values.
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
DECLARE_INSTRUCTION(Min);
@@ -5084,7 +5084,7 @@ class HMin FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Min);
};
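
The TODO in HMin is not an oversight: folding floating-point min with the integral rule (x <= y) ? x : y would get Java semantics wrong, which is presumably why the float/double overloads return nullptr (meaning: no folding) for now. Two of the problem cases, sketched:

#include <cassert>
#include <cmath>
#include <limits>

// The integral rule from ComputeIntegral, applied (incorrectly) to doubles.
double NaiveMin(double x, double y) { return (x <= y) ? x : y; }

int main() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  // Java's Math.min propagates NaN; the naive rule picks the other operand.
  assert(!std::isnan(NaiveMin(nan, 1.0)));     // wrong: should be NaN
  // Java's Math.min(-0.0, 0.0) is -0.0; the naive rule keeps +0.0 here.
  assert(!std::signbit(NaiveMin(0.0, -0.0)));  // wrong: should be -0.0
  return 0;
}

HMax below carries the same caveat, mirrored.
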
-class HMax FINAL : public HBinaryOperation {
+class HMax final : public HBinaryOperation {
public:
HMax(DataType::Type result_type,
HInstruction* left,
@@ -5092,26 +5092,26 @@ class HMax FINAL : public HBinaryOperation {
uint32_t dex_pc)
: HBinaryOperation(kMax, result_type, left, right, SideEffects::None(), dex_pc) {}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
// Evaluation for integral values.
template <typename T> static T ComputeIntegral(T x, T y) {
return (x >= y) ? x : y;
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
// TODO: Evaluation for floating-point values.
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
DECLARE_INSTRUCTION(Max);
@@ -5119,7 +5119,7 @@ class HMax FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Max);
};
-class HAbs FINAL : public HUnaryOperation {
+class HAbs final : public HUnaryOperation {
public:
HAbs(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HUnaryOperation(kAbs, result_type, input, dex_pc) {}
@@ -5139,17 +5139,17 @@ class HAbs FINAL : public HUnaryOperation {
return bit_cast<T, S>(bits & std::numeric_limits<S>::max());
}
- HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x) const override {
return GetBlock()->GetGraph()->GetIntConstant(ComputeIntegral(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x) const override {
return GetBlock()->GetGraph()->GetLongConstant(ComputeIntegral(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
ComputeFP<float, int32_t>(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
ComputeFP<double, int64_t>(x->GetValue()), GetDexPc());
}
@@ -5160,7 +5160,7 @@ class HAbs FINAL : public HUnaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Abs);
};
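
ComputeFP in HAbs computes |x| without branching on the value: it reinterprets the float as its same-width signed integer, masks with std::numeric_limits<S>::max() (every bit set except the sign bit), and reinterprets back. A standalone equivalent using memcpy in place of bit_cast:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

// Same-width reinterpretation, standing in for ART's bit_cast.
template <typename To, typename From>
To BitCast(From from) {
  static_assert(sizeof(To) == sizeof(From), "size mismatch");
  To to;
  std::memcpy(&to, &from, sizeof(to));
  return to;
}

// Mirrors HAbs::ComputeFP<float, int32_t>: clear the sign bit, keep the rest.
float AbsFP(float x) {
  const int32_t bits = BitCast<int32_t, float>(x);
  return BitCast<float, int32_t>(bits & std::numeric_limits<int32_t>::max());
}

int main() {
  assert(AbsFP(-2.5f) == 2.5f);
  assert(AbsFP(2.5f) == 2.5f);
  assert(!std::signbit(AbsFP(-0.0f)));  // a pure bit operation, no FP compare
  return 0;
}
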
-class HDivZeroCheck FINAL : public HExpression<1> {
+class HDivZeroCheck final : public HExpression<1> {
public:
// `HDivZeroCheck` can trigger GC, as it may call the `ArithmeticException`
// constructor.
@@ -5169,15 +5169,15 @@ class HDivZeroCheck FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool NeedsEnvironment() const OVERRIDE { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
+ bool CanThrow() const override { return true; }
DECLARE_INSTRUCTION(DivZeroCheck);
@@ -5185,7 +5185,7 @@ class HDivZeroCheck FINAL : public HExpression<1> {
DEFAULT_COPY_CONSTRUCTOR(DivZeroCheck);
};
-class HShl FINAL : public HBinaryOperation {
+class HShl final : public HBinaryOperation {
public:
HShl(DataType::Type result_type,
HInstruction* value,
@@ -5201,26 +5201,26 @@ class HShl FINAL : public HBinaryOperation {
return value << (distance & max_shift_distance);
}
- HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
}
HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
- HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HLongConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
UNREACHABLE();
}
HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
- HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
- HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5231,7 +5231,7 @@ class HShl FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Shl);
};
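
HShl::Compute masks the shift distance before shifting (value << (distance & max_shift_distance)), matching Java/Dex semantics where only the low 5 bits (int) or 6 bits (long) of the distance are significant; the mask also keeps the C++ shift well-defined for out-of-range distances. Restated with the mask values assumed to be the usual 31 and 63:

#include <cassert>
#include <cstdint>

// Mirrors HShl::Compute, assuming kMaxIntShiftDistance == 31 and
// kMaxLongShiftDistance == 63.
int32_t ShlInt(int32_t value, int32_t distance) {
  return value << (distance & 31);
}
int64_t ShlLong(int64_t value, int32_t distance) {
  return value << (distance & 63);
}

int main() {
  // A distance of 33 behaves like 1 for 32-bit values, as in Java...
  assert(ShlInt(1, 33) == ShlInt(1, 1));
  // ...but is taken as-is for 64-bit values.
  assert(ShlLong(1, 33) == (int64_t{1} << 33));
  return 0;
}

HShr and HUShr below apply the same masking, with HUShr additionally casting to the unsigned type first so the shift is logical rather than arithmetic.
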
-class HShr FINAL : public HBinaryOperation {
+class HShr final : public HBinaryOperation {
public:
HShr(DataType::Type result_type,
HInstruction* value,
@@ -5247,26 +5247,26 @@ class HShr FINAL : public HBinaryOperation {
return value >> (distance & max_shift_distance);
}
- HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
}
HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
- HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HLongConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
UNREACHABLE();
}
HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
- HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
- HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5277,7 +5277,7 @@ class HShr FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Shr);
};
-class HUShr FINAL : public HBinaryOperation {
+class HUShr final : public HBinaryOperation {
public:
HUShr(DataType::Type result_type,
HInstruction* value,
@@ -5295,26 +5295,26 @@ class HUShr FINAL : public HBinaryOperation {
return static_cast<T>(ux >> (distance & max_shift_distance));
}
- HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
}
HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
- HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HLongConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
UNREACHABLE();
}
HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
- HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
- HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5325,7 +5325,7 @@ class HUShr FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(UShr);
};
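HUShr::Compute differs from HShr only in routing the value through the unsigned counterpart type, which turns `>>` into a logical shift. A standalone sketch of that conversion:

#include <cassert>
#include <cstdint>
#include <type_traits>

// Going through the unsigned type means the sign bit is not replicated.
template <typename T>
static T UShrCompute(T value, int32_t distance, int32_t max_shift_distance) {
  using V = typename std::make_unsigned<T>::type;
  V ux = static_cast<V>(value);
  return static_cast<T>(ux >> (distance & max_shift_distance));
}

int main() {
  // Arithmetic shift of -1 stays -1; the logical shift brings in a zero bit:
  // 0xFFFFFFFF >> 1 == 0x7FFFFFFF.
  assert(UShrCompute<int32_t>(-1, 1, 0x1f) == 0x7FFFFFFF);
  assert(UShrCompute<int32_t>(-1, 33, 0x1f) == 0x7FFFFFFF);  // 33 & 0x1f == 1
}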
-class HAnd FINAL : public HBinaryOperation {
+class HAnd final : public HBinaryOperation {
public:
HAnd(DataType::Type result_type,
HInstruction* left,
@@ -5334,25 +5334,25 @@ class HAnd FINAL : public HBinaryOperation {
: HBinaryOperation(kAnd, result_type, left, right, SideEffects::None(), dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
template <typename T> static T Compute(T x, T y) { return x & y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5363,7 +5363,7 @@ class HAnd FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(And);
};
-class HOr FINAL : public HBinaryOperation {
+class HOr final : public HBinaryOperation {
public:
HOr(DataType::Type result_type,
HInstruction* left,
@@ -5372,25 +5372,25 @@ class HOr FINAL : public HBinaryOperation {
: HBinaryOperation(kOr, result_type, left, right, SideEffects::None(), dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
template <typename T> static T Compute(T x, T y) { return x | y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5401,7 +5401,7 @@ class HOr FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Or);
};
-class HXor FINAL : public HBinaryOperation {
+class HXor final : public HBinaryOperation {
public:
HXor(DataType::Type result_type,
HInstruction* left,
@@ -5410,25 +5410,25 @@ class HXor FINAL : public HBinaryOperation {
: HBinaryOperation(kXor, result_type, left, right, SideEffects::None(), dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
template <typename T> static T Compute(T x, T y) { return x ^ y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5439,7 +5439,7 @@ class HXor FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Xor);
};
-class HRor FINAL : public HBinaryOperation {
+class HRor final : public HBinaryOperation {
public:
HRor(DataType::Type result_type, HInstruction* value, HInstruction* distance)
: HBinaryOperation(kRor, result_type, value, distance) {
@@ -5460,26 +5460,26 @@ class HRor FINAL : public HBinaryOperation {
}
}
- HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
}
HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
- HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HLongConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
UNREACHABLE();
}
HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
- HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
- HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
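HRor's Compute (its body falls outside this hunk) rotates the value right by the masked distance. A standalone sketch under the same masking assumptions; the zero-distance case is handled separately because shifting by the full register width is undefined in C++:

#include <cassert>
#include <cstdint>
#include <type_traits>

// Rotate right: shift the unsigned value right and wrap the low bits
// around to the top. Two's-complement conversion back to T is assumed.
template <typename T>
static T RorCompute(T value, int32_t distance, int32_t max_shift_value) {
  using V = typename std::make_unsigned<T>::type;
  V ux = static_cast<V>(value);
  int32_t d = distance & max_shift_value;
  if (d == 0) {
    return static_cast<T>(ux);
  }
  const int32_t reg_bits = sizeof(T) * 8;
  return static_cast<T>((ux >> d) | (ux << (reg_bits - d)));
}

int main() {
  // Rotating 0x00000001 right by 1 moves the bit to the top.
  assert(RorCompute<int32_t>(1, 1, 0x1f) == INT32_MIN);  // 0x80000000
  // A full rotation (distance 32, masked to 0) is the identity.
  assert(RorCompute<int32_t>(0x12345678, 32, 0x1f) == 0x12345678);
}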
@@ -5492,7 +5492,7 @@ class HRor FINAL : public HBinaryOperation {
// The value of a parameter in this method. Its location depends on
// the calling convention.
-class HParameterValue FINAL : public HExpression<0> {
+class HParameterValue final : public HExpression<0> {
public:
HParameterValue(const DexFile& dex_file,
dex::TypeIndex type_index,
@@ -5512,7 +5512,7 @@ class HParameterValue FINAL : public HExpression<0> {
uint8_t GetIndex() const { return index_; }
bool IsThis() const { return GetPackedFlag<kFlagIsThis>(); }
- bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+ bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
DECLARE_INSTRUCTION(ParameterValue);
@@ -5535,30 +5535,30 @@ class HParameterValue FINAL : public HExpression<0> {
const uint8_t index_;
};
-class HNot FINAL : public HUnaryOperation {
+class HNot final : public HUnaryOperation {
public:
HNot(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HUnaryOperation(kNot, result_type, input, dex_pc) {
}
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
template <typename T> static T Compute(T x) { return ~x; }
- HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x) const override {
return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x) const override {
return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
- HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5569,14 +5569,14 @@ class HNot FINAL : public HUnaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Not);
};
-class HBooleanNot FINAL : public HUnaryOperation {
+class HBooleanNot final : public HUnaryOperation {
public:
explicit HBooleanNot(HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HUnaryOperation(kBooleanNot, DataType::Type::kBool, input, dex_pc) {
}
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
@@ -5585,18 +5585,18 @@ class HBooleanNot FINAL : public HUnaryOperation {
return !x;
}
- HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x) const override {
return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for long values";
UNREACHABLE();
}
- HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
- HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5607,7 +5607,7 @@ class HBooleanNot FINAL : public HUnaryOperation {
DEFAULT_COPY_CONSTRUCTOR(BooleanNot);
};
-class HTypeConversion FINAL : public HExpression<1> {
+class HTypeConversion final : public HExpression<1> {
public:
// Instantiate a type conversion of `input` to `result_type`.
HTypeConversion(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
@@ -5621,9 +5621,9 @@ class HTypeConversion FINAL : public HExpression<1> {
DataType::Type GetInputType() const { return GetInput()->GetType(); }
DataType::Type GetResultType() const { return GetType(); }
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
@@ -5639,7 +5639,7 @@ class HTypeConversion FINAL : public HExpression<1> {
static constexpr uint32_t kNoRegNumber = -1;
-class HNullCheck FINAL : public HExpression<1> {
+class HNullCheck final : public HExpression<1> {
public:
// `HNullCheck` can trigger GC, as it may call the `NullPointerException`
// constructor.
@@ -5648,17 +5648,17 @@ class HNullCheck FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
- bool CanBeNull() const OVERRIDE { return false; }
+ bool CanBeNull() const override { return false; }
DECLARE_INSTRUCTION(NullCheck);
@@ -5703,7 +5703,7 @@ class FieldInfo : public ValueObject {
const DexFile& dex_file_;
};
-class HInstanceFieldGet FINAL : public HExpression<1> {
+class HInstanceFieldGet final : public HExpression<1> {
public:
HInstanceFieldGet(HInstruction* value,
ArtField* field,
@@ -5728,19 +5728,19 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return !IsVolatile(); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
const HInstanceFieldGet* other_get = other->AsInstanceFieldGet();
return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
}
- size_t ComputeHashCode() const OVERRIDE {
+ size_t ComputeHashCode() const override {
return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
}
@@ -5765,7 +5765,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
const FieldInfo field_info_;
};
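The ComputeHashCode override above folds the field offset into the base hash so that gets of different fields land in different buckets while equal gets hash equally, presumably for value-numbering lookups. A toy illustration:

#include <cassert>
#include <cstddef>

// Toy version of the combining step used by HInstanceFieldGet.
static size_t CombineHash(size_t instruction_hash, size_t field_offset) {
  return (instruction_hash << 7) | field_offset;
}

int main() {
  size_t kind_hash = 42;  // stand-in for HInstruction::ComputeHashCode()
  assert(CombineHash(kind_hash, 8) == CombineHash(kind_hash, 8));   // equal data, equal hash
  assert(CombineHash(kind_hash, 8) != CombineHash(kind_hash, 12));  // different fields differ
}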
-class HInstanceFieldSet FINAL : public HExpression<2> {
+class HInstanceFieldSet final : public HExpression<2> {
public:
HInstanceFieldSet(HInstruction* object,
HInstruction* value,
@@ -5792,9 +5792,9 @@ class HInstanceFieldSet FINAL : public HExpression<2> {
SetRawInputAt(1, value);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
}
@@ -5820,7 +5820,7 @@ class HInstanceFieldSet FINAL : public HExpression<2> {
const FieldInfo field_info_;
};
-class HArrayGet FINAL : public HExpression<2> {
+class HArrayGet final : public HExpression<2> {
public:
HArrayGet(HInstruction* array,
HInstruction* index,
@@ -5846,12 +5846,12 @@ class HArrayGet FINAL : public HExpression<2> {
SetRawInputAt(1, index);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
// TODO: We can be smarter here.
// Currently, unless the array is the result of NewArray, the array access is always
// preceded by some form of null check necessary for the bounds check, usually
@@ -5911,7 +5911,7 @@ class HArrayGet FINAL : public HExpression<2> {
"Too many packed fields.");
};
-class HArraySet FINAL : public HExpression<3> {
+class HArraySet final : public HExpression<3> {
public:
HArraySet(HInstruction* array,
HInstruction* index,
@@ -5943,17 +5943,17 @@ class HArraySet FINAL : public HExpression<3> {
SetRawInputAt(2, value);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
// We call a runtime method to throw ArrayStoreException.
return NeedsTypeCheck();
}
// Can throw ArrayStoreException.
- bool CanThrow() const OVERRIDE { return NeedsTypeCheck(); }
+ bool CanThrow() const override { return NeedsTypeCheck(); }
- bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
// TODO: Same as for ArrayGet.
return false;
}
@@ -6030,7 +6030,7 @@ class HArraySet FINAL : public HExpression<3> {
BitField<DataType::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>;
};
-class HArrayLength FINAL : public HExpression<1> {
+class HArrayLength final : public HExpression<1> {
public:
HArrayLength(HInstruction* array, uint32_t dex_pc, bool is_string_length = false)
: HExpression(kArrayLength, DataType::Type::kInt32, SideEffects::None(), dex_pc) {
@@ -6040,12 +6040,12 @@ class HArrayLength FINAL : public HExpression<1> {
SetRawInputAt(0, array);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
return obj == InputAt(0);
}
@@ -6068,7 +6068,7 @@ class HArrayLength FINAL : public HExpression<1> {
"Too many packed fields.");
};
-class HBoundsCheck FINAL : public HExpression<2> {
+class HBoundsCheck final : public HExpression<2> {
public:
// `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
// constructor.
@@ -6083,15 +6083,15 @@ class HBoundsCheck FINAL : public HExpression<2> {
SetRawInputAt(1, length);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }
@@ -6106,16 +6106,16 @@ class HBoundsCheck FINAL : public HExpression<2> {
static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
};
-class HSuspendCheck FINAL : public HExpression<0> {
+class HSuspendCheck final : public HExpression<0> {
public:
explicit HSuspendCheck(uint32_t dex_pc = kNoDexPc)
: HExpression(kSuspendCheck, SideEffects::CanTriggerGC(), dex_pc),
slow_path_(nullptr) {
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
return true;
}
@@ -6141,7 +6141,7 @@ class HNativeDebugInfo : public HExpression<0> {
: HExpression<0>(kNativeDebugInfo, SideEffects::None(), dex_pc) {
}
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
return true;
}
@@ -6154,7 +6154,7 @@ class HNativeDebugInfo : public HExpression<0> {
/**
* Instruction to load a Class object.
*/
-class HLoadClass FINAL : public HInstruction {
+class HLoadClass final : public HInstruction {
public:
// Determines how to load the Class.
enum class LoadKind {
@@ -6217,7 +6217,7 @@ class HLoadClass FINAL : public HInstruction {
SetPackedFlag<kFlagValidLoadedClassRTI>(false);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
void SetLoadKind(LoadKind load_kind);
@@ -6231,15 +6231,15 @@ class HLoadClass FINAL : public HInstruction {
GetLoadKind() == LoadKind::kBssEntry;
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
bool InstructionDataEquals(const HInstruction* other) const;
- size_t ComputeHashCode() const OVERRIDE { return type_index_.index_; }
+ size_t ComputeHashCode() const override { return type_index_.index_; }
- bool CanBeNull() const OVERRIDE { return false; }
+ bool CanBeNull() const override { return false; }
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
return CanCallRuntime();
}
@@ -6257,7 +6257,7 @@ class HLoadClass FINAL : public HInstruction {
GetLoadKind() == LoadKind::kBssEntry;
}
- bool CanThrow() const OVERRIDE {
+ bool CanThrow() const override {
return NeedsAccessCheck() ||
MustGenerateClinitCheck() ||
// If the class is in the boot image, the lookup in the runtime call cannot throw.
@@ -6284,7 +6284,7 @@ class HLoadClass FINAL : public HInstruction {
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
- bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+ bool NeedsDexCacheOfDeclaringClass() const override {
return GetLoadKind() == LoadKind::kRuntimeCall;
}
@@ -6311,7 +6311,7 @@ class HLoadClass FINAL : public HInstruction {
void AddSpecialInput(HInstruction* special_input);
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
return ArrayRef<HUserRecord<HInstruction*>>(
&special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
}
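GetInputRecords here exposes the optional special input as a span of length zero or one over a single member slot, so generic input-walking code needs no special case. A simplified sketch, using std::pair as a stand-in for ArrayRef and hypothetical names throughout:

#include <cassert>
#include <cstddef>
#include <utility>

struct FakeInstruction {};

struct SpecialInputHolder {
  FakeInstruction* special_input_ = nullptr;

  // Returns {pointer, count}: count becomes 1 only once the input is set.
  std::pair<FakeInstruction**, size_t> GetInputRecords() {
    return {&special_input_, special_input_ != nullptr ? 1u : 0u};
  }
};

int main() {
  SpecialInputHolder holder;
  assert(holder.GetInputRecords().second == 0u);  // no special input yet
  FakeInstruction base;
  holder.special_input_ = &base;
  assert(holder.GetInputRecords().second == 1u);  // now visible as one input
}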
@@ -6392,7 +6392,7 @@ inline void HLoadClass::AddSpecialInput(HInstruction* special_input) {
special_input->AddUseAt(this, 0);
}
-class HLoadString FINAL : public HInstruction {
+class HLoadString final : public HInstruction {
public:
// Determines how to load the String.
enum class LoadKind {
@@ -6436,7 +6436,7 @@ class HLoadString FINAL : public HInstruction {
SetPackedField<LoadKindField>(LoadKind::kRuntimeCall);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
void SetLoadKind(LoadKind load_kind);
@@ -6466,15 +6466,15 @@ class HLoadString FINAL : public HInstruction {
string_ = str;
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
+ bool InstructionDataEquals(const HInstruction* other) const override;
- size_t ComputeHashCode() const OVERRIDE { return string_index_.index_; }
+ size_t ComputeHashCode() const override { return string_index_.index_; }
// Will call the runtime if we need to load the string through
// the dex cache and the string is not guaranteed to be there yet.
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
LoadKind load_kind = GetLoadKind();
if (load_kind == LoadKind::kBootImageLinkTimePcRelative ||
load_kind == LoadKind::kBootImageRelRo ||
@@ -6485,12 +6485,12 @@ class HLoadString FINAL : public HInstruction {
return true;
}
- bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+ bool NeedsDexCacheOfDeclaringClass() const override {
return GetLoadKind() == LoadKind::kRuntimeCall;
}
- bool CanBeNull() const OVERRIDE { return false; }
- bool CanThrow() const OVERRIDE { return NeedsEnvironment(); }
+ bool CanBeNull() const override { return false; }
+ bool CanThrow() const override { return NeedsEnvironment(); }
static SideEffects SideEffectsForArchRuntimeCalls() {
return SideEffects::CanTriggerGC();
@@ -6499,7 +6499,7 @@ class HLoadString FINAL : public HInstruction {
void AddSpecialInput(HInstruction* special_input);
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
return ArrayRef<HUserRecord<HInstruction*>>(
&special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
}
@@ -6561,7 +6561,7 @@ inline void HLoadString::AddSpecialInput(HInstruction* special_input) {
special_input->AddUseAt(this, 0);
}
-class HLoadMethodHandle FINAL : public HInstruction {
+class HLoadMethodHandle final : public HInstruction {
public:
HLoadMethodHandle(HCurrentMethod* current_method,
uint16_t method_handle_idx,
@@ -6577,12 +6577,12 @@ class HLoadMethodHandle FINAL : public HInstruction {
}
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
return ArrayRef<HUserRecord<HInstruction*>>(
&special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
uint16_t GetMethodHandleIndex() const { return method_handle_idx_; }
@@ -6605,7 +6605,7 @@ class HLoadMethodHandle FINAL : public HInstruction {
const DexFile& dex_file_;
};
-class HLoadMethodType FINAL : public HInstruction {
+class HLoadMethodType final : public HInstruction {
public:
HLoadMethodType(HCurrentMethod* current_method,
dex::ProtoIndex proto_index,
@@ -6621,12 +6621,12 @@ class HLoadMethodType FINAL : public HInstruction {
}
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
return ArrayRef<HUserRecord<HInstruction*>>(
&special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
dex::ProtoIndex GetProtoIndex() const { return proto_index_; }
@@ -6652,7 +6652,7 @@ class HLoadMethodType FINAL : public HInstruction {
/**
* Performs an initialization check on its Class object input.
*/
-class HClinitCheck FINAL : public HExpression<1> {
+class HClinitCheck final : public HExpression<1> {
public:
HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
: HExpression(
@@ -6663,17 +6663,17 @@ class HClinitCheck FINAL : public HExpression<1> {
SetRawInputAt(0, constant);
}
// TODO: Make ClinitCheck clonable.
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
// May call runtime to initialize the class.
return true;
}
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
HLoadClass* GetLoadClass() const {
DCHECK(InputAt(0)->IsLoadClass());
@@ -6687,7 +6687,7 @@ class HClinitCheck FINAL : public HExpression<1> {
DEFAULT_COPY_CONSTRUCTOR(ClinitCheck);
};
-class HStaticFieldGet FINAL : public HExpression<1> {
+class HStaticFieldGet final : public HExpression<1> {
public:
HStaticFieldGet(HInstruction* cls,
ArtField* field,
@@ -6713,15 +6713,15 @@ class HStaticFieldGet FINAL : public HExpression<1> {
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return !IsVolatile(); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
const HStaticFieldGet* other_get = other->AsStaticFieldGet();
return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
}
- size_t ComputeHashCode() const OVERRIDE {
+ size_t ComputeHashCode() const override {
return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
}
@@ -6746,7 +6746,7 @@ class HStaticFieldGet FINAL : public HExpression<1> {
const FieldInfo field_info_;
};
-class HStaticFieldSet FINAL : public HExpression<2> {
+class HStaticFieldSet final : public HExpression<2> {
public:
HStaticFieldSet(HInstruction* cls,
HInstruction* value,
@@ -6773,7 +6773,7 @@ class HStaticFieldSet FINAL : public HExpression<2> {
SetRawInputAt(1, value);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
const FieldInfo& GetFieldInfo() const { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
@@ -6797,7 +6797,7 @@ class HStaticFieldSet FINAL : public HExpression<2> {
const FieldInfo field_info_;
};
-class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
+class HUnresolvedInstanceFieldGet final : public HExpression<1> {
public:
HUnresolvedInstanceFieldGet(HInstruction* obj,
DataType::Type field_type,
@@ -6811,9 +6811,9 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
SetRawInputAt(0, obj);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool NeedsEnvironment() const OVERRIDE { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool NeedsEnvironment() const override { return true; }
+ bool CanThrow() const override { return true; }
DataType::Type GetFieldType() const { return GetType(); }
uint32_t GetFieldIndex() const { return field_index_; }
@@ -6827,7 +6827,7 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
const uint32_t field_index_;
};
-class HUnresolvedInstanceFieldSet FINAL : public HExpression<2> {
+class HUnresolvedInstanceFieldSet final : public HExpression<2> {
public:
HUnresolvedInstanceFieldSet(HInstruction* obj,
HInstruction* value,
@@ -6842,9 +6842,9 @@ class HUnresolvedInstanceFieldSet FINAL : public HExpression<2> {
SetRawInputAt(1, value);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool NeedsEnvironment() const OVERRIDE { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool NeedsEnvironment() const override { return true; }
+ bool CanThrow() const override { return true; }
DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
uint32_t GetFieldIndex() const { return field_index_; }
@@ -6867,7 +6867,7 @@ class HUnresolvedInstanceFieldSet FINAL : public HExpression<2> {
const uint32_t field_index_;
};
-class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
+class HUnresolvedStaticFieldGet final : public HExpression<0> {
public:
HUnresolvedStaticFieldGet(DataType::Type field_type,
uint32_t field_index,
@@ -6879,9 +6879,9 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
field_index_(field_index) {
}
- bool IsClonable() const OVERRIDE { return true; }
- bool NeedsEnvironment() const OVERRIDE { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool NeedsEnvironment() const override { return true; }
+ bool CanThrow() const override { return true; }
DataType::Type GetFieldType() const { return GetType(); }
uint32_t GetFieldIndex() const { return field_index_; }
@@ -6895,7 +6895,7 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
const uint32_t field_index_;
};
-class HUnresolvedStaticFieldSet FINAL : public HExpression<1> {
+class HUnresolvedStaticFieldSet final : public HExpression<1> {
public:
HUnresolvedStaticFieldSet(HInstruction* value,
DataType::Type field_type,
@@ -6908,9 +6908,9 @@ class HUnresolvedStaticFieldSet FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool NeedsEnvironment() const OVERRIDE { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool NeedsEnvironment() const override { return true; }
+ bool CanThrow() const override { return true; }
DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
uint32_t GetFieldIndex() const { return field_index_; }
@@ -6934,13 +6934,13 @@ class HUnresolvedStaticFieldSet FINAL : public HExpression<1> {
};
// Implement the move-exception DEX instruction.
-class HLoadException FINAL : public HExpression<0> {
+class HLoadException final : public HExpression<0> {
public:
explicit HLoadException(uint32_t dex_pc = kNoDexPc)
: HExpression(kLoadException, DataType::Type::kReference, SideEffects::None(), dex_pc) {
}
- bool CanBeNull() const OVERRIDE { return false; }
+ bool CanBeNull() const override { return false; }
DECLARE_INSTRUCTION(LoadException);
@@ -6950,7 +6950,7 @@ class HLoadException FINAL : public HExpression<0> {
// Implicit part of move-exception which clears thread-local exception storage.
// Must not be removed because the runtime expects the TLS to get cleared.
-class HClearException FINAL : public HExpression<0> {
+class HClearException final : public HExpression<0> {
public:
explicit HClearException(uint32_t dex_pc = kNoDexPc)
: HExpression(kClearException, SideEffects::AllWrites(), dex_pc) {
@@ -6962,20 +6962,20 @@ class HClearException FINAL : public HExpression<0> {
DEFAULT_COPY_CONSTRUCTOR(ClearException);
};
-class HThrow FINAL : public HExpression<1> {
+class HThrow final : public HExpression<1> {
public:
HThrow(HInstruction* exception, uint32_t dex_pc)
: HExpression(kThrow, SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, exception);
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
- bool AlwaysThrows() const OVERRIDE { return true; }
+ bool AlwaysThrows() const override { return true; }
DECLARE_INSTRUCTION(Throw);
@@ -7062,10 +7062,10 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction {
return static_cast<uint32_t>(mask->AsIntConstant()->GetValue());
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsInstanceOf() || other->IsCheckCast()) << other->DebugName();
return GetPackedFields() == down_cast<const HTypeCheckInstruction*>(other)->GetPackedFields();
}
@@ -7110,7 +7110,7 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction {
Handle<mirror::Class> klass_;
};
-class HInstanceOf FINAL : public HTypeCheckInstruction {
+class HInstanceOf final : public HTypeCheckInstruction {
public:
HInstanceOf(HInstruction* object,
HInstruction* target_class_or_null,
@@ -7132,9 +7132,9 @@ class HInstanceOf FINAL : public HTypeCheckInstruction {
bitstring_mask,
SideEffectsForArchRuntimeCalls(check_kind)) {}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
return CanCallRuntime(GetTypeCheckKind());
}
@@ -7153,7 +7153,7 @@ class HInstanceOf FINAL : public HTypeCheckInstruction {
DEFAULT_COPY_CONSTRUCTOR(InstanceOf);
};
-class HBoundType FINAL : public HExpression<1> {
+class HBoundType final : public HExpression<1> {
public:
explicit HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HExpression(kBoundType, DataType::Type::kReference, SideEffects::None(), dex_pc),
@@ -7164,8 +7164,8 @@ class HBoundType FINAL : public HExpression<1> {
SetRawInputAt(0, input);
}
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
- bool IsClonable() const OVERRIDE { return true; }
+ bool InstructionDataEquals(const HInstruction* other) const override;
+ bool IsClonable() const override { return true; }
// {Get,Set}Upper* should only be used in reference type propagation.
const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; }
@@ -7177,7 +7177,7 @@ class HBoundType FINAL : public HExpression<1> {
SetPackedFlag<kFlagCanBeNull>(can_be_null);
}
- bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+ bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
DECLARE_INSTRUCTION(BoundType);
@@ -7201,7 +7201,7 @@ class HBoundType FINAL : public HExpression<1> {
ReferenceTypeInfo upper_bound_;
};
-class HCheckCast FINAL : public HTypeCheckInstruction {
+class HCheckCast final : public HTypeCheckInstruction {
public:
HCheckCast(HInstruction* object,
HInstruction* target_class_or_null,
@@ -7223,13 +7223,13 @@ class HCheckCast FINAL : public HTypeCheckInstruction {
bitstring_mask,
SideEffects::CanTriggerGC()) {}
- bool IsClonable() const OVERRIDE { return true; }
- bool NeedsEnvironment() const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool NeedsEnvironment() const override {
// Instruction may throw a CheckCastError.
return true;
}
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
DECLARE_INSTRUCTION(CheckCast);
@@ -7263,7 +7263,7 @@ enum MemBarrierKind {
};
std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
-class HMemoryBarrier FINAL : public HExpression<0> {
+class HMemoryBarrier final : public HExpression<0> {
public:
explicit HMemoryBarrier(MemBarrierKind barrier_kind, uint32_t dex_pc = kNoDexPc)
: HExpression(kMemoryBarrier,
@@ -7272,7 +7272,7 @@ class HMemoryBarrier FINAL : public HExpression<0> {
SetPackedField<BarrierKindField>(barrier_kind);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
MemBarrierKind GetBarrierKind() { return GetPackedField<BarrierKindField>(); }
@@ -7348,7 +7348,7 @@ class HMemoryBarrier FINAL : public HExpression<0> {
// * CompilerDriver::RequiresConstructorBarrier
// * QuasiAtomic::ThreadFenceForConstructor
//
-class HConstructorFence FINAL : public HVariableInputSizeInstruction {
+class HConstructorFence final : public HVariableInputSizeInstruction {
// A fence has variable inputs because the inputs can be removed
// after the prepare_for_register_allocation phase.
// (TODO: In the future a fence could freeze multiple objects
@@ -7445,7 +7445,7 @@ class HConstructorFence FINAL : public HVariableInputSizeInstruction {
DEFAULT_COPY_CONSTRUCTOR(ConstructorFence);
};
-class HMonitorOperation FINAL : public HExpression<1> {
+class HMonitorOperation final : public HExpression<1> {
public:
enum class OperationKind {
kEnter,
@@ -7462,9 +7462,9 @@ class HMonitorOperation FINAL : public HExpression<1> {
}
// Instruction may go into runtime, so we need an environment.
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
- bool CanThrow() const OVERRIDE {
+ bool CanThrow() const override {
// Verifier guarantees that monitor-exit cannot throw.
// This is important because it allows the HGraphBuilder to remove
// a dead throw-catch loop generated for `synchronized` blocks/methods.
@@ -7490,7 +7490,7 @@ class HMonitorOperation FINAL : public HExpression<1> {
using OperationKindField = BitField<OperationKind, kFieldOperationKind, kFieldOperationKindSize>;
};
-class HSelect FINAL : public HExpression<3> {
+class HSelect final : public HExpression<3> {
public:
HSelect(HInstruction* condition,
HInstruction* true_value,
@@ -7508,17 +7508,17 @@ class HSelect FINAL : public HExpression<3> {
SetRawInputAt(2, condition);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
HInstruction* GetFalseValue() const { return InputAt(0); }
HInstruction* GetTrueValue() const { return InputAt(1); }
HInstruction* GetCondition() const { return InputAt(2); }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool CanBeNull() const OVERRIDE {
+ bool CanBeNull() const override {
return GetTrueValue()->CanBeNull() || GetFalseValue()->CanBeNull();
}
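The CanBeNull override above encodes a simple propagation rule: a select's result can be null whenever either value operand can be, since either one may be chosen. A minimal sketch:

#include <cassert>

struct Value {
  bool can_be_null;
};

// Nullability of a select is the OR of its two value operands.
static bool SelectCanBeNull(const Value& true_value, const Value& false_value) {
  return true_value.can_be_null || false_value.can_be_null;
}

int main() {
  assert(SelectCanBeNull({false}, {true}));    // one nullable operand suffices
  assert(!SelectCanBeNull({false}, {false}));  // both non-null: result non-null
}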
@@ -7606,7 +7606,7 @@ std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs);
static constexpr size_t kDefaultNumberOfMoves = 4;
-class HParallelMove FINAL : public HExpression<0> {
+class HParallelMove final : public HExpression<0> {
public:
explicit HParallelMove(ArenaAllocator* allocator, uint32_t dex_pc = kNoDexPc)
: HExpression(kParallelMove, SideEffects::None(), dex_pc),
@@ -7668,7 +7668,7 @@ class HParallelMove FINAL : public HExpression<0> {
// never used across anything that can trigger GC.
// The result of this instruction is not a pointer in the sense of `DataType::Type::kReference`.
// So we represent it by the type `DataType::Type::kInt32`.
-class HIntermediateAddress FINAL : public HExpression<2> {
+class HIntermediateAddress final : public HExpression<2> {
public:
HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
: HExpression(kIntermediateAddress,
@@ -7682,12 +7682,12 @@ class HIntermediateAddress FINAL : public HExpression<2> {
SetRawInputAt(1, offset);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool IsActualObject() const OVERRIDE { return false; }
+ bool IsActualObject() const override { return false; }
HInstruction* GetBaseAddress() const { return InputAt(0); }
HInstruction* GetOffset() const { return InputAt(1); }
@@ -7760,7 +7760,7 @@ class HGraphDelegateVisitor : public HGraphVisitor {
// Visit functions that delegate to the super class.
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE { Visit##super(instr); }
+ void Visit##name(H##name* instr) override { Visit##super(instr); }
FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
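The macro above generates one Visit method per instruction that forwards to the handler for its superclass, letting a pass intercept whole instruction families by overriding a single method. A sketch of the idiom with hypothetical names:

#include <iostream>

struct Visitor {
  virtual ~Visitor() = default;
  virtual void VisitInstruction() { std::cout << "generic\n"; }
  // Each level delegates upward by default.
  virtual void VisitBinaryOperation() { VisitInstruction(); }
  virtual void VisitAdd() { VisitBinaryOperation(); }
};

struct CountBinOps : public Visitor {
  int count = 0;
  void VisitBinaryOperation() override { ++count; }  // catches Add, Sub, ...
};

int main() {
  CountBinOps v;
  v.VisitAdd();                   // Add delegates to BinaryOperation
  std::cout << v.count << '\n';   // prints: 1
}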
@@ -7782,7 +7782,7 @@ class CloneAndReplaceInstructionVisitor : public HGraphDelegateVisitor {
explicit CloneAndReplaceInstructionVisitor(HGraph* graph)
: HGraphDelegateVisitor(graph), instr_replaced_by_clones_count_(0) {}
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
if (instruction->IsClonable()) {
ReplaceInstrOrPhiByClone(instruction);
instr_replaced_by_clones_count_++;
diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h
index 05b27a7810..4993f5737e 100644
--- a/compiler/optimizing/nodes_mips.h
+++ b/compiler/optimizing/nodes_mips.h
@@ -30,7 +30,7 @@ class HMipsComputeBaseMethodAddress : public HExpression<0> {
kNoDexPc) {
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(MipsComputeBaseMethodAddress);
@@ -39,7 +39,7 @@ class HMipsComputeBaseMethodAddress : public HExpression<0> {
};
// Mips version of HPackedSwitch that holds a pointer to the base method address.
-class HMipsPackedSwitch FINAL : public HExpression<2> {
+class HMipsPackedSwitch final : public HExpression<2> {
public:
HMipsPackedSwitch(int32_t start_value,
int32_t num_entries,
@@ -53,7 +53,7 @@ class HMipsPackedSwitch FINAL : public HExpression<2> {
SetRawInputAt(1, method_base);
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
int32_t GetStartValue() const { return start_value_; }
@@ -91,7 +91,7 @@ class HMipsPackedSwitch FINAL : public HExpression<2> {
//
// Note: as the instruction doesn't involve the base array address in its computations, it has
// no side effects.
-class HIntermediateArrayAddressIndex FINAL : public HExpression<2> {
+class HIntermediateArrayAddressIndex final : public HExpression<2> {
public:
HIntermediateArrayAddressIndex(HInstruction* index, HInstruction* shift, uint32_t dex_pc)
: HExpression(kIntermediateArrayAddressIndex,
@@ -102,11 +102,11 @@ class HIntermediateArrayAddressIndex FINAL : public HExpression<2> {
SetRawInputAt(1, shift);
}
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool IsActualObject() const OVERRIDE { return false; }
+ bool IsActualObject() const override { return false; }
HInstruction* GetIndex() const { return InputAt(0); }
HInstruction* GetShift() const { return InputAt(1); }
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index 29358e1141..7dcac1787e 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -24,7 +24,7 @@
namespace art {
-class HMultiplyAccumulate FINAL : public HExpression<3> {
+class HMultiplyAccumulate final : public HExpression<3> {
public:
HMultiplyAccumulate(DataType::Type type,
InstructionKind op,
@@ -39,14 +39,14 @@ class HMultiplyAccumulate FINAL : public HExpression<3> {
SetRawInputAt(kInputMulRightIndex, mul_right);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
static constexpr int kInputAccumulatorIndex = 0;
static constexpr int kInputMulLeftIndex = 1;
static constexpr int kInputMulRightIndex = 2;
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other) const override {
return op_kind_ == other->AsMultiplyAccumulate()->op_kind_;
}
@@ -62,7 +62,7 @@ class HMultiplyAccumulate FINAL : public HExpression<3> {
const InstructionKind op_kind_;
};
-class HBitwiseNegatedRight FINAL : public HBinaryOperation {
+class HBitwiseNegatedRight final : public HBinaryOperation {
public:
HBitwiseNegatedRight(DataType::Type result_type,
InstructionKind op,
@@ -97,21 +97,21 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation {
}
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -145,7 +145,7 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation {
//
// Note: as the instruction doesn't involve the base array address in its computations, it has
// no side effects (in contrast to HIntermediateAddress).
-class HIntermediateAddressIndex FINAL : public HExpression<3> {
+class HIntermediateAddressIndex final : public HExpression<3> {
public:
HIntermediateAddressIndex(
HInstruction* index, HInstruction* offset, HInstruction* shift, uint32_t dex_pc)
@@ -158,12 +158,12 @@ class HIntermediateAddressIndex FINAL : public HExpression<3> {
SetRawInputAt(2, shift);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool IsActualObject() const OVERRIDE { return false; }
+ bool IsActualObject() const override { return false; }
HInstruction* GetIndex() const { return InputAt(0); }
HInstruction* GetOffset() const { return InputAt(1); }
@@ -175,7 +175,7 @@ class HIntermediateAddressIndex FINAL : public HExpression<3> {
DEFAULT_COPY_CONSTRUCTOR(IntermediateAddressIndex);
};
-class HDataProcWithShifterOp FINAL : public HExpression<2> {
+class HDataProcWithShifterOp final : public HExpression<2> {
public:
enum OpKind {
kLSL, // Logical shift left.
@@ -212,9 +212,9 @@ class HDataProcWithShifterOp FINAL : public HExpression<2> {
SetRawInputAt(1, right);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other_instr) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other_instr) const override {
const HDataProcWithShifterOp* other = other_instr->AsDataProcWithShifterOp();
return instr_kind_ == other->instr_kind_ &&
op_kind_ == other->op_kind_ &&
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 95fb5ab76a..c7539f2846 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -117,12 +117,12 @@ class HVecOperation : public HVariableInputSizeInstruction {
// Note: For newly introduced vector instructions HScheduler${ARCH}::IsSchedulingBarrier must be
// altered to return true if the instruction might reside outside the SIMD loop body since SIMD
// registers are not kept alive across vector loop boundaries (yet).
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
// Tests if all data of a vector node (vector length and packed type) is equal.
// Each concrete implementation that adds more fields should test equality of
// those fields in its own method *and* call all super methods.
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecOperation());
const HVecOperation* o = other->AsVecOperation();
return GetVectorLength() == o->GetVectorLength() && GetPackedType() == o->GetPackedType();
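This equality check illustrates the chaining contract stated in the comment above: each level compares its own fields and delegates the rest to its base class (HVecMemoryOperation below and HVecReduce further down follow the same pattern). A sketch with stand-in types; the real code verifies the dynamic kind before casting:

#include <cassert>
#include <cstddef>

struct VecOp {
  size_t vector_length;
  virtual ~VecOp() = default;
  virtual bool DataEquals(const VecOp* other) const {
    return vector_length == other->vector_length;
  }
};

struct VecMemOp : public VecOp {
  size_t alignment;
  bool DataEquals(const VecOp* other) const override {
    // Assumes `other` is known to be a VecMemOp (checked via DCHECK upstream).
    auto* o = static_cast<const VecMemOp*>(other);
    return VecOp::DataEquals(o) && alignment == o->alignment;  // chain to super
  }
};

int main() {
  VecMemOp a, b;
  a.vector_length = b.vector_length = 4;
  a.alignment = 16;
  b.alignment = 8;
  assert(!a.DataEquals(&b));  // differs only in the subclass field
  b.alignment = 16;
  assert(a.DataEquals(&b));   // equal at every level of the hierarchy
}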
@@ -280,7 +280,7 @@ class HVecMemoryOperation : public HVecOperation {
HInstruction* GetArray() const { return InputAt(0); }
HInstruction* GetIndex() const { return InputAt(1); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecMemoryOperation());
const HVecMemoryOperation* o = other->AsVecMemoryOperation();
return HVecOperation::InstructionDataEquals(o) && GetAlignment() == o->GetAlignment();
@@ -315,7 +315,7 @@ inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type
// Replicates the given scalar into a vector,
// viz. replicate(x) = [ x, .. , x ].
-class HVecReplicateScalar FINAL : public HVecUnaryOperation {
+class HVecReplicateScalar final : public HVecUnaryOperation {
public:
HVecReplicateScalar(ArenaAllocator* allocator,
HInstruction* scalar,
@@ -329,7 +329,7 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation {
// A replicate needs to stay in place, since SIMD registers are not
// kept alive across vector loop boundaries (yet).
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
DECLARE_INSTRUCTION(VecReplicateScalar);
@@ -341,7 +341,7 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation {
// viz. extract[ x1, .. , xn ] = x_i.
//
// TODO: for now only i == 1 case supported.
-class HVecExtractScalar FINAL : public HVecUnaryOperation {
+class HVecExtractScalar final : public HVecUnaryOperation {
public:
HVecExtractScalar(ArenaAllocator* allocator,
HInstruction* input,
@@ -361,7 +361,7 @@ class HVecExtractScalar FINAL : public HVecUnaryOperation {
// An extract needs to stay in place, since SIMD registers are not
// kept alive across vector loop boundaries (yet).
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
DECLARE_INSTRUCTION(VecExtractScalar);
@@ -372,7 +372,7 @@ class HVecExtractScalar FINAL : public HVecUnaryOperation {
// Reduces the given vector into the first element as sum/min/max,
// viz. sum-reduce[ x1, .. , xn ] = [ y, ---- ], where y = sum xi
// and the "-" denotes "don't care" (implementation dependent).
-class HVecReduce FINAL : public HVecUnaryOperation {
+class HVecReduce final : public HVecUnaryOperation {
public:
enum ReductionKind {
kSum = 1,
@@ -393,9 +393,9 @@ class HVecReduce FINAL : public HVecUnaryOperation {
ReductionKind GetKind() const { return kind_; }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecReduce());
const HVecReduce* o = other->AsVecReduce();
return HVecOperation::InstructionDataEquals(o) && GetKind() == o->GetKind();
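A hypothetical model of sum-reduce (not ART code): lane 0 carries the result, and the remaining lanes are left unspecified, matching the "don't care" note above.

    #include <array>
    #include <cstddef>
    #include <numeric>

    template <typename T, std::size_t N>
    std::array<T, N> SumReduce(const std::array<T, N>& v) {
      std::array<T, N> out{};  // zeros here, but lanes 1..N-1 are "don't care"
      out[0] = std::accumulate(v.begin(), v.end(), T(0));
      return out;
    }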
@@ -412,7 +412,7 @@ class HVecReduce FINAL : public HVecUnaryOperation {
// Converts every component in the vector,
// viz. cnv[ x1, .. , xn ] = [ cnv(x1), .. , cnv(xn) ].
-class HVecCnv FINAL : public HVecUnaryOperation {
+class HVecCnv final : public HVecUnaryOperation {
public:
HVecCnv(ArenaAllocator* allocator,
HInstruction* input,
@@ -427,7 +427,7 @@ class HVecCnv FINAL : public HVecUnaryOperation {
DataType::Type GetInputType() const { return InputAt(0)->AsVecOperation()->GetPackedType(); }
DataType::Type GetResultType() const { return GetPackedType(); }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecCnv);
@@ -437,7 +437,7 @@ class HVecCnv FINAL : public HVecUnaryOperation {
// Negates every component in the vector,
// viz. neg[ x1, .. , xn ] = [ -x1, .. , -xn ].
-class HVecNeg FINAL : public HVecUnaryOperation {
+class HVecNeg final : public HVecUnaryOperation {
public:
HVecNeg(ArenaAllocator* allocator,
HInstruction* input,
@@ -448,7 +448,7 @@ class HVecNeg FINAL : public HVecUnaryOperation {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecNeg);
@@ -459,7 +459,7 @@ class HVecNeg FINAL : public HVecUnaryOperation {
// Takes absolute value of every component in the vector,
// viz. abs[ x1, .. , xn ] = [ |x1|, .. , |xn| ]
// for signed operand x.
-class HVecAbs FINAL : public HVecUnaryOperation {
+class HVecAbs final : public HVecUnaryOperation {
public:
HVecAbs(ArenaAllocator* allocator,
HInstruction* input,
@@ -470,7 +470,7 @@ class HVecAbs FINAL : public HVecUnaryOperation {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecAbs);
@@ -481,7 +481,7 @@ class HVecAbs FINAL : public HVecUnaryOperation {
// Bitwise- or boolean-nots every component in the vector,
// viz. not[ x1, .. , xn ] = [ ~x1, .. , ~xn ], or
// not[ x1, .. , xn ] = [ !x1, .. , !xn ] for boolean.
-class HVecNot FINAL : public HVecUnaryOperation {
+class HVecNot final : public HVecUnaryOperation {
public:
HVecNot(ArenaAllocator* allocator,
HInstruction* input,
@@ -492,7 +492,7 @@ class HVecNot FINAL : public HVecUnaryOperation {
DCHECK(input->IsVecOperation());
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecNot);
@@ -506,7 +506,7 @@ class HVecNot FINAL : public HVecUnaryOperation {
// Adds every component in the two vectors,
// viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 + y1, .. , xn + yn ].
-class HVecAdd FINAL : public HVecBinaryOperation {
+class HVecAdd final : public HVecBinaryOperation {
public:
HVecAdd(ArenaAllocator* allocator,
HInstruction* left,
@@ -519,7 +519,7 @@ class HVecAdd FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecAdd);
@@ -530,7 +530,7 @@ class HVecAdd FINAL : public HVecBinaryOperation {
// Adds every component in the two vectors using saturation arithmetic,
// viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 +_sat y1, .. , xn +_sat yn ]
// for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecSaturationAdd FINAL : public HVecBinaryOperation {
+class HVecSaturationAdd final : public HVecBinaryOperation {
public:
HVecSaturationAdd(ArenaAllocator* allocator,
HInstruction* left,
@@ -544,7 +544,7 @@ class HVecSaturationAdd FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecSaturationAdd);
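A minimal sketch of one lane of saturating addition, assuming a signed 8-bit packed type (not ART code):

    #include <algorithm>
    #include <cstdint>

    int8_t SaturatingAdd(int8_t x, int8_t y) {
      int wide = int{x} + int{y};  // widen first, so the sum cannot overflow
      return static_cast<int8_t>(std::clamp(wide, -128, 127));  // clamp to the lane range
    }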
@@ -556,7 +556,7 @@ class HVecSaturationAdd FINAL : public HVecBinaryOperation {
// rounded [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ]
// truncated [ x1, .. , xn ] hadd [ y1, .. , yn ] = [ (x1 + y1) >> 1, .. , (xn + yn ) >> 1 ]
// for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecHalvingAdd FINAL : public HVecBinaryOperation {
+class HVecHalvingAdd final : public HVecBinaryOperation {
public:
HVecHalvingAdd(ArenaAllocator* allocator,
HInstruction* left,
@@ -574,9 +574,9 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecHalvingAdd());
const HVecHalvingAdd* o = other->AsVecHalvingAdd();
return HVecOperation::InstructionDataEquals(o) && IsRounded() == o->IsRounded();
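A sketch of the rounded vs. truncated flavors on one unsigned 8-bit lane (assumed packed type, not ART code):

    #include <cstdint>

    uint8_t HalvingAddTruncated(uint8_t x, uint8_t y) {
      return static_cast<uint8_t>((unsigned{x} + y) >> 1);       // (x + y) >> 1
    }
    uint8_t HalvingAddRounded(uint8_t x, uint8_t y) {
      return static_cast<uint8_t>((unsigned{x} + y + 1u) >> 1);  // (x + y + 1) >> 1
    }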
@@ -596,7 +596,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
// Subtracts every component in the two vectors,
// viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 - y1, .. , xn - yn ].
-class HVecSub FINAL : public HVecBinaryOperation {
+class HVecSub final : public HVecBinaryOperation {
public:
HVecSub(ArenaAllocator* allocator,
HInstruction* left,
@@ -609,7 +609,7 @@ class HVecSub FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecSub);
@@ -620,7 +620,7 @@ class HVecSub FINAL : public HVecBinaryOperation {
// Subtracts every component in the two vectors using saturation arithmetic,
// viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 -_sat y1, .. , xn -_sat yn ]
// for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecSaturationSub FINAL : public HVecBinaryOperation {
+class HVecSaturationSub final : public HVecBinaryOperation {
public:
HVecSaturationSub(ArenaAllocator* allocator,
HInstruction* left,
@@ -634,7 +634,7 @@ class HVecSaturationSub FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecSaturationSub);
@@ -644,7 +644,7 @@ class HVecSaturationSub FINAL : public HVecBinaryOperation {
// Multiplies every component in the two vectors,
// viz. [ x1, .. , xn ] * [ y1, .. , yn ] = [ x1 * y1, .. , xn * yn ].
-class HVecMul FINAL : public HVecBinaryOperation {
+class HVecMul final : public HVecBinaryOperation {
public:
HVecMul(ArenaAllocator* allocator,
HInstruction* left,
@@ -657,7 +657,7 @@ class HVecMul FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecMul);
@@ -667,7 +667,7 @@ class HVecMul FINAL : public HVecBinaryOperation {
// Divides every component in the two vectors,
// viz. [ x1, .. , xn ] / [ y1, .. , yn ] = [ x1 / y1, .. , xn / yn ].
-class HVecDiv FINAL : public HVecBinaryOperation {
+class HVecDiv final : public HVecBinaryOperation {
public:
HVecDiv(ArenaAllocator* allocator,
HInstruction* left,
@@ -680,7 +680,7 @@ class HVecDiv FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecDiv);
@@ -691,7 +691,7 @@ class HVecDiv FINAL : public HVecBinaryOperation {
// Takes minimum of every component in the two vectors,
// viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ]
// for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecMin FINAL : public HVecBinaryOperation {
+class HVecMin final : public HVecBinaryOperation {
public:
HVecMin(ArenaAllocator* allocator,
HInstruction* left,
@@ -704,7 +704,7 @@ class HVecMin FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecMin);
@@ -715,7 +715,7 @@ class HVecMin FINAL : public HVecBinaryOperation {
// Takes maximum of every component in the two vectors,
// viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ]
// for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecMax FINAL : public HVecBinaryOperation {
+class HVecMax final : public HVecBinaryOperation {
public:
HVecMax(ArenaAllocator* allocator,
HInstruction* left,
@@ -728,7 +728,7 @@ class HVecMax FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecMax);
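This is why packed_type must record signedness for min/max: the same lane bits compare differently. A sketch (not ART code):

    #include <cstdint>

    // Same bit pattern, opposite outcomes:
    //   signed lanes:   min(int8_t(0xFF), 1)  == -1  (the 0xFF lane wins)
    //   unsigned lanes: min(uint8_t(0xFF), 1) ==  1  (the 0x01 lane wins)
    int8_t  MinSigned(int8_t x, int8_t y)     { return x < y ? x : y; }
    uint8_t MinUnsigned(uint8_t x, uint8_t y) { return x < y ? x : y; }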
@@ -738,7 +738,7 @@ class HVecMax FINAL : public HVecBinaryOperation {
// Bitwise-ands every component in the two vectors,
// viz. [ x1, .. , xn ] & [ y1, .. , yn ] = [ x1 & y1, .. , xn & yn ].
-class HVecAnd FINAL : public HVecBinaryOperation {
+class HVecAnd final : public HVecBinaryOperation {
public:
HVecAnd(ArenaAllocator* allocator,
HInstruction* left,
@@ -750,7 +750,7 @@ class HVecAnd FINAL : public HVecBinaryOperation {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecAnd);
@@ -760,7 +760,7 @@ class HVecAnd FINAL : public HVecBinaryOperation {
// Bitwise-and-nots every component in the two vectors,
// viz. [ x1, .. , xn ] and-not [ y1, .. , yn ] = [ ~x1 & y1, .. , ~xn & yn ].
-class HVecAndNot FINAL : public HVecBinaryOperation {
+class HVecAndNot final : public HVecBinaryOperation {
public:
HVecAndNot(ArenaAllocator* allocator,
HInstruction* left,
@@ -773,7 +773,7 @@ class HVecAndNot FINAL : public HVecBinaryOperation {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecAndNot);
@@ -783,7 +783,7 @@ class HVecAndNot FINAL : public HVecBinaryOperation {
// Bitwise-ors every component in the two vectors,
// viz. [ x1, .. , xn ] | [ y1, .. , yn ] = [ x1 | y1, .. , xn | yn ].
-class HVecOr FINAL : public HVecBinaryOperation {
+class HVecOr final : public HVecBinaryOperation {
public:
HVecOr(ArenaAllocator* allocator,
HInstruction* left,
@@ -795,7 +795,7 @@ class HVecOr FINAL : public HVecBinaryOperation {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecOr);
@@ -805,7 +805,7 @@ class HVecOr FINAL : public HVecBinaryOperation {
// Bitwise-xors every component in the two vectors,
// viz. [ x1, .. , xn ] ^ [ y1, .. , yn ] = [ x1 ^ y1, .. , xn ^ yn ].
-class HVecXor FINAL : public HVecBinaryOperation {
+class HVecXor final : public HVecBinaryOperation {
public:
HVecXor(ArenaAllocator* allocator,
HInstruction* left,
@@ -817,7 +817,7 @@ class HVecXor FINAL : public HVecBinaryOperation {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecXor);
@@ -827,7 +827,7 @@ class HVecXor FINAL : public HVecBinaryOperation {
// Logically shifts every component in the vector left by the given distance,
// viz. [ x1, .. , xn ] << d = [ x1 << d, .. , xn << d ].
-class HVecShl FINAL : public HVecBinaryOperation {
+class HVecShl final : public HVecBinaryOperation {
public:
HVecShl(ArenaAllocator* allocator,
HInstruction* left,
@@ -839,7 +839,7 @@ class HVecShl FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecShl);
@@ -849,7 +849,7 @@ class HVecShl FINAL : public HVecBinaryOperation {
// Arithmetically shifts every component in the vector right by the given distance,
// viz. [ x1, .. , xn ] >> d = [ x1 >> d, .. , xn >> d ].
-class HVecShr FINAL : public HVecBinaryOperation {
+class HVecShr final : public HVecBinaryOperation {
public:
HVecShr(ArenaAllocator* allocator,
HInstruction* left,
@@ -861,7 +861,7 @@ class HVecShr FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecShr);
@@ -871,7 +871,7 @@ class HVecShr FINAL : public HVecBinaryOperation {
// Logically shifts every component in the vector right by the given distance,
// viz. [ x1, .. , xn ] >>> d = [ x1 >>> d, .. , xn >>> d ].
-class HVecUShr FINAL : public HVecBinaryOperation {
+class HVecUShr final : public HVecBinaryOperation {
public:
HVecUShr(ArenaAllocator* allocator,
HInstruction* left,
@@ -883,7 +883,7 @@ class HVecUShr FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecUShr);
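The Shr/UShr distinction on a single 32-bit lane, sketched (not ART code). Java's >> replicates the sign bit while >>> shifts in zeros:

    #include <cstdint>

    int32_t ArithmeticShr(int32_t x, int d) {  // HVecShr semantics
      return x >> d;  // e.g. -8 >> 1 == -4 (arithmetic shift on all ART targets)
    }
    int32_t LogicalShr(int32_t x, int d) {     // HVecUShr semantics
      return static_cast<int32_t>(static_cast<uint32_t>(x) >> d);  // -8 >>> 1 == 0x7FFFFFFC
    }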
@@ -898,7 +898,7 @@ class HVecUShr FINAL : public HVecBinaryOperation {
// Assigns the given scalar elements to a vector,
// viz. set( array(x1, .. , xm) ) = [ x1, .. , xm ] if m == n,
// set( array(x1, .. , xm) ) = [ x1, .. , xm, 0, .. , 0 ] if m < n.
-class HVecSetScalars FINAL : public HVecOperation {
+class HVecSetScalars final : public HVecOperation {
public:
HVecSetScalars(ArenaAllocator* allocator,
HInstruction* scalars[],
@@ -921,7 +921,7 @@ class HVecSetScalars FINAL : public HVecOperation {
// Setting scalars needs to stay in place, since SIMD registers are not
// kept alive across vector loop boundaries (yet).
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
DECLARE_INSTRUCTION(VecSetScalars);
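A hypothetical model of the zero-padding rule above for m < n (not ART code):

    #include <array>
    #include <cstddef>

    template <typename T, std::size_t N>
    std::array<T, N> SetScalars(const T* scalars, std::size_t m) {
      std::array<T, N> v{};  // lanes m..N-1 stay zero
      for (std::size_t i = 0; i < m && i < N; ++i) {
        v[i] = scalars[i];
      }
      return v;
    }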
@@ -934,7 +934,7 @@ class HVecSetScalars FINAL : public HVecOperation {
// For floating point types, Java rounding behavior must be preserved; the products are rounded to
// the proper precision before being added. "Fused" multiply-add operations available on several
// architectures are not usable since they would violate Java language rules.
-class HVecMultiplyAccumulate FINAL : public HVecOperation {
+class HVecMultiplyAccumulate final : public HVecOperation {
public:
HVecMultiplyAccumulate(ArenaAllocator* allocator,
InstructionKind op,
@@ -964,9 +964,9 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation {
SetRawInputAt(2, mul_right);
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecMultiplyAccumulate());
const HVecMultiplyAccumulate* o = other->AsVecMultiplyAccumulate();
return HVecOperation::InstructionDataEquals(o) && GetOpKind() == o->GetOpKind();
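A small demonstration of the rounding point made above: Java requires the product to be rounded before the addition, and a hardware fused multiply-add skips that intermediate rounding. A sketch (not ART code):

    #include <cmath>
    #include <cstdio>

    int main() {
      double a = std::nextafter(1.0, 2.0);       // 1 + 2^-52
      double c = -(1.0 + std::ldexp(1.0, -51));  // -(1 + 2^-51)
      double separate = a * a + c;               // product rounded first: 0.0
      double fused = std::fma(a, a, c);          // exact product kept: 2^-104
      std::printf("%g vs %g\n", separate, fused);
      return 0;
    }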
@@ -989,7 +989,7 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation {
// viz. SAD([ a1, .. , am ], [ x1, .. , xn ], [ y1, .. , yn ]) =
// [ a1 + sum abs(xi-yi), .. , am + sum abs(xj-yj) ],
// for m <= n, non-overlapping sums, and signed operands x, y.
-class HVecSADAccumulate FINAL : public HVecOperation {
+class HVecSADAccumulate final : public HVecOperation {
public:
HVecSADAccumulate(ArenaAllocator* allocator,
HInstruction* accumulator,
@@ -1023,7 +1023,7 @@ class HVecSADAccumulate FINAL : public HVecOperation {
// Loads a vector from memory, viz. load(mem, 1)
// yields the vector [ mem(1), .. , mem(n) ].
-class HVecLoad FINAL : public HVecMemoryOperation {
+class HVecLoad final : public HVecMemoryOperation {
public:
HVecLoad(ArenaAllocator* allocator,
HInstruction* base,
@@ -1047,9 +1047,9 @@ class HVecLoad FINAL : public HVecMemoryOperation {
bool IsStringCharAt() const { return GetPackedFlag<kFieldIsStringCharAt>(); }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecLoad());
const HVecLoad* o = other->AsVecLoad();
return HVecMemoryOperation::InstructionDataEquals(o) && IsStringCharAt() == o->IsStringCharAt();
@@ -1069,7 +1069,7 @@ class HVecLoad FINAL : public HVecMemoryOperation {
// Stores a vector to memory, viz. store(m, 1, [x1, .. , xn] )
// sets mem(1) = x1, .. , mem(n) = xn.
-class HVecStore FINAL : public HVecMemoryOperation {
+class HVecStore final : public HVecMemoryOperation {
public:
HVecStore(ArenaAllocator* allocator,
HInstruction* base,
@@ -1093,7 +1093,7 @@ class HVecStore FINAL : public HVecMemoryOperation {
}
// A store needs to stay in place.
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
DECLARE_INSTRUCTION(VecStore);
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index d1e7f68edb..a55110426b 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -20,7 +20,7 @@
namespace art {
// Compute the address of the method for X86 Constant area support.
-class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> {
+class HX86ComputeBaseMethodAddress final : public HExpression<0> {
public:
// Treat the value as an int32_t, but it is really a 32 bit native pointer.
HX86ComputeBaseMethodAddress()
@@ -30,7 +30,7 @@ class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> {
kNoDexPc) {
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress);
@@ -39,7 +39,7 @@ class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> {
};
// Load a constant value from the constant table.
-class HX86LoadFromConstantTable FINAL : public HExpression<2> {
+class HX86LoadFromConstantTable final : public HExpression<2> {
public:
HX86LoadFromConstantTable(HX86ComputeBaseMethodAddress* method_base,
HConstant* constant)
@@ -66,7 +66,7 @@ class HX86LoadFromConstantTable FINAL : public HExpression<2> {
};
// Version of HNeg with access to the constant table for FP types.
-class HX86FPNeg FINAL : public HExpression<2> {
+class HX86FPNeg final : public HExpression<2> {
public:
HX86FPNeg(DataType::Type result_type,
HInstruction* input,
@@ -89,7 +89,7 @@ class HX86FPNeg FINAL : public HExpression<2> {
};
// X86 version of HPackedSwitch that holds a pointer to the base method address.
-class HX86PackedSwitch FINAL : public HExpression<2> {
+class HX86PackedSwitch final : public HExpression<2> {
public:
HX86PackedSwitch(int32_t start_value,
int32_t num_entries,
@@ -103,7 +103,7 @@ class HX86PackedSwitch FINAL : public HExpression<2> {
SetRawInputAt(1, method_base);
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
int32_t GetStartValue() const { return start_value_; }
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 04301f5366..be1f7ea5b4 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -133,7 +133,7 @@ class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
return memory_.data();
}
- ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
+ ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
private:
std::vector<uint8_t> memory_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f52b96dac9..0a747053cf 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -74,7 +74,7 @@ static constexpr const char* kPassNameSeparator = "$";
/**
* Used by the code generator, to allocate the code in a vector.
*/
-class CodeVectorAllocator FINAL : public CodeAllocator {
+class CodeVectorAllocator final : public CodeAllocator {
public:
explicit CodeVectorAllocator(ArenaAllocator* allocator)
: memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
@@ -84,7 +84,7 @@ class CodeVectorAllocator FINAL : public CodeAllocator {
return &memory_[0];
}
- ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
+ ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
uint8_t* GetData() { return memory_.data(); }
private:
@@ -264,12 +264,12 @@ class PassScope : public ValueObject {
PassObserver* const pass_observer_;
};
-class OptimizingCompiler FINAL : public Compiler {
+class OptimizingCompiler final : public Compiler {
public:
explicit OptimizingCompiler(CompilerDriver* driver);
- ~OptimizingCompiler() OVERRIDE;
+ ~OptimizingCompiler() override;
- bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const OVERRIDE;
+ bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;
CompiledMethod* Compile(const DexFile::CodeItem* code_item,
uint32_t access_flags,
@@ -278,29 +278,29 @@ class OptimizingCompiler FINAL : public Compiler {
uint32_t method_idx,
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache) const OVERRIDE;
+ Handle<mirror::DexCache> dex_cache) const override;
CompiledMethod* JniCompile(uint32_t access_flags,
uint32_t method_idx,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache) const OVERRIDE;
+ Handle<mirror::DexCache> dex_cache) const override;
- uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
+ uintptr_t GetEntryPointOf(ArtMethod* method) const override
REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
InstructionSetPointerSize(GetCompilerDriver()->GetCompilerOptions().GetInstructionSet())));
}
- void Init() OVERRIDE;
+ void Init() override;
- void UnInit() const OVERRIDE;
+ void UnInit() const override;
bool JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
bool osr,
jit::JitLogger* jit_logger)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h
index e6e069f96e..5fadcab402 100644
--- a/compiler/optimizing/parallel_move_resolver.h
+++ b/compiler/optimizing/parallel_move_resolver.h
@@ -58,7 +58,7 @@ class ParallelMoveResolverWithSwap : public ParallelMoveResolver {
virtual ~ParallelMoveResolverWithSwap() {}
// Resolve a set of parallel moves, emitting assembler instructions.
- void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;
+ void EmitNativeCode(HParallelMove* parallel_move) override;
protected:
class ScratchRegisterScope : public ValueObject {
@@ -133,7 +133,7 @@ class ParallelMoveResolverNoSwap : public ParallelMoveResolver {
virtual ~ParallelMoveResolverNoSwap() {}
// Resolve a set of parallel moves, emitting assembler instructions.
- void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;
+ void EmitNativeCode(HParallelMove* parallel_move) override;
protected:
// Called at the beginning of EmitNativeCode(). A subclass may put some architecture dependent
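The two resolvers differ in how a detected move cycle is broken: the swap variant emits an exchange, the no-swap variant rotates the cycle through a scratch location. The degenerate two-register cycle, sketched (not ART code):

    #include <utility>

    // Parallel move (r0, r1) <- (r1, r0): neither move can go first
    // without clobbering the other's source.
    void ResolveWithSwap(int& r0, int& r1) {
      std::swap(r0, r1);   // one exchange breaks the cycle
    }
    void ResolveNoSwap(int& r0, int& r1, int& scratch) {
      scratch = r1;        // spill one source to a scratch location,
      r1 = r0;             // after which the remaining moves are acyclic
      r0 = scratch;
    }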
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index be35201166..399a6d8cbd 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -56,7 +56,7 @@ class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap {
explicit TestParallelMoveResolverWithSwap(ArenaAllocator* allocator)
: ParallelMoveResolverWithSwap(allocator) {}
- void EmitMove(size_t index) OVERRIDE {
+ void EmitMove(size_t index) override {
MoveOperands* move = moves_[index];
if (!message_.str().empty()) {
message_ << " ";
@@ -68,7 +68,7 @@ class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap {
message_ << ")";
}
- void EmitSwap(size_t index) OVERRIDE {
+ void EmitSwap(size_t index) override {
MoveOperands* move = moves_[index];
if (!message_.str().empty()) {
message_ << " ";
@@ -80,8 +80,8 @@ class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap {
message_ << ")";
}
- void SpillScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
- void RestoreScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
+ void SpillScratch(int reg ATTRIBUTE_UNUSED) override {}
+ void RestoreScratch(int reg ATTRIBUTE_UNUSED) override {}
std::string GetMessage() const {
return message_.str();
@@ -99,13 +99,13 @@ class TestParallelMoveResolverNoSwap : public ParallelMoveResolverNoSwap {
explicit TestParallelMoveResolverNoSwap(ArenaAllocator* allocator)
: ParallelMoveResolverNoSwap(allocator), scratch_index_(kScratchRegisterStartIndexForTest) {}
- void PrepareForEmitNativeCode() OVERRIDE {
+ void PrepareForEmitNativeCode() override {
scratch_index_ = kScratchRegisterStartIndexForTest;
}
- void FinishEmitNativeCode() OVERRIDE {}
+ void FinishEmitNativeCode() override {}
- Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE {
+ Location AllocateScratchLocationFor(Location::Kind kind) override {
if (kind == Location::kStackSlot || kind == Location::kFpuRegister ||
kind == Location::kRegister) {
kind = Location::kRegister;
@@ -125,9 +125,9 @@ class TestParallelMoveResolverNoSwap : public ParallelMoveResolverNoSwap {
return scratch;
}
- void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) OVERRIDE {}
+ void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) override {}
- void EmitMove(size_t index) OVERRIDE {
+ void EmitMove(size_t index) override {
MoveOperands* move = moves_[index];
if (!message_.str().empty()) {
message_ << " ";
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index a7e97a1ce5..05208ff65c 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -58,7 +58,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
DCHECK(base_ != nullptr);
}
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
// If this is an invoke with PC-relative load kind,
// we need to add the base as the special input.
if (invoke->HasPcRelativeMethodLoadKind() &&
@@ -70,7 +70,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+ void VisitLoadClass(HLoadClass* load_class) override {
HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
switch (load_kind) {
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
@@ -86,7 +86,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- void VisitLoadString(HLoadString* load_string) OVERRIDE {
+ void VisitLoadString(HLoadString* load_string) override {
HLoadString::LoadKind load_kind = load_string->GetLoadKind();
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
@@ -102,7 +102,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+ void VisitPackedSwitch(HPackedSwitch* switch_insn) override {
if (switch_insn->GetNumEntries() <=
InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) {
return;
diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h
index 6dd1ee0db2..872370bcb7 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.h
+++ b/compiler/optimizing/pc_relative_fixups_mips.h
@@ -34,7 +34,7 @@ class PcRelativeFixups : public HOptimization {
static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips";
- bool Run() OVERRIDE;
+ bool Run() override;
private:
CodeGenerator* codegen_;
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 41f2f776fc..4b07d5b621 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -42,53 +42,53 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
private:
- void VisitAdd(HAdd* add) OVERRIDE {
+ void VisitAdd(HAdd* add) override {
BinaryFP(add);
}
- void VisitSub(HSub* sub) OVERRIDE {
+ void VisitSub(HSub* sub) override {
BinaryFP(sub);
}
- void VisitMul(HMul* mul) OVERRIDE {
+ void VisitMul(HMul* mul) override {
BinaryFP(mul);
}
- void VisitDiv(HDiv* div) OVERRIDE {
+ void VisitDiv(HDiv* div) override {
BinaryFP(div);
}
- void VisitCompare(HCompare* compare) OVERRIDE {
+ void VisitCompare(HCompare* compare) override {
BinaryFP(compare);
}
- void VisitReturn(HReturn* ret) OVERRIDE {
+ void VisitReturn(HReturn* ret) override {
HConstant* value = ret->InputAt(0)->AsConstant();
if ((value != nullptr && DataType::IsFloatingPointType(value->GetType()))) {
ReplaceInput(ret, value, 0, true);
}
}
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+ void VisitInvokeInterface(HInvokeInterface* invoke) override {
HandleInvoke(invoke);
}
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+ void VisitLoadClass(HLoadClass* load_class) override {
if (load_class->HasPcRelativeLoadKind()) {
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_class);
load_class->AddSpecialInput(method_address);
}
}
- void VisitLoadString(HLoadString* load_string) OVERRIDE {
+ void VisitLoadString(HLoadString* load_string) override {
if (load_string->HasPcRelativeLoadKind()) {
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_string);
load_string->AddSpecialInput(method_address);
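Background for these fixups, sketched: 32-bit x86 has no PC-relative addressing, so position-independent code must first materialize its own address. HX86ComputeBaseMethodAddress plays that role, and the visitors here rewrite constant-area users to take it as an extra input. The classic sequence, for illustration only:

    // call next        ; pushes the address of `next` as a return address
    // next:
    // pop  ebx         ; ebx now holds a known in-method address
    // ; constant-area accesses then become [ebx + displacement]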
@@ -102,31 +102,31 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- void VisitEqual(HEqual* cond) OVERRIDE {
+ void VisitEqual(HEqual* cond) override {
BinaryFP(cond);
}
- void VisitNotEqual(HNotEqual* cond) OVERRIDE {
+ void VisitNotEqual(HNotEqual* cond) override {
BinaryFP(cond);
}
- void VisitLessThan(HLessThan* cond) OVERRIDE {
+ void VisitLessThan(HLessThan* cond) override {
BinaryFP(cond);
}
- void VisitLessThanOrEqual(HLessThanOrEqual* cond) OVERRIDE {
+ void VisitLessThanOrEqual(HLessThanOrEqual* cond) override {
BinaryFP(cond);
}
- void VisitGreaterThan(HGreaterThan* cond) OVERRIDE {
+ void VisitGreaterThan(HGreaterThan* cond) override {
BinaryFP(cond);
}
- void VisitGreaterThanOrEqual(HGreaterThanOrEqual* cond) OVERRIDE {
+ void VisitGreaterThanOrEqual(HGreaterThanOrEqual* cond) override {
BinaryFP(cond);
}
- void VisitNeg(HNeg* neg) OVERRIDE {
+ void VisitNeg(HNeg* neg) override {
if (DataType::IsFloatingPointType(neg->GetType())) {
// We need to replace the HNeg with a HX86FPNeg in order to address the constant area.
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(neg);
@@ -141,7 +141,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+ void VisitPackedSwitch(HPackedSwitch* switch_insn) override {
if (switch_insn->GetNumEntries() <=
InstructionCodeGeneratorX86::kPackedSwitchJumpTableThreshold) {
return;
diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
index db56b7f053..3b470a6502 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -34,7 +34,7 @@ class PcRelativeFixups : public HOptimization {
static constexpr const char* kPcRelativeFixupsX86PassName = "pc_relative_fixups_x86";
- bool Run() OVERRIDE;
+ bool Run() override;
private:
CodeGenerator* codegen_;
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index 2978add703..a8ab256e27 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -43,18 +43,18 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
"prepare_for_register_allocation";
private:
- void VisitCheckCast(HCheckCast* check_cast) OVERRIDE;
- void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE;
- void VisitNullCheck(HNullCheck* check) OVERRIDE;
- void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
- void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
- void VisitBoundType(HBoundType* bound_type) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
- void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
- void VisitCondition(HCondition* condition) OVERRIDE;
- void VisitConstructorFence(HConstructorFence* constructor_fence) OVERRIDE;
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
- void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE;
+ void VisitCheckCast(HCheckCast* check_cast) override;
+ void VisitInstanceOf(HInstanceOf* instance_of) override;
+ void VisitNullCheck(HNullCheck* check) override;
+ void VisitDivZeroCheck(HDivZeroCheck* check) override;
+ void VisitBoundsCheck(HBoundsCheck* check) override;
+ void VisitBoundType(HBoundType* bound_type) override;
+ void VisitArraySet(HArraySet* instruction) override;
+ void VisitClinitCheck(HClinitCheck* check) override;
+ void VisitCondition(HCondition* condition) override;
+ void VisitConstructorFence(HConstructorFence* constructor_fence) override;
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
+ void VisitDeoptimize(HDeoptimize* deoptimize) override;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index c6579dc5e0..8ef9ce4e8b 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -33,7 +33,7 @@ class HPrettyPrinter : public HGraphVisitor {
PrintString(": ");
}
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
PrintPreInstruction(instruction);
PrintString(instruction->DebugName());
PrintPostInstruction(instruction);
@@ -70,7 +70,7 @@ class HPrettyPrinter : public HGraphVisitor {
PrintNewLine();
}
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
PrintString("BasicBlock ");
PrintInt(block->GetBlockId());
const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
@@ -108,15 +108,15 @@ class StringPrettyPrinter : public HPrettyPrinter {
explicit StringPrettyPrinter(HGraph* graph)
: HPrettyPrinter(graph), str_(""), current_block_(nullptr) { }
- void PrintInt(int value) OVERRIDE {
+ void PrintInt(int value) override {
str_ += android::base::StringPrintf("%d", value);
}
- void PrintString(const char* value) OVERRIDE {
+ void PrintString(const char* value) override {
str_ += value;
}
- void PrintNewLine() OVERRIDE {
+ void PrintNewLine() override {
str_ += '\n';
}
@@ -124,12 +124,12 @@ class StringPrettyPrinter : public HPrettyPrinter {
std::string str() const { return str_; }
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
current_block_ = block;
HPrettyPrinter::VisitBasicBlock(block);
}
- void VisitGoto(HGoto* gota) OVERRIDE {
+ void VisitGoto(HGoto* gota) override {
PrintString(" ");
PrintInt(gota->GetId());
PrintString(": Goto ");
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 0d622484ee..a9d590232c 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -94,26 +94,26 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
worklist_.reserve(kDefaultWorklistSize);
}
- void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE;
- void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
- void VisitInstanceOf(HInstanceOf* load_class) OVERRIDE;
- void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE;
- void VisitLoadMethodHandle(HLoadMethodHandle* instr) OVERRIDE;
- void VisitLoadMethodType(HLoadMethodType* instr) OVERRIDE;
- void VisitLoadString(HLoadString* instr) OVERRIDE;
- void VisitLoadException(HLoadException* instr) OVERRIDE;
- void VisitNewArray(HNewArray* instr) OVERRIDE;
- void VisitParameterValue(HParameterValue* instr) OVERRIDE;
- void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
- void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
- void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
- void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) OVERRIDE;
- void VisitInvoke(HInvoke* instr) OVERRIDE;
- void VisitArrayGet(HArrayGet* instr) OVERRIDE;
- void VisitCheckCast(HCheckCast* instr) OVERRIDE;
- void VisitBoundType(HBoundType* instr) OVERRIDE;
- void VisitNullCheck(HNullCheck* instr) OVERRIDE;
+ void VisitDeoptimize(HDeoptimize* deopt) override;
+ void VisitNewInstance(HNewInstance* new_instance) override;
+ void VisitLoadClass(HLoadClass* load_class) override;
+ void VisitInstanceOf(HInstanceOf* load_class) override;
+ void VisitClinitCheck(HClinitCheck* clinit_check) override;
+ void VisitLoadMethodHandle(HLoadMethodHandle* instr) override;
+ void VisitLoadMethodType(HLoadMethodType* instr) override;
+ void VisitLoadString(HLoadString* instr) override;
+ void VisitLoadException(HLoadException* instr) override;
+ void VisitNewArray(HNewArray* instr) override;
+ void VisitParameterValue(HParameterValue* instr) override;
+ void VisitInstanceFieldGet(HInstanceFieldGet* instr) override;
+ void VisitStaticFieldGet(HStaticFieldGet* instr) override;
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) override;
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) override;
+ void VisitInvoke(HInvoke* instr) override;
+ void VisitArrayGet(HArrayGet* instr) override;
+ void VisitCheckCast(HCheckCast* instr) override;
+ void VisitBoundType(HBoundType* instr) override;
+ void VisitNullCheck(HNullCheck* instr) override;
void VisitPhi(HPhi* phi);
void VisitBasicBlock(HBasicBlock* block);
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index d36d592708..7c6a048444 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -40,7 +40,7 @@ class ReferenceTypePropagation : public HOptimization {
// Visit a single instruction.
void Visit(HInstruction* instruction);
- bool Run() OVERRIDE;
+ bool Run() override;
// Returns true if klass is admissible to the propagation: non-null and resolved.
// For an array type, we also check if the component type is admissible.
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
index 3072c92e0f..16131e1c71 100644
--- a/compiler/optimizing/register_allocator_graph_color.h
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -90,9 +90,9 @@ class RegisterAllocatorGraphColor : public RegisterAllocator {
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis,
bool iterative_move_coalescing = true);
- ~RegisterAllocatorGraphColor() OVERRIDE;
+ ~RegisterAllocatorGraphColor() override;
- void AllocateRegisters() OVERRIDE;
+ void AllocateRegisters() override;
bool Validate(bool log_fatal_on_failure);
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index 36788b7c3c..4d445c7ff7 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -42,11 +42,11 @@ class RegisterAllocatorLinearScan : public RegisterAllocator {
RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis);
- ~RegisterAllocatorLinearScan() OVERRIDE;
+ ~RegisterAllocatorLinearScan() override;
- void AllocateRegisters() OVERRIDE;
+ void AllocateRegisters() override;
- bool Validate(bool log_fatal_on_failure) OVERRIDE {
+ bool Validate(bool log_fatal_on_failure) override {
processing_core_registers_ = true;
if (!ValidateInternal(log_fatal_on_failure)) {
return false;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 7144775c2b..db6a760007 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -40,7 +40,7 @@ using Strategy = RegisterAllocator::Strategy;
class RegisterAllocatorTest : public OptimizingUnitTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
// This test is using the x86 ISA.
OverrideInstructionSetFeatures(InstructionSet::kX86, "default");
OptimizingUnitTest::SetUp();
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index fd48d844e6..48e80f5f8b 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -339,7 +339,7 @@ class SchedulingLatencyVisitor : public HGraphDelegateVisitor {
last_visited_latency_(0),
last_visited_internal_latency_(0) {}
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Error visiting " << instruction->DebugName() << ". "
"Architecture-specific scheduling latency visitors must handle all instructions"
" (potentially by overriding the generic `VisitInstruction()`.";
@@ -392,7 +392,7 @@ class RandomSchedulingNodeSelector : public SchedulingNodeSelector {
}
SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
- const SchedulingGraph& graph) OVERRIDE {
+ const SchedulingGraph& graph) override {
UNUSED(graph);
DCHECK(!nodes->empty());
size_t select = rand_r(&seed_) % nodes->size();
@@ -412,9 +412,9 @@ class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector {
public:
CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {}
- void Reset() OVERRIDE { prev_select_ = nullptr; }
+ void Reset() override { prev_select_ = nullptr; }
SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
- const SchedulingGraph& graph) OVERRIDE;
+ const SchedulingGraph& graph) override;
protected:
SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate,
@@ -492,7 +492,7 @@ class HInstructionScheduling : public HOptimization {
codegen_(cg),
instruction_set_(instruction_set) {}
- bool Run() OVERRIDE {
+ bool Run() override {
return Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false);
}
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index 2f369486b3..875593bbf0 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -100,7 +100,7 @@ class SchedulingLatencyVisitorARM : public SchedulingLatencyVisitor {
M(DataProcWithShifterOp, unused)
#define DECLARE_VISIT_INSTRUCTION(type, unused) \
- void Visit##type(H##type* instruction) OVERRIDE;
+ void Visit##type(H##type* instruction) override;
FOR_EACH_SCHEDULED_ARM_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -140,9 +140,9 @@ class HSchedulerARM : public HScheduler {
HSchedulerARM(SchedulingNodeSelector* selector,
SchedulingLatencyVisitorARM* arm_latency_visitor)
: HScheduler(arm_latency_visitor, selector) {}
- ~HSchedulerARM() OVERRIDE {}
+ ~HSchedulerARM() override {}
- bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
+ bool IsSchedulable(const HInstruction* instruction) const override {
#define CASE_INSTRUCTION_KIND(type, unused) case \
HInstruction::InstructionKind::k##type:
switch (instruction->GetKind()) {
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 0d2f8d9fa0..7f6549dcfe 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -118,7 +118,7 @@ class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
M(DataProcWithShifterOp, unused)
#define DECLARE_VISIT_INSTRUCTION(type, unused) \
- void Visit##type(H##type* instruction) OVERRIDE;
+ void Visit##type(H##type* instruction) override;
FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_SCHEDULED_ABSTRACT_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -136,9 +136,9 @@ class HSchedulerARM64 : public HScheduler {
public:
explicit HSchedulerARM64(SchedulingNodeSelector* selector)
: HScheduler(&arm64_latency_visitor_, selector) {}
- ~HSchedulerARM64() OVERRIDE {}
+ ~HSchedulerARM64() override {}
- bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
+ bool IsSchedulable(const HInstruction* instruction) const override {
#define CASE_INSTRUCTION_KIND(type, unused) case \
HInstruction::InstructionKind::k##type:
switch (instruction->GetKind()) {
@@ -160,7 +160,7 @@ class HSchedulerARM64 : public HScheduler {
// SIMD&FP registers are callee saved) so don't reorder such vector instructions.
//
// TODO: remove this when a proper support of SIMD registers is introduced to the compiler.
- bool IsSchedulingBarrier(const HInstruction* instr) const OVERRIDE {
+ bool IsSchedulingBarrier(const HInstruction* instr) const override {
return HScheduler::IsSchedulingBarrier(instr) ||
instr->IsVecReduce() ||
instr->IsVecExtractScalar() ||
diff --git a/compiler/optimizing/select_generator.h b/compiler/optimizing/select_generator.h
index d24d2264b2..2889166f60 100644
--- a/compiler/optimizing/select_generator.h
+++ b/compiler/optimizing/select_generator.h
@@ -68,7 +68,7 @@ class HSelectGenerator : public HOptimization {
OptimizingCompilerStats* stats,
const char* name = kSelectGeneratorPassName);
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kSelectGeneratorPassName = "select_generator";
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index cbac361891..dc55eea683 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -37,7 +37,7 @@ class HSharpening : public HOptimization {
: HOptimization(graph, name),
codegen_(codegen) { }
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kSharpeningPassName = "sharpening";
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index cebd4adcd8..97c00c9c1d 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -60,7 +60,7 @@ class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> {
* A live range contains the start and end of a range where an instruction or a temporary
* is live.
*/
-class LiveRange FINAL : public ArenaObject<kArenaAllocSsaLiveness> {
+class LiveRange final : public ArenaObject<kArenaAllocSsaLiveness> {
public:
LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
DCHECK_LT(start, end);
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index a683c698d9..4b525531da 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -29,7 +29,7 @@ namespace art {
class SsaLivenessAnalysisTest : public OptimizingUnitTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
OptimizingUnitTest::SetUp();
graph_ = CreateGraph();
codegen_ = CodeGenerator::Create(graph_, *compiler_options_);
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index ee859e834c..c5cc752ffc 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -31,7 +31,7 @@ class SsaDeadPhiElimination : public HOptimization {
explicit SsaDeadPhiElimination(HGraph* graph)
: HOptimization(graph, kSsaDeadPhiEliminationPassName) {}
- bool Run() OVERRIDE;
+ bool Run() override;
void MarkDeadPhis();
void EliminateDeadPhis();
@@ -53,7 +53,7 @@ class SsaRedundantPhiElimination : public HOptimization {
explicit SsaRedundantPhiElimination(HGraph* graph)
: HOptimization(graph, kSsaRedundantPhiEliminationPassName) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kSsaRedundantPhiEliminationPassName = "redundant_phi_elimination";
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 85ed06eb9b..e679893af2 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -38,15 +38,15 @@ class SsaPrettyPrinter : public HPrettyPrinter {
public:
explicit SsaPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {}
- void PrintInt(int value) OVERRIDE {
+ void PrintInt(int value) override {
str_ += android::base::StringPrintf("%d", value);
}
- void PrintString(const char* value) OVERRIDE {
+ void PrintString(const char* value) override {
str_ += value;
}
- void PrintNewLine() OVERRIDE {
+ void PrintNewLine() override {
str_ += '\n';
}
@@ -54,7 +54,7 @@ class SsaPrettyPrinter : public HPrettyPrinter {
std::string str() const { return str_; }
- void VisitIntConstant(HIntConstant* constant) OVERRIDE {
+ void VisitIntConstant(HIntConstant* constant) override {
PrintPreInstruction(constant);
str_ += constant->DebugName();
str_ += " ";
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index f0069c0e09..b1abcf6747 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -31,7 +31,7 @@ class MemoryOperandVisitor : public HGraphVisitor {
do_implicit_null_checks_(do_implicit_null_checks) {}
private:
- void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE {
+ void VisitBoundsCheck(HBoundsCheck* check) override {
// Replace the length by the array itself, so that we can do compares to memory.
HArrayLength* array_len = check->InputAt(1)->AsArrayLength();
diff --git a/compiler/optimizing/x86_memory_gen.h b/compiler/optimizing/x86_memory_gen.h
index b254000f28..3f4178d58a 100644
--- a/compiler/optimizing/x86_memory_gen.h
+++ b/compiler/optimizing/x86_memory_gen.h
@@ -31,7 +31,7 @@ class X86MemoryOperandGeneration : public HOptimization {
CodeGenerator* codegen,
OptimizingCompilerStats* stats);
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kX86MemoryOperandGenerationPassName =
"x86_memory_operand_generation";
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index b0310f2fb6..98c0191679 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -39,7 +39,7 @@ namespace vixl32 = vixl::aarch32;
namespace art {
namespace arm {
-class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
+class ArmVIXLMacroAssembler final : public vixl32::MacroAssembler {
public:
// Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
// fewer system calls than a larger default capacity.
@@ -149,7 +149,7 @@ class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
using MacroAssembler::Vmov;
};
-class ArmVIXLAssembler FINAL : public Assembler {
+class ArmVIXLAssembler final : public Assembler {
private:
class ArmException;
public:
@@ -161,19 +161,19 @@ class ArmVIXLAssembler FINAL : public Assembler {
virtual ~ArmVIXLAssembler() {}
ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
// Size of generated code.
- size_t CodeSize() const OVERRIDE;
- const uint8_t* CodeBufferBaseAddress() const OVERRIDE;
+ size_t CodeSize() const override;
+ const uint8_t* CodeBufferBaseAddress() const override;
// Copy instructions out of assembly buffer into the given region of memory.
- void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;
+ void FinalizeInstructions(const MemoryRegion& region) override;
- void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Bind(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM";
}
- void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM";
}
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 4bc5d69f4d..674bf12f89 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -32,7 +32,7 @@
namespace art {
namespace arm {
-class ArmVIXLJNIMacroAssembler FINAL
+class ArmVIXLJNIMacroAssembler final
: public JNIMacroAssemblerFwd<ArmVIXLAssembler, PointerSize::k32> {
private:
class ArmException;
@@ -42,7 +42,7 @@ class ArmVIXLJNIMacroAssembler FINAL
exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {}
virtual ~ArmVIXLJNIMacroAssembler() {}
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
//
// Overridden common assembler high-level functionality
@@ -52,109 +52,109 @@ class ArmVIXLJNIMacroAssembler FINAL
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines.
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister src) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
- void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
void StoreSpanning(FrameOffset dest,
ManagedRegister src,
FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
// Load routines.
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
void LoadFromThread(ManagedRegister dest,
ThreadOffset32 src,
- size_t size) OVERRIDE;
+ size_t size) override;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister dest,
ManagedRegister base,
MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
+ bool unpoison_reference) override;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
- void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) override;
// Copying routines.
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset32 thr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
void CopyRawPtrToThread(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
void Copy(FrameOffset dest,
ManagedRegister src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base,
Offset dest_offset,
FrameOffset src,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
FrameOffset src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest,
Offset dest_offset,
ManagedRegister src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
Offset dest_offset,
FrameOffset src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
// Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister mtr) OVERRIDE;
+ void GetCurrentThread(ManagedRegister mtr) override;
void GetCurrentThread(FrameOffset dest_offset,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
@@ -163,43 +163,43 @@ class ArmVIXLJNIMacroAssembler FINAL
void CreateHandleScopeEntry(ManagedRegister out_reg,
FrameOffset handlescope_offset,
ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off,
FrameOffset handlescope_offset,
ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// src holds a handle scope entry (Object**); load it into dst.
void LoadReferenceFromHandleScope(ManagedRegister dst,
- ManagedRegister src) OVERRIDE;
+ ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override;
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label) OVERRIDE;
+ void Jump(JNIMacroLabel* label) override;
// Emit a conditional jump to the label by applying a unary condition test to the register.
- void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label) OVERRIDE;
+ void Bind(JNIMacroLabel* label) override;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+ void MemoryBarrier(ManagedRegister scratch) override;
void EmitExceptionPoll(ArmVIXLJNIMacroAssembler::ArmException *exception);
void Load(ArmManagedRegister dest, vixl32::Register base, int32_t offset, size_t size);
@@ -231,7 +231,7 @@ class ArmVIXLJNIMacroAssembler FINAL
friend class ArmVIXLAssemblerTest_VixlStoreToOffset_Test;
};
-class ArmVIXLJNIMacroLabel FINAL
+class ArmVIXLJNIMacroLabel final
: public JNIMacroLabelCommon<ArmVIXLJNIMacroLabel,
vixl32::Label,
InstructionSet::kArm> {
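
The CreateHandleScopeEntry overloads above encode the contract their comments describe. As a plain C++ model of that decision (a sketch with assumed names; the real methods emit ARM code against an already-populated handle scope):

struct Object {};

// Sketch: produce the address of the handle-scope slot, or a genuine
// null when the reference is null and nulls are allowed. in_ref is used
// only for the null test, which is why a stale value suffices.
Object** CreateHandleScopeEntrySketch(Object* in_ref,
                                      Object** handle_scope_slot,
                                      bool null_allowed) {
  if (null_allowed && in_ref == nullptr) {
    return nullptr;           // the callee sees a null jobject
  }
  return handle_scope_slot;   // the callee sees a pointer into the handle scope
}
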
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 8983af2677..74537dd5a3 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -61,7 +61,7 @@ enum StoreOperandType {
kStoreDWord
};
-class Arm64Assembler FINAL : public Assembler {
+class Arm64Assembler final : public Assembler {
public:
explicit Arm64Assembler(ArenaAllocator* allocator) : Assembler(allocator) {}
@@ -70,11 +70,11 @@ class Arm64Assembler FINAL : public Assembler {
vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
// Finalize the code.
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
// Size of generated code.
- size_t CodeSize() const OVERRIDE;
- const uint8_t* CodeBufferBaseAddress() const OVERRIDE;
+ size_t CodeSize() const override;
+ const uint8_t* CodeBufferBaseAddress() const override;
// Copy instructions out of assembly buffer into the given region of memory.
void FinalizeInstructions(const MemoryRegion& region);
@@ -109,10 +109,10 @@ class Arm64Assembler FINAL : public Assembler {
// MaybeGenerateMarkingRegisterCheck and is passed to the BRK instruction.
void GenerateMarkingRegisterCheck(vixl::aarch64::Register temp, int code = 0);
- void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Bind(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM64";
}
- void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM64";
}
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index f531b2aa51..45316ed88e 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -40,7 +40,7 @@
namespace art {
namespace arm64 {
-class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
+class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
public:
explicit Arm64JNIMacroAssembler(ArenaAllocator* allocator)
: JNIMacroAssemblerFwd(allocator),
@@ -49,94 +49,94 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler,
~Arm64JNIMacroAssembler();
// Finalize the code.
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
// Emit code that will create an activation on the stack.
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines.
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister src) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+ ManagedRegister scratch) override;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
void StoreSpanning(FrameOffset dest,
ManagedRegister src,
FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
// Load routines.
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
+ void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) override;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister dest,
ManagedRegister base,
MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+ bool unpoison_reference) override;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
// Copying routines.
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset64 thr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ override;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
void Copy(FrameOffset dest,
ManagedRegister src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base,
Offset dest_offset,
FrameOffset src,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
FrameOffset src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest,
Offset dest_offset,
ManagedRegister src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
Offset dest_offset,
FrameOffset src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+ size_t size) override;
+ void MemoryBarrier(ManagedRegister scratch) override;
// Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+ void GetCurrentThread(ManagedRegister tr) override;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
@@ -145,40 +145,40 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler,
void CreateHandleScopeEntry(ManagedRegister out_reg,
FrameOffset handlescope_offset,
ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off,
FrameOffset handlescope_offset,
ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// src holds a handle scope entry (Object**); load it into dst.
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override;
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label) OVERRIDE;
+ void Jump(JNIMacroLabel* label) override;
// Emit a conditional jump to the label by applying a unary condition test to the register.
- void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label) OVERRIDE;
+ void Bind(JNIMacroLabel* label) override;
private:
class Arm64Exception {
@@ -234,7 +234,7 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler,
ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
};
-class Arm64JNIMacroLabel FINAL
+class Arm64JNIMacroLabel final
: public JNIMacroLabelCommon<Arm64JNIMacroLabel,
vixl::aarch64::Label,
InstructionSet::kArm64> {
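
ExceptionPoll, per its comment, emits an inline test of the thread-local exception field at each poll site. A rough C++ model of the emitted control flow (not ART code; the real field is reached through a ThreadOffset):

// Model of the inline check ExceptionPoll() emits.
struct ThreadSketch {
  void* exception_ = nullptr;  // stands in for Thread::exception_
};

void ExceptionPollSketch(ThreadSketch* self) {
  if (self->exception_ != nullptr) {
    // Slow path: undo stack_adjust bytes of frame, then deliver the exception.
  }
}
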
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 379a6396eb..251b82cb54 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -283,7 +283,7 @@ class AssemblerBuffer {
// The purpose of this class is to ensure that we do not have to explicitly
// call the AdvancePC method (which is good for convenience and correctness).
-class DebugFrameOpCodeWriterForAssembler FINAL
+class DebugFrameOpCodeWriterForAssembler final
: public dwarf::DebugFrameOpCodeWriter<> {
public:
struct DelayedAdvancePC {
@@ -292,7 +292,7 @@ class DebugFrameOpCodeWriterForAssembler FINAL
};
// This method is called by the opcode writers.
- virtual void ImplicitlyAdvancePC() FINAL;
+ virtual void ImplicitlyAdvancePC() final;
explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
: dwarf::DebugFrameOpCodeWriter<>(false /* enabled */),
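
As its comment says, DebugFrameOpCodeWriterForAssembler exists so callers never invoke AdvancePC() themselves: every opcode write first syncs the DWARF location counter to the assembler's current code size (and, via the DelayedAdvancePC struct above, those advances can be buffered rather than emitted eagerly). A sketch of the non-delayed idea, under assumed member names:

#include <cstddef>

// Sketch (assumed names): sync the DWARF location counter to wherever
// code emission has reached before writing any CFI opcode.
class AssemblerSketch {
 public:
  size_t CodeSize() const { return code_size_; }
  size_t code_size_ = 0;
};

class CfiWriterSketch {
 public:
  explicit CfiWriterSketch(AssemblerSketch* assembler) : assembler_(assembler) {}
  void ImplicitlyAdvancePC() {
    AdvancePC(assembler_->CodeSize());
  }

 private:
  void AdvancePC(size_t pc) {
    // The real writer emits DW_CFA_advance_loc* opcodes covering the
    // distance from last_pc_ to pc.
    last_pc_ = pc;
  }
  AssemblerSketch* assembler_;
  size_t last_pc_ = 0;
};
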
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 7c800b355f..9e23d11116 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -737,7 +737,7 @@ class AssemblerTest : public testing::Test {
protected:
AssemblerTest() {}
- void SetUp() OVERRIDE {
+ void SetUp() override {
allocator_.reset(new ArenaAllocator(&pool_));
assembler_.reset(CreateAssembler(allocator_.get()));
test_helper_.reset(
@@ -753,7 +753,7 @@ class AssemblerTest : public testing::Test {
SetUpHelpers();
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
test_helper_.reset(); // Clean up the helper.
assembler_.reset();
allocator_.reset();
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index f5df926749..e0c2992dea 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -259,19 +259,19 @@ inline JNIMacroLabel::~JNIMacroLabel() {
template <typename T, PointerSize kPointerSize>
class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> {
public:
- void FinalizeCode() OVERRIDE {
+ void FinalizeCode() override {
asm_.FinalizeCode();
}
- size_t CodeSize() const OVERRIDE {
+ size_t CodeSize() const override {
return asm_.CodeSize();
}
- void FinalizeInstructions(const MemoryRegion& region) OVERRIDE {
+ void FinalizeInstructions(const MemoryRegion& region) override {
asm_.FinalizeInstructions(region);
}
- DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE {
+ DebugFrameOpCodeWriterForAssembler& cfi() override {
return asm_.cfi();
}
@@ -299,7 +299,7 @@ class JNIMacroLabelCommon : public JNIMacroLabel {
JNIMacroLabelCommon() : JNIMacroLabel(kIsa) {
}
- virtual ~JNIMacroLabelCommon() OVERRIDE {}
+ virtual ~JNIMacroLabelCommon() override {}
private:
PlatformLabel label_;
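
One nit the patch leaves in place: "virtual ~JNIMacroLabelCommon() override {}" is doubly redundant, because override already implies the function is virtual. The equivalent conventional spelling (illustrative class names):

class LabelBase {
 public:
  virtual ~LabelBase() = default;
};
class LabelSketch : public LabelBase {
 public:
  ~LabelSketch() override = default;  // 'override' alone implies virtual
};
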
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index b70c18b3e2..067a5953b8 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -58,7 +58,7 @@ class JNIMacroAssemblerTest : public testing::Test {
protected:
JNIMacroAssemblerTest() {}
- void SetUp() OVERRIDE {
+ void SetUp() override {
allocator_.reset(new ArenaAllocator(&pool_));
assembler_.reset(CreateAssembler(allocator_.get()));
test_helper_.reset(
@@ -74,7 +74,7 @@ class JNIMacroAssemblerTest : public testing::Test {
SetUpHelpers();
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
test_helper_.reset(); // Clean up the helper.
assembler_.reset();
allocator_.reset();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index af3d7a06ba..8a1e1df777 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -263,7 +263,7 @@ class MipsExceptionSlowPath {
DISALLOW_COPY_AND_ASSIGN(MipsExceptionSlowPath);
};
-class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
+class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
public:
using JNIBase = JNIMacroAssembler<PointerSize::k32>;
@@ -285,8 +285,8 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
cfi().DelayEmittingAdvancePCs();
}
- size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
- size_t CodePosition() OVERRIDE;
+ size_t CodeSize() const override { return Assembler::CodeSize(); }
+ size_t CodePosition() override;
DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
virtual ~MipsAssembler() {
@@ -1143,10 +1143,10 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
}
}
- void Bind(Label* label) OVERRIDE {
+ void Bind(Label* label) override {
Bind(down_cast<MipsLabel*>(label));
}
- void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS";
}
@@ -1155,25 +1155,25 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
using JNIBase::Jump;
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override {
LOG(FATAL) << "Not implemented on MIPS32";
UNREACHABLE();
}
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS32";
UNREACHABLE();
}
// Emit a conditional jump to the label by applying a unary condition test to the register.
void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
- ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+ ManagedRegister test ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS32";
UNREACHABLE();
}
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS32";
UNREACHABLE();
}
@@ -1232,108 +1232,108 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines.
- void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister msrc, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister msrc) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) override;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) override;
void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
- void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
void StoreSpanning(FrameOffset dest,
ManagedRegister msrc,
FrameOffset in_off,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
// Load routines.
- void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
+ void Load(ManagedRegister mdest, FrameOffset src, size_t size) override;
- void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) override;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister mdest,
ManagedRegister base,
MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
+ bool unpoison_reference) override;
- void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) override;
- void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) override;
// Copying routines.
- void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
+ void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset32 thr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
void CopyRawPtrToThread(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) override;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) override;
void Copy(FrameOffset dest,
ManagedRegister src_base,
Offset src_offset,
ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base,
Offset dest_offset,
FrameOffset src,
ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
FrameOffset src_base,
Offset src_offset,
ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest,
Offset dest_offset,
ManagedRegister src,
Offset src_offset,
ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
Offset dest_offset,
FrameOffset src,
Offset src_offset,
ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
- void MemoryBarrier(ManagedRegister) OVERRIDE;
+ void MemoryBarrier(ManagedRegister) override;
// Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
+ void GetCurrentThread(ManagedRegister tr) override;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) override;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
@@ -1342,34 +1342,34 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void CreateHandleScopeEntry(ManagedRegister out_reg,
FrameOffset handlescope_offset,
ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off,
FrameOffset handlescope_offset,
ManagedRegister mscratch,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// src holds a handle scope entry (Object**); load it into dst.
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) override;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
+ void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) override;
// Emit slow paths queued during assembly and promote short branches to long if needed.
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
// Emit branches and finalize all instructions.
void FinalizeInstructions(const MemoryRegion& region);
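
Unlike the ARM ports, MipsAssembler derives from both Assembler and JNIMacroAssembler<PointerSize::k32>, so the Label* and JNIMacroLabel* overloads of Bind/Jump live in different base classes; the "using JNIBase::Jump" above keeps the JNI overloads visible once new overloads are declared. The C++ name-hiding rule involved, in miniature (illustrative types):

struct Label {};
struct JNIMacroLabel {};

struct BaseAsm { virtual void Jump(Label* label) {} };
struct JNIBase { virtual void Jump(JNIMacroLabel* label) {} };

struct MipsLike : BaseAsm, JNIBase {
  using JNIBase::Jump;                 // keeps the base overload reachable;
  void Jump(Label* label) override {}  // this declaration would otherwise hide it
};
// MipsLike m; JNIMacroLabel l; m.Jump(&l);  // compiles only thanks to the using
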
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
index 0f858926df..f9919f52b5 100644
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ b/compiler/utils/mips/assembler_mips32r5_test.cc
@@ -61,15 +61,15 @@ class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler,
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "mips";
}
- std::string GetAssemblerParameters() OVERRIDE {
+ std::string GetAssemblerParameters() override {
return " --no-warn -32 -march=mips32r5 -mmsa";
}
- void Pad(std::vector<uint8_t>& data) OVERRIDE {
+ void Pad(std::vector<uint8_t>& data) override {
// The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
// of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
// pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -78,15 +78,15 @@ class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler,
data.insert(data.end(), pad_size, 0);
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mmips:isa32r5";
}
- mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) override {
return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (registers_.size() == 0) {
registers_.push_back(new mips::Register(mips::ZERO));
registers_.push_back(new mips::Register(mips::AT));
@@ -222,7 +222,7 @@ class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
@@ -234,23 +234,23 @@ class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler,
UNREACHABLE();
}
- std::vector<mips::Register*> GetRegisters() OVERRIDE {
+ std::vector<mips::Register*> GetRegisters() override {
return registers_;
}
- std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<mips::FRegister*> GetFPRegisters() override {
return fp_registers_;
}
- std::vector<mips::VectorRegister*> GetVectorRegisters() OVERRIDE {
+ std::vector<mips::VectorRegister*> GetVectorRegisters() override {
return vec_registers_;
}
- uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ uint32_t CreateImmediate(int64_t imm_value) override {
return imm_value;
}
- std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+ std::string GetSecondaryRegisterName(const mips::Register& reg) override {
CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
return secondary_register_names_[reg];
}
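
Pad() here (and in the MIPS32r6/MIPS64 fixtures below) zero-fills the buffer up to the next 16-byte boundary so it byte-compares equal to the linker's output; on MIPS an all-zero word encodes a nop, which is what the GNU linker pads with. The pad_size computation itself falls outside the hunk; a hypothetical reconstruction of the usual round-up:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical body for Pad(): round the emitted size up to a multiple
// of 16 and append that many zero bytes (pad_size is not shown above).
void PadSketch(std::vector<uint8_t>& data) {
  size_t padded_size = (data.size() + 15u) & ~static_cast<size_t>(15u);
  data.insert(data.end(), padded_size - data.size(), 0);
}
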
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 3d876ca613..1ec7a6a3e0 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -61,16 +61,16 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "mips";
}
- std::string GetAssemblerCmdName() OVERRIDE {
+ std::string GetAssemblerCmdName() override {
// We assemble and link for MIPS32R6. See GetAssemblerParameters() for details.
return "gcc";
}
- std::string GetAssemblerParameters() OVERRIDE {
+ std::string GetAssemblerParameters() override {
// We assemble and link for MIPS32R6. The reason is that object files produced for MIPS32R6
// (and MIPS64R6) with the GNU assembler don't have correct final offsets in PC-relative
// branches in the .text section and so they require a relocation pass (there's a relocation
@@ -82,7 +82,7 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
" -Wl,-Ttext=0x1000000 -Wl,-e0x1000000 -nostdlib";
}
- void Pad(std::vector<uint8_t>& data) OVERRIDE {
+ void Pad(std::vector<uint8_t>& data) override {
// The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
// of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
// pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -91,15 +91,15 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
data.insert(data.end(), pad_size, 0);
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mmips:isa32r6";
}
- mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) override {
return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (registers_.size() == 0) {
registers_.push_back(new mips::Register(mips::ZERO));
registers_.push_back(new mips::Register(mips::AT));
@@ -235,7 +235,7 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
@@ -247,23 +247,23 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
UNREACHABLE();
}
- std::vector<mips::Register*> GetRegisters() OVERRIDE {
+ std::vector<mips::Register*> GetRegisters() override {
return registers_;
}
- std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<mips::FRegister*> GetFPRegisters() override {
return fp_registers_;
}
- std::vector<mips::VectorRegister*> GetVectorRegisters() OVERRIDE {
+ std::vector<mips::VectorRegister*> GetVectorRegisters() override {
return vec_registers_;
}
- uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ uint32_t CreateImmediate(int64_t imm_value) override {
return imm_value;
}
- std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+ std::string GetSecondaryRegisterName(const mips::Register& reg) override {
CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
return secondary_register_names_[reg];
}
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index f94d074299..9527fa6ddd 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -55,19 +55,19 @@ class AssemblerMIPSTest : public AssemblerTest<mips::MipsAssembler,
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "mips";
}
- std::string GetAssemblerParameters() OVERRIDE {
+ std::string GetAssemblerParameters() override {
return " --no-warn -32 -march=mips32r2";
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mmips:isa32r2";
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (registers_.size() == 0) {
registers_.push_back(new mips::Register(mips::ZERO));
registers_.push_back(new mips::Register(mips::AT));
@@ -170,7 +170,7 @@ class AssemblerMIPSTest : public AssemblerTest<mips::MipsAssembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
@@ -181,19 +181,19 @@ class AssemblerMIPSTest : public AssemblerTest<mips::MipsAssembler,
UNREACHABLE();
}
- std::vector<mips::Register*> GetRegisters() OVERRIDE {
+ std::vector<mips::Register*> GetRegisters() override {
return registers_;
}
- std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<mips::FRegister*> GetFPRegisters() override {
return fp_registers_;
}
- uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ uint32_t CreateImmediate(int64_t imm_value) override {
return imm_value;
}
- std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+ std::string GetSecondaryRegisterName(const mips::Register& reg) override {
CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
return secondary_register_names_[reg];
}
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 19f23b7e95..ce447db4fb 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -414,7 +414,7 @@ class Mips64ExceptionSlowPath {
DISALLOW_COPY_AND_ASSIGN(Mips64ExceptionSlowPath);
};
-class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
+class Mips64Assembler final : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
public:
using JNIBase = JNIMacroAssembler<PointerSize::k64>;
@@ -439,7 +439,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
}
}
- size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
+ size_t CodeSize() const override { return Assembler::CodeSize(); }
DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
// Emit Machine Instructions.
@@ -920,10 +920,10 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
}
}
- void Bind(Label* label) OVERRIDE {
+ void Bind(Label* label) override {
Bind(down_cast<Mips64Label*>(label));
}
- void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS64";
}
@@ -934,25 +934,25 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
using JNIBase::Jump;
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override {
LOG(FATAL) << "Not implemented on MIPS64";
UNREACHABLE();
}
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS64";
UNREACHABLE();
}
// Emit a conditional jump to the label by applying a unary condition test to the register.
void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
- ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+ ManagedRegister test ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS64";
UNREACHABLE();
}
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS64";
UNREACHABLE();
}
@@ -1322,119 +1322,119 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines.
- void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister msrc, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister msrc) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) override;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) override;
void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
- void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
// Load routines.
- void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
+ void Load(ManagedRegister mdest, FrameOffset src, size_t size) override;
- void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) override;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
+ bool unpoison_reference) override;
- void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) override;
- void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) override;
// Copying routines.
- void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
+ void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset64 thr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
void CopyRawPtrToThread(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) override;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) override;
void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister mscratch, size_t size) OVERRIDE;
+ ManagedRegister mscratch, size_t size) override;
void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister mscratch, size_t size) OVERRIDE;
+ ManagedRegister mscratch, size_t size) override;
void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister mscratch, size_t size) OVERRIDE;
+ ManagedRegister mscratch, size_t size) override;
- void MemoryBarrier(ManagedRegister) OVERRIDE;
+ void MemoryBarrier(ManagedRegister) override;
// Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
+ void GetCurrentThread(ManagedRegister tr) override;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) override;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
// null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+ ManagedRegister in_reg, bool null_allowed) override;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
- mscratch, bool null_allowed) OVERRIDE;
+ mscratch, bool null_allowed) override;
// src holds a handle scope entry (Object**); load it into dst.
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) override;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
+ void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) override;
// Emit slow paths queued during assembly and promote short branches to long if needed.
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
// Emit branches and finalize all instructions.
void FinalizeInstructions(const MemoryRegion& region);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index a53ff7cc2b..4ceb356910 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -63,16 +63,16 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "mips64";
}
- std::string GetAssemblerCmdName() OVERRIDE {
+ std::string GetAssemblerCmdName() override {
// We assemble and link for MIPS64R6. See GetAssemblerParameters() for details.
return "gcc";
}
- std::string GetAssemblerParameters() OVERRIDE {
+ std::string GetAssemblerParameters() override {
// We assemble and link for MIPS64R6. The reason is that object files produced for MIPS64R6
// (and MIPS32R6) with the GNU assembler don't have correct final offsets in PC-relative
// branches in the .text section and so they require a relocation pass (there's a relocation
@@ -80,7 +80,7 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
return " -march=mips64r6 -mmsa -Wa,--no-warn -Wl,-Ttext=0 -Wl,-e0 -nostdlib";
}
- void Pad(std::vector<uint8_t>& data) OVERRIDE {
+ void Pad(std::vector<uint8_t>& data) override {
// The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
// of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
// pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -89,15 +89,15 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
data.insert(data.end(), pad_size, 0);
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mmips:isa64r6";
}
- mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) override {
return new (allocator) mips64::Mips64Assembler(allocator, instruction_set_features_.get());
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (registers_.size() == 0) {
registers_.push_back(new mips64::GpuRegister(mips64::ZERO));
registers_.push_back(new mips64::GpuRegister(mips64::AT));
@@ -233,7 +233,7 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
@@ -245,23 +245,23 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
UNREACHABLE();
}
- std::vector<mips64::GpuRegister*> GetRegisters() OVERRIDE {
+ std::vector<mips64::GpuRegister*> GetRegisters() override {
return registers_;
}
- std::vector<mips64::FpuRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<mips64::FpuRegister*> GetFPRegisters() override {
return fp_registers_;
}
- std::vector<mips64::VectorRegister*> GetVectorRegisters() OVERRIDE {
+ std::vector<mips64::VectorRegister*> GetVectorRegisters() override {
return vec_registers_;
}
- uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ uint32_t CreateImmediate(int64_t imm_value) override {
return imm_value;
}
- std::string GetSecondaryRegisterName(const mips64::GpuRegister& reg) OVERRIDE {
+ std::string GetSecondaryRegisterName(const mips64::GpuRegister& reg) override {
CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
return secondary_register_names_[reg];
}
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index e42c4c986a..5ac9236d6b 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -306,7 +306,7 @@ class ConstantArea {
ArenaVector<int32_t> buffer_;
};
-class X86Assembler FINAL : public Assembler {
+class X86Assembler final : public Assembler {
public:
explicit X86Assembler(ArenaAllocator* allocator)
: Assembler(allocator), constant_area_(allocator) {}
@@ -758,8 +758,8 @@ class X86Assembler FINAL : public Assembler {
//
int PreferredLoopAlignment() { return 16; }
void Align(int alignment, int offset);
- void Bind(Label* label) OVERRIDE;
- void Jump(Label* label) OVERRIDE {
+ void Bind(Label* label) override;
+ void Jump(Label* label) override {
jmp(label);
}
void Bind(NearLabel* label);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index cd007b32d4..b03c40aa3e 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -51,19 +51,19 @@ class AssemblerX86Test : public AssemblerTest<x86::X86Assembler,
x86::Immediate> Base;
protected:
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "x86";
}
- std::string GetAssemblerParameters() OVERRIDE {
+ std::string GetAssemblerParameters() override {
return " --32";
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mi386 --no-show-raw-insn";
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (addresses_singleton_.size() == 0) {
// One addressing mode to test the repeat drivers.
addresses_singleton_.push_back(x86::Address(x86::EAX, x86::EBX, x86::TIMES_1, 2));
@@ -118,25 +118,25 @@ class AssemblerX86Test : public AssemblerTest<x86::X86Assembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
}
- std::vector<x86::Address> GetAddresses() OVERRIDE {
+ std::vector<x86::Address> GetAddresses() override {
return addresses_;
}
- std::vector<x86::Register*> GetRegisters() OVERRIDE {
+ std::vector<x86::Register*> GetRegisters() override {
return registers_;
}
- std::vector<x86::XmmRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<x86::XmmRegister*> GetFPRegisters() override {
return fp_registers_;
}
- x86::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+ x86::Immediate CreateImmediate(int64_t imm_value) override {
return x86::Immediate(imm_value);
}
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index dd99f03aa7..df946bd229 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -25,10 +25,10 @@ namespace art {
namespace x86 {
// Slowpath entered when Thread::Current()->exception_ is non-null
-class X86ExceptionSlowPath FINAL : public SlowPath {
+class X86ExceptionSlowPath final : public SlowPath {
public:
explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
+ virtual void Emit(Assembler *sp_asm) override;
private:
const size_t stack_adjust_;
};
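
X86ExceptionSlowPath follows the queue-then-emit pattern the MIPS headers spell out ("Emit slow paths queued during assembly..."): poll sites construct and queue a SlowPath, and finalization emits each one after the fast-path code. Schematically (hypothetical driver, not ART's actual buffer API):

#include <cstddef>
#include <memory>
#include <vector>

class SketchAssembler;

class SlowPathSketch {
 public:
  virtual ~SlowPathSketch() {}
  virtual void Emit(SketchAssembler* sp_asm) = 0;
};

class ExceptionSlowPathSketch final : public SlowPathSketch {
 public:
  explicit ExceptionSlowPathSketch(size_t stack_adjust)
      : stack_adjust_(stack_adjust) {}
  void Emit(SketchAssembler* sp_asm) override {
    // Emit: drop stack_adjust_ bytes of frame, then tail-call the
    // exception-delivery entrypoint.
  }
 private:
  const size_t stack_adjust_;
};

class SketchAssembler {
 public:
  void ExceptionPoll(size_t stack_adjust) {
    // Inline: test the thread-local exception field and branch to the
    // entry label of the slow path queued here.
    slow_paths_.push_back(std::make_unique<ExceptionSlowPathSketch>(stack_adjust));
  }
  void FinalizeCode() {
    for (auto& path : slow_paths_) {
      path->Emit(this);  // out-of-line code lands after all fast paths
    }
  }
 private:
  std::vector<std::unique_ptr<SlowPathSketch>> slow_paths_;
};
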
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 99219d8f88..a701080b4f 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -32,7 +32,7 @@ namespace x86 {
class X86JNIMacroLabel;
-class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
+class X86JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
public:
explicit X86JNIMacroAssembler(ArenaAllocator* allocator) : JNIMacroAssemblerFwd(allocator) {}
virtual ~X86JNIMacroAssembler() {}
@@ -45,130 +45,130 @@ class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, Poi
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister src) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
- void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
// Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
- void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) override;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
+ bool unpoison_reference) override;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
- void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) override;
// Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset32 thr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
+ override;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
+ ManagedRegister scratch, size_t size) override;
void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
+ ManagedRegister scratch, size_t size) override;
- void MemoryBarrier(ManagedRegister) OVERRIDE;
+ void MemoryBarrier(ManagedRegister) override;
// Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+ void GetCurrentThread(ManagedRegister tr) override;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
// null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+ ManagedRegister in_reg, bool null_allowed) override;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
+ ManagedRegister scratch, bool null_allowed) override;
// src holds a handle scope entry (Object**); load this into dst
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override;
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label) OVERRIDE;
+ void Jump(JNIMacroLabel* label) override;
// Emit a conditional jump to the label by applying a unary condition test to the register.
- void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label) OVERRIDE;
+ void Bind(JNIMacroLabel* label) override;
private:
DISALLOW_COPY_AND_ASSIGN(X86JNIMacroAssembler);
};
-class X86JNIMacroLabel FINAL
+class X86JNIMacroLabel final
: public JNIMacroLabelCommon<X86JNIMacroLabel,
art::Label,
InstructionSet::kX86> {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index e4d72a7ba2..e696635e62 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -351,7 +351,7 @@ class NearLabel : private Label {
};
-class X86_64Assembler FINAL : public Assembler {
+class X86_64Assembler final : public Assembler {
public:
explicit X86_64Assembler(ArenaAllocator* allocator)
: Assembler(allocator), constant_area_(allocator) {}
@@ -844,8 +844,8 @@ class X86_64Assembler FINAL : public Assembler {
//
int PreferredLoopAlignment() { return 16; }
void Align(int alignment, int offset);
- void Bind(Label* label) OVERRIDE;
- void Jump(Label* label) OVERRIDE {
+ void Bind(Label* label) override;
+ void Jump(Label* label) override {
jmp(label);
}
void Bind(NearLabel* label);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 0589df55d2..e1de1f172f 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -145,15 +145,15 @@ class AssemblerX86_64Test : public AssemblerTest<x86_64::X86_64Assembler,
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "x86_64";
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (addresses_singleton_.size() == 0) {
// One addressing mode to test the repeat drivers.
addresses_singleton_.push_back(
@@ -291,7 +291,7 @@ class AssemblerX86_64Test : public AssemblerTest<x86_64::X86_64Assembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
@@ -301,29 +301,29 @@ class AssemblerX86_64Test : public AssemblerTest<x86_64::X86_64Assembler,
return addresses_;
}
- std::vector<x86_64::CpuRegister*> GetRegisters() OVERRIDE {
+ std::vector<x86_64::CpuRegister*> GetRegisters() override {
return registers_;
}
- std::vector<x86_64::XmmRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<x86_64::XmmRegister*> GetFPRegisters() override {
return fp_registers_;
}
- x86_64::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+ x86_64::Immediate CreateImmediate(int64_t imm_value) override {
return x86_64::Immediate(imm_value);
}
- std::string GetSecondaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+ std::string GetSecondaryRegisterName(const x86_64::CpuRegister& reg) override {
CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
return secondary_register_names_[reg];
}
- std::string GetTertiaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+ std::string GetTertiaryRegisterName(const x86_64::CpuRegister& reg) override {
CHECK(tertiary_register_names_.find(reg) != tertiary_register_names_.end());
return tertiary_register_names_[reg];
}
- std::string GetQuaternaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+ std::string GetQuaternaryRegisterName(const x86_64::CpuRegister& reg) override {
CHECK(quaternary_register_names_.find(reg) != quaternary_register_names_.end());
return quaternary_register_names_[reg];
}
@@ -2002,11 +2002,11 @@ class JNIMacroAssemblerX86_64Test : public JNIMacroAssemblerTest<x86_64::X86_64J
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "x86_64";
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
}
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index f6b2f9df34..d5c0878c95 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -574,10 +574,10 @@ void X86_64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegist
}
// Slowpath entered when Thread::Current()->_exception is non-null
-class X86_64ExceptionSlowPath FINAL : public SlowPath {
+class X86_64ExceptionSlowPath final : public SlowPath {
public:
explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
+ virtual void Emit(Assembler *sp_asm) override;
private:
const size_t stack_adjust_;
};
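
Since `override` may only appear on a function that overrides a base virtual, such a function is itself implicitly virtual; the explicit `virtual` retained on Emit above is therefore legal but redundant. A small sketch under hypothetical names (PathBase, PathImpl) showing the two equivalent spellings:

    class PathBase {
     public:
      virtual void Emit(int* sp_asm) { (void)sp_asm; }
      virtual ~PathBase() {}
    };

    class PathImpl final : public PathBase {
     public:
      // Equivalent to "void Emit(int* sp_asm) override;": 'override' already
      // implies the function is virtual, so the leading 'virtual' adds nothing.
      virtual void Emit(int* sp_asm) override { PathBase::Emit(sp_asm); }
    };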
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index d766ad4716..4411558340 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -31,7 +31,7 @@
namespace art {
namespace x86_64 {
-class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler,
+class X86_64JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86_64Assembler,
PointerSize::k64> {
public:
explicit X86_64JNIMacroAssembler(ArenaAllocator* allocator)
@@ -46,107 +46,107 @@ class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assemble
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister src) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
- void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
void StoreSpanning(FrameOffset dest,
ManagedRegister src,
FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
// Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
- void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) override;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister dest,
ManagedRegister base,
MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
+ bool unpoison_reference) override;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
- void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
// Copying routines
void Move(ManagedRegister dest, ManagedRegister src, size_t size);
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset64 thr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
+ override;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
void Copy(FrameOffset dest,
ManagedRegister src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base,
Offset dest_offset,
FrameOffset src,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
FrameOffset src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest,
Offset dest_offset,
ManagedRegister src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
Offset dest_offset,
FrameOffset src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
- void MemoryBarrier(ManagedRegister) OVERRIDE;
+ void MemoryBarrier(ManagedRegister) override;
// Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+ void GetCurrentThread(ManagedRegister tr) override;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
@@ -155,46 +155,46 @@ class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assemble
void CreateHandleScopeEntry(ManagedRegister out_reg,
FrameOffset handlescope_offset,
ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off,
FrameOffset handlescope_offset,
ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// src holds a handle scope entry (Object**); load this into dst
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override;
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label) OVERRIDE;
+ void Jump(JNIMacroLabel* label) override;
// Emit a conditional jump to the label by applying a unary condition test to the register.
- void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label) OVERRIDE;
+ void Bind(JNIMacroLabel* label) override;
private:
DISALLOW_COPY_AND_ASSIGN(X86_64JNIMacroAssembler);
};
-class X86_64JNIMacroLabel FINAL
+class X86_64JNIMacroLabel final
: public JNIMacroLabelCommon<X86_64JNIMacroLabel,
art::Label,
InstructionSet::kX86_64> {
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index c223549710..136066d074 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -47,11 +47,11 @@ class VerifierDepsCompilerCallbacks : public CompilerCallbacks {
: CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp),
deps_(nullptr) {}
- void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {}
- void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
- bool IsRelocationPossible() OVERRIDE { return false; }
+ void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {}
+ void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
+ bool IsRelocationPossible() override { return false; }
- verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return deps_; }
+ verifier::VerifierDeps* GetVerifierDeps() const override { return deps_; }
void SetVerifierDeps(verifier::VerifierDeps* deps) { deps_ = deps; }
private:
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 29df0670d6..5655b3c91d 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -613,7 +613,7 @@ class WatchDog {
bool shutting_down_;
};
-class Dex2Oat FINAL {
+class Dex2Oat final {
public:
explicit Dex2Oat(TimingLogger* timings) :
compiler_kind_(Compiler::kOptimizing),
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 4247e176aa..60dba99bfa 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -52,7 +52,7 @@ std::ostream& operator<<(std::ostream& os, const ImageSizes& sizes) {
class Dex2oatImageTest : public CommonRuntimeTest {
public:
- virtual void TearDown() OVERRIDE {}
+ virtual void TearDown() override {}
protected:
// Visitors take method and type references
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 2b96684fdd..5fc3da42e8 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -55,7 +55,7 @@ using android::base::StringPrintf;
class Dex2oatTest : public Dex2oatEnvironmentTest {
public:
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
Dex2oatEnvironmentTest::TearDown();
output_ = "";
@@ -349,7 +349,7 @@ TEST_F(Dex2oatSwapTest, DoUseSwapSingleSmall) {
class Dex2oatSwapUseTest : public Dex2oatSwapTest {
protected:
- void CheckHostResult(bool expect_use) OVERRIDE {
+ void CheckHostResult(bool expect_use) override {
if (!kIsTargetBuild) {
if (expect_use) {
EXPECT_NE(output_.find("Large app, accepted running with swap."), std::string::npos)
@@ -361,7 +361,7 @@ class Dex2oatSwapUseTest : public Dex2oatSwapTest {
}
}
- std::string GetTestDexFileName() OVERRIDE {
+ std::string GetTestDexFileName() override {
// Use Statics as it has a handful of functions.
return CommonRuntimeTest::GetTestDexFileName("Statics");
}
@@ -474,7 +474,7 @@ TEST_F(Dex2oatSwapUseTest, CheckSwapUsage) {
class Dex2oatVeryLargeTest : public Dex2oatTest {
protected:
void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
- CompilerFilter::Filter result ATTRIBUTE_UNUSED) OVERRIDE {
+ CompilerFilter::Filter result ATTRIBUTE_UNUSED) override {
// Ignore, we'll do our own checks.
}
@@ -627,7 +627,7 @@ TEST_F(Dex2oatVeryLargeTest, SpeedProfileNoProfile) {
class Dex2oatLayoutTest : public Dex2oatTest {
protected:
void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
- CompilerFilter::Filter result ATTRIBUTE_UNUSED) OVERRIDE {
+ CompilerFilter::Filter result ATTRIBUTE_UNUSED) override {
// Ignore, we'll do our own checks.
}
diff --git a/dex2oat/linker/arm/relative_patcher_arm_base.h b/dex2oat/linker/arm/relative_patcher_arm_base.h
index f5a1395bdd..0eb4417771 100644
--- a/dex2oat/linker/arm/relative_patcher_arm_base.h
+++ b/dex2oat/linker/arm/relative_patcher_arm_base.h
@@ -31,10 +31,10 @@ class ArmBaseRelativePatcher : public RelativePatcher {
public:
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method,
- MethodReference method_ref) OVERRIDE;
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
- std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+ MethodReference method_ref) override;
+ uint32_t ReserveSpaceEnd(uint32_t offset) override;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
+ std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
protected:
ArmBaseRelativePatcher(RelativePatcherThunkProvider* thunk_provider,
diff --git a/dex2oat/linker/arm/relative_patcher_thumb2.h b/dex2oat/linker/arm/relative_patcher_thumb2.h
index 3a42928466..dbf64a13da 100644
--- a/dex2oat/linker/arm/relative_patcher_thumb2.h
+++ b/dex2oat/linker/arm/relative_patcher_thumb2.h
@@ -29,7 +29,7 @@ class ArmVIXLAssembler;
namespace linker {
-class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher {
+class Thumb2RelativePatcher final : public ArmBaseRelativePatcher {
public:
explicit Thumb2RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
RelativePatcherTargetProvider* target_provider);
@@ -37,18 +37,18 @@ class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher {
void PatchCall(std::vector<uint8_t>* code,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
+ uint32_t patch_offset) override;
protected:
- uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE;
- uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE;
+ uint32_t MaxPositiveDisplacement(const ThunkKey& key) override;
+ uint32_t MaxNegativeDisplacement(const ThunkKey& key) override;
private:
void SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64.h b/dex2oat/linker/arm64/relative_patcher_arm64.h
index f7f673c1ba..e95d0fee6f 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64.h
+++ b/dex2oat/linker/arm64/relative_patcher_arm64.h
@@ -28,7 +28,7 @@ class Arm64Assembler;
namespace linker {
-class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher {
+class Arm64RelativePatcher final : public ArmBaseRelativePatcher {
public:
Arm64RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
RelativePatcherTargetProvider* target_provider,
@@ -36,24 +36,24 @@ class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher {
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method,
- MethodReference method_ref) OVERRIDE;
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+ MethodReference method_ref) override;
+ uint32_t ReserveSpaceEnd(uint32_t offset) override;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
void PatchCall(std::vector<uint8_t>* code,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
+ uint32_t patch_offset) override;
protected:
- uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE;
- uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE;
+ uint32_t MaxPositiveDisplacement(const ThunkKey& key) override;
+ uint32_t MaxNegativeDisplacement(const ThunkKey& key) override;
private:
static uint32_t PatchAdrp(uint32_t adrp, uint32_t disp);
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index 4e7d636dbf..194a0e1ad9 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -94,35 +94,35 @@ class DebugInfoTask : public Task {
};
template <typename ElfTypes>
-class ElfWriterQuick FINAL : public ElfWriter {
+class ElfWriterQuick final : public ElfWriter {
public:
ElfWriterQuick(const CompilerOptions& compiler_options,
File* elf_file);
~ElfWriterQuick();
- void Start() OVERRIDE;
+ void Start() override;
void PrepareDynamicSection(size_t rodata_size,
size_t text_size,
size_t data_bimg_rel_ro_size,
size_t bss_size,
size_t bss_methods_offset,
size_t bss_roots_offset,
- size_t dex_section_size) OVERRIDE;
- void PrepareDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
- OutputStream* StartRoData() OVERRIDE;
- void EndRoData(OutputStream* rodata) OVERRIDE;
- OutputStream* StartText() OVERRIDE;
- void EndText(OutputStream* text) OVERRIDE;
- OutputStream* StartDataBimgRelRo() OVERRIDE;
- void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) OVERRIDE;
- void WriteDynamicSection() OVERRIDE;
- void WriteDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
- bool StripDebugInfo() OVERRIDE;
- bool End() OVERRIDE;
-
- virtual OutputStream* GetStream() OVERRIDE;
-
- size_t GetLoadedSize() OVERRIDE;
+ size_t dex_section_size) override;
+ void PrepareDebugInfo(const debug::DebugInfo& debug_info) override;
+ OutputStream* StartRoData() override;
+ void EndRoData(OutputStream* rodata) override;
+ OutputStream* StartText() override;
+ void EndText(OutputStream* text) override;
+ OutputStream* StartDataBimgRelRo() override;
+ void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) override;
+ void WriteDynamicSection() override;
+ void WriteDebugInfo(const debug::DebugInfo& debug_info) override;
+ bool StripDebugInfo() override;
+ bool End() override;
+
+ virtual OutputStream* GetStream() override;
+
+ size_t GetLoadedSize() override;
static void EncodeOatPatches(const std::vector<uintptr_t>& locations,
std::vector<uint8_t>* buffer);
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 440b3a47cb..d575420f9b 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -83,7 +83,7 @@ class ImageTest : public CommonCompilerTest {
const std::string& extra_dex = "",
const std::initializer_list<std::string>& image_classes = {});
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonCompilerTest::SetUpRuntimeOptions(options);
QuickCompilerCallbacks* new_callbacks =
new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileBootImage);
@@ -92,7 +92,7 @@ class ImageTest : public CommonCompilerTest {
options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
}
- std::unique_ptr<HashSet<std::string>> GetImageClasses() OVERRIDE {
+ std::unique_ptr<HashSet<std::string>> GetImageClasses() override {
return std::make_unique<HashSet<std::string>>(image_classes_);
}
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 67ded3278e..97d82d9442 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -755,7 +755,7 @@ bool ImageWriter::AllocMemory() {
class ImageWriter::ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
public:
- bool operator()(ObjPtr<Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(Thread::Current());
mirror::Class::ComputeName(hs.NewHandle(c));
return true;
@@ -987,7 +987,7 @@ class ImageWriter::PruneClassesVisitor : public ClassVisitor {
classes_to_prune_(),
defined_class_count_(0u) { }
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (!image_writer_->KeepClass(klass.Ptr())) {
classes_to_prune_.insert(klass.Ptr());
if (klass->GetClassLoader() == class_loader_) {
@@ -1022,7 +1022,7 @@ class ImageWriter::PruneClassLoaderClassesVisitor : public ClassLoaderVisitor {
explicit PruneClassLoaderClassesVisitor(ImageWriter* image_writer)
: image_writer_(image_writer), removed_class_count_(0) {}
- virtual void Visit(ObjPtr<mirror::ClassLoader> class_loader) OVERRIDE
+ virtual void Visit(ObjPtr<mirror::ClassLoader> class_loader) override
REQUIRES_SHARED(Locks::mutator_lock_) {
PruneClassesVisitor classes_visitor(image_writer_, class_loader);
ClassTable* class_table =
@@ -1677,7 +1677,7 @@ class ImageWriter::GetRootsVisitor : public RootVisitor {
void VisitRoots(mirror::Object*** roots,
size_t count,
- const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+ const RootInfo& info ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots_->push_back(*roots[i]);
@@ -1686,7 +1686,7 @@ class ImageWriter::GetRootsVisitor : public RootVisitor {
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
- const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+ const RootInfo& info ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots_->push_back(roots[i]->AsMirrorPtr());
@@ -2104,14 +2104,14 @@ class ImageWriter::FixupRootVisitor : public RootVisitor {
void VisitRoots(mirror::Object*** roots ATTRIBUTE_UNUSED,
size_t count ATTRIBUTE_UNUSED,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(FATAL) << "Unsupported";
}
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
// Copy the reference. Since we do not have the address for recording the relocation,
// it needs to be recorded explicitly by the user of FixupRootVisitor.
@@ -2401,7 +2401,7 @@ class ImageWriter::FixupVisitor {
size_t oat_index_;
};
-class ImageWriter::FixupClassVisitor FINAL : public FixupVisitor {
+class ImageWriter::FixupClassVisitor final : public FixupVisitor {
public:
FixupClassVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
: FixupVisitor(image_writer, copy, oat_index) {}
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 7cf555bf96..e45023e6dc 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -73,7 +73,7 @@ static constexpr int kInvalidFd = -1;
namespace linker {
// Write a Space built during compilation for use during execution.
-class ImageWriter FINAL {
+class ImageWriter final {
public:
ImageWriter(const CompilerOptions& compiler_options,
uintptr_t image_begin,
diff --git a/dex2oat/linker/mips/relative_patcher_mips.h b/dex2oat/linker/mips/relative_patcher_mips.h
index d3a4c5a14f..4c385a3fec 100644
--- a/dex2oat/linker/mips/relative_patcher_mips.h
+++ b/dex2oat/linker/mips/relative_patcher_mips.h
@@ -23,28 +23,28 @@
namespace art {
namespace linker {
-class MipsRelativePatcher FINAL : public RelativePatcher {
+class MipsRelativePatcher final : public RelativePatcher {
public:
explicit MipsRelativePatcher(const MipsInstructionSetFeatures* features)
: is_r6(features->IsR6()) {}
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method,
- MethodReference method_ref) OVERRIDE;
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+ MethodReference method_ref) override;
+ uint32_t ReserveSpaceEnd(uint32_t offset) override;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
void PatchCall(std::vector<uint8_t>* code,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
- std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+ uint32_t patch_offset) override;
+ std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
private:
bool is_r6;
diff --git a/dex2oat/linker/mips64/relative_patcher_mips64.h b/dex2oat/linker/mips64/relative_patcher_mips64.h
index 9f5a125408..7b7c2ccd9f 100644
--- a/dex2oat/linker/mips64/relative_patcher_mips64.h
+++ b/dex2oat/linker/mips64/relative_patcher_mips64.h
@@ -22,27 +22,27 @@
namespace art {
namespace linker {
-class Mips64RelativePatcher FINAL : public RelativePatcher {
+class Mips64RelativePatcher final : public RelativePatcher {
public:
Mips64RelativePatcher() {}
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method,
- MethodReference method_ref) OVERRIDE;
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+ MethodReference method_ref) override;
+ uint32_t ReserveSpaceEnd(uint32_t offset) override;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
void PatchCall(std::vector<uint8_t>* code,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
- std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+ uint32_t patch_offset) override;
+ std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
private:
DISALLOW_COPY_AND_ASSIGN(Mips64RelativePatcher);
diff --git a/dex2oat/linker/multi_oat_relative_patcher.h b/dex2oat/linker/multi_oat_relative_patcher.h
index 60fcfe8b58..9b47a0d5b0 100644
--- a/dex2oat/linker/multi_oat_relative_patcher.h
+++ b/dex2oat/linker/multi_oat_relative_patcher.h
@@ -35,7 +35,7 @@ namespace linker {
// any number of oat files. It provides storage for method code offsets
// and wraps RelativePatcher calls, adjusting relative offsets according
// to the value set by SetAdjustment().
-class MultiOatRelativePatcher FINAL {
+class MultiOatRelativePatcher final {
public:
using const_iterator = SafeMap<MethodReference, uint32_t>::const_iterator;
@@ -139,7 +139,7 @@ class MultiOatRelativePatcher FINAL {
void GetThunkCode(const LinkerPatch& patch,
/*out*/ ArrayRef<const uint8_t>* code,
- /*out*/ std::string* debug_name) OVERRIDE;
+ /*out*/ std::string* debug_name) override;
private:
CompiledMethodStorage* storage_;
@@ -149,7 +149,7 @@ class MultiOatRelativePatcher FINAL {
// Wrap the map in a class implementing RelativePatcherTargetProvider.
class MethodOffsetMap : public RelativePatcherTargetProvider {
public:
- std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) OVERRIDE;
+ std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) override;
SafeMap<MethodReference, uint32_t> map;
};
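
The comment above describes MultiOatRelativePatcher as storing method code offsets and rebasing them by the value passed to SetAdjustment() so that a single patcher can serve any number of oat files. A simplified sketch of that rebasing idea, with OffsetAdjusterSketch and AdjustedOffset as illustrative names only (the real class exposes a much richer interface):

    #include <cstdint>

    class OffsetAdjusterSketch {
     public:
      // Configure the delta for the oat file currently being written.
      void SetAdjustment(uint32_t adjustment) { adjustment_ = adjustment; }
      // Offsets recorded relative to one oat file are rebased on lookup.
      uint32_t AdjustedOffset(uint32_t stored_offset) const {
        return stored_offset + adjustment_;
      }
     private:
      uint32_t adjustment_ = 0u;
    };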
diff --git a/dex2oat/linker/multi_oat_relative_patcher_test.cc b/dex2oat/linker/multi_oat_relative_patcher_test.cc
index 05fe36a590..a5831b64b3 100644
--- a/dex2oat/linker/multi_oat_relative_patcher_test.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher_test.cc
@@ -35,7 +35,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
- MethodReference method_ref) OVERRIDE {
+ MethodReference method_ref) override {
last_reserve_offset_ = offset;
last_reserve_method_ = method_ref;
offset += next_reserve_adjustment_;
@@ -43,7 +43,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
return offset;
}
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE {
+ uint32_t ReserveSpaceEnd(uint32_t offset) override {
last_reserve_offset_ = offset;
last_reserve_method_ = kNullMethodRef;
offset += next_reserve_adjustment_;
@@ -51,7 +51,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
return offset;
}
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override {
last_write_offset_ = offset;
if (next_write_alignment_ != 0u) {
offset += next_write_alignment_;
@@ -79,7 +79,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
void PatchCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE {
+ uint32_t target_offset) override {
last_literal_offset_ = literal_offset;
last_patch_offset_ = patch_offset;
last_target_offset_ = target_offset;
@@ -88,7 +88,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
void PatchPcRelativeReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE {
+ uint32_t target_offset) override {
last_literal_offset_ = patch.LiteralOffset();
last_patch_offset_ = patch_offset;
last_target_offset_ = target_offset;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 9045c43e03..4748c155f1 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -103,16 +103,16 @@ class ChecksumUpdatingOutputStream : public OutputStream {
ChecksumUpdatingOutputStream(OutputStream* out, OatHeader* oat_header)
: OutputStream(out->GetLocation()), out_(out), oat_header_(oat_header) { }
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+ bool WriteFully(const void* buffer, size_t byte_count) override {
oat_header_->UpdateChecksum(buffer, byte_count);
return out_->WriteFully(buffer, byte_count);
}
- off_t Seek(off_t offset, Whence whence) OVERRIDE {
+ off_t Seek(off_t offset, Whence whence) override {
return out_->Seek(offset, whence);
}
- bool Flush() OVERRIDE {
+ bool Flush() override {
return out_->Flush();
}
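
ChecksumUpdatingOutputStream above is a classic stream decorator: WriteFully folds every byte into the oat header checksum before forwarding to the wrapped stream, while Seek and Flush forward unchanged. A self-contained sketch of the same pattern; ByteSink, ChecksummingSink, and the plain byte sum are illustrative stand-ins for OutputStream and OatHeader::UpdateChecksum, not the real API:

    #include <cstddef>
    #include <cstdint>

    class ByteSink {
     public:
      virtual bool WriteFully(const void* buffer, size_t byte_count) = 0;
      virtual ~ByteSink() {}
    };

    class ChecksummingSink final : public ByteSink {
     public:
      explicit ChecksummingSink(ByteSink* out) : out_(out) {}
      bool WriteFully(const void* buffer, size_t byte_count) override {
        const uint8_t* p = static_cast<const uint8_t*>(buffer);
        for (size_t i = 0; i < byte_count; ++i) {
          sum_ += p[i];  // observe the bytes first...
        }
        return out_->WriteFully(buffer, byte_count);  // ...then forward.
      }
     private:
      ByteSink* const out_;
      uint32_t sum_ = 0u;
    };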
@@ -826,7 +826,7 @@ class OatWriter::OatDexMethodVisitor : public DexMethodVisitor {
oat_class_index_(0u),
method_offsets_index_(0u) {}
- bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE {
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) override {
DexMethodVisitor::StartClass(dex_file, class_def_index);
if (kIsDebugBuild && writer_->MayHaveCompiledMethods()) {
// There are no oat classes if there aren't any compiled methods.
@@ -836,7 +836,7 @@ class OatWriter::OatDexMethodVisitor : public DexMethodVisitor {
return true;
}
- bool EndClass() OVERRIDE {
+ bool EndClass() override {
++oat_class_index_;
return DexMethodVisitor::EndClass();
}
@@ -862,7 +862,7 @@ class OatWriter::InitBssLayoutMethodVisitor : public DexMethodVisitor {
: DexMethodVisitor(writer, /* offset */ 0u) {}
bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
- const ClassAccessor::Method& method) OVERRIDE {
+ const ClassAccessor::Method& method) override {
// Look for patches with .bss references and prepare maps with placeholders for their offsets.
CompiledMethod* compiled_method = writer_->compiler_driver_->GetCompiledMethod(
MethodReference(dex_file_, method.GetIndex()));
@@ -936,7 +936,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
DCHECK(num_classes == 0u || IsAligned<4u>(offset));
}
- bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE {
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) override {
DexMethodVisitor::StartClass(dex_file, class_def_index);
compiled_methods_.clear();
compiled_methods_with_code_ = 0u;
@@ -944,7 +944,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
}
bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
- const ClassAccessor::Method& method) OVERRIDE {
+ const ClassAccessor::Method& method) override {
// Fill in the compiled_methods_ array for methods that have a
// CompiledMethod. We track the number of non-null entries in
// compiled_methods_with_code_ since we only want to allocate
@@ -959,7 +959,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
return true;
}
- bool EndClass() OVERRIDE {
+ bool EndClass() override {
ClassReference class_ref(dex_file_, class_def_index_);
ClassStatus status;
bool found = writer_->compiler_driver_->GetCompiledClass(class_ref, &status);
@@ -1145,14 +1145,14 @@ class OatWriter::LayoutCodeMethodVisitor : public OatDexMethodVisitor {
: OatDexMethodVisitor(writer, offset) {
}
- bool EndClass() OVERRIDE {
+ bool EndClass() override {
OatDexMethodVisitor::EndClass();
return true;
}
bool VisitMethod(size_t class_def_method_index,
const ClassAccessor::Method& method)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -1248,7 +1248,7 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
std::move(ordered_methods)) {
}
- virtual bool VisitComplete() OVERRIDE {
+ virtual bool VisitComplete() override {
offset_ = writer_->relative_patcher_->ReserveSpaceEnd(offset_);
if (generate_debug_info_) {
std::vector<debug::MethodDebugInfo> thunk_infos =
@@ -1261,7 +1261,7 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
}
virtual bool VisitMethod(const OrderedMethodData& method_data)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = method_data.oat_class;
CompiledMethod* compiled_method = method_data.compiled_method;
@@ -1445,7 +1445,7 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
bool VisitMethod(size_t class_def_method_index,
const ClassAccessor::Method& method ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1495,7 +1495,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
// in the same oat file. If the origin and the copied methods are
// in different oat files don't touch the copied method.
// References to other oat files are not supported yet.
- bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) override
REQUIRES_SHARED(Locks::mutator_lock_) {
OatDexMethodVisitor::StartClass(dex_file, class_def_index);
// Skip classes that are not in the image.
@@ -1533,7 +1533,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
return true;
}
- bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) OVERRIDE
+ bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) override
REQUIRES_SHARED(Locks::mutator_lock_) {
// Skip methods that are not in the image.
if (!IsImageClass()) {
@@ -1652,7 +1652,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
}
}
- virtual bool VisitStart() OVERRIDE {
+ virtual bool VisitStart() override {
return true;
}
@@ -1681,7 +1681,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
return true;
}
- virtual bool VisitMethod(const OrderedMethodData& method_data) OVERRIDE
+ virtual bool VisitMethod(const OrderedMethodData& method_data) override
REQUIRES_SHARED(Locks::mutator_lock_) {
const MethodReference& method_ref = method_data.method_reference;
UpdateDexFileAndDexCache(method_ref.dex_file);
diff --git a/dex2oat/linker/relative_patcher.cc b/dex2oat/linker/relative_patcher.cc
index b6135c9b5f..564cf30f19 100644
--- a/dex2oat/linker/relative_patcher.cc
+++ b/dex2oat/linker/relative_patcher.cc
@@ -45,35 +45,35 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create(
const InstructionSetFeatures* features,
RelativePatcherThunkProvider* thunk_provider,
RelativePatcherTargetProvider* target_provider) {
- class RelativePatcherNone FINAL : public RelativePatcher {
+ class RelativePatcherNone final : public RelativePatcher {
public:
RelativePatcherNone() { }
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
- MethodReference method_ref ATTRIBUTE_UNUSED) OVERRIDE {
+ MethodReference method_ref ATTRIBUTE_UNUSED) override {
return offset; // No space reserved; no patches expected.
}
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE {
+ uint32_t ReserveSpaceEnd(uint32_t offset) override {
return offset; // No space reserved; no patches expected.
}
- uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) override {
return offset; // No thunks added; no patches expected.
}
void PatchCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
uint32_t literal_offset ATTRIBUTE_UNUSED,
uint32_t patch_offset ATTRIBUTE_UNUSED,
- uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
+ uint32_t target_offset ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Unexpected relative call patch.";
}
void PatchPcRelativeReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
const LinkerPatch& patch ATTRIBUTE_UNUSED,
uint32_t patch_offset ATTRIBUTE_UNUSED,
- uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
+ uint32_t target_offset ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Unexpected relative dex cache array patch.";
}
@@ -84,7 +84,7 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create(
}
std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(
- uint32_t executable_offset ATTRIBUTE_UNUSED) OVERRIDE {
+ uint32_t executable_offset ATTRIBUTE_UNUSED) override {
return std::vector<debug::MethodDebugInfo>(); // No thunks added.
}
diff --git a/dex2oat/linker/relative_patcher_test.h b/dex2oat/linker/relative_patcher_test.h
index 9556c5f557..9725570570 100644
--- a/dex2oat/linker/relative_patcher_test.h
+++ b/dex2oat/linker/relative_patcher_test.h
@@ -57,7 +57,7 @@ class RelativePatcherTest : public CommonCompilerTest {
patched_code_.reserve(16 * KB);
}
- void SetUp() OVERRIDE {
+ void SetUp() override {
OverrideInstructionSetFeatures(instruction_set_, variant_);
CommonCompilerTest::SetUp();
@@ -67,7 +67,7 @@ class RelativePatcherTest : public CommonCompilerTest {
&method_offset_map_);
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
compiled_methods_.clear();
patcher_.reset();
CommonCompilerTest::TearDown();
@@ -260,7 +260,7 @@ class RelativePatcherTest : public CommonCompilerTest {
void GetThunkCode(const LinkerPatch& patch,
/*out*/ ArrayRef<const uint8_t>* code,
- /*out*/ std::string* debug_name) OVERRIDE {
+ /*out*/ std::string* debug_name) override {
auto it = thunk_map_.find(ThunkKey(patch));
CHECK(it != thunk_map_.end());
const ThunkValue& value = it->second;
@@ -316,9 +316,9 @@ class RelativePatcherTest : public CommonCompilerTest {
// Map method reference to assigned offset.
// Wrap the map in a class implementing RelativePatcherTargetProvider.
- class MethodOffsetMap FINAL : public RelativePatcherTargetProvider {
+ class MethodOffsetMap final : public RelativePatcherTargetProvider {
public:
- std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) OVERRIDE {
+ std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) override {
auto it = map.find(ref);
if (it == map.end()) {
return std::pair<bool, uint32_t>(false, 0u);
diff --git a/dex2oat/linker/x86/relative_patcher_x86.h b/dex2oat/linker/x86/relative_patcher_x86.h
index e723580dae..3da62fb23b 100644
--- a/dex2oat/linker/x86/relative_patcher_x86.h
+++ b/dex2oat/linker/x86/relative_patcher_x86.h
@@ -22,17 +22,17 @@
namespace art {
namespace linker {
-class X86RelativePatcher FINAL : public X86BaseRelativePatcher {
+class X86RelativePatcher final : public X86BaseRelativePatcher {
public:
X86RelativePatcher() { }
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
+ uint32_t patch_offset) override;
};
} // namespace linker
diff --git a/dex2oat/linker/x86/relative_patcher_x86_base.h b/dex2oat/linker/x86/relative_patcher_x86_base.h
index 4cc7b07d2d..a1925e0995 100644
--- a/dex2oat/linker/x86/relative_patcher_x86_base.h
+++ b/dex2oat/linker/x86/relative_patcher_x86_base.h
@@ -26,14 +26,14 @@ class X86BaseRelativePatcher : public RelativePatcher {
public:
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method,
- MethodReference method_ref) OVERRIDE;
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+ MethodReference method_ref) override;
+ uint32_t ReserveSpaceEnd(uint32_t offset) override;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
void PatchCall(std::vector<uint8_t>* code,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
- std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+ uint32_t target_offset) override;
+ std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
protected:
X86BaseRelativePatcher() { }
diff --git a/dex2oat/linker/x86_64/relative_patcher_x86_64.h b/dex2oat/linker/x86_64/relative_patcher_x86_64.h
index a31e1ebfbb..a82fef3b56 100644
--- a/dex2oat/linker/x86_64/relative_patcher_x86_64.h
+++ b/dex2oat/linker/x86_64/relative_patcher_x86_64.h
@@ -22,17 +22,17 @@
namespace art {
namespace linker {
-class X86_64RelativePatcher FINAL : public X86BaseRelativePatcher {
+class X86_64RelativePatcher final : public X86BaseRelativePatcher {
public:
X86_64RelativePatcher() { }
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
+ uint32_t patch_offset) override;
};
} // namespace linker
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
index e7d5ed953d..c81d0c722d 100644
--- a/dexlayout/compact_dex_writer.h
+++ b/dexlayout/compact_dex_writer.h
@@ -112,15 +112,15 @@ class CompactDexWriter : public DexWriter {
public:
class Container : public DexContainer {
public:
- Section* GetMainSection() OVERRIDE {
+ Section* GetMainSection() override {
return &main_section_;
}
- Section* GetDataSection() OVERRIDE {
+ Section* GetDataSection() override {
return &data_section_;
}
- bool IsCompactDexContainer() const OVERRIDE {
+ bool IsCompactDexContainer() const override {
return true;
}
@@ -139,21 +139,21 @@ class CompactDexWriter : public DexWriter {
// Return true if we can generate compact dex for the IR.
bool CanGenerateCompactDex(std::string* error_msg);
- bool Write(DexContainer* output, std::string* error_msg) OVERRIDE;
+ bool Write(DexContainer* output, std::string* error_msg) override;
- std::unique_ptr<DexContainer> CreateDexContainer() const OVERRIDE;
+ std::unique_ptr<DexContainer> CreateDexContainer() const override;
- void WriteHeader(Stream* stream) OVERRIDE;
+ void WriteHeader(Stream* stream) override;
- size_t GetHeaderSize() const OVERRIDE;
+ size_t GetHeaderSize() const override;
uint32_t WriteDebugInfoOffsetTable(Stream* stream);
- void WriteCodeItem(Stream* stream, dex_ir::CodeItem* code_item, bool reserve_only) OVERRIDE;
+ void WriteCodeItem(Stream* stream, dex_ir::CodeItem* code_item, bool reserve_only) override;
- void WriteStringData(Stream* stream, dex_ir::StringData* string_data) OVERRIDE;
+ void WriteStringData(Stream* stream, dex_ir::StringData* string_data) override;
- void WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) OVERRIDE;
+ void WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) override;
void SortDebugInfosByMethodIndex();
diff --git a/dexlayout/dex_container.h b/dexlayout/dex_container.h
index 2b9a5f9959..2d742b0dbe 100644
--- a/dexlayout/dex_container.h
+++ b/dexlayout/dex_container.h
@@ -57,19 +57,19 @@ class DexContainer {
public:
virtual ~VectorSection() {}
- uint8_t* Begin() OVERRIDE {
+ uint8_t* Begin() override {
return &data_[0];
}
- size_t Size() const OVERRIDE {
+ size_t Size() const override {
return data_.size();
}
- void Resize(size_t size) OVERRIDE {
+ void Resize(size_t size) override {
data_.resize(size, 0u);
}
- void Clear() OVERRIDE {
+ void Clear() override {
data_.clear();
}
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 8f853eaeb5..b02ae50c70 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -233,7 +233,7 @@ template<class T> class CollectionVector : public CollectionBase {
// Preallocate so that assignment does not invalidate pointers into the vector.
collection_.reserve(size);
}
- virtual ~CollectionVector() OVERRIDE { }
+ virtual ~CollectionVector() override { }
template<class... Args>
T* CreateAndAddItem(Args&&... args) {
@@ -242,7 +242,7 @@ template<class T> class CollectionVector : public CollectionBase {
return object;
}
- virtual uint32_t Size() const OVERRIDE { return collection_.size(); }
+ virtual uint32_t Size() const override { return collection_.size(); }
Iterator<ElementType> begin() const { return Iterator<ElementType>(collection_, 0U, Size()); }
Iterator<ElementType> end() const { return Iterator<ElementType>(collection_, Size(), Size()); }
@@ -406,7 +406,7 @@ class Header : public Item {
data_size,
data_offset);
}
- ~Header() OVERRIDE { }
+ ~Header() override { }
static size_t ItemSize() { return kHeaderItemSize; }
@@ -590,7 +590,7 @@ class StringId : public IndexedItem {
explicit StringId(StringData* string_data) : string_data_(string_data) {
size_ = kStringIdItemSize;
}
- ~StringId() OVERRIDE { }
+ ~StringId() override { }
static size_t ItemSize() { return kStringIdItemSize; }
@@ -608,7 +608,7 @@ class StringId : public IndexedItem {
class TypeId : public IndexedItem {
public:
explicit TypeId(StringId* string_id) : string_id_(string_id) { size_ = kTypeIdItemSize; }
- ~TypeId() OVERRIDE { }
+ ~TypeId() override { }
static size_t ItemSize() { return kTypeIdItemSize; }
@@ -629,7 +629,7 @@ class TypeList : public Item {
explicit TypeList(TypeIdVector* type_list) : type_list_(type_list) {
size_ = sizeof(uint32_t) + (type_list->size() * sizeof(uint16_t));
}
- ~TypeList() OVERRIDE { }
+ ~TypeList() override { }
const TypeIdVector* GetTypeList() const { return type_list_.get(); }
@@ -644,7 +644,7 @@ class ProtoId : public IndexedItem {
ProtoId(const StringId* shorty, const TypeId* return_type, TypeList* parameters)
: shorty_(shorty), return_type_(return_type), parameters_(parameters)
{ size_ = kProtoIdItemSize; }
- ~ProtoId() OVERRIDE { }
+ ~ProtoId() override { }
static size_t ItemSize() { return kProtoIdItemSize; }
@@ -666,7 +666,7 @@ class FieldId : public IndexedItem {
public:
FieldId(const TypeId* klass, const TypeId* type, const StringId* name)
: class_(klass), type_(type), name_(name) { size_ = kFieldIdItemSize; }
- ~FieldId() OVERRIDE { }
+ ~FieldId() override { }
static size_t ItemSize() { return kFieldIdItemSize; }
@@ -688,7 +688,7 @@ class MethodId : public IndexedItem {
public:
MethodId(const TypeId* klass, const ProtoId* proto, const StringId* name)
: class_(klass), proto_(proto), name_(name) { size_ = kMethodIdItemSize; }
- ~MethodId() OVERRIDE { }
+ ~MethodId() override { }
static size_t ItemSize() { return kMethodIdItemSize; }
@@ -710,7 +710,7 @@ class FieldItem : public Item {
public:
FieldItem(uint32_t access_flags, const FieldId* field_id)
: access_flags_(access_flags), field_id_(field_id) { }
- ~FieldItem() OVERRIDE { }
+ ~FieldItem() override { }
FieldItem(FieldItem&&) = default;
@@ -732,7 +732,7 @@ class MethodItem : public Item {
public:
MethodItem(uint32_t access_flags, const MethodId* method_id, CodeItem* code)
: access_flags_(access_flags), method_id_(method_id), code_(code) { }
- ~MethodItem() OVERRIDE { }
+ ~MethodItem() override { }
MethodItem(MethodItem&&) = default;
@@ -876,7 +876,7 @@ class ClassData : public Item {
direct_methods_(direct_methods),
virtual_methods_(virtual_methods) { }
- ~ClassData() OVERRIDE = default;
+ ~ClassData() override = default;
FieldItemVector* StaticFields() { return static_fields_.get(); }
FieldItemVector* InstanceFields() { return instance_fields_.get(); }
MethodItemVector* DirectMethods() { return direct_methods_.get(); }
@@ -912,7 +912,7 @@ class ClassDef : public IndexedItem {
class_data_(class_data),
static_values_(static_values) { size_ = kClassDefItemSize; }
- ~ClassDef() OVERRIDE { }
+ ~ClassDef() override { }
static size_t ItemSize() { return kClassDefItemSize; }
@@ -980,7 +980,7 @@ class TryItem : public Item {
public:
TryItem(uint32_t start_addr, uint16_t insn_count, const CatchHandler* handlers)
: start_addr_(start_addr), insn_count_(insn_count), handlers_(handlers) { }
- ~TryItem() OVERRIDE { }
+ ~TryItem() override { }
uint32_t StartAddr() const { return start_addr_; }
uint16_t InsnCount() const { return insn_count_; }
@@ -1042,7 +1042,7 @@ class CodeItem : public Item {
tries_(tries),
handlers_(handlers) { }
- ~CodeItem() OVERRIDE { }
+ ~CodeItem() override { }
uint16_t RegistersSize() const { return registers_size_; }
uint16_t InsSize() const { return ins_size_; }
@@ -1115,7 +1115,7 @@ class AnnotationSetItem : public Item {
explicit AnnotationSetItem(std::vector<AnnotationItem*>* items) : items_(items) {
size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t);
}
- ~AnnotationSetItem() OVERRIDE { }
+ ~AnnotationSetItem() override { }
std::vector<AnnotationItem*>* GetItems() { return items_.get(); }
@@ -1132,7 +1132,7 @@ class AnnotationSetRefList : public Item {
explicit AnnotationSetRefList(std::vector<AnnotationSetItem*>* items) : items_(items) {
size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t);
}
- ~AnnotationSetRefList() OVERRIDE { }
+ ~AnnotationSetRefList() override { }
std::vector<AnnotationSetItem*>* GetItems() { return items_.get(); }
@@ -1227,7 +1227,7 @@ class CallSiteId : public IndexedItem {
explicit CallSiteId(EncodedArrayItem* call_site_item) : call_site_item_(call_site_item) {
size_ = kCallSiteIdItemSize;
}
- ~CallSiteId() OVERRIDE { }
+ ~CallSiteId() override { }
static size_t ItemSize() { return kCallSiteIdItemSize; }
@@ -1248,7 +1248,7 @@ class MethodHandleItem : public IndexedItem {
field_or_method_id_(field_or_method_id) {
size_ = kMethodHandleItemSize;
}
- ~MethodHandleItem() OVERRIDE { }
+ ~MethodHandleItem() override { }
static size_t ItemSize() { return kMethodHandleItemSize; }
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index a83a46b7e2..601d7838e2 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -92,7 +92,7 @@ static uint32_t GetDebugInfoStreamSize(const uint8_t* debug_info_stream) {
template<class T> class CollectionMap : public CollectionBase {
public:
CollectionMap() = default;
- virtual ~CollectionMap() OVERRIDE { }
+ virtual ~CollectionMap() override { }
template <class... Args>
T* CreateAndAddItem(CollectionVector<T>& vector,
diff --git a/dexlayout/dex_writer.h b/dexlayout/dex_writer.h
index db1898bf26..dd2ebad26f 100644
--- a/dexlayout/dex_writer.h
+++ b/dexlayout/dex_writer.h
@@ -192,15 +192,15 @@ class DexWriter {
class Container : public DexContainer {
public:
- Section* GetMainSection() OVERRIDE {
+ Section* GetMainSection() override {
return &main_section_;
}
- Section* GetDataSection() OVERRIDE {
+ Section* GetDataSection() override {
return &data_section_;
}
- bool IsCompactDexContainer() const OVERRIDE {
+ bool IsCompactDexContainer() const override {
return false;
}
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index 871cd081e7..00b8ef2254 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -132,7 +132,7 @@ NO_RETURN static void Usage(const char *fmt, ...) {
exit(kErrorInvalidArguments);
}
-class DexoptAnalyzer FINAL {
+class DexoptAnalyzer final {
public:
DexoptAnalyzer() :
assume_profile_changed_(false),
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 49f92499e3..c1a6f59341 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -39,15 +39,15 @@ using vixl::aarch32::pc;
static const vixl::aarch32::Register tr(TR);
-class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
- class CustomDisassemblerStream FINAL : public DisassemblerStream {
+class DisassemblerArm::CustomDisassembler final : public PrintDisassembler {
+ class CustomDisassemblerStream final : public DisassemblerStream {
public:
CustomDisassemblerStream(std::ostream& os,
const CustomDisassembler* disasm,
const DisassemblerOptions* options)
: DisassemblerStream(os), disasm_(disasm), options_(options) {}
- DisassemblerStream& operator<<(const PrintLabel& label) OVERRIDE {
+ DisassemblerStream& operator<<(const PrintLabel& label) override {
const LocationType type = label.GetLocationType();
switch (type) {
@@ -73,7 +73,7 @@ class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
}
}
- DisassemblerStream& operator<<(vixl::aarch32::Register reg) OVERRIDE {
+ DisassemblerStream& operator<<(vixl::aarch32::Register reg) override {
if (reg.Is(tr)) {
os() << "tr";
return *this;
@@ -82,7 +82,7 @@ class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
}
}
- DisassemblerStream& operator<<(const MemOperand& operand) OVERRIDE {
+ DisassemblerStream& operator<<(const MemOperand& operand) override {
// VIXL must use a PrintLabel object whenever the base register is PC;
// the following check verifies this invariant, and guards against bugs.
DCHECK(!operand.GetBaseRegister().Is(pc));
@@ -96,7 +96,7 @@ class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
return *this;
}
- DisassemblerStream& operator<<(const vixl::aarch32::AlignedMemOperand& operand) OVERRIDE {
+ DisassemblerStream& operator<<(const vixl::aarch32::AlignedMemOperand& operand) override {
// VIXL must use a PrintLabel object whenever the base register is PC;
// the following check verifies this invariant, and guards against bugs.
DCHECK(!operand.GetBaseRegister().Is(pc));
@@ -116,7 +116,7 @@ class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
disassembler_stream_(os, this, options),
is_t32_(true) {}
- void PrintCodeAddress(uint32_t prog_ctr) OVERRIDE {
+ void PrintCodeAddress(uint32_t prog_ctr) override {
os() << "0x" << std::hex << std::setw(8) << std::setfill('0') << prog_ctr << ": ";
}
diff --git a/disassembler/disassembler_arm.h b/disassembler/disassembler_arm.h
index 237b577bc2..dd6621d344 100644
--- a/disassembler/disassembler_arm.h
+++ b/disassembler/disassembler_arm.h
@@ -26,14 +26,14 @@
namespace art {
namespace arm {
-class DisassemblerArm FINAL : public Disassembler {
+class DisassemblerArm final : public Disassembler {
class CustomDisassembler;
public:
explicit DisassemblerArm(DisassemblerOptions* options);
- size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
- void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+ size_t Dump(std::ostream& os, const uint8_t* begin) override;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
private:
uintptr_t GetPc(uintptr_t instr_ptr) const {
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index 19e4dfb486..89beaa927b 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -29,7 +29,7 @@
namespace art {
namespace arm64 {
-class CustomDisassembler FINAL : public vixl::aarch64::Disassembler {
+class CustomDisassembler final : public vixl::aarch64::Disassembler {
public:
explicit CustomDisassembler(DisassemblerOptions* options)
: vixl::aarch64::Disassembler(),
@@ -45,13 +45,13 @@ class CustomDisassembler FINAL : public vixl::aarch64::Disassembler {
// Use register aliases in the disassembly.
void AppendRegisterNameToOutput(const vixl::aarch64::Instruction* instr,
- const vixl::aarch64::CPURegister& reg) OVERRIDE;
+ const vixl::aarch64::CPURegister& reg) override;
// Improve the disassembly of literal load instructions.
- void VisitLoadLiteral(const vixl::aarch64::Instruction* instr) OVERRIDE;
+ void VisitLoadLiteral(const vixl::aarch64::Instruction* instr) override;
// Improve the disassembly of thread offsets.
- void VisitLoadStoreUnsignedOffset(const vixl::aarch64::Instruction* instr) OVERRIDE;
+ void VisitLoadStoreUnsignedOffset(const vixl::aarch64::Instruction* instr) override;
private:
// Indicate if the disassembler should read data loaded from literal pools.
@@ -69,15 +69,15 @@ class CustomDisassembler FINAL : public vixl::aarch64::Disassembler {
DisassemblerOptions* options_;
};
-class DisassemblerArm64 FINAL : public Disassembler {
+class DisassemblerArm64 final : public Disassembler {
public:
explicit DisassemblerArm64(DisassemblerOptions* options) :
Disassembler(options), disasm(options) {
decoder.AppendVisitor(&disasm);
}
- size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
- void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+ size_t Dump(std::ostream& os, const uint8_t* begin) override;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
private:
vixl::aarch64::Decoder decoder;
diff --git a/disassembler/disassembler_mips.h b/disassembler/disassembler_mips.h
index afa6af366f..bc74b43ac9 100644
--- a/disassembler/disassembler_mips.h
+++ b/disassembler/disassembler_mips.h
@@ -24,7 +24,7 @@
namespace art {
namespace mips {
-class DisassemblerMips FINAL : public Disassembler {
+class DisassemblerMips final : public Disassembler {
public:
explicit DisassemblerMips(DisassemblerOptions* options, bool is_o32_abi)
: Disassembler(options),
@@ -33,8 +33,8 @@ class DisassemblerMips FINAL : public Disassembler {
is_o32_abi_(is_o32_abi) {}
const char* RegName(uint32_t reg);
- size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
- void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+ size_t Dump(std::ostream& os, const uint8_t* begin) override;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
private:
// Address and encoding of the last disassembled instruction.
diff --git a/disassembler/disassembler_x86.h b/disassembler/disassembler_x86.h
index 31b62bccf2..a329280b70 100644
--- a/disassembler/disassembler_x86.h
+++ b/disassembler/disassembler_x86.h
@@ -24,13 +24,13 @@ namespace x86 {
enum RegFile { GPR, MMX, SSE };
-class DisassemblerX86 FINAL : public Disassembler {
+class DisassemblerX86 final : public Disassembler {
public:
DisassemblerX86(DisassemblerOptions* options, bool supports_rex)
: Disassembler(options), supports_rex_(supports_rex) {}
- size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
- void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+ size_t Dump(std::ostream& os, const uint8_t* begin) override;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
private:
size_t DumpNops(std::ostream& os, const uint8_t* instr);
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index f54c55153a..420cd123c5 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -347,9 +347,9 @@ class ImgObjectVisitor : public ObjectVisitor {
begin_image_ptr_(begin_image_ptr),
dirty_pages_(dirty_pages) { }
- virtual ~ImgObjectVisitor() OVERRIDE { }
+ virtual ~ImgObjectVisitor() override { }
- virtual void Visit(mirror::Object* object) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ virtual void Visit(mirror::Object* object) override REQUIRES_SHARED(Locks::mutator_lock_) {
// Sanity check that we are reading a real mirror::Object
CHECK(object->GetClass() != nullptr) << "Image object at address "
<< object
@@ -658,8 +658,8 @@ class ImgArtMethodVisitor : public ArtMethodVisitor {
dirty_func_(std::move(dirty_func)),
begin_image_ptr_(begin_image_ptr),
dirty_pages_(dirty_pages) { }
- virtual ~ImgArtMethodVisitor() OVERRIDE { }
- virtual void Visit(ArtMethod* method) OVERRIDE {
+ virtual ~ImgArtMethodVisitor() override { }
+ virtual void Visit(ArtMethod* method) override {
dirty_func_(method, begin_image_ptr_, dirty_pages_);
}
@@ -1672,7 +1672,7 @@ struct ImgDiagArgs : public CmdlineArgs {
using Base = CmdlineArgs;
virtual ParseStatus ParseCustom(const StringPiece& option,
- std::string* error_msg) OVERRIDE {
+ std::string* error_msg) override {
{
ParseStatus base_parse = Base::ParseCustom(option, error_msg);
if (base_parse != kParseUnknownArgument) {
@@ -1703,7 +1703,7 @@ struct ImgDiagArgs : public CmdlineArgs {
return kParseOk;
}
- virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+ virtual ParseStatus ParseChecks(std::string* error_msg) override {
// Perform the parent checks.
ParseStatus parent_checks = Base::ParseChecks(error_msg);
if (parent_checks != kParseOk) {
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 52096f0d7b..c46aaf4f72 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -57,7 +57,7 @@ class ImgDiagTest : public CommonRuntimeTest {
boot_image_location_ = image_spaces[0]->GetImageLocation();
}
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
// Needs to live until CommonRuntimeTest::SetUp finishes, since we pass it a cstring.
runtime_args_image_ = android::base::StringPrintf("-Ximage:%s", GetCoreArtLocation().c_str());
options->push_back(std::make_pair(runtime_args_image_, nullptr));
diff --git a/libartbase/base/allocator.cc b/libartbase/base/allocator.cc
index c4ac180a15..1bcfe8720a 100644
--- a/libartbase/base/allocator.cc
+++ b/libartbase/base/allocator.cc
@@ -25,7 +25,7 @@
namespace art {
-class MallocAllocator FINAL : public Allocator {
+class MallocAllocator final : public Allocator {
public:
MallocAllocator() {}
~MallocAllocator() {}
@@ -44,7 +44,7 @@ class MallocAllocator FINAL : public Allocator {
MallocAllocator g_malloc_allocator;
-class NoopAllocator FINAL : public Allocator {
+class NoopAllocator final : public Allocator {
public:
NoopAllocator() {}
~NoopAllocator() {}
diff --git a/libartbase/base/arena_bit_vector.cc b/libartbase/base/arena_bit_vector.cc
index 01f9013737..c6d899313d 100644
--- a/libartbase/base/arena_bit_vector.cc
+++ b/libartbase/base/arena_bit_vector.cc
@@ -50,7 +50,7 @@ using ArenaBitVectorAllocatorKind =
ArenaBitVectorAllocatorKindImpl<kArenaAllocatorCountAllocations>;
template <typename ArenaAlloc>
-class ArenaBitVectorAllocator FINAL : public Allocator, private ArenaBitVectorAllocatorKind {
+class ArenaBitVectorAllocator final : public Allocator, private ArenaBitVectorAllocatorKind {
public:
static ArenaBitVectorAllocator* Create(ArenaAlloc* allocator, ArenaAllocKind kind) {
void* storage = allocator->template Alloc<ArenaBitVectorAllocator>(kind);
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index 5668b6cd79..76f57dac74 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -26,7 +26,7 @@ namespace art {
// Bit memory region is a bit offset subregion of a normal memory region. This is useful for
// abstracting away the bit start offset to avoid needing to pass it as an argument everywhere.
-class BitMemoryRegion FINAL : public ValueObject {
+class BitMemoryRegion final : public ValueObject {
public:
struct Less {
bool operator()(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) const {
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index 62834c7d35..95f2cbbcf2 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -209,11 +209,11 @@ class CommonArtTestBase : public TestType, public CommonArtTestImpl {
virtual ~CommonArtTestBase() {}
protected:
- virtual void SetUp() OVERRIDE {
+ virtual void SetUp() override {
CommonArtTestImpl::SetUp();
}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
CommonArtTestImpl::TearDown();
}
};
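// Editorial sketch, not part of this patch: with the plain keyword in use,
// the `virtual` kept on the overriders above is redundant; `override` alone
// documents intent and makes the compiler verify the signature against the
// base class. The names below are hypothetical.
class TestBase {
 public:
  virtual ~TestBase() {}
  virtual void SetUp() {}
};

class MyTest : public TestBase {
 protected:
  void SetUp() override {  // Fails to compile if TestBase::SetUp() changes.
    TestBase::SetUp();
  }
};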
diff --git a/libartbase/base/dumpable.h b/libartbase/base/dumpable.h
index 0c00505461..bd8622f0e2 100644
--- a/libartbase/base/dumpable.h
+++ b/libartbase/base/dumpable.h
@@ -29,7 +29,7 @@ namespace art {
// os << Dumpable<MyType>(my_type_instance);
//
template<typename T>
-class Dumpable FINAL {
+class Dumpable final {
public:
explicit Dumpable(const T& value) : value_(value) {
}
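// Editorial sketch of the idiom the comment above describes (assumed shape;
// only the class head and constructor appear in this hunk): Dumpable wraps a
// const reference and forwards operator<< to the wrapped object's Dump().
#include <ostream>

template <typename T>
class DumpableSketch final {
 public:
  explicit DumpableSketch(const T& value) : value_(value) {}
  void Dump(std::ostream& os) const { value_.Dump(os); }
 private:
  const T& value_;
};

template <typename T>
std::ostream& operator<<(std::ostream& os, const DumpableSketch<T>& rhs) {
  rhs.Dump(os);
  return os;
}
// Usage, for any type exposing Dump(std::ostream&) const:
//   os << DumpableSketch<MyType>(my_type_instance);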
diff --git a/libartbase/base/indenter.h b/libartbase/base/indenter.h
index a479b7d650..81d55fc2f3 100644
--- a/libartbase/base/indenter.h
+++ b/libartbase/base/indenter.h
@@ -37,7 +37,7 @@ class Indenter : public std::streambuf {
count_(count) {}
private:
- std::streamsize xsputn(const char* s, std::streamsize n) OVERRIDE {
+ std::streamsize xsputn(const char* s, std::streamsize n) override {
std::streamsize result = n; // Aborts on failure.
const char* eol = static_cast<const char*>(memchr(s, '\n', n));
while (eol != nullptr) {
@@ -54,7 +54,7 @@ class Indenter : public std::streambuf {
return result;
}
- int_type overflow(int_type c) OVERRIDE {
+ int_type overflow(int_type c) override {
if (UNLIKELY(c == std::char_traits<char>::eof())) {
out_sbuf_->pubsync();
return c;
diff --git a/libartbase/base/leb128.h b/libartbase/base/leb128.h
index d5847fd6c6..b866d37552 100644
--- a/libartbase/base/leb128.h
+++ b/libartbase/base/leb128.h
@@ -357,7 +357,7 @@ class Leb128Encoder {
// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
template <typename Vector = std::vector<uint8_t>>
-class Leb128EncodingVector FINAL : private Vector,
+class Leb128EncodingVector final : private Vector,
public Leb128Encoder<Vector> {
static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
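// Editorial sketch of the ULEB128 byte format that Leb128EncodingVector
// captures (equivalent in effect to, but not copied from, the encoder in
// this file): seven payload bits per byte, least-significant group first,
// high bit set on every byte except the last.
#include <cstdint>
#include <vector>

inline void EncodeUnsignedLeb128Sketch(std::vector<uint8_t>* dest, uint32_t value) {
  while (value > 0x7fu) {
    dest->push_back(static_cast<uint8_t>((value & 0x7fu) | 0x80u));  // more bytes follow
    value >>= 7;
  }
  dest->push_back(static_cast<uint8_t>(value));  // last byte, high bit clear
}
// Example: 624485 (0x98765) encodes as the three bytes 0xe5 0x8e 0x26.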
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index f26cf0708b..33866bba08 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -23,9 +23,6 @@
#include "android-base/macros.h"
#include "android-base/thread_annotations.h"
-#define OVERRIDE override
-#define FINAL final
-
// Declare a friend relationship in a class with a test. Used rather than FRIEND_TEST to avoid
// globally importing gtest/gtest.h into the main ART header files.
#define ART_FRIEND_TEST(test_set_name, individual_test)\
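// Editorial note: the two deleted lines above were plain aliases for the
// C++11 keywords, which is why the rest of this patch is a mechanical rename
// of every use site, e.g. in the disassembler headers earlier:
//
//   class DisassemblerX86 FINAL : public Disassembler {             // before
//     size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
//   };
//   class DisassemblerX86 final : public Disassembler {             // after
//     size_t Dump(std::ostream& os, const uint8_t* begin) override;
//   };
//
// Minimal self-contained illustration of what the keywords enforce
// (hypothetical names):
struct Base {
  virtual ~Base() {}
  virtual int Value() const { return 0; }
};
struct Leaf final : Base {                  // no further derivation allowed
  int Value() const override { return 1; }  // signature checked by compiler
  // int Value() override { return 2; }     // would not compile: overrides nothing
};
// struct Sub : Leaf {};                    // would not compile: 'Leaf' is final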
diff --git a/libartbase/base/malloc_arena_pool.cc b/libartbase/base/malloc_arena_pool.cc
index 15a5d71a6b..02e29f1d21 100644
--- a/libartbase/base/malloc_arena_pool.cc
+++ b/libartbase/base/malloc_arena_pool.cc
@@ -28,7 +28,7 @@
namespace art {
-class MallocArena FINAL : public Arena {
+class MallocArena final : public Arena {
public:
explicit MallocArena(size_t size = arena_allocator::kArenaDefaultSize);
virtual ~MallocArena();
diff --git a/libartbase/base/malloc_arena_pool.h b/libartbase/base/malloc_arena_pool.h
index c48be59eb5..9216c033c3 100644
--- a/libartbase/base/malloc_arena_pool.h
+++ b/libartbase/base/malloc_arena_pool.h
@@ -23,17 +23,17 @@
namespace art {
-class MallocArenaPool FINAL : public ArenaPool {
+class MallocArenaPool final : public ArenaPool {
public:
MallocArenaPool();
~MallocArenaPool();
- Arena* AllocArena(size_t size) OVERRIDE;
- void FreeArenaChain(Arena* first) OVERRIDE;
- size_t GetBytesAllocated() const OVERRIDE;
- void ReclaimMemory() OVERRIDE;
- void LockReclaimMemory() OVERRIDE;
+ Arena* AllocArena(size_t size) override;
+ void FreeArenaChain(Arena* first) override;
+ size_t GetBytesAllocated() const override;
+ void ReclaimMemory() override;
+ void LockReclaimMemory() override;
// Is a nop for malloc pools.
- void TrimMaps() OVERRIDE;
+ void TrimMaps() override;
private:
Arena* free_arenas_;
diff --git a/libartbase/base/memory_region.h b/libartbase/base/memory_region.h
index 206032923f..9c9ff92071 100644
--- a/libartbase/base/memory_region.h
+++ b/libartbase/base/memory_region.h
@@ -34,7 +34,7 @@ namespace art {
// Memory regions are useful for accessing memory with bounds checks in
// debug mode. They can be safely passed by value and do not assume ownership
// of the region.
-class MemoryRegion FINAL : public ValueObject {
+class MemoryRegion final : public ValueObject {
public:
struct ContentEquals {
constexpr bool operator()(const MemoryRegion& lhs, const MemoryRegion& rhs) const {
diff --git a/libartbase/base/unix_file/fd_file.h b/libartbase/base/unix_file/fd_file.h
index d61dab6ce3..19be3ef6f7 100644
--- a/libartbase/base/unix_file/fd_file.h
+++ b/libartbase/base/unix_file/fd_file.h
@@ -89,13 +89,13 @@ class FdFile : public RandomAccessFile {
virtual ~FdFile();
// RandomAccessFile API.
- int Close() OVERRIDE WARN_UNUSED;
- int64_t Read(char* buf, int64_t byte_count, int64_t offset) const OVERRIDE WARN_UNUSED;
- int SetLength(int64_t new_length) OVERRIDE WARN_UNUSED;
- int64_t GetLength() const OVERRIDE;
- int64_t Write(const char* buf, int64_t byte_count, int64_t offset) OVERRIDE WARN_UNUSED;
+ int Close() override WARN_UNUSED;
+ int64_t Read(char* buf, int64_t byte_count, int64_t offset) const override WARN_UNUSED;
+ int SetLength(int64_t new_length) override WARN_UNUSED;
+ int64_t GetLength() const override;
+ int64_t Write(const char* buf, int64_t byte_count, int64_t offset) override WARN_UNUSED;
- int Flush() OVERRIDE WARN_UNUSED;
+ int Flush() override WARN_UNUSED;
// Short for SetLength(0); Flush(); Close();
// If the file was opened with a path name and unlink = true, also calls Unlink() on the path.
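// Editorial sketch, assuming WARN_UNUSED wraps
// __attribute__((warn_unused_result)) (an assumption; its definition is not
// shown in this patch): the attribute rides along after the new `override`
// keyword exactly as it did after the OVERRIDE macro, so callers still get a
// diagnostic when they discard the result. Names are hypothetical.
#define WARN_UNUSED_SKETCH __attribute__((warn_unused_result))

class RandomAccessFileSketch {
 public:
  virtual ~RandomAccessFileSketch() {}
  virtual int Close() WARN_UNUSED_SKETCH = 0;
};

class FdFileSketch : public RandomAccessFileSketch {
 public:
  int Close() override WARN_UNUSED_SKETCH;  // same ordering as in the hunk above
};

// void Use(FdFileSketch* f) {
//   f->Close();                  // warning: ignoring return value
//   if (f->Close() != 0) { /* handle the error */ }
// }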
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index 1846a13a20..f50bf1cc9f 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -40,9 +40,9 @@ namespace {
class MemMapContainer : public DexFileContainer {
public:
explicit MemMapContainer(MemMap&& mem_map) : mem_map_(std::move(mem_map)) { }
- virtual ~MemMapContainer() OVERRIDE { }
+ virtual ~MemMapContainer() override { }
- int GetPermissions() OVERRIDE {
+ int GetPermissions() override {
if (!mem_map_.IsValid()) {
return 0;
} else {
@@ -50,11 +50,11 @@ class MemMapContainer : public DexFileContainer {
}
}
- bool IsReadOnly() OVERRIDE {
+ bool IsReadOnly() override {
return GetPermissions() == PROT_READ;
}
- bool EnableWrite() OVERRIDE {
+ bool EnableWrite() override {
CHECK(IsReadOnly());
if (!mem_map_.IsValid()) {
return false;
@@ -63,7 +63,7 @@ class MemMapContainer : public DexFileContainer {
}
}
- bool DisableWrite() OVERRIDE {
+ bool DisableWrite() override {
CHECK(!IsReadOnly());
if (!mem_map_.IsValid()) {
return false;
diff --git a/libdexfile/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h
index 420b347808..40d4673625 100644
--- a/libdexfile/dex/art_dex_file_loader.h
+++ b/libdexfile/dex/art_dex_file_loader.h
@@ -51,7 +51,7 @@ class ArtDexFileLoader : public DexFileLoader {
std::vector<uint32_t>* checksums,
std::string* error_msg,
int zip_fd = -1,
- bool* only_contains_uncompressed_dex = nullptr) const OVERRIDE;
+ bool* only_contains_uncompressed_dex = nullptr) const override;
// Opens .dex file, backed by existing memory
std::unique_ptr<const DexFile> Open(const uint8_t* base,
@@ -61,7 +61,7 @@ class ArtDexFileLoader : public DexFileLoader {
const OatDexFile* oat_dex_file,
bool verify,
bool verify_checksum,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
// Opens .dex file that has been memory-mapped by the caller.
std::unique_ptr<const DexFile> Open(const std::string& location,
diff --git a/libdexfile/dex/art_dex_file_loader_test.cc b/libdexfile/dex/art_dex_file_loader_test.cc
index 3f311b7451..a7d03637b1 100644
--- a/libdexfile/dex/art_dex_file_loader_test.cc
+++ b/libdexfile/dex/art_dex_file_loader_test.cc
@@ -44,7 +44,7 @@ static void Copy(const std::string& src, const std::string& dst) {
}
class ArtDexFileLoaderTest : public CommonArtTest {
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonArtTest::SetUp();
// Open a jar file from the boot classpath for use in basic tests of dex accessors.
std::vector<std::string> lib_core_dex_file_names = GetLibCoreDexFileNames();
diff --git a/libdexfile/dex/compact_dex_file.h b/libdexfile/dex/compact_dex_file.h
index affc9a20b0..6db68c025d 100644
--- a/libdexfile/dex/compact_dex_file.h
+++ b/libdexfile/dex/compact_dex_file.h
@@ -253,15 +253,15 @@ class CompactDexFile : public DexFile {
// Returns true if the byte string points to the magic value.
static bool IsMagicValid(const uint8_t* magic);
- virtual bool IsMagicValid() const OVERRIDE;
+ virtual bool IsMagicValid() const override;
// Returns true if the byte string after the magic is the correct value.
static bool IsVersionValid(const uint8_t* magic);
- virtual bool IsVersionValid() const OVERRIDE;
+ virtual bool IsVersionValid() const override;
// TODO This is completely a guess. We really need to do better. b/72402467
// We ask for 64 megabytes which should be big enough for any realistic dex file.
- virtual size_t GetDequickenedSize() const OVERRIDE {
+ virtual size_t GetDequickenedSize() const override {
return 64 * MB;
}
@@ -269,9 +269,9 @@ class CompactDexFile : public DexFile {
return down_cast<const Header&>(DexFile::GetHeader());
}
- virtual bool SupportsDefaultMethods() const OVERRIDE;
+ virtual bool SupportsDefaultMethods() const override;
- uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+ uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
uint32_t GetDebugInfoOffset(uint32_t dex_method_index) const {
return debug_info_offsets_.GetOffset(dex_method_index);
@@ -281,7 +281,7 @@ class CompactDexFile : public DexFile {
size_t base_size,
const uint8_t* data_begin,
size_t data_size);
- virtual uint32_t CalculateChecksum() const OVERRIDE;
+ virtual uint32_t CalculateChecksum() const override;
private:
CompactDexFile(const uint8_t* base,
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 6d9ca4aafa..d9e483d643 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -36,21 +36,21 @@ namespace {
class VectorContainer : public DexFileContainer {
public:
explicit VectorContainer(std::vector<uint8_t>&& vector) : vector_(std::move(vector)) { }
- virtual ~VectorContainer() OVERRIDE { }
+ virtual ~VectorContainer() override { }
- int GetPermissions() OVERRIDE {
+ int GetPermissions() override {
return 0;
}
- bool IsReadOnly() OVERRIDE {
+ bool IsReadOnly() override {
return true;
}
- bool EnableWrite() OVERRIDE {
+ bool EnableWrite() override {
return false;
}
- bool DisableWrite() OVERRIDE {
+ bool DisableWrite() override {
return false;
}
diff --git a/libdexfile/dex/dex_instruction.h b/libdexfile/dex/dex_instruction.h
index 6807025e13..ad8a1842fc 100644
--- a/libdexfile/dex/dex_instruction.h
+++ b/libdexfile/dex/dex_instruction.h
@@ -708,12 +708,12 @@ class InstructionOperands {
// Class for accessing operands for instructions with a range format
// (e.g. 3rc and 4rcc).
-class RangeInstructionOperands FINAL : public InstructionOperands {
+class RangeInstructionOperands final : public InstructionOperands {
public:
RangeInstructionOperands(uint32_t first_operand, size_t num_operands)
: InstructionOperands(num_operands), first_operand_(first_operand) {}
~RangeInstructionOperands() {}
- uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+ uint32_t GetOperand(size_t operand_index) const override;
private:
const uint32_t first_operand_;
@@ -723,13 +723,13 @@ class RangeInstructionOperands FINAL : public InstructionOperands {
// Class for accessing operands for instructions with a variable
// number of arguments format (e.g. 35c and 45cc).
-class VarArgsInstructionOperands FINAL : public InstructionOperands {
+class VarArgsInstructionOperands final : public InstructionOperands {
public:
VarArgsInstructionOperands(const uint32_t (&operands)[Instruction::kMaxVarArgRegs],
size_t num_operands)
: InstructionOperands(num_operands), operands_(operands) {}
~VarArgsInstructionOperands() {}
- uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+ uint32_t GetOperand(size_t operand_index) const override;
private:
const uint32_t (&operands_)[Instruction::kMaxVarArgRegs];
@@ -739,12 +739,12 @@ class VarArgsInstructionOperands FINAL : public InstructionOperands {
// Class for accessing operands without the receiver by wrapping an
// existing InstructionOperands instance.
-class NoReceiverInstructionOperands FINAL : public InstructionOperands {
+class NoReceiverInstructionOperands final : public InstructionOperands {
public:
explicit NoReceiverInstructionOperands(const InstructionOperands* const inner)
: InstructionOperands(inner->GetNumberOfOperands() - 1), inner_(inner) {}
~NoReceiverInstructionOperands() {}
- uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+ uint32_t GetOperand(size_t operand_index) const override;
private:
const InstructionOperands* const inner_;
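// Editorial sketch of the assumed accessor bodies (this patch touches only
// the declarations above); each view maps an operand index to a register
// differently:
#include <cstddef>
#include <cstdint>

// Range formats (3rc/4rcc): operands occupy consecutive registers.
inline uint32_t RangeOperandSketch(uint32_t first_operand, size_t index) {
  return first_operand + static_cast<uint32_t>(index);
}

// Var-args formats (35c/45cc): operands come from an explicit register list.
inline uint32_t VarArgsOperandSketch(const uint32_t* operands, size_t index) {
  return operands[index];
}

// Receiver-stripping wrapper: index i maps to index i + 1 of the wrapped
// instance, skipping operand 0 (the receiver).
inline uint32_t NoReceiverOperandSketch(uint32_t (*inner_get)(size_t), size_t index) {
  return inner_get(index + 1);
}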
diff --git a/libdexfile/dex/standard_dex_file.h b/libdexfile/dex/standard_dex_file.h
index 999e5b99e9..a2896353de 100644
--- a/libdexfile/dex/standard_dex_file.h
+++ b/libdexfile/dex/standard_dex_file.h
@@ -73,17 +73,17 @@ class StandardDexFile : public DexFile {
// Returns true if the byte string points to the magic value.
static bool IsMagicValid(const uint8_t* magic);
- virtual bool IsMagicValid() const OVERRIDE;
+ virtual bool IsMagicValid() const override;
// Returns true if the byte string after the magic is the correct value.
static bool IsVersionValid(const uint8_t* magic);
- virtual bool IsVersionValid() const OVERRIDE;
+ virtual bool IsVersionValid() const override;
- virtual bool SupportsDefaultMethods() const OVERRIDE;
+ virtual bool SupportsDefaultMethods() const override;
- uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+ uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
- virtual size_t GetDequickenedSize() const OVERRIDE {
+ virtual size_t GetDequickenedSize() const override {
return Size();
}
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index 42c3320ea5..417abaa435 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -35,7 +35,7 @@ static constexpr size_t kMaxMethodIds = 65535;
class ProfileCompilationInfoTest : public CommonArtTest {
public:
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonArtTest::SetUp();
allocator_.reset(new ArenaAllocator(&pool_));
}
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index a5cc38b866..5e56c9adf2 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -129,7 +129,7 @@ const DexFile* OpenDexFile(const OatDexFile* oat_dex_file, std::string* error_ms
}
template <typename ElfTypes>
-class OatSymbolizer FINAL {
+class OatSymbolizer final {
public:
OatSymbolizer(const OatFile* oat_file, const std::string& output_name, bool no_bits) :
oat_file_(oat_file),
@@ -1980,7 +1980,7 @@ class ImageDumper {
public:
explicit DumpArtMethodVisitor(ImageDumper* image_dumper) : image_dumper_(image_dumper) {}
- virtual void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ virtual void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& indent_os = image_dumper_->vios_.Stream();
indent_os << method << " " << " ArtMethod: " << ArtMethod::PrettyMethod(method) << "\n";
image_dumper_->DumpMethod(method, indent_os);
@@ -3351,7 +3351,7 @@ struct OatdumpArgs : public CmdlineArgs {
using Base = CmdlineArgs;
virtual ParseStatus ParseCustom(const StringPiece& option,
- std::string* error_msg) OVERRIDE {
+ std::string* error_msg) override {
{
ParseStatus base_parse = Base::ParseCustom(option, error_msg);
if (base_parse != kParseUnknownArgument) {
@@ -3408,7 +3408,7 @@ struct OatdumpArgs : public CmdlineArgs {
return kParseOk;
}
- virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+ virtual ParseStatus ParseChecks(std::string* error_msg) override {
// Infer boot image location from the image location if possible.
if (boot_image_location_ == nullptr) {
boot_image_location_ = image_location_;
@@ -3536,7 +3536,7 @@ struct OatdumpArgs : public CmdlineArgs {
};
struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
- virtual bool NeedsRuntime() OVERRIDE {
+ virtual bool NeedsRuntime() override {
CHECK(args_ != nullptr);
// If we are only doing the oat file, disable absolute_addresses. Keep them for image dumping.
@@ -3563,7 +3563,7 @@ struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
!args_->symbolize_;
}
- virtual bool ExecuteWithoutRuntime() OVERRIDE {
+ virtual bool ExecuteWithoutRuntime() override {
CHECK(args_ != nullptr);
CHECK(args_->oat_filename_ != nullptr);
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index 6e991dee3d..d9f34a50db 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -58,13 +58,13 @@ struct JvmtiMethodInspectionCallback : public art::MethodInspectionCallback {
explicit JvmtiMethodInspectionCallback(DeoptManager* manager) : manager_(manager) {}
bool IsMethodBeingInspected(art::ArtMethod* method)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+ override REQUIRES_SHARED(art::Locks::mutator_lock_);
bool IsMethodSafeToJit(art::ArtMethod* method)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+ override REQUIRES_SHARED(art::Locks::mutator_lock_);
bool MethodNeedsDebugVersion(art::ArtMethod* method)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+ override REQUIRES_SHARED(art::Locks::mutator_lock_);
private:
DeoptManager* manager_;
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index 6a8ba48109..e98517fdff 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -50,7 +50,7 @@ namespace impl {
// pending exceptions since they can cause new ones to be thrown. In accordance with the JVMTI
// specification we allow exceptions originating from events to overwrite the current exception,
// including exceptions originating from earlier events.
-class ScopedEventDispatchEnvironment FINAL : public art::ValueObject {
+class ScopedEventDispatchEnvironment final : public art::ValueObject {
public:
ScopedEventDispatchEnvironment() : env_(nullptr), throw_(nullptr, nullptr) {
DCHECK_EQ(art::Thread::Current()->GetState(), art::ThreadState::kNative);
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index f71a5dc72d..43d0b10914 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -265,7 +265,7 @@ class JvmtiDdmChunkListener : public art::DdmCallback {
explicit JvmtiDdmChunkListener(EventHandler* handler) : handler_(handler) {}
void DdmPublishChunk(uint32_t type, const art::ArrayRef<const uint8_t>& data)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kDdmPublishChunk)) {
art::Thread* self = art::Thread::Current();
handler_->DispatchEvent<ArtJvmtiEvent::kDdmPublishChunk>(
@@ -288,7 +288,7 @@ class JvmtiAllocationListener : public art::gc::AllocationListener {
explicit JvmtiAllocationListener(EventHandler* handler) : handler_(handler) {}
void ObjectAllocated(art::Thread* self, art::ObjPtr<art::mirror::Object>* obj, size_t byte_count)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
DCHECK_EQ(self, art::Thread::Current());
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kVmObjectAlloc)) {
@@ -337,7 +337,7 @@ class JvmtiMonitorListener : public art::MonitorCallback {
explicit JvmtiMonitorListener(EventHandler* handler) : handler_(handler) {}
void MonitorContendedLocking(art::Monitor* m)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorContendedEnter)) {
art::Thread* self = art::Thread::Current();
art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -351,7 +351,7 @@ class JvmtiMonitorListener : public art::MonitorCallback {
}
void MonitorContendedLocked(art::Monitor* m)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorContendedEntered)) {
art::Thread* self = art::Thread::Current();
art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -365,7 +365,7 @@ class JvmtiMonitorListener : public art::MonitorCallback {
}
void ObjectWaitStart(art::Handle<art::mirror::Object> obj, int64_t timeout)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWait)) {
art::Thread* self = art::Thread::Current();
art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -392,7 +392,7 @@ class JvmtiMonitorListener : public art::MonitorCallback {
//
// See b/65558434 for more discussion.
void MonitorWaitFinished(art::Monitor* m, bool timeout)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWaited)) {
art::Thread* self = art::Thread::Current();
art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -429,11 +429,11 @@ class JvmtiGcPauseListener : public art::gc::GcPauseListener {
start_enabled_(false),
finish_enabled_(false) {}
- void StartPause() OVERRIDE {
+ void StartPause() override {
handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionStart>(art::Thread::Current());
}
- void EndPause() OVERRIDE {
+ void EndPause() override {
handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionFinish>(art::Thread::Current());
}
@@ -475,7 +475,7 @@ static void SetupGcPauseTracking(JvmtiGcPauseListener* listener, ArtJvmtiEvent e
}
}
-class JvmtiMethodTraceListener FINAL : public art::instrumentation::InstrumentationListener {
+class JvmtiMethodTraceListener final : public art::instrumentation::InstrumentationListener {
public:
explicit JvmtiMethodTraceListener(EventHandler* handler) : event_handler_(handler) {}
@@ -484,7 +484,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (!method->IsRuntimeMethod() &&
event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodEntry)) {
art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -501,7 +501,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> return_value)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (!method->IsRuntimeMethod() &&
event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
DCHECK_EQ(
@@ -528,7 +528,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const art::JValue& return_value)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (!method->IsRuntimeMethod() &&
event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
DCHECK_NE(
@@ -556,7 +556,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (!method->IsRuntimeMethod() &&
event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
jvalue val;
@@ -586,7 +586,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method,
uint32_t new_dex_pc)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
DCHECK(!method->IsRuntimeMethod());
// Default methods might be copied to multiple classes. We need to get the canonical version of
// this method so that we can check for breakpoints correctly.
@@ -613,7 +613,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::ArtMethod* method,
uint32_t dex_pc,
art::ArtField* field)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldAccess)) {
art::JNIEnvExt* jnienv = self->GetJniEnv();
// DCHECK(!self->IsExceptionPending());
@@ -638,7 +638,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
uint32_t dex_pc,
art::ArtField* field,
art::Handle<art::mirror::Object> new_val)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
art::JNIEnvExt* jnienv = self->GetJniEnv();
// DCHECK(!self->IsExceptionPending());
@@ -670,7 +670,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
uint32_t dex_pc,
art::ArtField* field,
const art::JValue& field_value)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
art::JNIEnvExt* jnienv = self->GetJniEnv();
DCHECK(!self->IsExceptionPending());
@@ -700,7 +700,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
}
void WatchedFramePop(art::Thread* self, const art::ShadowFrame& frame)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
art::JNIEnvExt* jnienv = self->GetJniEnv();
jboolean is_exception_pending = self->IsExceptionPending();
RunEventCallback<ArtJvmtiEvent::kFramePop>(
@@ -720,7 +720,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
// Finds the location where this exception will most likely be caught. We ignore intervening
// native frames (which could catch the exception) and return the closest java frame with a
// compatible catch statement.
- class CatchLocationFinder FINAL : public art::StackVisitor {
+ class CatchLocationFinder final : public art::StackVisitor {
public:
CatchLocationFinder(art::Thread* target,
art::Handle<art::mirror::Class> exception_class,
@@ -733,7 +733,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
catch_method_ptr_(out_catch_method),
catch_dex_pc_ptr_(out_catch_pc) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::ArtMethod* method = GetMethod();
DCHECK(method != nullptr);
if (method->IsRuntimeMethod()) {
@@ -782,7 +782,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
// Call-back when an exception is thrown.
void ExceptionThrown(art::Thread* self, art::Handle<art::mirror::Throwable> exception_object)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
DCHECK(self->IsExceptionThrownByCurrentMethod(exception_object.Get()));
// The instrumentation events get rid of this for us.
DCHECK(!self->IsExceptionPending());
@@ -812,7 +812,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
// Call-back when an exception is handled.
void ExceptionHandled(art::Thread* self, art::Handle<art::mirror::Throwable> exception_object)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
// Since the exception has already been handled there shouldn't be one pending.
DCHECK(!self->IsExceptionPending());
if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kExceptionCatch)) {
@@ -839,7 +839,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
return;
}
@@ -849,7 +849,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::ArtMethod* caller ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::ArtMethod* callee ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
return;
}
@@ -959,7 +959,7 @@ void EventHandler::HandleLocalAccessCapabilityAdded() {
: runtime_(runtime) {}
bool operator()(art::ObjPtr<art::mirror::Class> klass)
- OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ override REQUIRES(art::Locks::mutator_lock_) {
if (!klass->IsLoaded()) {
// Skip classes that aren't loaded since they might not have fully allocated and initialized
// their methods. Furthermore, since the jvmti-plugin must have been loaded by this point
diff --git a/openjdkjvmti/object_tagging.h b/openjdkjvmti/object_tagging.h
index 1b8366a501..4181302f3a 100644
--- a/openjdkjvmti/object_tagging.h
+++ b/openjdkjvmti/object_tagging.h
@@ -45,15 +45,15 @@ namespace openjdkjvmti {
struct ArtJvmTiEnv;
class EventHandler;
-class ObjectTagTable FINAL : public JvmtiWeakTable<jlong> {
+class ObjectTagTable final : public JvmtiWeakTable<jlong> {
public:
ObjectTagTable(EventHandler* event_handler, ArtJvmTiEnv* env)
: event_handler_(event_handler), jvmti_env_(env) {}
- bool Set(art::mirror::Object* obj, jlong tag) OVERRIDE
+ bool Set(art::mirror::Object* obj, jlong tag) override
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_);
- bool SetLocked(art::mirror::Object* obj, jlong tag) OVERRIDE
+ bool SetLocked(art::mirror::Object* obj, jlong tag) override
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
@@ -73,8 +73,8 @@ class ObjectTagTable FINAL : public JvmtiWeakTable<jlong> {
}
protected:
- bool DoesHandleNullOnSweep() OVERRIDE;
- void HandleNullSweep(jlong tag) OVERRIDE;
+ bool DoesHandleNullOnSweep() override;
+ void HandleNullSweep(jlong tag) override;
private:
EventHandler* event_handler_;
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 209add36e1..f1d6fb0b5d 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -163,7 +163,7 @@ struct ClassCallback : public art::ClassLoadCallback {
const art::DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
/*out*/art::DexFile const** final_dex_file,
/*out*/art::DexFile::ClassDef const** final_class_def)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
bool is_enabled =
event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookRetransformable) ||
event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookNonRetransformable);
@@ -381,7 +381,7 @@ struct ClassCallback : public art::ClassLoadCallback {
void VisitRoots(art::mirror::Object*** roots,
size_t count,
const art::RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE {
+ override {
for (size_t i = 0; i != count; ++i) {
if (*roots[i] == input_) {
*roots[i] = output_;
@@ -392,7 +392,7 @@ struct ClassCallback : public art::ClassLoadCallback {
void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots,
size_t count,
const art::RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
for (size_t i = 0; i != count; ++i) {
if (roots[i]->AsMirrorPtr() == input_) {
roots[i]->Assign(output_);
@@ -418,7 +418,7 @@ struct ClassCallback : public art::ClassLoadCallback {
WeakGlobalUpdate(art::mirror::Class* root_input, art::mirror::Class* root_output)
: input_(root_input), output_(root_output) {}
- art::mirror::Object* IsMarked(art::mirror::Object* obj) OVERRIDE {
+ art::mirror::Object* IsMarked(art::mirror::Object* obj) override {
if (obj == input_) {
return output_;
}
diff --git a/openjdkjvmti/ti_dump.cc b/openjdkjvmti/ti_dump.cc
index 253580e0e1..c9abb71e4c 100644
--- a/openjdkjvmti/ti_dump.cc
+++ b/openjdkjvmti/ti_dump.cc
@@ -44,7 +44,7 @@
namespace openjdkjvmti {
struct DumpCallback : public art::RuntimeSigQuitCallback {
- void SigQuit() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void SigQuit() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::Thread* thread = art::Thread::Current();
art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
event_handler->DispatchEvent<ArtJvmtiEvent::kDataDumpRequest>(art::Thread::Current());
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index d23370bc5c..85aa946356 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -760,7 +760,7 @@ jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env,
user_data);
}
-class FollowReferencesHelper FINAL {
+class FollowReferencesHelper final {
public:
FollowReferencesHelper(HeapUtil* h,
jvmtiEnv* jvmti_env,
@@ -828,7 +828,7 @@ class FollowReferencesHelper FINAL {
}
private:
- class CollectAndReportRootsVisitor FINAL : public art::RootVisitor {
+ class CollectAndReportRootsVisitor final : public art::RootVisitor {
public:
CollectAndReportRootsVisitor(FollowReferencesHelper* helper,
ObjectTagTable* tag_table,
@@ -841,7 +841,7 @@ class FollowReferencesHelper FINAL {
stop_reports_(false) {}
void VisitRoots(art::mirror::Object*** roots, size_t count, const art::RootInfo& info)
- OVERRIDE
+ override
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) {
for (size_t i = 0; i != count; ++i) {
@@ -852,7 +852,7 @@ class FollowReferencesHelper FINAL {
void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots,
size_t count,
const art::RootInfo& info)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_)
+ override REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) {
for (size_t i = 0; i != count; ++i) {
AddRoot(roots[i]->AsMirrorPtr(), info);
@@ -1347,7 +1347,7 @@ jvmtiError HeapUtil::GetLoadedClasses(jvmtiEnv* env,
explicit ReportClassVisitor(art::Thread* self) : self_(self) {}
bool operator()(art::ObjPtr<art::mirror::Class> klass)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (klass->IsLoaded() || klass->IsErroneous()) {
classes_.push_back(self_->GetJniEnv()->AddLocalReference<jclass>(klass));
}
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 87d832caec..1588df4086 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -66,7 +66,7 @@ struct TiMethodCallback : public art::MethodCallback {
void RegisterNativeMethod(art::ArtMethod* method,
const void* cur_method,
/*out*/void** new_method)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kNativeMethodBind)) {
art::Thread* thread = art::Thread::Current();
art::JNIEnvExt* jnienv = thread->GetJniEnv();
@@ -550,7 +550,7 @@ class CommonLocalVariableClosure : public art::Closure {
CommonLocalVariableClosure(jint depth, jint slot)
: result_(ERR(INTERNAL)), depth_(depth), slot_(slot) {}
- void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES(art::Locks::mutator_lock_) {
art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
art::ScopedAssertNoThreadSuspension sants("CommonLocalVariableClosure::Run");
std::unique_ptr<art::Context> context(art::Context::Create());
@@ -702,7 +702,7 @@ class GetLocalVariableClosure : public CommonLocalVariableClosure {
jvmtiError GetTypeError(art::ArtMethod* method ATTRIBUTE_UNUSED,
art::Primitive::Type slot_type,
const std::string& descriptor ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
switch (slot_type) {
case art::Primitive::kPrimByte:
case art::Primitive::kPrimChar:
@@ -722,7 +722,7 @@ class GetLocalVariableClosure : public CommonLocalVariableClosure {
}
jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
switch (type_) {
case art::Primitive::kPrimNot: {
uint32_t ptr_val;
@@ -816,7 +816,7 @@ class SetLocalVariableClosure : public CommonLocalVariableClosure {
jvmtiError GetTypeError(art::ArtMethod* method,
art::Primitive::Type slot_type,
const std::string& descriptor)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
switch (slot_type) {
case art::Primitive::kPrimNot: {
if (type_ != art::Primitive::kPrimNot) {
@@ -852,7 +852,7 @@ class SetLocalVariableClosure : public CommonLocalVariableClosure {
}
jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
switch (type_) {
case art::Primitive::kPrimNot: {
uint32_t ptr_val;
@@ -941,7 +941,7 @@ class GetLocalInstanceClosure : public art::Closure {
depth_(depth),
val_(nullptr) {}
- void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES(art::Locks::mutator_lock_) {
art::ScopedAssertNoThreadSuspension sants("GetLocalInstanceClosure::Run");
art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
std::unique_ptr<art::Context> context(art::Context::Create());
diff --git a/openjdkjvmti/ti_phase.cc b/openjdkjvmti/ti_phase.cc
index 7157974c13..4fa97f10aa 100644
--- a/openjdkjvmti/ti_phase.cc
+++ b/openjdkjvmti/ti_phase.cc
@@ -56,7 +56,7 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback {
return soa.AddLocalReference<jthread>(soa.Self()->GetPeer());
}
- void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) override {
art::Thread* self = art::Thread::Current();
switch (phase) {
case RuntimePhase::kInitialAgents:
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 8707e272c6..2ec2f04e73 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -158,7 +158,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
obsoleted_methods_(obsoleted_methods),
obsolete_maps_(obsolete_maps) { }
- ~ObsoleteMethodStackVisitor() OVERRIDE {}
+ ~ObsoleteMethodStackVisitor() override {}
public:
// Returns true if we successfully installed obsolete methods on this thread, filling
@@ -177,7 +177,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
visitor.WalkStack();
}
- bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES(art::Locks::mutator_lock_) {
art::ScopedAssertNoThreadSuspension snts("Fixing up the stack for obsolete methods.");
art::ArtMethod* old_method = GetMethod();
if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index bcbab14cdd..1189b1dec5 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -186,7 +186,7 @@ static void Update() REQUIRES_SHARED(art::Locks::mutator_lock_) {
}
struct SearchCallback : public art::RuntimePhaseCallback {
- void NextRuntimePhase(RuntimePhase phase) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void NextRuntimePhase(RuntimePhase phase) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (phase == RuntimePhase::kStart) {
// It's time to update the system properties.
Update();
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 318d98d877..b6969afff1 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -128,7 +128,7 @@ struct GetStackTraceVectorClosure : public art::Closure {
start_result(0),
stop_result(0) {}
- void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
auto frames_fn = [&](jvmtiFrameInfo info) {
frames.push_back(info);
};
@@ -195,7 +195,7 @@ struct GetStackTraceDirectClosure : public art::Closure {
DCHECK_GE(start_input, 0u);
}
- void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
auto frames_fn = [&](jvmtiFrameInfo info) {
frame_buffer[index] = info;
++index;
@@ -287,7 +287,7 @@ struct GetAllStackTracesVectorClosure : public art::Closure {
GetAllStackTracesVectorClosure(size_t stop, Data* data_)
: barrier(0), stop_input(stop), data(data_) {}
- void Run(art::Thread* thread) OVERRIDE
+ void Run(art::Thread* thread) override
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!data->mutex) {
art::Thread* self = art::Thread::Current();
@@ -678,7 +678,7 @@ struct GetFrameCountClosure : public art::Closure {
public:
GetFrameCountClosure() : count(0) {}
- void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
GetFrameCountVisitor visitor(self);
visitor.WalkStack(false);
@@ -759,7 +759,7 @@ struct GetLocationClosure : public art::Closure {
public:
explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
- void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
GetLocationVisitor visitor(self, n);
visitor.WalkStack(false);
@@ -842,7 +842,7 @@ struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor
delete context_;
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
if (!GetMethod()->IsRuntimeMethod()) {
art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
@@ -867,7 +867,7 @@ struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor
}
void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
for (const art::Handle<art::mirror::Object>& m : monitors) {
if (m.Get() == obj) {
return;
@@ -889,7 +889,7 @@ struct MonitorInfoClosure : public art::Closure {
explicit MonitorInfoClosure(Fn handle_results)
: err_(OK), handle_results_(handle_results) {}
- void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
// Find the monitors on the stack.
MonitorVisitor visitor(target);
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 949b566860..e53309445d 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -82,7 +82,7 @@ struct ThreadCallback : public art::ThreadLifecycleCallback {
thread.get());
}
- void ThreadStart(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void ThreadStart(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (!started) {
// Runtime isn't started. We only expect at most the signal handler or JIT threads to be
// started here.
@@ -101,7 +101,7 @@ struct ThreadCallback : public art::ThreadLifecycleCallback {
Post<ArtJvmtiEvent::kThreadStart>(self);
}
- void ThreadDeath(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void ThreadDeath(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
Post<ArtJvmtiEvent::kThreadEnd>(self);
}
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index 8797553b07..d87ca56b85 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -68,7 +68,7 @@
namespace openjdkjvmti {
// A FaultHandler that will deal with initializing ClassDefinitions when they are actually needed.
-class TransformationFaultHandler FINAL : public art::FaultHandler {
+class TransformationFaultHandler final : public art::FaultHandler {
public:
explicit TransformationFaultHandler(art::FaultManager* manager)
: art::FaultHandler(manager),
@@ -84,7 +84,7 @@ class TransformationFaultHandler FINAL : public art::FaultHandler {
uninitialized_class_definitions_.clear();
}
- bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+ bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) override {
DCHECK_EQ(sig, SIGSEGV);
art::Thread* self = art::Thread::Current();
if (UNLIKELY(uninitialized_class_definitions_lock_.IsExclusiveHeld(self))) {
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 8169979759..02fc92533f 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -815,7 +815,7 @@ class PatchOat::PatchOatArtFieldVisitor : public ArtFieldVisitor {
public:
explicit PatchOatArtFieldVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
- void Visit(ArtField* field) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dest = patch_oat_->RelocatedCopyOf(field);
dest->SetDeclaringClass(
patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass().Ptr()));
@@ -834,7 +834,7 @@ class PatchOat::PatchOatArtMethodVisitor : public ArtMethodVisitor {
public:
explicit PatchOatArtMethodVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
- void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* const dest = patch_oat_->RelocatedCopyOf(method);
patch_oat_->FixupMethod(method, dest);
}
@@ -877,7 +877,7 @@ class PatchOat::FixupRootVisitor : public RootVisitor {
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
*roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
}
@@ -885,7 +885,7 @@ class PatchOat::FixupRootVisitor : public RootVisitor {
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
}
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 370f59dc8a..286b6867a3 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -40,7 +40,7 @@ static constexpr size_t kMaxMethodIds = 65535;
class ProfileAssistantTest : public CommonRuntimeTest {
public:
- void PostRuntimeCreate() OVERRIDE {
+ void PostRuntimeCreate() override {
allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
}
diff --git a/profman/profman.cc b/profman/profman.cc
index 9b470973c6..cecd3c2d63 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -185,7 +185,7 @@ NO_RETURN static void Abort(const char* msg) {
// TODO(calin): This class has grown too much from its initial design. Split the functionality
// into smaller, more contained pieces.
-class ProfMan FINAL {
+class ProfMan final {
public:
ProfMan() :
reference_profile_file_fd_(kInvalidFd),
diff --git a/runtime/aot_class_linker.h b/runtime/aot_class_linker.h
index 927b53302b..6a8133efc1 100644
--- a/runtime/aot_class_linker.h
+++ b/runtime/aot_class_linker.h
@@ -34,14 +34,14 @@ class AotClassLinker : public ClassLinker {
Handle<mirror::Class> klass,
verifier::HardFailLogMode log_level,
std::string* error_msg)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_);
bool InitializeClass(Thread *self,
Handle<mirror::Class> klass,
bool can_run_clinit,
bool can_init_parents)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_);
};
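
The annotation is not cosmetic: a declaration marked override is rejected by the compiler unless it actually overrides a base-class virtual, so signature drift fails at build time instead of silently introducing a new virtual. A sketch with simplified, hypothetical signatures:

    struct Base {
      virtual bool InitializeClass(bool can_run_clinit, bool can_init_parents);
    };
    struct Derived : Base {
      // First parameter type differs from Base: with 'override' this line is a
      // compile error; without it, it would quietly declare an unrelated virtual.
      bool InitializeClass(int can_run_clinit, bool can_init_parents) override;
    };
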
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index d4ceede07a..d4dbbf9541 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -46,7 +46,7 @@ namespace art {
class ArchTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use 64-bit ISA for runtime setup to make method size potentially larger
// than necessary (rather than smaller) during CreateCalleeSaveMethod
options->push_back(std::make_pair("imageinstructionset", "x86_64"));
@@ -55,7 +55,7 @@ class ArchTest : public CommonRuntimeTest {
// Do not do any of the finalization. We don't want to run any code, we don't need the heap
// prepared, it actually will be a problem with setting the instruction set to x86_64 in
// SetUpRuntimeOptions.
- void FinalizeSetup() OVERRIDE {
+ void FinalizeSetup() override {
ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
}
};
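
Test fixtures follow the same pattern: hooks such as SetUp, SetUpRuntimeOptions, and FinalizeSetup are virtuals on CommonRuntimeTest that subclasses customize. A minimal sketch of such a fixture (the fixture name is illustrative; the option pair mirrors the one above):

    class MyArchTest : public CommonRuntimeTest {
     protected:
      void SetUpRuntimeOptions(RuntimeOptions* options) override {
        // RuntimeOptions is a sequence of (string, const void*) pairs.
        options->push_back(std::make_pair("imageinstructionset", "x86_64"));
      }
    };
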
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index b9802967fe..845cdaa100 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -26,7 +26,7 @@
namespace art {
namespace arm {
-class ArmContext FINAL : public Context {
+class ArmContext final : public Context {
public:
ArmContext() {
Reset();
@@ -34,55 +34,55 @@ class ArmContext FINAL : public Context {
virtual ~ArmContext() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
SetGPR(PC, new_pc);
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(R0, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfSRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfSRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
private:
// Pointers to register locations, initialized to null or the specific registers below.
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index f82534b511..d964148900 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -25,7 +25,7 @@ class ArmInstructionSetFeatures;
using ArmFeaturesUniquePtr = std::unique_ptr<const ArmInstructionSetFeatures>;
// Instruction set features relevant to the ARM architecture.
-class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
+class ArmInstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
static ArmFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -47,18 +47,18 @@ class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static ArmFeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+ bool HasAtLeast(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kArm;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
// Return a string of the form "div,lpae" or "none".
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Is the divide instruction feature enabled?
bool HasDivideInstruction() const {
@@ -82,7 +82,7 @@ class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "div", "lpae" adding these to a new ArmInstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
ArmInstructionSetFeatures(bool has_div,
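
As a usage note, these feature objects are normally obtained through the static factories declared above; a sketch using the ARM factory (the variant string and follow-up calls are taken from this header):

    std::string error_msg;
    ArmFeaturesUniquePtr features =
        ArmInstructionSetFeatures::FromVariant("cortex-a15", &error_msg);
    if (features == nullptr) {
      // error_msg describes the unrecognized variant.
    } else {
      // e.g. features->HasDivideInstruction() or features->GetFeatureString().
    }
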
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index e64cfb86ea..95dac90ac7 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -26,7 +26,7 @@
namespace art {
namespace arm64 {
-class Arm64Context FINAL : public Context {
+class Arm64Context final : public Context {
public:
Arm64Context() {
Reset();
@@ -34,56 +34,56 @@ class Arm64Context FINAL : public Context {
~Arm64Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_lr) OVERRIDE {
+ void SetPC(uintptr_t new_lr) override {
SetGPR(kPC, new_lr);
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(X0, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, arraysize(gprs_));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, arraysize(gprs_));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
// Note: PC isn't an available GPR (outside of internals), so don't allow retrieving the value.
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfXRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfDRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfDRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
static constexpr size_t kPC = kNumberOfXRegisters;
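
These Context subclasses are driven through the base-class interface; the usual call pattern, visible elsewhere in this diff wherever art::Context::Create() appears, is roughly as follows (new_sp and new_pc stand for values the caller computed from a stack walk):

    std::unique_ptr<art::Context> context(art::Context::Create());
    // ... a stack walk fills in callee-save register locations, then:
    context->SetSP(new_sp);  // target stack pointer
    context->SetPC(new_pc);  // resume address, e.g. an exception handler
    context->DoLongJump();   // transfers control; declared NO_RETURN
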
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index af2d4c79f9..163a2d8eba 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -25,7 +25,7 @@ class Arm64InstructionSetFeatures;
using Arm64FeaturesUniquePtr = std::unique_ptr<const Arm64InstructionSetFeatures>;
// Instruction set features relevant to the ARM64 architecture.
-class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+class Arm64InstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
static Arm64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -47,16 +47,16 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static Arm64FeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kArm64;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
// Return a string of the form "a53" or "none".
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Generate code addressing Cortex-A53 erratum 835769?
bool NeedFixCortexA53_835769() const {
@@ -74,7 +74,7 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "a53" adding these to a new ArmInstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
Arm64InstructionSetFeatures(bool needs_a53_835769_fix, bool needs_a53_843419_fix)
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index 7e073b288a..960aea1fcd 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -33,53 +33,53 @@ class MipsContext : public Context {
}
virtual ~MipsContext() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
SetGPR(T9, new_pc);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(A0, new_arg0_value);
}
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 76bc639277..ab5bb3c101 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -28,7 +28,7 @@ class MipsInstructionSetFeatures;
using MipsFeaturesUniquePtr = std::unique_ptr<const MipsInstructionSetFeatures>;
// Instruction set features relevant to the MIPS architecture.
-class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
+class MipsInstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "r4000" and create InstructionSetFeatures.
static MipsFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -50,15 +50,15 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static MipsFeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kMips;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Is this an ISA revision greater than 2 opening up new opcodes.
bool IsMipsIsaRevGreaterThanEqual2() const {
@@ -87,7 +87,7 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
MipsInstructionSetFeatures(bool fpu_32bit, bool mips_isa_gte2, bool r6, bool msa)
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index b2a6138471..857abfd2b8 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -33,53 +33,53 @@ class Mips64Context : public Context {
}
virtual ~Mips64Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
SetGPR(T9, new_pc);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(A0, new_arg0_value);
}
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
index 27e544ed91..e204d9de83 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ b/runtime/arch/mips64/instruction_set_features_mips64.h
@@ -25,7 +25,7 @@ class Mips64InstructionSetFeatures;
using Mips64FeaturesUniquePtr = std::unique_ptr<const Mips64InstructionSetFeatures>;
// Instruction set features relevant to the MIPS64 architecture.
-class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+class Mips64InstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "r4000" and create InstructionSetFeatures.
static Mips64FeaturesUniquePtr FromVariant(const std::string& variant,
@@ -48,15 +48,15 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static Mips64FeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kMips64;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Does it have MSA (MIPS SIMD Architecture) support.
bool HasMsa() const {
@@ -69,7 +69,7 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "fpu32", "mips2" adding these to a new Mips64InstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
explicit Mips64InstructionSetFeatures(bool msa) : InstructionSetFeatures(), msa_(msa) {
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index b0c0e43e35..e8df90eccd 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -37,7 +37,7 @@ namespace art {
class StubTest : public CommonRuntimeTest {
protected:
// We need callee-save methods set up in the Runtime for exceptions.
- void SetUp() OVERRIDE {
+ void SetUp() override {
// Do the normal setup.
CommonRuntimeTest::SetUp();
@@ -54,7 +54,7 @@ class StubTest : public CommonRuntimeTest {
}
}
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use a smaller heap
for (std::pair<std::string, const void*>& pair : *options) {
if (pair.first.find("-Xmx") == 0) {
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index 0ebb22bd6d..5b438c3623 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -26,62 +26,62 @@
namespace art {
namespace x86 {
-class X86Context FINAL : public Context {
+class X86Context final : public Context {
public:
X86Context() {
Reset();
}
virtual ~X86Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(ESP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
eip_ = new_pc;
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(EAX, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
private:
// Pretend XMM registers are made of uint32_t pieces, because they are manipulated
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 57cf4b2741..acf13c491e 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -49,17 +49,17 @@ class X86InstructionSetFeatures : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static X86FeaturesUniquePtr FromAssembly(bool x86_64 = false);
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+ bool HasAtLeast(const InstructionSetFeatures* other) const override;
- virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ virtual InstructionSet GetInstructionSet() const override {
return InstructionSet::kX86;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
virtual ~X86InstructionSetFeatures() {}
@@ -71,7 +71,7 @@ class X86InstructionSetFeatures : public InstructionSetFeatures {
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
virtual std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
return AddFeaturesFromSplitString(features, false, error_msg);
}
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index d242693f81..ab38614c98 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -26,62 +26,62 @@
namespace art {
namespace x86_64 {
-class X86_64Context FINAL : public Context {
+class X86_64Context final : public Context {
public:
X86_64Context() {
Reset();
}
virtual ~X86_64Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(RSP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
rip_ = new_pc;
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(RDI, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
private:
// Pointers to register locations. Values are initialized to null or the special registers below.
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
index e76490ba13..76258fa5d4 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64.h
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -25,7 +25,7 @@ class X86_64InstructionSetFeatures;
using X86_64FeaturesUniquePtr = std::unique_ptr<const X86_64InstructionSetFeatures>;
// Instruction set features relevant to the X86_64 architecture.
-class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
+class X86_64InstructionSetFeatures final : public X86InstructionSetFeatures {
public:
// Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
static X86_64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg) {
@@ -59,7 +59,7 @@ class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
return Convert(X86InstructionSetFeatures::FromAssembly(true));
}
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kX86_64;
}
@@ -69,7 +69,7 @@ class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
return X86InstructionSetFeatures::AddFeaturesFromSplitString(features, true, error_msg);
}
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 123595c6fe..5afd000b05 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -40,7 +40,7 @@ class Object;
class String;
} // namespace mirror
-class ArtField FINAL {
+class ArtField final {
public:
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index ce08cb0bea..48ddc6992d 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -66,7 +66,7 @@ using MethodDexCachePair = NativeDexCachePair<ArtMethod>;
using MethodDexCacheType = std::atomic<MethodDexCachePair>;
} // namespace mirror
-class ArtMethod FINAL {
+class ArtMethod final {
public:
// Should the class state be checked on sensitive operations?
DECLARE_RUNTIME_DEBUG_FLAG(kCheckDeclaringClassState);
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index a9fbafe7ab..851c23f1cb 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -31,11 +31,11 @@
namespace art {
-class MemMapArena FINAL : public Arena {
+class MemMapArena final : public Arena {
public:
MemMapArena(size_t size, bool low_4gb, const char* name);
virtual ~MemMapArena();
- void Release() OVERRIDE;
+ void Release() override;
private:
static MemMap Allocate(size_t size, bool low_4gb, const char* name);
diff --git a/runtime/base/mem_map_arena_pool.h b/runtime/base/mem_map_arena_pool.h
index 24e150e1e7..e98ef07ddb 100644
--- a/runtime/base/mem_map_arena_pool.h
+++ b/runtime/base/mem_map_arena_pool.h
@@ -21,17 +21,17 @@
namespace art {
-class MemMapArenaPool FINAL : public ArenaPool {
+class MemMapArenaPool final : public ArenaPool {
public:
explicit MemMapArenaPool(bool low_4gb = false, const char* name = "LinearAlloc");
virtual ~MemMapArenaPool();
- Arena* AllocArena(size_t size) OVERRIDE;
- void FreeArenaChain(Arena* first) OVERRIDE;
- size_t GetBytesAllocated() const OVERRIDE;
- void ReclaimMemory() OVERRIDE;
- void LockReclaimMemory() OVERRIDE;
+ Arena* AllocArena(size_t size) override;
+ void FreeArenaChain(Arena* first) override;
+ size_t GetBytesAllocated() const override;
+ void ReclaimMemory() override;
+ void LockReclaimMemory() override;
// Trim the maps in arenas by madvising, used by JIT to reduce memory usage.
- void TrimMaps() OVERRIDE;
+ void TrimMaps() override;
private:
const bool low_4gb_;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 044c4c2f78..28b29125cd 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -125,7 +125,7 @@ static void BackOff(uint32_t i) {
}
}
-class ScopedAllMutexesLock FINAL {
+class ScopedAllMutexesLock final {
public:
explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
for (uint32_t i = 0;
@@ -144,7 +144,7 @@ class ScopedAllMutexesLock FINAL {
const BaseMutex* const mutex_;
};
-class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
+class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
public:
explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
for (uint32_t i = 0;
@@ -166,7 +166,7 @@ class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
};
// Scoped class that generates events at the beginning and end of lock contention.
-class ScopedContentionRecorder FINAL : public ValueObject {
+class ScopedContentionRecorder final : public ValueObject {
public:
ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
: mutex_(kLogLockContentions ? mutex : nullptr),
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index fba209a0b6..d127d0f01f 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -297,7 +297,7 @@ class LOCKABLE Mutex : public BaseMutex {
// For negative capabilities in clang annotations.
const Mutex& operator!() const { return *this; }
- void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+ void WakeupToRespondToEmptyCheckpoint() override;
private:
#if ART_USE_FUTEXES
@@ -418,7 +418,7 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
// For negative capabilities in clang annotations.
const ReaderWriterMutex& operator!() const { return *this; }
- void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+ void WakeupToRespondToEmptyCheckpoint() override;
private:
#if ART_USE_FUTEXES
diff --git a/runtime/cha.cc b/runtime/cha.cc
index ce84e8ce2e..3ea920dff1 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -181,7 +181,7 @@ void ClassHierarchyAnalysis::ResetSingleImplementationInHierarchy(ObjPtr<mirror:
// headers, sets the should_deoptimize flag on stack to 1.
// TODO: also set the register value to 1 when should_deoptimize is allocated in
// a register.
-class CHAStackVisitor FINAL : public StackVisitor {
+class CHAStackVisitor final : public StackVisitor {
public:
CHAStackVisitor(Thread* thread_in,
Context* context,
@@ -190,7 +190,7 @@ class CHAStackVisitor FINAL : public StackVisitor {
method_headers_(method_headers) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
// Avoid types of methods that do not have an oat quick method header.
if (method == nullptr ||
@@ -245,13 +245,13 @@ class CHAStackVisitor FINAL : public StackVisitor {
DISALLOW_COPY_AND_ASSIGN(CHAStackVisitor);
};
-class CHACheckpoint FINAL : public Closure {
+class CHACheckpoint final : public Closure {
public:
explicit CHACheckpoint(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
: barrier_(0),
method_headers_(method_headers) {}
- void Run(Thread* thread) OVERRIDE {
+ void Run(Thread* thread) override {
// Note thread and self may not be equal if thread was already suspended at
// the point of the request.
Thread* self = Thread::Current();
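
The CHACheckpoint shape above, a Closure whose Run is dispatched on each thread and synchronized through a Barrier, recurs across the runtime. A stripped-down sketch, under the assumption that Barrier::Pass takes the calling thread (names are illustrative):

    class CountedCheckpoint final : public Closure {
     public:
      CountedCheckpoint() : barrier_(0) {}
      void Run(Thread* thread ATTRIBUTE_UNUSED) override {
        // 'thread' is the checkpoint target; it may differ from Thread::Current()
        // if the target was already suspended when the checkpoint was requested.
        barrier_.Pass(Thread::Current());
      }
      Barrier barrier_;
    };
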
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f80d34ca2f..65f05d9362 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -884,7 +884,7 @@ class SetInterpreterEntrypointArtMethodVisitor : public ArtMethodVisitor {
explicit SetInterpreterEntrypointArtMethodVisitor(PointerSize image_pointer_size)
: image_pointer_size_(image_pointer_size) {}
- void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild && !method->IsRuntimeMethod()) {
CHECK(method->GetDeclaringClass() != nullptr);
}
@@ -1390,7 +1390,7 @@ bool ClassLinker::OpenImageDexFiles(gc::space::ImageSpace* space,
// Helper class for ArtMethod checks when adding an image. Keeps all required functionality
// together and caches some intermediate results.
-class ImageSanityChecks FINAL {
+class ImageSanityChecks final {
public:
static void CheckObjects(gc::Heap* heap, ClassLinker* class_linker)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1951,7 +1951,7 @@ class VisitClassLoaderClassesVisitor : public ClassLoaderVisitor {
done_(false) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
if (!done_ && class_table != nullptr) {
DefiningClassLoaderFilterVisitor visitor(class_loader, visitor_);
@@ -1972,7 +1972,7 @@ class VisitClassLoaderClassesVisitor : public ClassLoaderVisitor {
ClassVisitor* visitor)
: defining_class_loader_(defining_class_loader), visitor_(visitor) { }
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (klass->GetClassLoader() != defining_class_loader_) {
return true;
}
@@ -2009,7 +2009,7 @@ void ClassLinker::VisitClasses(ClassVisitor* visitor) {
class GetClassesInToVector : public ClassVisitor {
public:
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE {
+ bool operator()(ObjPtr<mirror::Class> klass) override {
classes_.push_back(klass);
return true;
}
@@ -2021,7 +2021,7 @@ class GetClassInToObjectArray : public ClassVisitor {
explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr)
: arr_(arr), index_(0) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
++index_;
if (index_ <= arr_->GetLength()) {
arr_->Set(index_ - 1, klass);
@@ -3845,7 +3845,7 @@ class MoveClassTableToPreZygoteVisitor : public ClassLoaderVisitor {
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES(Locks::classlinker_classes_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
class_table->FreezeSnapshot();
@@ -3871,7 +3871,7 @@ class LookupClassesVisitor : public ClassLoaderVisitor {
result_(result) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
ObjPtr<mirror::Class> klass = class_table->Lookup(descriptor_, hash_);
// Add `klass` only if `class_loader` is its defining (not just initiating) class loader.
@@ -5563,7 +5563,7 @@ bool ClassLinker::LinkMethods(Thread* self,
// Comparator for name and signature of a method, used in finding overriding methods. Implementation
// avoids the use of handles, if it didn't then rather than compare dex files we could compare dex
// caches in the implementation below.
-class MethodNameAndSignatureComparator FINAL : public ValueObject {
+class MethodNameAndSignatureComparator final : public ValueObject {
public:
explicit MethodNameAndSignatureComparator(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) :
@@ -8555,7 +8555,7 @@ class CountClassesVisitor : public ClassLoaderVisitor {
CountClassesVisitor() : num_zygote_classes(0), num_non_zygote_classes(0) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
num_zygote_classes += class_table->NumZygoteClasses(class_loader);
@@ -8825,7 +8825,7 @@ class GetResolvedClassesVisitor : public ClassVisitor {
extra_stats_(),
last_extra_stats_(extra_stats_.end()) { }
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (!klass->IsProxyClass() &&
!klass->IsArrayClass() &&
klass->IsResolved() &&
@@ -8913,7 +8913,7 @@ class ClassLinker::FindVirtualMethodHolderVisitor : public ClassVisitor {
: method_(method),
pointer_size_(pointer_size) {}
- bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) override {
if (klass->GetVirtualMethodsSliceUnchecked(pointer_size_).Contains(method_)) {
holder_ = klass;
}
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index e40f1dbcdf..52ddd13ab7 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -442,7 +442,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
class TestRootVisitor : public SingleRootVisitor {
public:
- void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE {
+ void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) override {
EXPECT_TRUE(root != nullptr);
}
};
@@ -450,7 +450,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
class ClassLinkerMethodHandlesTest : public ClassLinkerTest {
protected:
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonRuntimeTest::SetUpRuntimeOptions(options);
}
};
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 234b66a862..bf17e644af 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -157,11 +157,11 @@ class CommonRuntimeTestBase : public TestType, public CommonRuntimeTestImpl {
virtual ~CommonRuntimeTestBase() {}
protected:
- virtual void SetUp() OVERRIDE {
+ virtual void SetUp() override {
CommonRuntimeTestImpl::SetUp();
}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
CommonRuntimeTestImpl::TearDown();
}
};
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 60975b04f7..012ebcbe1c 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -25,7 +25,7 @@
namespace art {
-class CompilerFilter FINAL {
+class CompilerFilter final {
public:
// Note: Order here matters. Later filter choices are considered "as good
// as" earlier filter choices.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e607b31e68..366b5ec5e9 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -138,7 +138,7 @@ static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
return os;
}
-class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
+class DebugInstrumentationListener final : public instrumentation::InstrumentationListener {
public:
DebugInstrumentationListener() {}
virtual ~DebugInstrumentationListener() {}
@@ -147,7 +147,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
@@ -176,7 +176,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
ArtMethod* method,
uint32_t dex_pc,
const JValue& return_value)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
@@ -195,7 +195,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method,
uint32_t dex_pc)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected method unwind event in debugger " << ArtMethod::PrettyMethod(method)
<< " " << dex_pc;
@@ -205,7 +205,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t new_dex_pc)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
// We also listen to kMethodExited instrumentation event and the current instruction is a
// RETURN so we know the MethodExited method is going to be called right after us. Like in
@@ -229,7 +229,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
ArtMethod* method,
uint32_t dex_pc,
ArtField* field)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostFieldAccessEvent(method, dex_pc, this_object.Get(), field);
}
@@ -239,19 +239,19 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
uint32_t dex_pc,
ArtField* field,
const JValue& field_value)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object.Get(), field, &field_value);
}
void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
Handle<mirror::Throwable> exception_object)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostException(exception_object.Get());
}
// We only care about branches in the Jit.
void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected branch event in debugger " << ArtMethod::PrettyMethod(method)
<< " " << dex_pc << ", " << dex_pc_offset;
}
@@ -262,20 +262,20 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
ArtMethod* method,
uint32_t dex_pc,
ArtMethod* target ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected invoke event in debugger " << ArtMethod::PrettyMethod(method)
<< " " << dex_pc;
}
// TODO Might be worth it to post ExceptionCatch event.
void ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
- Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) OVERRIDE {
+ Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) override {
LOG(ERROR) << "Unexpected exception handled event in debugger";
}
// TODO Might be worth it to implement this.
void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED,
- const ShadowFrame& frame ATTRIBUTE_UNUSED) OVERRIDE {
+ const ShadowFrame& frame ATTRIBUTE_UNUSED) override {
LOG(ERROR) << "Unexpected WatchedFramePop event in debugger";
}
@@ -1087,7 +1087,7 @@ class ClassListCreator : public ClassVisitor {
public:
explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
- bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (!c->IsPrimitive()) {
classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
}
@@ -2450,7 +2450,7 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram
expandBufAdd4BE(buf_, frame_count_);
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true; // The debugger can't do anything useful with a frame that has no Method*.
}
@@ -2608,7 +2608,7 @@ JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame
}
// Walks the stack until we find the frame with the given FrameId.
-class FindFrameVisitor FINAL : public StackVisitor {
+class FindFrameVisitor final : public StackVisitor {
public:
FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -3040,7 +3040,7 @@ class CatchLocationFinder : public StackVisitor {
throw_dex_pc_(dex::kDexNoIndex) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
DCHECK(method != nullptr);
if (method->IsRuntimeMethod()) {
@@ -3693,7 +3693,7 @@ class NeedsDeoptimizationVisitor : public StackVisitor {
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
needs_deoptimization_(false) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
// The visitor is meant to be used when handling exception from compiled code only.
CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: "
<< ArtMethod::PrettyMethod(GetMethod());
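
Aside: the edits in this file are mechanical. ART's pre-C++11 portability macros are replaced by the standard keywords they expand to. A minimal sketch of the pattern, assuming the macro definitions from runtime/base/macros.h (shown as comments; the listener below is a simplified stand-in, not the real interface):

    #include <cstdint>

    // Assumed legacy definitions (runtime/base/macros.h):
    //   #define OVERRIDE override
    //   #define FINAL final

    struct Listener {
      virtual ~Listener() {}
      virtual void MethodEntered(uint32_t dex_pc) = 0;
    };

    // Before:  class DebugListener FINAL : public Listener {
    //            void MethodEntered(uint32_t dex_pc) OVERRIDE { ... }
    //          };
    // After: the keywords are spelled out directly.
    class DebugListener final : public Listener {
     public:
      void MethodEntered(uint32_t dex_pc) override { /* handle entry */ }
    };
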
diff --git a/runtime/debugger.h b/runtime/debugger.h
index e1de991812..33444f829c 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -54,20 +54,20 @@ class StackVisitor;
class Thread;
struct DebuggerActiveMethodInspectionCallback : public MethodInspectionCallback {
- bool IsMethodBeingInspected(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsMethodSafeToJit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool MethodNeedsDebugVersion(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsMethodBeingInspected(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsMethodSafeToJit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool MethodNeedsDebugVersion(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
};
struct DebuggerDdmCallback : public DdmCallback {
void DdmPublishChunk(uint32_t type, const ArrayRef<const uint8_t>& data)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ override REQUIRES_SHARED(Locks::mutator_lock_);
};
struct InternalDebuggerControlCallback : public DebuggerControlCallback {
- void StartDebugger() OVERRIDE;
- void StopDebugger() OVERRIDE;
- bool IsDebuggerConfigured() OVERRIDE;
+ void StartDebugger() override;
+ void StopDebugger() override;
+ bool IsDebuggerConfigured() override;
};
/*
@@ -831,15 +831,15 @@ class Dbg {
class DbgThreadLifecycleCallback : public ThreadLifecycleCallback {
public:
- void ThreadStart(Thread* self) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- void ThreadDeath(Thread* self) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
+ void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
};
class DbgClassLoadCallback : public ClassLoadCallback {
public:
- void ClassLoad(Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void ClassLoad(Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
void ClassPrepare(Handle<mirror::Class> temp_klass,
- Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
};
static DbgThreadLifecycleCallback thread_lifecycle_callback_;
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 00a95cc7bd..0b99722652 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -42,7 +42,7 @@ namespace art {
// Test class that provides some helpers to set a test up for compilation using dex2oat.
class Dex2oatEnvironmentTest : public CommonRuntimeTest {
public:
- virtual void SetUp() OVERRIDE {
+ virtual void SetUp() override {
CommonRuntimeTest::SetUp();
const ArtDexFileLoader dex_file_loader;
@@ -106,7 +106,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
ASSERT_NE(multi1[1]->GetLocationChecksum(), multi2[1]->GetLocationChecksum());
}
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
// options->push_back(std::make_pair("-verbose:oat", nullptr));
// Set up the image location.
@@ -117,7 +117,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
callbacks_.reset();
}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
ClearDirectory(odex_dir_.c_str());
ASSERT_EQ(0, rmdir(odex_dir_.c_str()));
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 3203ee526b..b4e52ac49c 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -26,11 +26,11 @@ namespace art {
class DexoptTest : public Dex2oatEnvironmentTest {
public:
- virtual void SetUp() OVERRIDE;
+ virtual void SetUp() override;
virtual void PreRuntimeCreate();
- virtual void PostRuntimeCreate() OVERRIDE;
+ virtual void PostRuntimeCreate() override;
// Generate an oat file for the purposes of the test.
// The oat file will be generated for dex_location in the given oat_location
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index aca169b924..fccfce4589 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -615,13 +615,13 @@ extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
}
// Visits arguments on the stack, placing them into the shadow frame.
-class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
+class BuildQuickShadowFrameVisitor final : public QuickArgumentVisitor {
public:
BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
private:
ShadowFrame* const sf_;
@@ -707,7 +707,7 @@ static void HandleDeoptimization(JValue* result,
explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
// Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
// logic. Just always say we want to continue.
return true;
@@ -824,13 +824,13 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
// Visits arguments on the stack, placing them into the args vector; Object* arguments are converted
// to jobjects.
-class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
+class BuildQuickArgumentVisitor final : public QuickArgumentVisitor {
public:
BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
private:
ScopedObjectAccessUnchecked* const soa_;
@@ -959,7 +959,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
// Visitor returning a reference argument at a given position in a Quick stack frame.
// NOTE: Only used for testing purposes.
-class GetQuickReferenceArgumentAtVisitor FINAL : public QuickArgumentVisitor {
+class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor {
public:
GetQuickReferenceArgumentAtVisitor(ArtMethod** sp,
const char* shorty,
@@ -972,7 +972,7 @@ class GetQuickReferenceArgumentAtVisitor FINAL : public QuickArgumentVisitor {
CHECK_LT(arg_pos, shorty_len) << "Argument position greater than the number of arguments";
}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
if (cur_pos_ == arg_pos_) {
Primitive::Type type = GetParamPrimitiveType();
CHECK_EQ(type, Primitive::kPrimNot) << "Argument at searched position is not a reference";
@@ -1014,7 +1014,7 @@ extern "C" StackReference<mirror::Object>* artQuickGetProxyReferenceArgumentAt(s
}
// Visitor returning all the reference arguments in a Quick stack frame.
-class GetQuickReferenceArgumentsVisitor FINAL : public QuickArgumentVisitor {
+class GetQuickReferenceArgumentsVisitor final : public QuickArgumentVisitor {
public:
GetQuickReferenceArgumentsVisitor(ArtMethod** sp,
bool is_static,
@@ -1022,7 +1022,7 @@ class GetQuickReferenceArgumentsVisitor FINAL : public QuickArgumentVisitor {
uint32_t shorty_len)
: QuickArgumentVisitor(sp, is_static, shorty, shorty_len) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
Primitive::Type type = GetParamPrimitiveType();
if (type == Primitive::kPrimNot) {
StackReference<mirror::Object>* ref_arg =
@@ -1059,13 +1059,13 @@ std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMetho
// Read object references held in arguments from quick frames and place them in JNI local references,
// so they don't get garbage collected.
-class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
+class RememberForGcArgumentVisitor final : public QuickArgumentVisitor {
public:
RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1957,7 +1957,7 @@ class ComputeNativeCallFrameSize {
uint32_t num_stack_entries_;
};
-class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
+class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
public:
explicit ComputeGenericJniFrameSize(bool critical_native)
: num_handle_scope_references_(0), critical_native_(critical_native) {}
@@ -2038,10 +2038,10 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
return sp8;
}
- uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
+ uintptr_t PushHandle(mirror::Object* /* ptr */) override;
// Add JNIEnv* and jobj/jclass before the shorty-derived elements.
- void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
+ void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -2117,7 +2117,7 @@ class FillNativeCall {
// Visits arguments on the stack, placing them into a region lower down the stack for the benefit
// of transitioning into native code.
-class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
+class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
public:
BuildGenericJniFrameVisitor(Thread* self,
bool is_static,
@@ -2150,7 +2150,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
}
}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -2168,7 +2168,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
private:
// A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
- class FillJniCall FINAL : public FillNativeCall {
+ class FillJniCall final : public FillNativeCall {
public:
FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
HandleScope* handle_scope, bool critical_native)
@@ -2177,7 +2177,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
cur_entry_(0),
critical_native_(critical_native) {}
- uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ uintptr_t PushHandle(mirror::Object* ref) override REQUIRES_SHARED(Locks::mutator_lock_);
void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
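
Note that the patch preserves both orderings of the thread-safety annotation relative to the keyword: `void Visit() REQUIRES_SHARED(...) override` in this file, and `override REQUIRES_SHARED(...)` elsewhere. Both parse because the annotation is a GNU-style attribute. A self-contained sketch using simplified stand-ins for the real macros (an assumption for illustration, not ART's actual definitions):

    // Simplified stand-ins; ART's real capability macros live elsewhere.
    #if defined(__clang__)
    class __attribute__((capability("mutex"))) FakeLock {};
    #define REQUIRES_SHARED(x) __attribute__((requires_shared_capability(x)))
    #else
    class FakeLock {};
    #define REQUIRES_SHARED(x)
    #endif

    FakeLock fake_mutator_lock;

    struct ArgVisitor {
      virtual ~ArgVisitor() {}
      virtual void Visit() = 0;
    };

    struct ShadowFrameVisitorSketch final : ArgVisitor {
      // Attribute before the virt-specifier, as in this file; the reverse
      // order also parses.
      void Visit() REQUIRES_SHARED(fake_mutator_lock) override {}
    };
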
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 89694e351a..0f0fb69f4b 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -26,7 +26,7 @@ namespace art {
class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use a 64-bit ISA for runtime setup to make the method size potentially larger
// than necessary (rather than smaller) during CreateCalleeSaveMethod.
options->push_back(std::make_pair("imageinstructionset", "x86_64"));
@@ -35,7 +35,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
// Do not do any of the finalization. We don't want to run any code and we don't need the heap
// prepared; finalization would actually be a problem, given that SetUpRuntimeOptions sets the
// instruction set to x86_64.
- void FinalizeSetup() OVERRIDE {
+ void FinalizeSetup() override {
ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
}
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 3e2664c7f9..02eeefe0a0 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -90,11 +90,11 @@ class FaultHandler {
DISALLOW_COPY_AND_ASSIGN(FaultHandler);
};
-class NullPointerHandler FINAL : public FaultHandler {
+class NullPointerHandler final : public FaultHandler {
public:
explicit NullPointerHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override;
static bool IsValidImplicitCheck(siginfo_t* siginfo) {
// Our implicit NPE checks always limit the range to a page.
@@ -108,31 +108,31 @@ class NullPointerHandler FINAL : public FaultHandler {
DISALLOW_COPY_AND_ASSIGN(NullPointerHandler);
};
-class SuspensionHandler FINAL : public FaultHandler {
+class SuspensionHandler final : public FaultHandler {
public:
explicit SuspensionHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override;
private:
DISALLOW_COPY_AND_ASSIGN(SuspensionHandler);
};
-class StackOverflowHandler FINAL : public FaultHandler {
+class StackOverflowHandler final : public FaultHandler {
public:
explicit StackOverflowHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override;
private:
DISALLOW_COPY_AND_ASSIGN(StackOverflowHandler);
};
-class JavaStackTraceHandler FINAL : public FaultHandler {
+class JavaStackTraceHandler final : public FaultHandler {
public:
explicit JavaStackTraceHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override NO_THREAD_SAFETY_ANALYSIS;
private:
DISALLOW_COPY_AND_ASSIGN(JavaStackTraceHandler);
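
A side benefit worth keeping in mind during review: the spelled-out keywords preserve the compile-time checks the macros gave, so signature drift and accidental subclassing still fail to build. A minimal illustration (the classes here are simplified stand-ins for the fault handlers above):

    #include <signal.h>

    class FaultHandlerBase {
     public:
      virtual ~FaultHandlerBase() {}
      virtual bool Action(int sig, siginfo_t* siginfo, void* context) = 0;
    };

    class NullPointerHandlerSketch final : public FaultHandlerBase {
     public:
      bool Action(int sig, siginfo_t* siginfo, void* context) override {
        return sig == SIGSEGV;  // toy logic only
      }
      // bool Action(long sig, siginfo_t*, void*) override;  // error: does not override
    };

    // class Derived : public NullPointerHandlerSketch {};   // error: base is 'final'
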
diff --git a/runtime/gc/accounting/mod_union_table-inl.h b/runtime/gc/accounting/mod_union_table-inl.h
index 3a09634c0b..f0a82e0c88 100644
--- a/runtime/gc/accounting/mod_union_table-inl.h
+++ b/runtime/gc/accounting/mod_union_table-inl.h
@@ -33,7 +33,7 @@ class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache {
space::ContinuousSpace* space)
: ModUnionTableReferenceCache(name, heap, space) {}
- bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE ALWAYS_INLINE {
+ bool ShouldAddReference(const mirror::Object* ref) const override ALWAYS_INLINE {
return !space_->HasAddress(ref);
}
};
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 0dd05cd6f0..40dc6e146a 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -329,8 +329,8 @@ class ModUnionCheckReferences {
class EmptyMarkObjectVisitor : public MarkObjectVisitor {
public:
- mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {return obj;}
- void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {}
+ mirror::Object* MarkObject(mirror::Object* obj) override {return obj;}
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {}
};
void ModUnionTable::FilterCards() {
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 7a3c06a281..ec6f144fd9 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -125,33 +125,33 @@ class ModUnionTableReferenceCache : public ModUnionTable {
virtual ~ModUnionTableReferenceCache() {}
// Clear and store cards for a space.
- void ProcessCards() OVERRIDE;
+ void ProcessCards() override;
// Update table based on cleared cards and mark all references to the other spaces.
- void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+ void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
- virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+ virtual void VisitObjects(ObjectCallback callback, void* arg) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
- void Verify() OVERRIDE
+ void Verify() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
- virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+ virtual bool ContainsCardFor(uintptr_t addr) override;
- virtual void Dump(std::ostream& os) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ virtual void Dump(std::ostream& os) override REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void SetCards() OVERRIDE;
+ virtual void SetCards() override;
- virtual void ClearTable() OVERRIDE;
+ virtual void ClearTable() override;
protected:
// Cleared card array, used to update the mod-union table.
@@ -172,27 +172,27 @@ class ModUnionTableCardCache : public ModUnionTable {
virtual ~ModUnionTableCardCache() {}
// Clear and store cards for a space.
- virtual void ProcessCards() OVERRIDE;
+ virtual void ProcessCards() override;
// Mark all references to the alloc space(s).
- virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+ virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+ virtual void VisitObjects(ObjectCallback callback, void* arg) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Nothing to verify.
- virtual void Verify() OVERRIDE {}
+ virtual void Verify() override {}
- virtual void Dump(std::ostream& os) OVERRIDE;
+ virtual void Dump(std::ostream& os) override;
- virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+ virtual bool ContainsCardFor(uintptr_t addr) override;
- virtual void SetCards() OVERRIDE;
+ virtual void SetCards() override;
- virtual void ClearTable() OVERRIDE;
+ virtual void ClearTable() override;
protected:
// Cleared card bitmap, used to update the mod-union table.
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index d59ff71676..5aa55506a5 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -98,12 +98,12 @@ class CollectVisitedVisitor : public MarkObjectVisitor {
public:
explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update ATTRIBUTE_UNUSED) OVERRIDE
+ bool do_atomic_update ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(ref != nullptr);
MarkObject(ref->AsMirrorPtr());
}
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* obj) override
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
out_->insert(obj);
@@ -122,7 +122,7 @@ class ModUnionTableRefCacheToSpace : public ModUnionTableReferenceCache {
space::ContinuousSpace* target_space)
: ModUnionTableReferenceCache(name, heap, space), target_space_(target_space) {}
- bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE {
+ bool ShouldAddReference(const mirror::Object* ref) const override {
return target_space_->HasAddress(ref);
}
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index a1d198652e..b9c1dc61b6 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -196,7 +196,7 @@ class AllocRecordStackVisitor : public StackVisitor {
// TODO: Enable annotalysis. We know the lock is held in the constructor, but the abstraction
// confuses annotalysis.
- bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
if (trace_->GetDepth() >= max_depth_) {
return false;
}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c7a5f79cb2..f73ecf1c49 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -229,7 +229,7 @@ class ConcurrentCopying::ActivateReadBarrierEntrypointsCheckpoint : public Closu
explicit ActivateReadBarrierEntrypointsCheckpoint(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {}
- void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -250,7 +250,7 @@ class ConcurrentCopying::ActivateReadBarrierEntrypointsCallback : public Closure
explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a race with ThreadList::Register().
CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
@@ -393,7 +393,7 @@ class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor
: concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
}
- virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ virtual void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -467,7 +467,7 @@ class ConcurrentCopying::FlipCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ virtual void Run(Thread* thread) override REQUIRES(Locks::mutator_lock_) {
ConcurrentCopying* cc = concurrent_copying_;
TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
// Note: self is not necessarily equal to thread since thread may be suspended.
@@ -1072,7 +1072,7 @@ class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -1096,7 +1096,7 @@ class ConcurrentCopying::DisableMarkingCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a race with ThreadList::Register().
CHECK(concurrent_copying_->is_marking_);
@@ -1291,7 +1291,7 @@ class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor
}
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(root != nullptr);
operator()(root);
}
@@ -1457,7 +1457,7 @@ class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
disable_weak_ref_access_(disable_weak_ref_access) {
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -1727,7 +1727,7 @@ class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a deadlock (b/31500969).
CHECK(concurrent_copying_->weak_ref_access_enabled_);
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 0ebe6f0c25..a956d3807a 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -71,7 +71,7 @@ class ConcurrentCopying : public GarbageCollector {
bool measure_read_barrier_slow_path = false);
~ConcurrentCopying();
- virtual void RunPhases() OVERRIDE
+ virtual void RunPhases() override
REQUIRES(!immune_gray_stack_lock_,
!mark_stack_lock_,
!rb_slow_path_histogram_lock_,
@@ -87,15 +87,15 @@ class ConcurrentCopying : public GarbageCollector {
void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return (kEnableGenerationalConcurrentCopyingCollection && young_gen_)
? kGcTypeSticky
: kGcTypePartial;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ virtual CollectorType GetCollectorType() const override {
return kCollectorTypeCC;
}
- virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
+ virtual void RevokeAllThreadLocalBuffers() override;
void SetRegionSpace(space::RegionSpace* region_space) {
DCHECK(region_space != nullptr);
region_space_ = region_space;
@@ -144,7 +144,7 @@ class ConcurrentCopying : public GarbageCollector {
void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
+ virtual mirror::Object* IsMarked(mirror::Object* from_ref) override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -167,7 +167,7 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
template<bool kGrayImmuneObject>
void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
@@ -175,12 +175,12 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
accounting::ObjectStack* GetAllocationStack();
accounting::ObjectStack* GetLiveStack();
- virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ virtual void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -205,20 +205,20 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES(!mark_stack_lock_);
void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
- ObjPtr<mirror::Reference> reference) OVERRIDE
+ ObjPtr<mirror::Reference> reference) override
REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
- virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* from_ref) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_);
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
@@ -293,7 +293,7 @@ class ConcurrentCopying : public GarbageCollector {
mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
+ void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
// Set the read barrier mark entrypoints to non-null.
void ActivateReadBarrierEntrypoints();
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 145bd0208d..677e3f3a05 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -167,19 +167,19 @@ class DummySpace : public space::ContinuousSpace {
end,
/*limit*/end) {}
- space::SpaceType GetType() const OVERRIDE {
+ space::SpaceType GetType() const override {
return space::kSpaceTypeMallocSpace;
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return nullptr;
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return nullptr;
}
};
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 58a75ee189..840a4b03dc 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -578,7 +578,7 @@ class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor {
public:
explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
- void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+ void VisitRoot(mirror::Object* root, const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
}
@@ -607,7 +607,7 @@ class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
public:
explicit VerifyRootVisitor(std::ostream& os) : os_(os) {}
- void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+ void VisitRoot(mirror::Object* root, const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// See if the root is on any space bitmap.
auto* heap = Runtime::Current()->GetHeap();
@@ -1110,7 +1110,7 @@ class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor {
explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
virtual mirror::Object* IsMarked(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
mark_sweep_->VerifyIsLive(obj);
return obj;
@@ -1144,7 +1144,7 @@ class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
@@ -1154,14 +1154,14 @@ class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
}
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
ScopedTrace trace("Marking thread roots");
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* const self = Thread::Current();
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index af2bb973c9..012e17932f 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -57,7 +57,7 @@ class MarkSweep : public GarbageCollector {
~MarkSweep() {}
- virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
+ virtual void RunPhases() override REQUIRES(!mark_stack_lock_);
void InitializePhase();
void MarkingPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
@@ -72,11 +72,11 @@ class MarkSweep : public GarbageCollector {
return is_concurrent_;
}
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return kGcTypeFull;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ virtual CollectorType GetCollectorType() const override {
return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
}
@@ -188,24 +188,24 @@ class MarkSweep : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
- const RootInfo& info) OVERRIDE
+ const RootInfo& info) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Marks an object.
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* obj) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -216,7 +216,7 @@ class MarkSweep : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -231,7 +231,7 @@ class MarkSweep : public GarbageCollector {
protected:
// Returns the object if it is marked in the heap bitmap, otherwise null.
- virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ virtual mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void MarkObjectNonNull(mirror::Object* obj,
@@ -279,7 +279,7 @@ class MarkSweep : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
virtual void ProcessMarkStack()
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 8b0d3ddf42..308699bf7e 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -26,7 +26,7 @@ namespace collector {
class PartialMarkSweep : public MarkSweep {
public:
// Virtual as overridden by StickyMarkSweep.
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return kGcTypePartial;
}
@@ -37,7 +37,7 @@ class PartialMarkSweep : public MarkSweep {
// Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
// collections, i.e. the Zygote space. Also mark this space as immune. Virtual as overridden by
// StickyMarkSweep.
- virtual void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ virtual void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
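
One style wrinkle the mechanical rewrite leaves behind: many declarations keep `virtual` alongside `override` (e.g. `virtual GcType GetGcType() const override`). That is legal but redundant; the Google C++ style guide, which ART broadly follows, asks for exactly one of `virtual`, `override`, or `final` per declaration. A sketch of the equivalent forms, using hypothetical names:

    struct Collector {
      virtual ~Collector() {}
      virtual int GetGcType() const = 0;
    };

    struct PartialSketch : Collector {
      // As left by this patch: 'virtual' adds nothing once 'override' is present.
      virtual int GetGcType() const override { return 1; }
    };

    struct StickySketch final : PartialSketch {
      // Equivalent, minimal form.
      int GetGcType() const override { return 2; }
    };
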
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index d1d45c8df6..49cd02e99a 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -63,7 +63,7 @@ class SemiSpace : public GarbageCollector {
~SemiSpace() {}
- virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+ virtual void RunPhases() override NO_THREAD_SAFETY_ANALYSIS;
virtual void InitializePhase();
virtual void MarkingPhase() REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
@@ -72,10 +72,10 @@ class SemiSpace : public GarbageCollector {
virtual void FinishPhase() REQUIRES(Locks::mutator_lock_);
void MarkReachableObjects()
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual GcType GetGcType() const OVERRIDE {
+ virtual GcType GetGcType() const override {
return kGcTypePartial;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ virtual CollectorType GetCollectorType() const override {
return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
}
@@ -106,11 +106,11 @@ class SemiSpace : public GarbageCollector {
void MarkObjectIfNotInToSpace(CompressedReferenceType* obj_ptr)
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
+ virtual mirror::Object* MarkObject(mirror::Object* root) override
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void ScanObject(mirror::Object* obj)
@@ -145,11 +145,11 @@ class SemiSpace : public GarbageCollector {
void SweepSystemWeaks()
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info) OVERRIDE
+ const RootInfo& info) override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
@@ -162,12 +162,12 @@ class SemiSpace : public GarbageCollector {
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non-movable things).
- virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ virtual mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
- bool do_atomic_update) OVERRIDE
+ bool do_atomic_update) override
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 45f912f63a..f92a103b13 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -24,9 +24,9 @@ namespace art {
namespace gc {
namespace collector {
-class StickyMarkSweep FINAL : public PartialMarkSweep {
+class StickyMarkSweep final : public PartialMarkSweep {
public:
- GcType GetGcType() const OVERRIDE {
+ GcType GetGcType() const override {
return kGcTypeSticky;
}
@@ -34,7 +34,7 @@ class StickyMarkSweep FINAL : public PartialMarkSweep {
~StickyMarkSweep() {}
virtual void MarkConcurrentRoots(VisitRootFlags flags)
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -42,15 +42,15 @@ class StickyMarkSweep FINAL : public PartialMarkSweep {
protected:
// Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the
// alloc space will be marked as immune.
- void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
void MarkReachableObjects()
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void Sweep(bool swap_bitmaps)
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bf06cf9758..16fd78630d 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1327,7 +1327,7 @@ class TrimIndirectReferenceTableClosure : public Closure {
public:
explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
thread->GetJniEnv()->TrimLocals();
// If thread is a running mutator, then act on behalf of the trim thread.
// See the code in ThreadList::RunCheckpoint.
@@ -2213,7 +2213,7 @@ void Heap::ChangeCollector(CollectorType collector_type) {
}
// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
-class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
+class ZygoteCompactingCollector final : public collector::SemiSpace {
public:
ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
: SemiSpace(heap, false, "zygote collector"),
@@ -2769,7 +2769,7 @@ class RootMatchesObjectVisitor : public SingleRootVisitor {
explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
void VisitRoot(mirror::Object* root, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == obj_) {
LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
}
@@ -2826,7 +2826,7 @@ class VerifyReferenceVisitor : public SingleRootVisitor {
root->AsMirrorPtr(), RootInfo(kRootVMInternal));
}
- virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
+ virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == nullptr) {
LOG(ERROR) << "Root is null with info " << root_info.GetType();
@@ -3259,10 +3259,10 @@ void Heap::ProcessCards(TimingLogger* timings,
}
struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
+ virtual mirror::Object* MarkObject(mirror::Object* obj) override {
return obj;
}
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
}
};
@@ -3633,7 +3633,7 @@ class Heap::ConcurrentGCTask : public HeapTask {
public:
ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
: HeapTask(target_time), cause_(cause), force_full_(force_full) {}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->ConcurrentGC(self, cause_, force_full_);
heap->ClearConcurrentGCRequest();
@@ -3691,7 +3691,7 @@ class Heap::CollectorTransitionTask : public HeapTask {
public:
explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->DoPendingCollectorTransition();
heap->ClearPendingCollectorTransition(self);
@@ -3733,7 +3733,7 @@ void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint
class Heap::HeapTrimTask : public HeapTask {
public:
explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->Trim(self);
heap->ClearPendingTrim(self);
@@ -4176,7 +4176,7 @@ void Heap::VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, si
class Heap::TriggerPostForkCCGcTask : public HeapTask {
public:
explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
// Trigger a GC, if not already done. The first GC after fork, whenever it
// takes place, will adjust the thresholds to normal levels.
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 8720a3e014..7cbad3b523 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -29,7 +29,7 @@ namespace gc {
class HeapTest : public CommonRuntimeTest {
public:
- void SetUp() OVERRIDE {
+ void SetUp() override {
MemMap::Init();
std::string error_msg;
// Reserve the preferred address to force the heap to use another one for testing.
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 9b315584fb..02e84b509e 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -35,11 +35,11 @@ namespace space {
// A bump pointer space allocates by incrementing a pointer; it doesn't provide a free
// implementation, as it's intended to be evacuated.
-class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
+class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeBumpPointerSpace;
}
@@ -51,27 +51,27 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_);
+ override REQUIRES(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(size_t num_bytes);
mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES_SHARED(Locks::mutator_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
// No-ops unless we support free lists.
- size_t Free(Thread*, mirror::Object*) OVERRIDE {
+ size_t Free(Thread*, mirror::Object*) override {
return 0;
}
- size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+ size_t FreeList(Thread*, size_t, mirror::Object**) override {
return 0;
}
@@ -94,16 +94,16 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return GetMemMap()->Size();
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return nullptr;
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return nullptr;
}
// Reset the space to empty.
- void Clear() OVERRIDE REQUIRES(!block_lock_);
+ void Clear() override REQUIRES(!block_lock_);
void Dump(std::ostream& os) const;
@@ -122,7 +122,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return Begin() == End();
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return true;
}
@@ -141,7 +141,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// Allocate a new TLAB, returns false if the allocation failed.
bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
- BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
+ BumpPointerSpace* AsBumpPointerSpace() override {
return this;
}
@@ -151,7 +151,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!block_lock_);
- accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;
// Record objects / bytes freed.
void RecordFree(int32_t objects, int32_t bytes) {
@@ -159,7 +159,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
bytes_allocated_.fetch_sub(bytes, std::memory_order_seq_cst);
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
// Object alignment within the space.
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 66537d5dac..09f3970408 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -53,36 +53,36 @@ class DlMallocSpace : public MallocSpace {
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
+ override REQUIRES(!lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_) {
+ override REQUIRES(!lock_) {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return AllocationSizeNonvirtual(obj, usable_size);
}
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ virtual size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
return num_bytes;
}
// DlMallocSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
@@ -103,23 +103,23 @@ class DlMallocSpace : public MallocSpace {
return mspace_;
}
- size_t Trim() OVERRIDE;
+ size_t Trim() override;
// Perform an mspace_inspect_all, which calls back for each allocation chunk. The chunk may not be
// in use, indicated by num_bytes equaling zero.
- void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+ void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
// Returns the number of bytes that the space has currently obtained from the system. This is
// greater than or equal to the amount of live data in the space.
- size_t GetFootprint() OVERRIDE;
+ size_t GetFootprint() override;
// Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
- size_t GetFootprintLimit() OVERRIDE;
+ size_t GetFootprintLimit() override;
// Set the maximum number of bytes that the heap is allowed to obtain from the system via
// MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
// allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
- void SetFootprintLimit(size_t limit) OVERRIDE;
+ void SetFootprintLimit(size_t limit) override;
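The SetFootprintLimit comment describes a grow-on-failure policy: collect garbage before letting the mspace expand. A compact sketch of that policy, under the assumption of a Space type exposing Alloc/GetFootprintLimit/SetFootprintLimit as above (the real logic lives in ART's Heap, not in a helper like this):

#include <cstddef>

template <typename Space, typename GcFn>
void* AllocWithGrowthPolicy(Space& space, size_t bytes, GcFn run_gc) {
  if (void* p = space.Alloc(bytes)) return p;  // fast path
  run_gc();                                    // GC before growing the footprint
  if (void* p = space.Alloc(bytes)) return p;  // GC reclaimed enough
  space.SetFootprintLimit(space.GetFootprintLimit() + bytes);  // allow growth
  return space.Alloc(bytes);                   // last attempt
}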
MallocSpace* CreateInstance(MemMap&& mem_map,
const std::string& name,
@@ -128,22 +128,22 @@ class DlMallocSpace : public MallocSpace {
uint8_t* end,
uint8_t* limit,
size_t growth_limit,
- bool can_move_objects) OVERRIDE;
+ bool can_move_objects) override;
- uint64_t GetBytesAllocated() OVERRIDE;
- uint64_t GetObjectsAllocated() OVERRIDE;
+ uint64_t GetBytesAllocated() override;
+ uint64_t GetObjectsAllocated() override;
- virtual void Clear() OVERRIDE;
+ virtual void Clear() override;
- bool IsDlMallocSpace() const OVERRIDE {
+ bool IsDlMallocSpace() const override {
return true;
}
- DlMallocSpace* AsDlMallocSpace() OVERRIDE {
+ DlMallocSpace* AsDlMallocSpace() override {
return this;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
@@ -165,7 +165,7 @@ class DlMallocSpace : public MallocSpace {
REQUIRES(lock_);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
- size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE {
+ size_t /*maximum_size*/, bool /*low_memory_mode*/) override {
return CreateMspace(base, morecore_start, initial_size);
}
static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 20bce66957..93cf947218 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -86,11 +86,11 @@ class ImageSpace : public MemMapSpace {
return image_location_;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
// ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
// special cases to test against.
return live_bitmap_.get();
@@ -102,7 +102,7 @@ class ImageSpace : public MemMapSpace {
void Sweep(bool /* swap_bitmaps */, size_t* /* freed_objects */, size_t* /* freed_bytes */) {
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
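Because an image space is immutable and never compacted, one bitmap can serve as both the live and the mark bitmap, as the comment above notes. A minimal sketch of that aliasing, with invented types (Bitmap, ReadOnlySpace):

#include <cstddef>
#include <memory>
#include <vector>

class Bitmap {
 public:
  std::vector<bool> bits;
};

class ReadOnlySpace {
 public:
  explicit ReadOnlySpace(size_t n) : bitmap_(new Bitmap{std::vector<bool>(n, true)}) {}
  Bitmap* GetLiveBitmap() const { return bitmap_.get(); }
  // Same object: anything live in an immutable space is also marked.
  Bitmap* GetMarkBitmap() const { return bitmap_.get(); }

 private:
  std::unique_ptr<Bitmap> bitmap_;
};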
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index a1ffa067d0..d93385de3a 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -113,7 +113,7 @@ TEST_F(DexoptTest, ValidateOatFile) {
template <bool kImage, bool kRelocate, bool kPatchoat, bool kImageDex2oat>
class ImageSpaceLoadingTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
if (kImage) {
options->emplace_back(android::base::StringPrintf("-Ximage:%s", GetCoreArtLocation().c_str()),
nullptr);
@@ -152,7 +152,7 @@ TEST_F(ImageSpaceNoRelocateNoDex2oatNoPatchoatTest, Test) {
class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false, true> {
protected:
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
const char* android_data = getenv("ANDROID_DATA");
CHECK(android_data != nullptr);
old_android_data_ = android_data;
@@ -172,7 +172,7 @@ class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false,
ImageSpaceLoadingTest<false, true, false, true>::SetUpRuntimeOptions(options);
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
int result = unlink(bad_dalvik_cache_.c_str());
CHECK_EQ(result, 0) << strerror(errno);
result = rmdir(bad_android_data_.c_str());
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 76ea9fda29..09d02518a3 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -39,12 +39,12 @@ namespace art {
namespace gc {
namespace space {
-class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+class MemoryToolLargeObjectMapSpace final : public LargeObjectMapSpace {
public:
explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
}
- ~MemoryToolLargeObjectMapSpace() OVERRIDE {
+ ~MemoryToolLargeObjectMapSpace() override {
// Historical note: We were deleting large objects to keep Valgrind happy if there were
// any large objects such as Dex cache arrays which aren't freed since they are held live
// by the class linker.
@@ -52,7 +52,7 @@ class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE {
+ override {
mirror::Object* obj =
LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
@@ -68,21 +68,21 @@ class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
return object_without_rdz;
}
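The Alloc above brackets each object with red zones: it requests kMemoryToolRedZoneBytes extra on each side and hands out the interior pointer. A standalone sketch of that arithmetic, with kRedZoneBytes standing in for kMemoryToolRedZoneBytes and malloc standing in for the underlying space:

#include <cstdint>
#include <cstdlib>
#include <cstring>

constexpr size_t kRedZoneBytes = 16;

void* AllocWithRedzones(size_t num_bytes) {
  uint8_t* raw = static_cast<uint8_t*>(std::malloc(num_bytes + 2 * kRedZoneBytes));
  if (raw == nullptr) return nullptr;
  std::memset(raw, 0xDE, kRedZoneBytes);                             // leading red zone
  std::memset(raw + kRedZoneBytes + num_bytes, 0xDE, kRedZoneBytes); // trailing red zone
  return raw + kRedZoneBytes;  // interior pointer, as with object_without_rdz
}

// Inverse mapping, as used by Free/AllocationSize via ObjectWithRedzone.
void* ObjectWithRedzone(void* interior) {
  return static_cast<uint8_t*>(interior) - kRedZoneBytes;
}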
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
}
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE {
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override {
return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
}
- size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
+ size_t Free(Thread* self, mirror::Object* obj) override {
mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
return LargeObjectMapSpace::Free(self, object_with_rdz);
}
- bool Contains(const mirror::Object* obj) const OVERRIDE {
+ bool Contains(const mirror::Object* obj) const override {
return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
}
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index b69bd91162..39ff2c3e43 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -41,7 +41,7 @@ enum class LargeObjectSpaceType {
// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
public:
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeLargeObjectSpace;
}
void SwapBitmaps();
@@ -49,10 +49,10 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
virtual ~LargeObjectSpace() {}
- uint64_t GetBytesAllocated() OVERRIDE {
+ uint64_t GetBytesAllocated() override {
return num_bytes_allocated_;
}
- uint64_t GetObjectsAllocated() OVERRIDE {
+ uint64_t GetObjectsAllocated() override {
return num_objects_allocated_;
}
uint64_t GetTotalBytesAllocated() const {
@@ -61,22 +61,22 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
uint64_t GetTotalObjectsAllocated() const {
return total_objects_allocated_;
}
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
// LargeObjectSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
- bool IsAllocSpace() const OVERRIDE {
+ bool IsAllocSpace() const override {
return true;
}
- AllocSpace* AsAllocSpace() OVERRIDE {
+ AllocSpace* AsAllocSpace() override {
return this;
}
collector::ObjectBytePair Sweep(bool swap_bitmaps);
- virtual bool CanMoveObjects() const OVERRIDE {
+ virtual bool CanMoveObjects() const override {
return false;
}
// Current address at which the space begins, which may vary as the space is filled.
@@ -96,7 +96,7 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return Begin() <= byte_obj && byte_obj < End();
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if the large object is a zygote large object. Potentially slow.
@@ -140,11 +140,11 @@ class LargeObjectMapSpace : public LargeObjectSpace {
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
REQUIRES(!lock_);
size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
- void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
- std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+ std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
protected:
struct LargeObject {
@@ -154,8 +154,8 @@ class LargeObjectMapSpace : public LargeObjectSpace {
explicit LargeObjectMapSpace(const std::string& name);
virtual ~LargeObjectMapSpace() {}
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
// Used to ensure mutual exclusion when the allocation space's data structures are being modified.
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -164,22 +164,22 @@ class LargeObjectMapSpace : public LargeObjectSpace {
};
// A continuous large object space with a free-list to handle holes.
-class FreeListSpace FINAL : public LargeObjectSpace {
+class FreeListSpace final : public LargeObjectSpace {
public:
static constexpr size_t kAlignment = kPageSize;
virtual ~FreeListSpace();
static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
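A free-list space like this satisfies page-aligned requests from holes left by freed large objects. The following first-fit sketch is a rough standalone model only; the map layout and names are invented, and the real FreeListSpace tracks allocations with AllocationInfo headers rather than a std::map.

#include <cstddef>
#include <cstdint>
#include <map>

constexpr size_t kPageSize = 4096;

class FreeList {
 public:
  void AddHole(uintptr_t addr, size_t size) { holes_[addr] = size; }

  // Returns the address of a hole large enough for `bytes`, or 0 on failure.
  uintptr_t Alloc(size_t bytes) {
    size_t needed = (bytes + kPageSize - 1) & ~(kPageSize - 1);  // round up to a page
    for (auto it = holes_.begin(); it != holes_.end(); ++it) {
      if (it->second >= needed) {
        uintptr_t addr = it->first;
        size_t remainder = it->second - needed;
        holes_.erase(it);
        if (remainder != 0) holes_[addr + needed] = remainder;  // keep the tail hole
        return addr;
      }
    }
    return 0;
  }

 private:
  std::map<uintptr_t, size_t> holes_;  // start address -> hole size
};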
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
- size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
- void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+ override REQUIRES(!lock_);
+ size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
void Dump(std::ostream& os) const REQUIRES(!lock_);
- std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+ std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
protected:
FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
@@ -198,8 +198,8 @@ class FreeListSpace FINAL : public LargeObjectSpace {
}
// Removes header from the free blocks set by finding the corresponding iterator and erasing it.
void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
class SortByPrevFree {
public:
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index e4a6f158ec..6bf2d71c7c 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -133,7 +133,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return can_move_objects_;
}
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index 32bd204354..33bddfa4c8 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -29,28 +29,28 @@ template <typename BaseMallocSpaceType,
size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
-class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
+class MemoryToolMallocSpace final : public BaseMallocSpaceType {
public:
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE;
+ override;
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_);
+ override REQUIRES(Locks::mutator_lock_);
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES_SHARED(Locks::mutator_lock_);
- void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
+ void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) override {}
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override;
template <typename... Params>
MemoryToolMallocSpace(MemMap&& mem_map, size_t initial_size, Params... params);
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 8ad26baff1..0bf4f38a4b 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -39,7 +39,7 @@ namespace space {
static constexpr bool kCyclicRegionAllocation = true;
// A space that consists of equal-sized regions.
-class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
+class RegionSpace final : public ContinuousMemMapAllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
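kCyclicRegionAllocation above selects cyclic region allocation: the search for a free region resumes after the most recently allocated one and wraps around, instead of always rescanning from region 0. A simplified standalone sketch of that cursor behavior (the Region bookkeeping here is invented for illustration):

#include <cstddef>
#include <vector>

struct Region {
  bool free = true;
};

class Regions {
 public:
  explicit Regions(size_t n) : regions_(n) {}

  // Returns the index of a free region, or -1 if none; remembers the cursor.
  long AllocRegionCyclic() {
    for (size_t i = 0; i < regions_.size(); ++i) {
      size_t idx = (cursor_ + i) % regions_.size();
      if (regions_[idx].free) {
        regions_[idx].free = false;
        cursor_ = idx + 1;  // next search starts after this region
        return static_cast<long>(idx);
      }
    }
    return -1;
  }

 private:
  std::vector<Region> regions_;
  size_t cursor_ = 0;
};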
@@ -49,7 +49,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
kEvacModeForceAll,
};
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeRegionSpace;
}
@@ -65,14 +65,14 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
/* out */ size_t* bytes_allocated,
/* out */ size_t* usable_size,
/* out */ size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!region_lock_);
+ override REQUIRES(!region_lock_);
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self,
size_t num_bytes,
/* out */ size_t* bytes_allocated,
/* out */ size_t* usable_size,
/* out */ size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+ override REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
// The main allocation routine.
template<bool kForEvac>
ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes,
@@ -90,29 +90,29 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
- size_t Free(Thread*, mirror::Object*) OVERRIDE {
+ size_t Free(Thread*, mirror::Object*) override {
UNIMPLEMENTED(FATAL);
return 0;
}
- size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+ size_t FreeList(Thread*, size_t, mirror::Object**) override {
UNIMPLEMENTED(FATAL);
return 0;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return mark_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return mark_bitmap_.get();
}
- void Clear() OVERRIDE REQUIRES(!region_lock_);
+ void Clear() override REQUIRES(!region_lock_);
// Remove read and write memory protection from the whole region space,
// i.e. make memory pages backing the region area not readable and not
@@ -188,7 +188,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return num_regions_;
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return true;
}
@@ -197,7 +197,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return byte_obj >= Begin() && byte_obj < Limit();
}
- RegionSpace* AsRegionSpace() OVERRIDE {
+ RegionSpace* AsRegionSpace() override {
return this;
}
@@ -212,10 +212,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
WalkInternal<true /* kToSpaceOnly */>(visitor);
}
- accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return nullptr;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
// Object alignment within the space.
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index c630826f48..5162a064d1 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -52,24 +52,24 @@ class RosAllocSpace : public MallocSpace {
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
+ override REQUIRES(!lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ override REQUIRES(Locks::mutator_lock_) {
return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return AllocationSizeNonvirtual<true>(obj, usable_size);
}
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -93,7 +93,7 @@ class RosAllocSpace : public MallocSpace {
// run without allocating a new run.
ALWAYS_INLINE mirror::Object* AllocThreadLocal(Thread* self, size_t num_bytes,
size_t* bytes_allocated);
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
return MaxBytesBulkAllocatedForNonvirtual(num_bytes);
}
ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);
@@ -107,13 +107,13 @@ class RosAllocSpace : public MallocSpace {
return rosalloc_;
}
- size_t Trim() OVERRIDE;
- void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
- size_t GetFootprint() OVERRIDE;
- size_t GetFootprintLimit() OVERRIDE;
- void SetFootprintLimit(size_t limit) OVERRIDE;
+ size_t Trim() override;
+ void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
+ size_t GetFootprint() override;
+ size_t GetFootprintLimit() override;
+ void SetFootprintLimit(size_t limit) override;
- void Clear() OVERRIDE;
+ void Clear() override;
MallocSpace* CreateInstance(MemMap&& mem_map,
const std::string& name,
@@ -122,10 +122,10 @@ class RosAllocSpace : public MallocSpace {
uint8_t* end,
uint8_t* limit,
size_t growth_limit,
- bool can_move_objects) OVERRIDE;
+ bool can_move_objects) override;
- uint64_t GetBytesAllocated() OVERRIDE;
- uint64_t GetObjectsAllocated() OVERRIDE;
+ uint64_t GetBytesAllocated() override;
+ uint64_t GetObjectsAllocated() override;
size_t RevokeThreadLocalBuffers(Thread* thread);
size_t RevokeAllThreadLocalBuffers();
@@ -135,11 +135,11 @@ class RosAllocSpace : public MallocSpace {
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
- bool IsRosAllocSpace() const OVERRIDE {
+ bool IsRosAllocSpace() const override {
return true;
}
- RosAllocSpace* AsRosAllocSpace() OVERRIDE {
+ RosAllocSpace* AsRosAllocSpace() override {
return this;
}
@@ -149,7 +149,7 @@ class RosAllocSpace : public MallocSpace {
virtual ~RosAllocSpace();
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE {
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override {
rosalloc_->LogFragmentationAllocFailure(os, failed_alloc_bytes);
}
@@ -174,7 +174,7 @@ class RosAllocSpace : public MallocSpace {
size_t* usable_size, size_t* bytes_tl_bulk_allocated);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
- size_t maximum_size, bool low_memory_mode) OVERRIDE {
+ size_t maximum_size, bool low_memory_mode) override {
return CreateRosAlloc(
base, morecore_start, initial_size, maximum_size, low_memory_mode, kRunningOnMemoryTool);
}
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 4e173a86f1..2fe1f82547 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -352,7 +352,7 @@ class DiscontinuousSpace : public Space {
return mark_bitmap_.get();
}
- virtual bool IsDiscontinuousSpace() const OVERRIDE {
+ virtual bool IsDiscontinuousSpace() const override {
return true;
}
@@ -409,14 +409,14 @@ class MemMapSpace : public ContinuousSpace {
// Used by the heap compaction interface to enable copying from one type of alloc space to another.
class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
public:
- bool IsAllocSpace() const OVERRIDE {
+ bool IsAllocSpace() const override {
return true;
}
- AllocSpace* AsAllocSpace() OVERRIDE {
+ AllocSpace* AsAllocSpace() override {
return this;
}
- bool IsContinuousMemMapAllocSpace() const OVERRIDE {
+ bool IsContinuousMemMapAllocSpace() const override {
return true;
}
ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
@@ -435,11 +435,11 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
// Clear the space back to an empty space.
virtual void Clear() = 0;
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return mark_bitmap_.get();
}
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 200c79f00c..1f73577a3a 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -27,7 +27,7 @@ namespace gc {
namespace space {
// A zygote space is a space which you cannot allocate into or free from.
-class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
+class ZygoteSpace final : public ContinuousMemMapAllocSpace {
public:
// Returns the remaining storage in the out_map field.
static ZygoteSpace* Create(const std::string& name,
@@ -38,28 +38,28 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
void Dump(std::ostream& os) const;
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeZygoteSpace;
}
- ZygoteSpace* AsZygoteSpace() OVERRIDE {
+ ZygoteSpace* AsZygoteSpace() override {
return this;
}
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE;
+ size_t Free(Thread* self, mirror::Object* ptr) override;
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
// ZygoteSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
@@ -71,13 +71,13 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
return objects_allocated_.load(std::memory_order_seq_cst);
}
- void Clear() OVERRIDE;
+ void Clear() override;
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 60105f4e4f..ef85b3942f 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -45,7 +45,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
}
virtual ~SystemWeakHolder() {}
- void Allow() OVERRIDE
+ void Allow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
CHECK(!kUseReadBarrier);
@@ -54,7 +54,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
new_weak_condition_.Broadcast(Thread::Current());
}
- void Disallow() OVERRIDE
+ void Disallow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
CHECK(!kUseReadBarrier);
@@ -62,7 +62,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
allow_new_system_weak_ = false;
}
- void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE
+ void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) override
REQUIRES(!allow_disallow_lock_) {
MutexLock mu(Thread::Current(), allow_disallow_lock_);
new_weak_condition_.Broadcast(Thread::Current());
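The Allow/Disallow/Broadcast trio above implements a gate: while system weaks are disallowed (during parts of GC), readers block on a condition variable until Allow broadcasts. A standalone model of that protocol, with std::mutex and std::condition_variable standing in for ART's allow_disallow_lock_ and new_weak_condition_:

#include <condition_variable>
#include <mutex>

class WeakAccessGate {
 public:
  void Disallow() {
    std::lock_guard<std::mutex> lg(lock_);
    allowed_ = false;
  }
  void Allow() {
    {
      std::lock_guard<std::mutex> lg(lock_);
      allowed_ = true;
    }
    cond_.notify_all();  // corresponds to new_weak_condition_.Broadcast(...)
  }
  void WaitForAllowed() {  // what a reader of weak references would do
    std::unique_lock<std::mutex> lk(lock_);
    cond_.wait(lk, [this] { return allowed_; });
  }

 private:
  std::mutex lock_;
  std::condition_variable cond_;
  bool allowed_ = true;
};

Note the design choice mirrored here: the flag is flipped under the lock, then waiters are woken, so a reader can never observe a stale "allowed" state.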
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index 897ab01251..07725b9a56 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -44,7 +44,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
disallow_count_(0),
sweep_count_(0) {}
- void Allow() OVERRIDE
+ void Allow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Allow();
@@ -52,7 +52,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
allow_count_++;
}
- void Disallow() OVERRIDE
+ void Disallow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Disallow();
@@ -60,7 +60,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
disallow_count_++;
}
- void Broadcast(bool broadcast_for_checkpoint) OVERRIDE
+ void Broadcast(bool broadcast_for_checkpoint) override
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Broadcast(broadcast_for_checkpoint);
@@ -70,7 +70,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
}
}
- void Sweep(IsMarkedVisitor* visitor) OVERRIDE
+ void Sweep(IsMarkedVisitor* visitor) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
MutexLock mu(Thread::Current(), allow_disallow_lock_);
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 38581ce807..caa8802823 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -33,7 +33,7 @@ class RecursiveTask : public HeapTask {
: HeapTask(NanoTime() + MsToNs(10)), task_processor_(task_processor), counter_(counter),
max_recursion_(max_recursion) {
}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
if (max_recursion_ > 0) {
task_processor_->AddTask(self,
new RecursiveTask(task_processor_, counter_, max_recursion_ - 1));
@@ -52,7 +52,7 @@ class WorkUntilDoneTask : public SelfDeletingTask {
WorkUntilDoneTask(TaskProcessor* task_processor, Atomic<bool>* done_running)
: task_processor_(task_processor), done_running_(done_running) {
}
- virtual void Run(Thread* self) OVERRIDE {
+ virtual void Run(Thread* self) override {
task_processor_->RunAllTasks(self);
done_running_->store(true, std::memory_order_seq_cst);
}
@@ -105,7 +105,7 @@ class TestOrderTask : public HeapTask {
TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
: HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
}
- virtual void Run(Thread* thread ATTRIBUTE_UNUSED) OVERRIDE {
+ virtual void Run(Thread* thread ATTRIBUTE_UNUSED) override {
ASSERT_EQ(*counter_, expected_counter_);
++*counter_;
}
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index d6a2fa0cb5..5d234eaac3 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -198,7 +198,7 @@ class Verification::CollectRootVisitor : public SingleRootVisitor {
CollectRootVisitor(ObjectSet* visited, WorkQueue* work) : visited_(visited), work_(work) {}
void VisitRoot(mirror::Object* obj, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (obj != nullptr && visited_->insert(obj).second) {
std::ostringstream oss;
oss << info.ToString() << " = " << obj << "(" << obj->PrettyTypeOf() << ")";
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 986e28ec79..0bd43f95c0 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -133,7 +133,7 @@ class RootVisitor {
// critical.
class SingleRootVisitor : public RootVisitor {
private:
- void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(*roots[i], info);
@@ -141,7 +141,7 @@ class SingleRootVisitor : public RootVisitor {
}
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info) OVERRIDE
+ const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(roots[i]->AsMirrorPtr(), info);
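SingleRootVisitor is an adapter: it implements the batched VisitRoots entry points once and funnels every root into a per-root hook. The same shape, reduced to a self-contained sketch with invented types:

#include <cstddef>
#include <cstdio>

struct Object {};

class RootVisitor {
 public:
  virtual ~RootVisitor() {}
  virtual void VisitRoots(Object*** roots, size_t count) = 0;
};

class SingleRootPrinter : public RootVisitor {
  void VisitRoots(Object*** roots, size_t count) override {
    for (size_t i = 0; i < count; ++i) {
      VisitRoot(*roots[i]);  // fan the batch out, one root at a time
    }
  }
  void VisitRoot(Object* root) {
    std::printf("root %p\n", static_cast<void*>(root));
  }
};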
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 28a230291d..9eaf1ec71a 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -259,7 +259,7 @@ class PACKED(4) FixedSizeHandleScope : public HandleScope {
// Scoped handle storage of a fixed size that is stack allocated.
template<size_t kNumReferences>
-class PACKED(4) StackHandleScope FINAL : public FixedSizeHandleScope<kNumReferences> {
+class PACKED(4) StackHandleScope final : public FixedSizeHandleScope<kNumReferences> {
public:
explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
ALWAYS_INLINE ~StackHandleScope();
diff --git a/runtime/hidden_api_test.cc b/runtime/hidden_api_test.cc
index a41d28492d..4c7efe666f 100644
--- a/runtime/hidden_api_test.cc
+++ b/runtime/hidden_api_test.cc
@@ -27,7 +27,7 @@ using hiddenapi::GetActionFromAccessFlags;
class HiddenApiTest : public CommonRuntimeTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
// Do the normal setup.
CommonRuntimeTest::SetUp();
self_ = Thread::Current();
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 3f44928e3a..e8a47d1087 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -303,7 +303,7 @@ class EndianOutputBuffered : public EndianOutput {
}
virtual ~EndianOutputBuffered() {}
- void UpdateU4(size_t offset, uint32_t new_value) OVERRIDE {
+ void UpdateU4(size_t offset, uint32_t new_value) override {
DCHECK_LE(offset, length_ - 4);
buffer_[offset + 0] = static_cast<uint8_t>((new_value >> 24) & 0xFF);
buffer_[offset + 1] = static_cast<uint8_t>((new_value >> 16) & 0xFF);
@@ -312,12 +312,12 @@ class EndianOutputBuffered : public EndianOutput {
}
protected:
- void HandleU1List(const uint8_t* values, size_t count) OVERRIDE {
+ void HandleU1List(const uint8_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
buffer_.insert(buffer_.end(), values, values + count);
}
- void HandleU1AsU2List(const uint8_t* values, size_t count) OVERRIDE {
+ void HandleU1AsU2List(const uint8_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
// All 8-bit values are grouped in pairs to make a 16-bit block, like a Java char.
if (count & 1) {
@@ -330,7 +330,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleU2List(const uint16_t* values, size_t count) OVERRIDE {
+ void HandleU2List(const uint16_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
for (size_t i = 0; i < count; ++i) {
uint16_t value = *values;
@@ -340,7 +340,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleU4List(const uint32_t* values, size_t count) OVERRIDE {
+ void HandleU4List(const uint32_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
for (size_t i = 0; i < count; ++i) {
uint32_t value = *values;
@@ -352,7 +352,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleU8List(const uint64_t* values, size_t count) OVERRIDE {
+ void HandleU8List(const uint64_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
for (size_t i = 0; i < count; ++i) {
uint64_t value = *values;
@@ -368,7 +368,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleEndRecord() OVERRIDE {
+ void HandleEndRecord() override {
DCHECK_EQ(buffer_.size(), length_);
if (kIsDebugBuild && started_) {
uint32_t stored_length =
@@ -388,7 +388,7 @@ class EndianOutputBuffered : public EndianOutput {
std::vector<uint8_t> buffer_;
};
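UpdateU4 and the HandleU*List methods above serialize values most-significant byte first, independent of host endianness, which is what the hprof format requires. The core of that encoding as two standalone helpers:

#include <cstddef>
#include <cstdint>
#include <vector>

void AppendU4BigEndian(std::vector<uint8_t>& buf, uint32_t value) {
  buf.push_back(static_cast<uint8_t>((value >> 24) & 0xFF));
  buf.push_back(static_cast<uint8_t>((value >> 16) & 0xFF));
  buf.push_back(static_cast<uint8_t>((value >> 8) & 0xFF));
  buf.push_back(static_cast<uint8_t>(value & 0xFF));
}

// Patch a previously written u4 in place, as UpdateU4 does for record lengths.
void UpdateU4BigEndian(std::vector<uint8_t>& buf, size_t offset, uint32_t value) {
  buf[offset + 0] = static_cast<uint8_t>((value >> 24) & 0xFF);
  buf[offset + 1] = static_cast<uint8_t>((value >> 16) & 0xFF);
  buf[offset + 2] = static_cast<uint8_t>((value >> 8) & 0xFF);
  buf[offset + 3] = static_cast<uint8_t>(value & 0xFF);
}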
-class FileEndianOutput FINAL : public EndianOutputBuffered {
+class FileEndianOutput final : public EndianOutputBuffered {
public:
FileEndianOutput(File* fp, size_t reserved_size)
: EndianOutputBuffered(reserved_size), fp_(fp), errors_(false) {
@@ -402,7 +402,7 @@ class FileEndianOutput FINAL : public EndianOutputBuffered {
}
protected:
- void HandleFlush(const uint8_t* buffer, size_t length) OVERRIDE {
+ void HandleFlush(const uint8_t* buffer, size_t length) override {
if (!errors_) {
errors_ = !fp_->WriteFully(buffer, length);
}
@@ -413,14 +413,14 @@ class FileEndianOutput FINAL : public EndianOutputBuffered {
bool errors_;
};
-class VectorEndianOuputput FINAL : public EndianOutputBuffered {
+class VectorEndianOuputput final : public EndianOutputBuffered {
public:
VectorEndianOuputput(std::vector<uint8_t>& data, size_t reserved_size)
: EndianOutputBuffered(reserved_size), full_data_(data) {}
~VectorEndianOuputput() {}
protected:
- void HandleFlush(const uint8_t* buf, size_t length) OVERRIDE {
+ void HandleFlush(const uint8_t* buf, size_t length) override {
size_t old_size = full_data_.size();
full_data_.resize(old_size + length);
memcpy(full_data_.data() + old_size, buf, length);
@@ -604,7 +604,7 @@ class Hprof : public SingleRootVisitor {
}
void VisitRoot(mirror::Object* obj, const RootInfo& root_info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ override REQUIRES_SHARED(Locks::mutator_lock_);
void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag,
uint32_t thread_serial);
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4196e19383..b42433cad3 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -85,7 +85,7 @@ class InstallStubsClassVisitor : public ClassVisitor {
explicit InstallStubsClassVisitor(Instrumentation* instrumentation)
: instrumentation_(instrumentation) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
instrumentation_->InstallStubsForClass(klass.Ptr());
return true; // we visit all classes.
}
@@ -264,7 +264,7 @@ void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
// existing instrumentation frames.
static void InstrumentationInstallStack(Thread* thread, void* arg)
REQUIRES_SHARED(Locks::mutator_lock_) {
- struct InstallStackVisitor FINAL : public StackVisitor {
+ struct InstallStackVisitor final : public StackVisitor {
InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
: StackVisitor(thread_in, context, kInstrumentationStackWalk),
instrumentation_stack_(thread_in->GetInstrumentationStack()),
@@ -273,7 +273,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
last_return_pc_(0) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m == nullptr) {
if (kVerboseInstrumentation) {
@@ -429,7 +429,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
REQUIRES(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- struct RestoreStackVisitor FINAL : public StackVisitor {
+ struct RestoreStackVisitor final : public StackVisitor {
RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
: StackVisitor(thread_in, nullptr, kInstrumentationStackWalk),
@@ -439,7 +439,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
instrumentation_stack_(thread_in->GetInstrumentationStack()),
frames_removed_(0) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (instrumentation_stack_->size() == 0) {
return false; // Stop.
}
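Both stack visitors above follow the same contract: the walker calls VisitFrame() once per frame, and returning false stops the walk (hence the "// Stop." comment). A minimal sketch of that contract over an invented frame model:

#include <cstddef>
#include <vector>

struct Frame {
  int method_id;
};

class StackVisitor {
 public:
  explicit StackVisitor(const std::vector<Frame>& stack) : stack_(stack) {}
  virtual ~StackVisitor() {}
  virtual bool VisitFrame() = 0;  // false means "stop walking"

  void WalkStack() {
    for (index_ = 0; index_ < stack_.size(); ++index_) {
      if (!VisitFrame()) break;
    }
  }

 protected:
  const std::vector<Frame>& stack_;
  size_t index_ = 0;
};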
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 8ac26afe9f..9146245895 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -36,7 +36,7 @@
namespace art {
namespace instrumentation {
-class TestInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
+class TestInstrumentationListener final : public instrumentation::InstrumentationListener {
public:
TestInstrumentationListener()
: received_method_enter_event(false),
@@ -59,7 +59,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_enter_event = true;
}
@@ -68,7 +68,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
Handle<mirror::Object> return_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_exit_object_event = true;
}
@@ -77,7 +77,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const JValue& return_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_exit_event = true;
}
@@ -85,7 +85,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_unwind_event = true;
}
@@ -93,7 +93,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t new_dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_dex_pc_moved_event = true;
}
@@ -102,7 +102,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_read_event = true;
}
@@ -112,7 +112,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED,
Handle<mirror::Object> field_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_written_object_event = true;
}
@@ -122,19 +122,19 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED,
const JValue& field_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_written_event = true;
}
void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_exception_thrown_event = true;
}
void ExceptionHandled(Thread* self ATTRIBUTE_UNUSED,
Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_exception_handled_event = true;
}
@@ -142,7 +142,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_branch_event = true;
}
@@ -151,12 +151,12 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* caller ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtMethod* callee ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_invoke_virtual_or_interface_event = true;
}
void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED, const ShadowFrame& frame ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_watched_frame_pop = true;
}
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index b56c48d78c..8b4fe44c15 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -86,7 +86,7 @@ TEST_F(InternTableTest, CrossHash) {
class TestPredicate : public IsMarkedVisitor {
public:
- mirror::Object* IsMarked(mirror::Object* s) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ mirror::Object* IsMarked(mirror::Object* s) override REQUIRES_SHARED(Locks::mutator_lock_) {
bool erased = false;
for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
if (*it == s) {
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
index 25ac6e2a31..452a76b89a 100644
--- a/runtime/java_frame_root_info.h
+++ b/runtime/java_frame_root_info.h
@@ -27,12 +27,12 @@ namespace art {
class StackVisitor;
-class JavaFrameRootInfo FINAL : public RootInfo {
+class JavaFrameRootInfo final : public RootInfo {
public:
JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
: RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
}
- void Describe(std::ostream& os) const OVERRIDE
+ void Describe(std::ostream& os) const override
REQUIRES_SHARED(Locks::mutator_lock_);
size_t GetVReg() const {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ed449b5433..a6bc029828 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -399,7 +399,7 @@ void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
struct CollectClasses : public ClassVisitor {
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
classes_.push_back(klass.Ptr());
return true;
}
@@ -576,7 +576,7 @@ void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
memory_use_.AddValue(bytes);
}
-class JitCompileTask FINAL : public Task {
+class JitCompileTask final : public Task {
public:
enum TaskKind {
kAllocateProfile,
@@ -596,7 +596,7 @@ class JitCompileTask FINAL : public Task {
soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
ScopedObjectAccess soa(self);
if (kind_ == kCompile) {
Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
@@ -611,7 +611,7 @@ class JitCompileTask FINAL : public Task {
ProfileSaver::NotifyJitActivity();
}
- void Finalize() OVERRIDE {
+ void Finalize() override {
delete this;
}
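JitCompileTask's Finalize() { delete this; } is the self-deleting task idiom: the thread pool runs the task, then calls Finalize, and a task that owns itself frees its memory there. A stripped-down sketch, with the pool plumbing omitted and names chosen for illustration:

class Task {
 public:
  virtual ~Task() {}
  virtual void Run() = 0;
  virtual void Finalize() {}  // default: the caller owns the task
};

class SelfDeletingTask : public Task {
 public:
  void Run() override { /* do the work */ }
  void Finalize() override { delete this; }  // task owns itself
};

void Execute(Task* task) {
  task->Run();
  task->Finalize();  // may delete task; do not touch it afterwards
}

The key invariant is the last comment: after Finalize returns, the pointer must be treated as dangling.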
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index d9c7900577..33adc18673 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1088,14 +1088,14 @@ size_t JitCodeCache::ReserveData(Thread* self,
}
}
-class MarkCodeVisitor FINAL : public StackVisitor {
+class MarkCodeVisitor final : public StackVisitor {
public:
MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
code_cache_(code_cache_in),
bitmap_(code_cache_->GetLiveBitmap()) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
if (method_header == nullptr) {
return true;
@@ -1113,12 +1113,12 @@ class MarkCodeVisitor FINAL : public StackVisitor {
CodeCacheBitmap* const bitmap_;
};
-class MarkCodeClosure FINAL : public Closure {
+class MarkCodeClosure final : public Closure {
public:
MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
: code_cache_(code_cache), barrier_(barrier) {}
- void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK(thread == Thread::Current() || thread->IsSuspended());
MarkCodeVisitor visitor(thread, code_cache_);
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 6ccda8b0bb..d9ef922390 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -255,7 +255,7 @@ class GetClassLoadersVisitor : public ClassLoaderVisitor {
class_loaders_(class_loaders) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
class_loaders_->push_back(hs_->NewHandle(class_loader));
}
diff --git a/runtime/jit/profiling_info_test.cc b/runtime/jit/profiling_info_test.cc
index 8424610cf8..f695c8fd9b 100644
--- a/runtime/jit/profiling_info_test.cc
+++ b/runtime/jit/profiling_info_test.cc
@@ -40,7 +40,7 @@ static constexpr size_t kMaxMethodIds = 65535;
class ProfileCompilationInfoTest : public CommonRuntimeTest {
public:
- void PostRuntimeCreate() OVERRIDE {
+ void PostRuntimeCreate() override {
allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
}
diff --git a/runtime/jni/java_vm_ext_test.cc b/runtime/jni/java_vm_ext_test.cc
index 74e4a30905..fe1c168db2 100644
--- a/runtime/jni/java_vm_ext_test.cc
+++ b/runtime/jni/java_vm_ext_test.cc
@@ -34,7 +34,7 @@ class JavaVmExtTest : public CommonRuntimeTest {
}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
CommonRuntimeTest::TearDown();
}
@@ -137,7 +137,7 @@ TEST_F(JavaVmExtTest, DetachCurrentThread) {
class JavaVmExtStackTraceTest : public JavaVmExtTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
options->emplace_back("-XX:GlobalRefAllocStackTraceLimit=50000", nullptr);
}
};
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index a25049e681..a4b151a7e7 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -84,7 +84,7 @@ class JniInternalTest : public CommonCompilerTest {
}
}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
CleanUpJniEnv();
CommonCompilerTest::TearDown();
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index c3e167c306..811ee515d3 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -63,7 +63,7 @@ using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
// C++ mirror of java.lang.Class
-class MANAGED Class FINAL : public Object {
+class MANAGED Class final : public Object {
public:
// A magic value for reference_instance_offsets_. Ignore the bits and walk the super chain when
// this is the value.
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 87f4f0ab7b..ba91e4fd58 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -141,7 +141,7 @@ using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
// C++ mirror of java.lang.DexCache.
-class MANAGED DexCache FINAL : public Object {
+class MANAGED DexCache final : public Object {
public:
// Size of java.lang.DexCache.class.
static uint32_t ClassSize(PointerSize pointer_size);
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 7a70cae1ef..528740bea9 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -34,7 +34,7 @@ class DexCacheTest : public CommonRuntimeTest {};
class DexCacheMethodHandlesTest : public DexCacheTest {
protected:
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonRuntimeTest::SetUpRuntimeOptions(options);
}
};
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index d72c7866c5..9e3c9af86d 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -23,7 +23,7 @@
namespace art {
namespace mirror {
-class MANAGED IfTable FINAL : public ObjectArray<Object> {
+class MANAGED IfTable final : public ObjectArray<Object> {
public:
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
diff --git a/runtime/mirror/proxy.h b/runtime/mirror/proxy.h
index db511d6425..7775de35d2 100644
--- a/runtime/mirror/proxy.h
+++ b/runtime/mirror/proxy.h
@@ -26,7 +26,7 @@ struct ProxyOffsets;
namespace mirror {
// C++ mirror of java.lang.reflect.Proxy.
-class MANAGED Proxy FINAL : public Object {
+class MANAGED Proxy final : public Object {
private:
HeapReference<Object> h_;
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 55a2ef0b49..37ac57587f 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -27,7 +27,7 @@ struct StackTraceElementOffsets;
namespace mirror {
// C++ mirror of java.lang.StackTraceElement
-class MANAGED StackTraceElement FINAL : public Object {
+class MANAGED StackTraceElement final : public Object {
public:
String* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 0e2fc903b5..d08717ca82 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -40,7 +40,7 @@ enum class StringCompressionFlag : uint32_t {
};
// C++ mirror of java.lang.String
-class MANAGED String FINAL : public Object {
+class MANAGED String final : public Object {
public:
// Size of java.lang.String.class.
static uint32_t ClassSize(PointerSize pointer_size);
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index 56c953b816..864e1eab73 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -353,7 +353,7 @@ inline void StoreResult(ObjPtr<Object> value, JValue* result)
//
template <typename T>
-class JValueByteSwapper FINAL {
+class JValueByteSwapper final {
public:
static void ByteSwap(JValue* value);
static void MaybeByteSwap(bool byte_swap, JValue* value) {
@@ -392,7 +392,7 @@ class AtomicGetAccessor : public Object::Accessor<T> {
public:
explicit AtomicGetAccessor(JValue* result) : result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
StoreResult(atom->load(MO), result_);
}
@@ -406,7 +406,7 @@ class AtomicSetAccessor : public Object::Accessor<T> {
public:
explicit AtomicSetAccessor(T new_value) : new_value_(new_value) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
atom->store(new_value_, MO);
}
@@ -431,7 +431,7 @@ class AtomicStrongCompareAndSetAccessor : public Object::Accessor<T> {
AtomicStrongCompareAndSetAccessor(T expected_value, T desired_value, JValue* result)
: expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
bool success = atom->compare_exchange_strong(expected_value_, desired_value_, MOS, MOF);
StoreResult(success ? JNI_TRUE : JNI_FALSE, result_);
@@ -453,7 +453,7 @@ class AtomicStrongCompareAndExchangeAccessor : public Object::Accessor<T> {
AtomicStrongCompareAndExchangeAccessor(T expected_value, T desired_value, JValue* result)
: expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
atom->compare_exchange_strong(expected_value_, desired_value_, MOS, MOF);
StoreResult(expected_value_, result_);
@@ -475,7 +475,7 @@ class AtomicWeakCompareAndSetAccessor : public Object::Accessor<T> {
AtomicWeakCompareAndSetAccessor(T expected_value, T desired_value, JValue* result)
: expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
bool success = atom->compare_exchange_weak(expected_value_, desired_value_, MOS, MOF);
StoreResult(success ? JNI_TRUE : JNI_FALSE, result_);
@@ -496,7 +496,7 @@ class AtomicGetAndSetAccessor : public Object::Accessor<T> {
public:
AtomicGetAndSetAccessor(T new_value, JValue* result) : new_value_(new_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->exchange(new_value_, MO);
StoreResult(old_value, result_);
@@ -540,7 +540,7 @@ class AtomicGetAndAddAccessor : public Object::Accessor<T> {
public:
AtomicGetAndAddAccessor(T addend, JValue* result) : addend_(addend), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
constexpr bool kIsFloatingPoint = std::is_floating_point<T>::value;
T old_value = AtomicGetAndAddOperator<T, kIsFloatingPoint, MO>::Apply(addr, addend_);
StoreResult(old_value, result_);
@@ -562,7 +562,7 @@ class AtomicGetAndAddWithByteSwapAccessor : public Object::Accessor<T> {
public:
AtomicGetAndAddWithByteSwapAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* const atom = reinterpret_cast<std::atomic<T>*>(addr);
T current_value = atom->load(std::memory_order_relaxed);
T sum;
@@ -591,7 +591,7 @@ class AtomicGetAndBitwiseOrAccessor : public Object::Accessor<T> {
public:
AtomicGetAndBitwiseOrAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->fetch_or(value_, MO);
StoreResult(old_value, result_);
@@ -610,7 +610,7 @@ class AtomicGetAndBitwiseAndAccessor : public Object::Accessor<T> {
public:
AtomicGetAndBitwiseAndAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->fetch_and(value_, MO);
StoreResult(old_value, result_);
@@ -630,7 +630,7 @@ class AtomicGetAndBitwiseXorAccessor : public Object::Accessor<T> {
public:
AtomicGetAndBitwiseXorAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->fetch_xor(value_, MO);
StoreResult(old_value, result_);
@@ -679,7 +679,7 @@ class TypeAdaptorAccessor : public Object::Accessor<T> {
explicit TypeAdaptorAccessor(Object::Accessor<U>* inner_accessor)
: inner_accessor_(inner_accessor) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
static_assert(sizeof(T) == sizeof(U), "bad conversion");
inner_accessor_->Access(reinterpret_cast<U*>(addr));
}
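For context on the var_handle.cc accessor hunks above: each accessor wraps a raw field address in a std::atomic<T> and performs one operation, with the memory orders baked in as template parameters (MO for plain operations, MOS/MOF for the success/failure orders of a compare-exchange). Below is a minimal standalone sketch of that same pattern; the names Accessor, MOS, and MOF mirror the code above, while everything else (CasAccessor, main) is illustrative, not ART's API.

#include <atomic>
#include <cstdio>

// Hypothetical stand-in for ART's Object::Accessor<T>: one virtual hook
// that receives the raw address of the field being accessed.
template <typename T>
class Accessor {
 public:
  virtual ~Accessor() {}
  virtual void Access(T* addr) = 0;
};

// Compare-and-set with distinct success/failure memory orders, shaped like
// AtomicStrongCompareAndSetAccessor above (defaults here are assumptions).
template <typename T,
          std::memory_order MOS = std::memory_order_seq_cst,
          std::memory_order MOF = std::memory_order_seq_cst>
class CasAccessor final : public Accessor<T> {
 public:
  CasAccessor(T expected, T desired) : expected_(expected), desired_(desired) {}
  void Access(T* addr) override {
    // Reinterpret the raw field address as an atomic, then CAS on it.
    std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
    succeeded_ = atom->compare_exchange_strong(expected_, desired_, MOS, MOF);
  }
  bool succeeded() const { return succeeded_; }

 private:
  T expected_;  // Updated to the observed value if the CAS fails.
  T desired_;
  bool succeeded_ = false;
};

int main() {
  int field = 1;
  CasAccessor<int> cas(/*expected=*/1, /*desired=*/2);
  cas.Access(&field);
  std::printf("cas %s, field=%d\n", cas.succeeded() ? "ok" : "failed", field);
}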
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index d47bc0d12e..6e5786a6c3 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -184,7 +184,7 @@ bool Monitor::Install(Thread* self) {
if (locking_method_ != nullptr && UNLIKELY(locking_method_->IsProxyMethod())) {
// Grab another frame. Proxy methods are not helpful for lock profiling. This should be rare
// enough that it's OK to walk the stack twice.
- struct NextMethodVisitor FINAL : public StackVisitor {
+ struct NextMethodVisitor final : public StackVisitor {
explicit NextMethodVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread,
nullptr,
@@ -193,7 +193,7 @@ bool Monitor::Install(Thread* self) {
count_(0),
method_(nullptr),
dex_pc_(0) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
// Continue if this is a runtime method.
@@ -271,7 +271,7 @@ void Monitor::SetObject(mirror::Object* object) {
// Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
-struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
+struct NthCallerWithDexPcVisitor final : public StackVisitor {
explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -279,7 +279,7 @@ struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
dex_pc_(0),
current_frame_number_(0),
wanted_frame_number_(frame) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m == nullptr || m->IsRuntimeMethod()) {
// Runtime method, upcall, or resolution issue. Skip.
@@ -514,7 +514,7 @@ void Monitor::Lock(Thread* self) {
if (should_dump_stacks) {
// Very long contention. Dump stacks.
struct CollectStackTrace : public Closure {
- void Run(art::Thread* thread) OVERRIDE
+ void Run(art::Thread* thread) override
REQUIRES_SHARED(art::Locks::mutator_lock_) {
thread->DumpJavaStack(oss);
}
@@ -1574,7 +1574,7 @@ class MonitorDeflateVisitor : public IsMarkedVisitor {
public:
MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
- virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Monitor::Deflate(self_, object)) {
DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
diff --git a/runtime/monitor_objects_stack_visitor.h b/runtime/monitor_objects_stack_visitor.h
index 5c962c3b26..c943402126 100644
--- a/runtime/monitor_objects_stack_visitor.h
+++ b/runtime/monitor_objects_stack_visitor.h
@@ -54,7 +54,7 @@ class MonitorObjectsStackVisitor : public StackVisitor {
kEndStackWalk,
};
- bool VisitFrame() FINAL REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() final REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
return true;
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index bff8d7678c..c88748ffb8 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -34,7 +34,7 @@ namespace art {
class MonitorTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use a smaller heap
SetUpRuntimeOptionsForFillHeap(options);
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 7ac4086362..6f98a6d381 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -207,7 +207,7 @@ static void VMDebug_printLoadedClasses(JNIEnv* env, jclass, jint flags) {
public:
explicit DumpClassVisitor(int dump_flags) : flags_(dump_flags) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
klass->DumpClass(LOG_STREAM(ERROR), flags_);
return true;
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 9b3fd16ac0..0e619407e5 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -332,7 +332,7 @@ class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::String> string = root->AsString();
table_->operator[](string->ToModifiedUtf8()) = string;
}
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 5b47eaca86..72dae4791c 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -120,9 +120,9 @@ static void DoCollectNonDebuggableCallback(Thread* thread, void* data)
: StackVisitor(t, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
class_set_(class_set) {}
- ~NonDebuggableStacksVisitor() OVERRIDE {}
+ ~NonDebuggableStacksVisitor() override {}
- bool VisitFrame() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true;
}
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index 9c777cc277..496a6f3d09 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -21,22 +21,22 @@
namespace art {
-class NoopCompilerCallbacks FINAL : public CompilerCallbacks {
+class NoopCompilerCallbacks final : public CompilerCallbacks {
public:
NoopCompilerCallbacks() : CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp) {}
~NoopCompilerCallbacks() {}
- void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {
+ void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {
}
- void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
+ void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
// This is only used by compilers which need to be able to run without relocation even when it
// would normally be enabled. For example the patchoat executable, and dex2oat --image, both need
// to disable the relocation since both deal with writing out the images directly.
- bool IsRelocationPossible() OVERRIDE { return false; }
+ bool IsRelocationPossible() override { return false; }
- verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return nullptr; }
+ verifier::VerifierDeps* GetVerifierDeps() const override { return nullptr; }
private:
DISALLOW_COPY_AND_ASSIGN(NoopCompilerCallbacks);
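The mechanical change throughout this patch replaces ART's portability macros with the C++11 keywords they expand to on any modern toolchain. Assuming definitions along the following lines (a sketch of the assumed shape, not the verbatim contents of base/macros.h), the substitution is a no-op at compile time, and spelling the keywords directly lets tooling such as clang-tidy's modernize-use-override check the code.

// Sketch of the old portability macros: on a C++11 compiler they
// expand to the keywords themselves, so this patch changes no behavior.
#if __cplusplus >= 201103L
#define OVERRIDE override
#define FINAL final
#else
#define OVERRIDE
#define FINAL
#endif

// What 'override' buys: signature drift becomes a compile error.
struct Base {
  virtual void f(int) {}
  virtual ~Base() {}
};
struct Derived final : Base {
  // void f(long) override {}  // error: does not override Base::f(int)
  void f(int) override {}      // OK: checked against the base declaration
};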
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index c7daef8310..4780aea003 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -889,7 +889,7 @@ bool OatFileBase::Setup(int zip_fd, const char* abs_dex_location, std::string* e
// OatFile via dlopen //
////////////////////////
-class DlOpenOatFile FINAL : public OatFileBase {
+class DlOpenOatFile final : public OatFileBase {
public:
DlOpenOatFile(const std::string& filename, bool executable)
: OatFileBase(filename, executable),
@@ -911,7 +911,7 @@ class DlOpenOatFile FINAL : public OatFileBase {
protected:
const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
const uint8_t* ptr =
reinterpret_cast<const uint8_t*>(dlsym(dlopen_handle_, symbol_name.c_str()));
if (ptr == nullptr) {
@@ -920,21 +920,21 @@ class DlOpenOatFile FINAL : public OatFileBase {
return ptr;
}
- void PreLoad() OVERRIDE;
+ void PreLoad() override;
bool Load(const std::string& elf_filename,
uint8_t* oat_file_begin,
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) OVERRIDE;
+ std::string* error_msg) override;
bool Load(int, uint8_t*, bool, bool, bool, std::string*) {
return false;
}
// Ask the linker where it mmaped the file and notify our mmap wrapper of the regions.
- void PreSetup(const std::string& elf_filename) OVERRIDE;
+ void PreSetup(const std::string& elf_filename) override;
private:
bool Dlopen(const std::string& elf_filename,
@@ -1156,7 +1156,7 @@ void DlOpenOatFile::PreSetup(const std::string& elf_filename) {
// OatFile via our own ElfFile implementation //
////////////////////////////////////////////////
-class ElfOatFile FINAL : public OatFileBase {
+class ElfOatFile final : public OatFileBase {
public:
ElfOatFile(const std::string& filename, bool executable) : OatFileBase(filename, executable) {}
@@ -1179,7 +1179,7 @@ class ElfOatFile FINAL : public OatFileBase {
protected:
const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
const uint8_t* ptr = elf_file_->FindDynamicSymbolAddress(symbol_name);
if (ptr == nullptr) {
*error_msg = "(Internal implementation could not find symbol)";
@@ -1187,7 +1187,7 @@ class ElfOatFile FINAL : public OatFileBase {
return ptr;
}
- void PreLoad() OVERRIDE {
+ void PreLoad() override {
}
bool Load(const std::string& elf_filename,
@@ -1195,16 +1195,16 @@ class ElfOatFile FINAL : public OatFileBase {
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) OVERRIDE;
+ std::string* error_msg) override;
bool Load(int oat_fd,
uint8_t* oat_file_begin, // Overrides where the file is loaded, if non-null
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) OVERRIDE;
+ std::string* error_msg) override;
- void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) OVERRIDE {
+ void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) override {
}
private:
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 4ed26facf7..21e214408d 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -146,7 +146,7 @@ class OatFile {
const OatHeader& GetOatHeader() const;
- class OatMethod FINAL {
+ class OatMethod final {
public:
void LinkMethod(ArtMethod* method) const;
@@ -201,7 +201,7 @@ class OatFile {
friend class OatClass;
};
- class OatClass FINAL {
+ class OatClass final {
public:
ClassStatus GetStatus() const {
return status_;
@@ -444,7 +444,7 @@ class OatFile {
// support forward declarations of inner classes, and we want to
// forward-declare OatDexFile so that we can store an opaque pointer to an
// OatDexFile in DexFile.
-class OatDexFile FINAL {
+class OatDexFile final {
public:
// Opens the DexFile referred to by this OatDexFile from within the containing OatFile.
std::unique_ptr<const DexFile> OpenDexFile(std::string* error_msg) const;
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 36dea60367..f1e485b951 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -30,7 +30,7 @@ namespace proxy_test {
class ProxyTest : public CommonRuntimeTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
// The creation of a Proxy class uses WellKnownClasses. These are not normally initialized by
// CommonRuntimeTest so we need to do that now.
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 7f5717f736..7b92151c66 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -58,7 +58,7 @@ QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimizatio
full_fragment_done_(false) {}
// Finds catch handler.
-class CatchBlockStackVisitor FINAL : public StackVisitor {
+class CatchBlockStackVisitor final : public StackVisitor {
public:
CatchBlockStackVisitor(Thread* self,
Context* context,
@@ -72,7 +72,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
skip_frames_(skip_frames) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
if (method == nullptr) {
@@ -350,7 +350,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
}
// Prepares deoptimization.
-class DeoptimizeStackVisitor FINAL : public StackVisitor {
+class DeoptimizeStackVisitor final : public StackVisitor {
public:
DeoptimizeStackVisitor(Thread* self,
Context* context,
@@ -399,7 +399,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
}
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
ArtMethod* method = GetMethod();
if (method == nullptr || single_frame_done_) {
@@ -667,14 +667,14 @@ void QuickExceptionHandler::DoLongJump(bool smash_caller_saves) {
}
// Prints out methods with their type of frame.
-class DumpFramesWithTypeStackVisitor FINAL : public StackVisitor {
+class DumpFramesWithTypeStackVisitor final : public StackVisitor {
public:
explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
show_details_(show_details) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
if (show_details_) {
LOG(INFO) << "|> pc = " << std::hex << GetCurrentQuickFramePc();
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 30d45871ff..243150759b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -2638,7 +2638,7 @@ class UpdateEntryPointsClassVisitor : public ClassVisitor {
explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
: instrumentation_(instrumentation) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& m : klass->GetMethods(pointer_size)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index ed0472f414..e1e0e23dac 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -50,7 +50,7 @@ namespace art {
class RuntimeCallbacksTest : public CommonRuntimeTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
Thread* self = Thread::Current();
@@ -60,7 +60,7 @@ class RuntimeCallbacksTest : public CommonRuntimeTest {
AddListener();
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
{
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -101,10 +101,10 @@ class ThreadLifecycleCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest
}
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddThreadLifecycleCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveThreadLifecycleCallback(&cb_);
}
@@ -117,7 +117,7 @@ class ThreadLifecycleCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest
};
struct Callback : public ThreadLifecycleCallback {
- void ThreadStart(Thread* self) OVERRIDE {
+ void ThreadStart(Thread* self) override {
if (state == CallbackState::kBase) {
state = CallbackState::kStarted;
stored_self = self;
@@ -126,7 +126,7 @@ class ThreadLifecycleCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest
}
}
- void ThreadDeath(Thread* self) OVERRIDE {
+ void ThreadDeath(Thread* self) override {
if (state == CallbackState::kStarted && self == stored_self) {
state = CallbackState::kDied;
} else {
@@ -219,10 +219,10 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttac
class ClassLoadCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddClassLoadCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveClassLoadCallback(&cb_);
}
@@ -259,7 +259,7 @@ class ClassLoadCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
/*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
/*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
const std::string& location = initial_dex_file.GetLocation();
std::string event =
std::string("PreDefine:") + descriptor + " <" +
@@ -267,14 +267,14 @@ class ClassLoadCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
data.push_back(event);
}
- void ClassLoad(Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void ClassLoad(Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string tmp;
std::string event = std::string("Load:") + klass->GetDescriptor(&tmp);
data.push_back(event);
}
void ClassPrepare(Handle<mirror::Class> temp_klass,
- Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string tmp, tmp2;
std::string event = std::string("Prepare:") + klass->GetDescriptor(&tmp)
+ "[" + temp_klass->GetDescriptor(&tmp2) + "]";
@@ -319,15 +319,15 @@ TEST_F(ClassLoadCallbackRuntimeCallbacksTest, ClassLoadCallback) {
class RuntimeSigQuitCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddRuntimeSigQuitCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimeSigQuitCallback(&cb_);
}
struct Callback : public RuntimeSigQuitCallback {
- void SigQuit() OVERRIDE {
+ void SigQuit() override {
++sigquit_count;
}
@@ -362,20 +362,20 @@ TEST_F(RuntimeSigQuitCallbackRuntimeCallbacksTest, SigQuit) {
class RuntimePhaseCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&cb_);
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
// Bypass RuntimeCallbacksTest::TearDown, as the runtime is already gone.
CommonRuntimeTest::TearDown();
}
struct Callback : public RuntimePhaseCallback {
- void NextRuntimePhase(RuntimePhaseCallback::RuntimePhase p) OVERRIDE {
+ void NextRuntimePhase(RuntimePhaseCallback::RuntimePhase p) override {
if (p == RuntimePhaseCallback::RuntimePhase::kInitialAgents) {
if (start_seen > 0 || init_seen > 0 || death_seen > 0) {
LOG(FATAL) << "Unexpected order";
@@ -434,10 +434,10 @@ TEST_F(RuntimePhaseCallbackRuntimeCallbacksTest, Phases) {
class MonitorWaitCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddMonitorCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveMonitorCallback(&cb_);
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index ce99fb9591..eb9c661d18 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -461,7 +461,7 @@ size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
: StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}
- bool VisitFrame() OVERRIDE {
+ bool VisitFrame() override {
frames++;
return true;
}
@@ -487,7 +487,7 @@ bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next
next_dex_pc_(0) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (found_frame_) {
ArtMethod* method = GetMethod();
if (method != nullptr && !method->IsRuntimeMethod()) {
@@ -520,7 +520,7 @@ void StackVisitor::DescribeStack(Thread* thread) {
explicit DescribeStackVisitor(Thread* thread_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
return true;
}
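Several hunks above follow the same shape: a short-lived subclass of StackVisitor overrides VisitFrame(), which the walker invokes once per frame, returning true to keep walking or false to stop. A self-contained sketch of that protocol follows; WalkStack, GetMethod, and the frame list here are illustrative stand-ins, not ART's actual API.

#include <cstdio>
#include <vector>

// Illustrative stand-in for ART's stack walker: the runtime drives the
// walk and calls VisitFrame() once per frame until it returns false.
class StackVisitor {
 public:
  explicit StackVisitor(const std::vector<const char*>& frames)
      : frames_(frames) {}
  virtual ~StackVisitor() {}
  virtual bool VisitFrame() = 0;  // Return false to stop the walk.
  void WalkStack() {
    for (current_ = 0; current_ < frames_.size(); ++current_) {
      if (!VisitFrame()) break;
    }
  }

 protected:
  const char* GetMethod() const { return frames_[current_]; }

 private:
  const std::vector<const char*>& frames_;
  size_t current_ = 0;
};

// Mirrors NumFramesVisitor above: counts frames, never stops early.
class CountingVisitor final : public StackVisitor {
 public:
  using StackVisitor::StackVisitor;
  bool VisitFrame() override { ++frames; return true; }
  size_t frames = 0;
};

int main() {
  std::vector<const char*> stack = {"main", "run", "callee"};
  CountingVisitor v(stack);
  v.WalkStack();
  std::printf("%zu frames\n", v.frames);
}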
diff --git a/runtime/thread.cc b/runtime/thread.cc
index df7f19d118..8a637a250d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1486,7 +1486,7 @@ class BarrierClosure : public Closure {
public:
explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
wrapped_->Run(self);
barrier_.Pass(self);
}
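BarrierClosure above is the usual decorator for cross-thread requests: run the wrapped work on the target thread, then signal a barrier so the requesting thread can block until it completes. A hedged sketch of the same idea, using a mutex/condition-variable latch in place of ART's Barrier (Closure mirrors the interface above minus the Thread* argument; the rest is illustrative):

#include <condition_variable>
#include <mutex>

class Closure {
 public:
  virtual ~Closure() {}
  virtual void Run() = 0;  // ART's version takes the running Thread*.
};

// Decorator: run the wrapped closure, then release whoever is waiting.
class BarrierClosure final : public Closure {
 public:
  explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped) {}
  void Run() override {
    wrapped_->Run();
    std::lock_guard<std::mutex> lock(mu_);
    done_ = true;
    cv_.notify_all();
  }
  // Called by the requesting thread to block until Run() has finished.
  void Wait() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return done_; });
  }

 private:
  Closure* const wrapped_;
  std::mutex mu_;
  std::condition_variable cv_;
  bool done_ = false;
};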
@@ -1844,7 +1844,7 @@ struct StackDumpVisitor : public MonitorObjectsStackVisitor {
static constexpr size_t kMaxRepetition = 3u;
VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
ObjPtr<mirror::Class> c = m->GetDeclaringClass();
@@ -1883,24 +1883,24 @@ struct StackDumpVisitor : public MonitorObjectsStackVisitor {
return VisitMethodResult::kContinueMethod;
}
- VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+ VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
return VisitMethodResult::kContinueMethod;
}
void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
PrintObject(obj, " - waiting on ", ThreadList::kInvalidThreadId);
}
void VisitSleepingObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
PrintObject(obj, " - sleeping on ", ThreadList::kInvalidThreadId);
}
void VisitBlockedOnObject(mirror::Object* obj,
ThreadState state,
uint32_t owner_tid)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
const char* msg;
switch (state) {
@@ -1919,7 +1919,7 @@ struct StackDumpVisitor : public MonitorObjectsStackVisitor {
PrintObject(obj, msg, owner_tid);
}
void VisitLockedObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
PrintObject(obj, " - locked ", ThreadList::kInvalidThreadId);
}
@@ -2216,7 +2216,7 @@ class MonitorExitVisitor : public SingleRootVisitor {
// NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ override NO_THREAD_SAFETY_ANALYSIS {
if (self_->HoldsLock(entered_monitor)) {
LOG(WARNING) << "Calling MonitorExit on object "
<< entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")"
@@ -2845,7 +2845,7 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
protected:
VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(
soaa_, m, GetDexPc(/* abort on error */ false));
@@ -2856,7 +2856,7 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
return VisitMethodResult::kContinueMethod;
}
- VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+ VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
lock_objects_.push_back({});
lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_);
@@ -2866,24 +2866,24 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
}
void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
}
void VisitSleepingObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
}
void VisitBlockedOnObject(mirror::Object* obj,
ThreadState state ATTRIBUTE_UNUSED,
uint32_t owner_tid ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
}
void VisitLockedObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj));
}
@@ -3450,7 +3450,7 @@ Context* Thread::GetLongJumpContext() {
// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
-struct CurrentMethodVisitor FINAL : public StackVisitor {
+struct CurrentMethodVisitor final : public StackVisitor {
CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread,
@@ -3461,7 +3461,7 @@ struct CurrentMethodVisitor FINAL : public StackVisitor {
method_(nullptr),
dex_pc_(0),
abort_on_error_(abort_on_error) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
// Continue if this is a runtime method.
@@ -3857,7 +3857,7 @@ void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
class VerifyRootVisitor : public SingleRootVisitor {
public:
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
VerifyObject(root);
}
};
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 922202418e..cddc275839 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -199,7 +199,7 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack)
static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;
// A closure used by Thread::Dump.
-class DumpCheckpoint FINAL : public Closure {
+class DumpCheckpoint final : public Closure {
public:
DumpCheckpoint(std::ostream* os, bool dump_native_stack)
: os_(os),
@@ -211,7 +211,7 @@ class DumpCheckpoint FINAL : public Closure {
}
}
- void Run(Thread* thread) OVERRIDE {
+ void Run(Thread* thread) override {
// Note thread and self may not be equal if thread was already suspended at the point of the
// request.
Thread* self = Thread::Current();
diff --git a/runtime/trace.h b/runtime/trace.h
index 1fae250d77..5d9649320a 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -102,7 +102,7 @@ enum TraceAction {
// Class for recording event traces. Trace data is either collected
// synchronously during execution (TracingMode::kMethodTracingActive),
// or by a separate sampling thread (TracingMode::kSampleProfilingActive).
-class Trace FINAL : public instrumentation::InstrumentationListener {
+class Trace final : public instrumentation::InstrumentationListener {
public:
enum TraceFlag {
kTraceCountAllocs = 1,
@@ -181,57 +181,57 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
ArtMethod* method,
uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void MethodExited(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc,
const JValue& return_value)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void MethodUnwind(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void DexPcMoved(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t new_dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void FieldRead(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc,
ArtField* field)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void FieldWritten(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc,
ArtField* field,
const JValue& field_value)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void ExceptionThrown(Thread* thread,
Handle<mirror::Throwable> exception_object)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void Branch(Thread* thread,
ArtMethod* method,
uint32_t dex_pc,
int32_t dex_pc_offset)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void InvokeVirtualOrInterface(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
- REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) override;
// Reuse an old stack trace if it exists, otherwise allocate a new one.
static std::vector<ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 7adf140218..de6edd2ff3 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -39,7 +39,7 @@ class String;
} // namespace mirror
class InternTable;
-class Transaction FINAL {
+class Transaction final {
public:
static constexpr const char* kAbortExceptionDescriptor = "dalvik.system.TransactionAbortError";
static constexpr const char* kAbortExceptionSignature = "Ldalvik/system/TransactionAbortError;";
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
index e67067cdde..e5e71a4d07 100644
--- a/runtime/verifier/instruction_flags.h
+++ b/runtime/verifier/instruction_flags.h
@@ -25,7 +25,7 @@
namespace art {
namespace verifier {
-class InstructionFlags FINAL {
+class InstructionFlags final {
public:
InstructionFlags() : flags_(0) {}
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 29da376091..04a7dfba66 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -378,11 +378,11 @@ class RegType {
};
// Bottom type.
-class ConflictType FINAL : public RegType {
+class ConflictType final : public RegType {
public:
- bool IsConflict() const OVERRIDE { return true; }
+ bool IsConflict() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
// Get the singleton Conflict instance.
static const ConflictType* GetInstance() PURE;
@@ -396,7 +396,7 @@ class ConflictType FINAL : public RegType {
// Destroy the singleton instance.
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kConflict;
}
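The verifier's leaf types in this header share one lifecycle: CreateInstance allocates the singleton once, GetInstance hands out the shared instance, and Destroy tears it down. A reduced sketch of that pattern; the names mirror ConflictType above, while the implementation details (storage, assertions) are assumptions for illustration.

#include <cassert>
#include <cstdint>

class RegType {
 public:
  virtual ~RegType() {}
  virtual bool IsConflict() const { return false; }
};

// Singleton leaf type, shaped like ConflictType above.
class ConflictType final : public RegType {
 public:
  bool IsConflict() const override { return true; }

  static const ConflictType* CreateInstance(uint16_t cache_id) {
    assert(instance_ == nullptr);  // Created exactly once.
    instance_ = new ConflictType(cache_id);
    return instance_;
  }
  static const ConflictType* GetInstance() { return instance_; }
  static void Destroy() {
    delete instance_;
    instance_ = nullptr;
  }

 private:
  explicit ConflictType(uint16_t cache_id) : cache_id_(cache_id) {}
  const uint16_t cache_id_;
  static const ConflictType* instance_;
};

const ConflictType* ConflictType::instance_ = nullptr;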
@@ -414,11 +414,11 @@ class ConflictType FINAL : public RegType {
// A variant of the bottom type used to specify an undefined value in the
// incoming registers.
// Merging with UndefinedType yields ConflictType which is the true bottom.
-class UndefinedType FINAL : public RegType {
+class UndefinedType final : public RegType {
public:
- bool IsUndefined() const OVERRIDE { return true; }
+ bool IsUndefined() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
// Get the singleton Undefined instance.
static const UndefinedType* GetInstance() PURE;
@@ -432,7 +432,7 @@ class UndefinedType FINAL : public RegType {
// Destroy the singleton instance.
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -453,7 +453,7 @@ class PrimitiveType : public RegType {
const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
};
class Cat1Type : public PrimitiveType {
@@ -462,10 +462,10 @@ class Cat1Type : public PrimitiveType {
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
};
-class IntegerType FINAL : public Cat1Type {
+class IntegerType final : public Cat1Type {
public:
- bool IsInteger() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsInteger() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const IntegerType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -473,7 +473,7 @@ class IntegerType FINAL : public Cat1Type {
static const IntegerType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kInteger;
}
@@ -487,10 +487,10 @@ class IntegerType FINAL : public Cat1Type {
static const IntegerType* instance_;
};
-class BooleanType FINAL : public Cat1Type {
+class BooleanType final : public Cat1Type {
public:
- bool IsBoolean() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsBoolean() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const BooleanType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -498,7 +498,7 @@ class BooleanType FINAL : public Cat1Type {
static const BooleanType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kBoolean;
}
@@ -513,10 +513,10 @@ class BooleanType FINAL : public Cat1Type {
static const BooleanType* instance_;
};
-class ByteType FINAL : public Cat1Type {
+class ByteType final : public Cat1Type {
public:
- bool IsByte() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsByte() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const ByteType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -524,7 +524,7 @@ class ByteType FINAL : public Cat1Type {
static const ByteType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kByte;
}
@@ -538,10 +538,10 @@ class ByteType FINAL : public Cat1Type {
static const ByteType* instance_;
};
-class ShortType FINAL : public Cat1Type {
+class ShortType final : public Cat1Type {
public:
- bool IsShort() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsShort() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const ShortType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -549,7 +549,7 @@ class ShortType FINAL : public Cat1Type {
static const ShortType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kShort;
}
@@ -562,10 +562,10 @@ class ShortType FINAL : public Cat1Type {
static const ShortType* instance_;
};
-class CharType FINAL : public Cat1Type {
+class CharType final : public Cat1Type {
public:
- bool IsChar() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsChar() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const CharType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -573,7 +573,7 @@ class CharType FINAL : public Cat1Type {
static const CharType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kChar;
}
@@ -587,10 +587,10 @@ class CharType FINAL : public Cat1Type {
static const CharType* instance_;
};
-class FloatType FINAL : public Cat1Type {
+class FloatType final : public Cat1Type {
public:
- bool IsFloat() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsFloat() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const FloatType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -598,7 +598,7 @@ class FloatType FINAL : public Cat1Type {
static const FloatType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kFloat;
}
@@ -619,11 +619,11 @@ class Cat2Type : public PrimitiveType {
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
};
-class LongLoType FINAL : public Cat2Type {
+class LongLoType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsLongLo() const OVERRIDE { return true; }
- bool IsLong() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsLongLo() const override { return true; }
+ bool IsLong() const override { return true; }
static const LongLoType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -631,7 +631,7 @@ class LongLoType FINAL : public Cat2Type {
static const LongLoType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kLongLo;
}
@@ -645,10 +645,10 @@ class LongLoType FINAL : public Cat2Type {
static const LongLoType* instance_;
};
-class LongHiType FINAL : public Cat2Type {
+class LongHiType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsLongHi() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsLongHi() const override { return true; }
static const LongHiType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -656,7 +656,7 @@ class LongHiType FINAL : public Cat2Type {
static const LongHiType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -670,11 +670,11 @@ class LongHiType FINAL : public Cat2Type {
static const LongHiType* instance_;
};
-class DoubleLoType FINAL : public Cat2Type {
+class DoubleLoType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsDoubleLo() const OVERRIDE { return true; }
- bool IsDouble() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsDoubleLo() const override { return true; }
+ bool IsDouble() const override { return true; }
static const DoubleLoType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -682,7 +682,7 @@ class DoubleLoType FINAL : public Cat2Type {
static const DoubleLoType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kDoubleLo;
}
@@ -696,10 +696,10 @@ class DoubleLoType FINAL : public Cat2Type {
static const DoubleLoType* instance_;
};
-class DoubleHiType FINAL : public Cat2Type {
+class DoubleHiType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- virtual bool IsDoubleHi() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsDoubleHi() const override { return true; }
static const DoubleHiType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -707,7 +707,7 @@ class DoubleHiType FINAL : public Cat2Type {
static const DoubleHiType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -751,30 +751,30 @@ class ConstantType : public RegType {
}
}
- bool IsZero() const OVERRIDE {
+ bool IsZero() const override {
return IsPreciseConstant() && ConstantValue() == 0;
}
- bool IsOne() const OVERRIDE {
+ bool IsOne() const override {
return IsPreciseConstant() && ConstantValue() == 1;
}
- bool IsConstantChar() const OVERRIDE {
+ bool IsConstantChar() const override {
return IsConstant() && ConstantValue() >= 0 &&
ConstantValue() <= std::numeric_limits<uint16_t>::max();
}
- bool IsConstantByte() const OVERRIDE {
+ bool IsConstantByte() const override {
return IsConstant() &&
ConstantValue() >= std::numeric_limits<int8_t>::min() &&
ConstantValue() <= std::numeric_limits<int8_t>::max();
}
- bool IsConstantShort() const OVERRIDE {
+ bool IsConstantShort() const override {
return IsConstant() &&
ConstantValue() >= std::numeric_limits<int16_t>::min() &&
ConstantValue() <= std::numeric_limits<int16_t>::max();
}
- virtual bool IsConstantTypes() const OVERRIDE { return true; }
+ bool IsConstantTypes() const override { return true; }
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -782,7 +782,7 @@ class ConstantType : public RegType {
const uint32_t constant_;
};
-class PreciseConstType FINAL : public ConstantType {
+class PreciseConstType final : public ConstantType {
public:
PreciseConstType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -790,94 +790,94 @@ class PreciseConstType FINAL : public ConstantType {
CheckConstructorInvariants(this);
}
- bool IsPreciseConstant() const OVERRIDE { return true; }
+ bool IsPreciseConstant() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class PreciseConstLoType FINAL : public ConstantType {
+class PreciseConstLoType final : public ConstantType {
public:
PreciseConstLoType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsPreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsPreciseConstantLo() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class PreciseConstHiType FINAL : public ConstantType {
+class PreciseConstHiType final : public ConstantType {
public:
PreciseConstHiType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsPreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsPreciseConstantHi() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class ImpreciseConstType FINAL : public ConstantType {
+class ImpreciseConstType final : public ConstantType {
public:
ImpreciseConstType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsImpreciseConstant() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsImpreciseConstant() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class ImpreciseConstLoType FINAL : public ConstantType {
+class ImpreciseConstLoType final : public ConstantType {
public:
ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsImpreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsImpreciseConstantLo() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class ImpreciseConstHiType FINAL : public ConstantType {
+class ImpreciseConstHiType final : public ConstantType {
public:
ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsImpreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsImpreciseConstantHi() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
// Special "null" type that captures the semantics of null / bottom.
-class NullType FINAL : public RegType {
+class NullType final : public RegType {
public:
- bool IsNull() const OVERRIDE {
+ bool IsNull() const override {
return true;
}
@@ -892,15 +892,15 @@ class NullType FINAL : public RegType {
static void Destroy();
- std::string Dump() const OVERRIDE {
+ std::string Dump() const override {
return "null";
}
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
- bool IsConstantTypes() const OVERRIDE {
+ bool IsConstantTypes() const override {
return true;
}
@@ -925,15 +925,15 @@ class UninitializedType : public RegType {
uint16_t cache_id)
: RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
- bool IsUninitializedTypes() const OVERRIDE;
- bool IsNonZeroReferenceTypes() const OVERRIDE;
+ bool IsUninitializedTypes() const override;
+ bool IsNonZeroReferenceTypes() const override;
uint32_t GetAllocationPc() const {
DCHECK(IsUninitializedTypes());
return allocation_pc_;
}
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
@@ -942,7 +942,7 @@ class UninitializedType : public RegType {
};
// Similar to ReferenceType but not yet having been passed to a constructor.
-class UninitializedReferenceType FINAL : public UninitializedType {
+class UninitializedReferenceType final : public UninitializedType {
public:
UninitializedReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
@@ -953,16 +953,16 @@ class UninitializedReferenceType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- bool IsUninitializedReference() const OVERRIDE { return true; }
+ bool IsUninitializedReference() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
};
// Similar to UnresolvedReferenceType but not yet having been passed to a
// constructor.
-class UnresolvedUninitializedRefType FINAL : public UninitializedType {
+class UnresolvedUninitializedRefType final : public UninitializedType {
public:
UnresolvedUninitializedRefType(const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
@@ -971,19 +971,19 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedAndUninitializedReference() const OVERRIDE { return true; }
+ bool IsUnresolvedAndUninitializedReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
// Similar to UninitializedReferenceType but special case for the this argument
// of a constructor.
-class UninitializedThisReferenceType FINAL : public UninitializedType {
+class UninitializedThisReferenceType final : public UninitializedType {
public:
UninitializedThisReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
@@ -993,17 +993,17 @@ class UninitializedThisReferenceType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- virtual bool IsUninitializedThisReference() const OVERRIDE { return true; }
+ bool IsUninitializedThisReference() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
-class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
+class UnresolvedUninitializedThisRefType final : public UninitializedType {
public:
UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
uint16_t cache_id)
@@ -1012,19 +1012,19 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedAndUninitializedThisReference() const OVERRIDE { return true; }
+ bool IsUnresolvedAndUninitializedThisReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
// A type of register holding a reference to an Object of type GetClass or a
// sub-class.
-class ReferenceType FINAL : public RegType {
+class ReferenceType final : public RegType {
public:
ReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
@@ -1033,15 +1033,15 @@ class ReferenceType FINAL : public RegType {
CheckConstructorInvariants(this);
}
- bool IsReference() const OVERRIDE { return true; }
+ bool IsReference() const override { return true; }
- bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
+ bool IsNonZeroReferenceTypes() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
};
@@ -1049,22 +1049,22 @@ class ReferenceType FINAL : public RegType {
// A type of register holding a reference to an Object of type GetClass and
// only an object of that type.
-class PreciseReferenceType FINAL : public RegType {
+class PreciseReferenceType final : public RegType {
public:
PreciseReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsPreciseReference() const OVERRIDE { return true; }
+ bool IsPreciseReference() const override { return true; }
- bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
+ bool IsNonZeroReferenceTypes() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
};
@@ -1076,9 +1076,9 @@ class UnresolvedType : public RegType {
REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(nullptr, descriptor, cache_id) {}
- bool IsNonZeroReferenceTypes() const OVERRIDE;
+ bool IsNonZeroReferenceTypes() const override;
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
};
@@ -1086,7 +1086,7 @@ class UnresolvedType : public RegType {
// Similar to ReferenceType except the Class couldn't be loaded. Assignability
// and other tests made of this type must be conservative.
-class UnresolvedReferenceType FINAL : public UnresolvedType {
+class UnresolvedReferenceType final : public UnresolvedType {
public:
UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -1094,18 +1094,18 @@ class UnresolvedReferenceType FINAL : public UnresolvedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedReference() const OVERRIDE { return true; }
+ bool IsUnresolvedReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
// Type representing the super-class of an unresolved type.
-class UnresolvedSuperClass FINAL : public UnresolvedType {
+class UnresolvedSuperClass final : public UnresolvedType {
public:
UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache,
uint16_t cache_id)
@@ -1116,19 +1116,19 @@ class UnresolvedSuperClass FINAL : public UnresolvedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedSuperClass() const OVERRIDE { return true; }
+ bool IsUnresolvedSuperClass() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
uint16_t GetUnresolvedSuperClassChildId() const {
DCHECK(IsUnresolvedSuperClass());
return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
}
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
const uint16_t unresolved_child_id_;
const RegTypeCache* const reg_type_cache_;
@@ -1136,7 +1136,7 @@ class UnresolvedSuperClass FINAL : public UnresolvedType {
// A merge of unresolved (and resolved) types. If the types were resolved,
// this may be Conflict or another known ReferenceType.
-class UnresolvedMergedType FINAL : public UnresolvedType {
+class UnresolvedMergedType final : public UnresolvedType {
public:
// Note: the constructor will copy the unresolved BitVector, not use it directly.
UnresolvedMergedType(const RegType& resolved,
@@ -1154,17 +1154,17 @@ class UnresolvedMergedType FINAL : public UnresolvedType {
return unresolved_types_;
}
- bool IsUnresolvedMergedReference() const OVERRIDE { return true; }
+ bool IsUnresolvedMergedReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- bool IsArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsObjectArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsArrayTypes() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsObjectArrayTypes() const override REQUIRES_SHARED(Locks::mutator_lock_);
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
const RegTypeCache* const reg_type_cache_;
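Taken together, the reg_type.h hunks are purely mechanical: OVERRIDE and FINAL were project-local macros that expanded to the C++11 keywords, so the compiler sees the same token stream before and after. What the keyword buys is shown in this minimal sketch (hypothetical RegTypeBase/RefType names, not ART's):

#include <string>

class RegTypeBase {
 public:
  virtual ~RegTypeBase() = default;
  virtual std::string Dump() const { return "base"; }
};

class RefType final : public RegTypeBase {
 public:
  // OK: signature matches the base-class virtual exactly.
  std::string Dump() const override { return "ref"; }
  // std::string Dump() override;  // Would not compile: missing `const`,
  //                               // so nothing is actually overridden.
};

Without `override`, the commented-out variant would silently declare a new, unrelated member function instead of failing the build.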
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 15a38f3fd7..0430d205af 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -1042,7 +1042,7 @@ TEST_F(RegTypeTest, ConstPrecision) {
class RegTypeOOMTest : public RegTypeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
SetUpRuntimeOptionsForFillHeap(options);
// We must not appear to be a compiler, or we'll abort on the host.
diff --git a/simulator/code_simulator_arm64.h b/simulator/code_simulator_arm64.h
index 8b665292af..e726500452 100644
--- a/simulator/code_simulator_arm64.h
+++ b/simulator/code_simulator_arm64.h
@@ -36,11 +36,11 @@ class CodeSimulatorArm64 : public CodeSimulator {
static CodeSimulatorArm64* CreateCodeSimulatorArm64();
virtual ~CodeSimulatorArm64();
- void RunFrom(intptr_t code_buffer) OVERRIDE;
+ void RunFrom(intptr_t code_buffer) override;
- bool GetCReturnBool() const OVERRIDE;
- int32_t GetCReturnInt32() const OVERRIDE;
- int64_t GetCReturnInt64() const OVERRIDE;
+ bool GetCReturnBool() const override;
+ int32_t GetCReturnInt32() const override;
+ int64_t GetCReturnInt64() const override;
private:
CodeSimulatorArm64();
diff --git a/test/167-visit-locks/visit_locks.cc b/test/167-visit-locks/visit_locks.cc
index e79c880639..8955f5a08e 100644
--- a/test/167-visit-locks/visit_locks.cc
+++ b/test/167-visit-locks/visit_locks.cc
@@ -42,7 +42,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_testVisitLocks(JNIEnv*, jclass) {
: StackVisitor(thread, context, StackWalkKind::kIncludeInlinedFrames) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
// Ignore runtime methods.
diff --git a/test/1945-proxy-method-arguments/get_args.cc b/test/1945-proxy-method-arguments/get_args.cc
index 211ae10ab0..859e229d9e 100644
--- a/test/1945-proxy-method-arguments/get_args.cc
+++ b/test/1945-proxy-method-arguments/get_args.cc
@@ -27,7 +27,7 @@ namespace art {
namespace {
// Visit a proxy method Quick frame at a given depth.
-class GetProxyQuickFrameVisitor FINAL : public StackVisitor {
+class GetProxyQuickFrameVisitor final : public StackVisitor {
public:
GetProxyQuickFrameVisitor(art::Thread* target, art::Context* ctx, size_t frame_depth)
REQUIRES_SHARED(art::Locks::mutator_lock_)
@@ -36,7 +36,7 @@ class GetProxyQuickFrameVisitor FINAL : public StackVisitor {
frame_depth_(frame_depth),
quick_frame_(nullptr) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true;
}
diff --git a/test/203-multi-checkpoint/multi_checkpoint.cc b/test/203-multi-checkpoint/multi_checkpoint.cc
index 0799b6ed2d..424e9f1a96 100644
--- a/test/203-multi-checkpoint/multi_checkpoint.cc
+++ b/test/203-multi-checkpoint/multi_checkpoint.cc
@@ -28,7 +28,7 @@ struct TestClosure : public Closure {
bool second_run;
bool second_run_interleaved;
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
if (!first_run_start) {
CHECK(!second_run);
@@ -62,7 +62,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_checkCheckpointsRun(JNIEnv*, jclass)
}
struct SetupClosure : public Closure {
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
ScopedObjectAccess soa(self);
MutexLock tscl_mu(self, *Locks::thread_suspend_count_lock_);
diff --git a/test/305-other-fault-handler/fault_handler.cc b/test/305-other-fault-handler/fault_handler.cc
index 093a93f349..93bb148745 100644
--- a/test/305-other-fault-handler/fault_handler.cc
+++ b/test/305-other-fault-handler/fault_handler.cc
@@ -29,7 +29,7 @@
namespace art {
-class TestFaultHandler FINAL : public FaultHandler {
+class TestFaultHandler final : public FaultHandler {
public:
explicit TestFaultHandler(FaultManager* manager)
: FaultHandler(manager),
@@ -51,7 +51,7 @@ class TestFaultHandler FINAL : public FaultHandler {
manager_->RemoveHandler(this);
}
- bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+ bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) override {
CHECK_EQ(sig, SIGSEGV);
CHECK_EQ(reinterpret_cast<uint32_t*>(siginfo->si_addr),
GetTargetPointer()) << "Segfault on unexpected address!";
diff --git a/test/616-cha-unloading/cha_unload.cc b/test/616-cha-unloading/cha_unload.cc
index b17be6bd07..b5166ce1a7 100644
--- a/test/616-cha-unloading/cha_unload.cc
+++ b/test/616-cha-unloading/cha_unload.cc
@@ -35,7 +35,7 @@ class FindPointerAllocatorVisitor : public AllocatorVisitor {
explicit FindPointerAllocatorVisitor(void* ptr) : is_found(false), ptr_(ptr) {}
bool Visit(LinearAlloc* alloc)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
is_found = alloc->Contains(ptr_);
return !is_found;
}
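The cha_unload.cc hunk keeps ART's existing ordering, with the virt-specifier written after the thread-safety annotation (`REQUIRES_SHARED(...) override`). A self-contained sketch of that shape, with the annotation stubbed out as an empty macro (in ART it expands to a Clang capability attribute used by -Wthread-safety):

#define REQUIRES_SHARED(...)  // no-op stand-in for this sketch only

class AllocVisitorBase {
 public:
  virtual ~AllocVisitorBase() = default;
  virtual bool Visit(void* alloc) REQUIRES_SHARED(lock_) = 0;
};

class FindPointerVisitor : public AllocVisitorBase {
 public:
  // The annotation precedes `override`, mirroring the diff above.
  bool Visit(void* alloc) REQUIRES_SHARED(lock_) override {
    return alloc != nullptr;  // placeholder body
  }
};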
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index 57c0274557..2a06a7b9d6 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -87,7 +87,7 @@ extern "C" JNIEXPORT jint JNICALL Java_art_Test906_iterateThroughHeapCount(
jint Handle(jlong class_tag ATTRIBUTE_UNUSED,
jlong size ATTRIBUTE_UNUSED,
jlong* tag_ptr ATTRIBUTE_UNUSED,
- jint length ATTRIBUTE_UNUSED) OVERRIDE {
+ jint length ATTRIBUTE_UNUSED) override {
counter++;
if (counter == stop_after) {
return JVMTI_VISIT_ABORT;
@@ -120,7 +120,7 @@ extern "C" JNIEXPORT jint JNICALL Java_art_Test906_iterateThroughHeapData(
jintArray lengths) {
class DataIterationConfig : public IterationConfig {
public:
- jint Handle(jlong class_tag, jlong size, jlong* tag_ptr, jint length) OVERRIDE {
+ jint Handle(jlong class_tag, jlong size, jlong* tag_ptr, jint length) override {
class_tags_.push_back(class_tag);
sizes_.push_back(size);
tags_.push_back(*tag_ptr);
@@ -164,7 +164,7 @@ extern "C" JNIEXPORT void JNICALL Java_art_Test906_iterateThroughHeapAdd(
jint Handle(jlong class_tag ATTRIBUTE_UNUSED,
jlong size ATTRIBUTE_UNUSED,
jlong* tag_ptr,
- jint length ATTRIBUTE_UNUSED) OVERRIDE {
+ jint length ATTRIBUTE_UNUSED) override {
jlong current_tag = *tag_ptr;
if (current_tag != 0) {
*tag_ptr = current_tag + 10;
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index b07554ca46..b0e0f07db8 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -41,8 +41,6 @@ namespace Test913Heaps {
using android::base::StringPrintf;
-#define FINAL final
-#define OVERRIDE override
#define UNREACHABLE __builtin_unreachable
extern "C" JNIEXPORT void JNICALL Java_art_Test913_forceGarbageCollection(
@@ -144,7 +142,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
jint stop_after,
jint follow_set,
jobject jniRef) {
- class PrintIterationConfig FINAL : public IterationConfig {
+ class PrintIterationConfig final : public IterationConfig {
public:
PrintIterationConfig(jint _stop_after, jint _follow_set)
: counter_(0),
@@ -160,7 +158,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
jlong* tag_ptr,
jlong* referrer_tag_ptr,
jint length,
- void* user_data ATTRIBUTE_UNUSED) OVERRIDE {
+ void* user_data ATTRIBUTE_UNUSED) override {
jlong tag = *tag_ptr;
// Ignore any jni-global roots with untagged classes. These can be from the environment,
@@ -303,7 +301,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
}
protected:
- std::string PrintArrowType() const OVERRIDE {
+ std::string PrintArrowType() const override {
char* name = nullptr;
if (info_.jni_local.method != nullptr) {
jvmti_env->GetMethodName(info_.jni_local.method, &name, nullptr, nullptr);
@@ -349,7 +347,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
}
protected:
- std::string PrintArrowType() const OVERRIDE {
+ std::string PrintArrowType() const override {
char* name = nullptr;
if (info_.stack_local.method != nullptr) {
jvmti_env->GetMethodName(info_.stack_local.method, &name, nullptr, nullptr);
@@ -391,7 +389,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
: Elem(referrer, referree, size, length), string_(string) {}
protected:
- std::string PrintArrowType() const OVERRIDE {
+ std::string PrintArrowType() const override {
return string_;
}
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index 192274e5ae..97a589fa48 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -77,7 +77,7 @@ struct MethodIsInterpretedVisitor : public StackVisitor {
prev_was_runtime_(true),
require_deoptable_(require_deoptable) {}
- virtual bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ virtual bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (goal_ == GetMethod()) {
method_is_interpreted_ = (require_deoptable_ && prev_was_runtime_) || IsShadowFrame();
method_found_ = true;
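Note that the stack_inspect.cc hunk leaves `virtual bool VisitFrame() override` in place. Because the change is a macro-for-keyword substitution, pre-existing redundant `virtual` on overriders survives; `override` already requires a matching virtual in a base class, so the extra `virtual` is legal but carries no information. A tiny illustration with hypothetical names:

class StackVisitorBase {
 public:
  virtual ~StackVisitorBase() = default;
  virtual bool VisitFrame() = 0;
};

class MyVisitor : public StackVisitorBase {
 public:
  // `virtual` is redundant here -- `override` already implies it --
  // but combining the two is well-formed.
  virtual bool VisitFrame() override { return true; }
};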
diff --git a/test/ti-agent/ti_macros.h b/test/ti-agent/ti_macros.h
index d91338324f..a871270dcf 100644
--- a/test/ti-agent/ti_macros.h
+++ b/test/ti-agent/ti_macros.h
@@ -19,8 +19,6 @@
#include "android-base/macros.h"
-#define FINAL final
-#define OVERRIDE override
#define UNREACHABLE __builtin_unreachable
#endif // ART_TEST_TI_AGENT_TI_MACROS_H_
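This hunk deletes the shim itself. `override` and `final` are identifiers with special meaning (contextual keywords), not reserved words, which is why `#define OVERRIDE override` was a legal, purely textual alias in the first place. Once every use site spells the keywords directly, removing the defines cannot change behavior. A before/after sketch with a hypothetical Agent class:

// Before: the shim made the macro spellings expand to the keywords.
// #define FINAL final
// #define OVERRIDE override
// class Agent FINAL : public Base { void Run() OVERRIDE; };

// After: the identical token stream, written directly.
class Base {
 public:
  virtual ~Base() = default;
  virtual void Run() = 0;
};

class Agent final : public Base {
 public:
  void Run() override {}
};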
diff --git a/tools/art_verifier/art_verifier.cc b/tools/art_verifier/art_verifier.cc
index fc62410889..500c1c5133 100644
--- a/tools/art_verifier/art_verifier.cc
+++ b/tools/art_verifier/art_verifier.cc
@@ -93,7 +93,7 @@ struct MethodVerifierArgs : public CmdlineArgs {
using Base = CmdlineArgs;
virtual ParseStatus ParseCustom(const StringPiece& option,
- std::string* error_msg) OVERRIDE {
+ std::string* error_msg) override {
{
ParseStatus base_parse = Base::ParseCustom(option, error_msg);
if (base_parse != kParseUnknownArgument) {
@@ -119,7 +119,7 @@ struct MethodVerifierArgs : public CmdlineArgs {
return kParseOk;
}
- virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+ virtual ParseStatus ParseChecks(std::string* error_msg) override {
// Perform the parent checks.
ParseStatus parent_checks = Base::ParseChecks(error_msg);
if (parent_checks != kParseOk) {
@@ -166,16 +166,16 @@ struct MethodVerifierArgs : public CmdlineArgs {
};
struct MethodVerifierMain : public CmdlineMain<MethodVerifierArgs> {
- bool NeedsRuntime() OVERRIDE {
+ bool NeedsRuntime() override {
return true;
}
- bool ExecuteWithoutRuntime() OVERRIDE {
+ bool ExecuteWithoutRuntime() override {
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
- bool ExecuteWithRuntime(Runtime* runtime) OVERRIDE {
+ bool ExecuteWithRuntime(Runtime* runtime) override {
CHECK(args_ != nullptr);
const size_t dex_reps = args_->dex_file_verifier_
diff --git a/tools/dexanalyze/dexanalyze_experiments.h b/tools/dexanalyze/dexanalyze_experiments.h
index 3542d959ba..55d2f44e99 100644
--- a/tools/dexanalyze/dexanalyze_experiments.h
+++ b/tools/dexanalyze/dexanalyze_experiments.h
@@ -65,8 +65,8 @@ class Experiment {
// Analyze debug info sizes.
class AnalyzeDebugInfo : public Experiment {
public:
- void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
- void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
+ void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
+ void Dump(std::ostream& os, uint64_t total_size) const override;
private:
int64_t total_bytes_ = 0u;
@@ -91,8 +91,8 @@ class AnalyzeDebugInfo : public Experiment {
// Count numbers of dex indices.
class CountDexIndices : public Experiment {
public:
- void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
- void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
+ void ProcessDexFile(const DexFile& dex_file) override;
+ void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
void Dump(std::ostream& os, uint64_t total_size) const;
@@ -162,9 +162,9 @@ class CountDexIndices : public Experiment {
// Measure various code metrics including args per invoke-virtual, fill/spill move patterns.
class CodeMetrics : public Experiment {
public:
- void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
+ void ProcessDexFile(const DexFile& dex_file) override;
- void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
+ void Dump(std::ostream& os, uint64_t total_size) const override;
private:
static constexpr size_t kMaxArgCount = 6;
diff --git a/tools/dexanalyze/dexanalyze_strings.h b/tools/dexanalyze/dexanalyze_strings.h
index a5c202e31f..3559afaff7 100644
--- a/tools/dexanalyze/dexanalyze_strings.h
+++ b/tools/dexanalyze/dexanalyze_strings.h
@@ -32,8 +32,8 @@ namespace dexanalyze {
// Analyze string data and strings accessed from code.
class AnalyzeStrings : public Experiment {
public:
- void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
- void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
+ void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
+ void Dump(std::ostream& os, uint64_t total_size) const override;
private:
int64_t wide_string_bytes_ = 0u;
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index bf8a1b755e..6d9b6fbe40 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -235,7 +235,7 @@ class DexMember {
const bool is_method_;
};
-class ClassPath FINAL {
+class ClassPath final {
public:
ClassPath(const std::vector<std::string>& dex_paths, bool open_writable) {
OpenDexFiles(dex_paths, open_writable);
@@ -316,7 +316,7 @@ class ClassPath FINAL {
std::vector<std::unique_ptr<const DexFile>> dex_files_;
};
-class HierarchyClass FINAL {
+class HierarchyClass final {
public:
HierarchyClass() {}
@@ -455,7 +455,7 @@ class HierarchyClass FINAL {
std::vector<HierarchyClass*> extended_by_;
};
-class Hierarchy FINAL {
+class Hierarchy final {
public:
explicit Hierarchy(ClassPath& classpath) : classpath_(classpath) {
BuildClassHierarchy();
@@ -559,7 +559,7 @@ class Hierarchy FINAL {
std::map<std::string, HierarchyClass> classes_;
};
-class HiddenApi FINAL {
+class HiddenApi final {
public:
HiddenApi() {}
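The hiddenapi.cc classes marked `final` here have no virtual members at all; for such classes the keyword is documentation with teeth, stating that the type is not a base class and making the compiler reject any attempt to derive from it. A minimal sketch with a hypothetical stand-in:

class ClassPathLike final {  // not ART's ClassPath
 public:
  int Count() const { return 0; }
};

// class Derived : public ClassPathLike {};  // error: base 'ClassPathLike'
//                                           // is marked 'final'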
diff --git a/tools/tracefast-plugin/tracefast.cc b/tools/tracefast-plugin/tracefast.cc
index ed6ac3d199..4ea5b2ddd9 100644
--- a/tools/tracefast-plugin/tracefast.cc
+++ b/tools/tracefast-plugin/tracefast.cc
@@ -38,7 +38,7 @@ static constexpr const char* kTracerInstrumentationKey = "tracefast_TRAMPOLINE";
static constexpr bool kNeedsInterpreter = false;
#endif // TRACEFAST_INTERPRETER
-class Tracer FINAL : public art::instrumentation::InstrumentationListener {
+class Tracer final : public art::instrumentation::InstrumentationListener {
public:
Tracer() {}
@@ -46,40 +46,40 @@ class Tracer FINAL : public art::instrumentation::InstrumentationListener {
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> return_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const art::JValue& return_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void MethodUnwind(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void DexPcMoved(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t new_dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void FieldRead(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::ArtField* field ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
@@ -87,7 +87,7 @@ class Tracer FINAL : public art::instrumentation::InstrumentationListener {
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::ArtField* field ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> field_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
@@ -95,32 +95,32 @@ class Tracer FINAL : public art::instrumentation::InstrumentationListener {
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::ArtField* field ATTRIBUTE_UNUSED,
const art::JValue& field_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void ExceptionThrown(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void ExceptionHandled(art::Thread* self ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Throwable> throwable ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void Branch(art::Thread* thread ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void InvokeVirtualOrInterface(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* caller ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::ArtMethod* callee ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void WatchedFramePop(art::Thread* thread ATTRIBUTE_UNUSED,
const art::ShadowFrame& frame ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
private:
DISALLOW_COPY_AND_ASSIGN(Tracer);
@@ -149,7 +149,7 @@ class TraceFastPhaseCB : public art::RuntimePhaseCallback {
TraceFastPhaseCB() {}
void NextRuntimePhase(art::RuntimePhaseCallback::RuntimePhase phase)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (phase == art::RuntimePhaseCallback::RuntimePhase::kInit) {
art::ScopedThreadSuspension sts(art::Thread::Current(),
art::ThreadState::kWaitingForMethodTracingStart);
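Beyond catching mistakes, `final` on a polymorphic class such as Tracer also gives the optimizer license to devirtualize: once the static type of an object is known to be a final class, a virtual call through it can be resolved at compile time. A hedged sketch with hypothetical listener types:

struct Listener {
  virtual ~Listener() = default;
  virtual void MethodEntered() {}
};

struct TracerLike final : Listener {
  void MethodEntered() override { ++entries; }
  int entries = 0;
};

void Notify(TracerLike& t) {
  // TracerLike is final, so the compiler may call
  // TracerLike::MethodEntered directly instead of through the vtable.
  t.MethodEntered();
}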
diff --git a/tools/veridex/flow_analysis.h b/tools/veridex/flow_analysis.h
index 9c86024711..865b9df03d 100644
--- a/tools/veridex/flow_analysis.h
+++ b/tools/veridex/flow_analysis.h
@@ -192,8 +192,8 @@ class FlowAnalysisCollector : public VeriFlowAnalysis {
return uses_;
}
- RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) OVERRIDE;
- void AnalyzeFieldSet(const Instruction& instruction) OVERRIDE;
+ RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) override;
+ void AnalyzeFieldSet(const Instruction& instruction) override;
private:
// List of reflection uses found, concrete and abstract.
@@ -212,8 +212,8 @@ class FlowAnalysisSubstitutor : public VeriFlowAnalysis {
return uses_;
}
- RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) OVERRIDE;
- void AnalyzeFieldSet(const Instruction& instruction) OVERRIDE;
+ RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) override;
+ void AnalyzeFieldSet(const Instruction& instruction) override;
private:
// List of reflection uses found, concrete and abstract.
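As in the flow_analysis.h hunks above, where AnalyzeInvoke and AnalyzeFieldSet are declared in the header and defined elsewhere, the virt-specifier appears only on the in-class declaration; repeating `override` (or `final`) on an out-of-line definition is ill-formed. A sketch under that assumption, with hypothetical names:

struct VeriFlowBase {
  virtual ~VeriFlowBase() = default;
  virtual int AnalyzeInvoke(int insn, bool is_range) = 0;
};

struct CollectorLike : VeriFlowBase {
  int AnalyzeInvoke(int insn, bool is_range) override;  // declaration only
};

// Out-of-line definition: no `override` here.
int CollectorLike::AnalyzeInvoke(int insn, bool is_range) {
  return is_range ? insn + 1 : insn;
}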