-rw-r--r--  Android.mk | 172
-rw-r--r--  adbconnection/adbconnection.h | 6
-rw-r--r--  build/Android.common.mk | 3
-rw-r--r--  build/Android.gtest.mk | 13
-rw-r--r--  build/art.go | 24
-rw-r--r--  cmdline/cmdline_parser_test.cc | 5
-rw-r--r--  compiler/common_compiler_test.cc | 19
-rw-r--r--  compiler/common_compiler_test.h | 8
-rw-r--r--  compiler/compiled_method.h | 2
-rw-r--r--  compiler/debug/dwarf/debug_abbrev_writer.h | 2
-rw-r--r--  compiler/debug/dwarf/debug_info_entry_writer.h | 2
-rw-r--r--  compiler/debug/dwarf/debug_line_opcode_writer.h | 2
-rw-r--r--  compiler/dex/quick_compiler_callbacks.h | 20
-rw-r--r--  compiler/driver/compiler_driver.cc | 22
-rw-r--r--  compiler/driver/compiler_driver_test.cc | 6
-rw-r--r--  compiler/driver/compiler_options.h | 2
-rw-r--r--  compiler/jni/jni_compiler_test.cc | 4
-rw-r--r--  compiler/jni/quick/arm/calling_convention_arm.h | 56
-rw-r--r--  compiler/jni/quick/arm64/calling_convention_arm64.h | 54
-rw-r--r--  compiler/jni/quick/mips/calling_convention_mips.h | 56
-rw-r--r--  compiler/jni/quick/mips64/calling_convention_mips64.h | 54
-rw-r--r--  compiler/jni/quick/x86/calling_convention_x86.h | 54
-rw-r--r--  compiler/jni/quick/x86_64/calling_convention_x86_64.h | 54
-rw-r--r--  compiler/linker/buffered_output_stream.h | 10
-rw-r--r--  compiler/linker/elf_builder.h | 18
-rw-r--r--  compiler/linker/error_delaying_output_stream.h | 8
-rw-r--r--  compiler/linker/file_output_stream.h | 10
-rw-r--r--  compiler/linker/output_stream_test.cc | 8
-rw-r--r--  compiler/linker/vector_output_stream.h | 10
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc | 32
-rw-r--r--  compiler/optimizing/bounds_check_elimination.h | 2
-rw-r--r--  compiler/optimizing/cha_guard_optimization.cc | 4
-rw-r--r--  compiler/optimizing/cha_guard_optimization.h | 2
-rw-r--r--  compiler/optimizing/code_generator.cc | 31
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 54
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 118
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 52
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h | 116
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 60
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 110
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 60
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 108
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 66
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 108
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 66
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 106
-rw-r--r--  compiler/optimizing/code_sinking.h | 2
-rw-r--r--  compiler/optimizing/codegen_test.cc | 2
-rw-r--r--  compiler/optimizing/codegen_test_utils.h | 12
-rw-r--r--  compiler/optimizing/constant_folding.cc | 46
-rw-r--r--  compiler/optimizing/constant_folding.h | 2
-rw-r--r--  compiler/optimizing/constructor_fence_redundancy_elimination.cc | 36
-rw-r--r--  compiler/optimizing/constructor_fence_redundancy_elimination.h | 2
-rw-r--r--  compiler/optimizing/dead_code_elimination.h | 2
-rw-r--r--  compiler/optimizing/emit_swap_mips_test.cc | 4
-rw-r--r--  compiler/optimizing/graph_checker.h | 48
-rw-r--r--  compiler/optimizing/graph_visualizer.cc | 84
-rw-r--r--  compiler/optimizing/gvn.h | 2
-rw-r--r--  compiler/optimizing/induction_var_analysis.h | 2
-rw-r--r--  compiler/optimizing/inliner.h | 2
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 76
-rw-r--r--  compiler/optimizing/instruction_simplifier.h | 2
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm.cc | 20
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm.h | 2
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.cc | 26
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.h | 2
-rw-r--r--  compiler/optimizing/instruction_simplifier_mips.cc | 4
-rw-r--r--  compiler/optimizing/instruction_simplifier_mips.h | 2
-rw-r--r--  compiler/optimizing/intrinsics.h | 2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 12
-rw-r--r--  compiler/optimizing/intrinsics_arm64.h | 8
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc | 12
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.h | 8
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc | 4
-rw-r--r--  compiler/optimizing/intrinsics_mips.h | 8
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc | 4
-rw-r--r--  compiler/optimizing/intrinsics_mips64.h | 8
-rw-r--r--  compiler/optimizing/intrinsics_utils.h | 4
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc | 4
-rw-r--r--  compiler/optimizing/intrinsics_x86.h | 8
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 4
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.h | 8
-rw-r--r--  compiler/optimizing/licm.h | 2
-rw-r--r--  compiler/optimizing/live_ranges_test.cc | 2
-rw-r--r--  compiler/optimizing/liveness_test.cc | 2
-rw-r--r--  compiler/optimizing/load_store_analysis.h | 22
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 36
-rw-r--r--  compiler/optimizing/load_store_elimination.h | 2
-rw-r--r--  compiler/optimizing/loop_analysis.cc | 12
-rw-r--r--  compiler/optimizing/loop_optimization.h | 2
-rw-r--r--  compiler/optimizing/nodes.h | 917
-rw-r--r--  compiler/optimizing/nodes_mips.h | 14
-rw-r--r--  compiler/optimizing/nodes_shared.h | 36
-rw-r--r--  compiler/optimizing/nodes_vector.h | 124
-rw-r--r--  compiler/optimizing/nodes_x86.h | 12
-rw-r--r--  compiler/optimizing/optimizing_cfi_test.cc | 2
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 24
-rw-r--r--  compiler/optimizing/parallel_move_resolver.h | 4
-rw-r--r--  compiler/optimizing/parallel_move_test.cc | 18
-rw-r--r--  compiler/optimizing/pc_relative_fixups_mips.cc | 8
-rw-r--r--  compiler/optimizing/pc_relative_fixups_mips.h | 2
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.cc | 38
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.h | 2
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc | 16
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.h | 35
-rw-r--r--  compiler/optimizing/pretty_printer.h | 14
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc | 40
-rw-r--r--  compiler/optimizing/reference_type_propagation.h | 2
-rw-r--r--  compiler/optimizing/register_allocator_graph_color.h | 4
-rw-r--r--  compiler/optimizing/register_allocator_linear_scan.cc | 2
-rw-r--r--  compiler/optimizing/register_allocator_linear_scan.h | 6
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 2
-rw-r--r--  compiler/optimizing/scheduler.cc | 31
-rw-r--r--  compiler/optimizing/scheduler.h | 10
-rw-r--r--  compiler/optimizing/scheduler_arm.h | 6
-rw-r--r--  compiler/optimizing/scheduler_arm64.h | 8
-rw-r--r--  compiler/optimizing/scheduler_test.cc | 18
-rw-r--r--  compiler/optimizing/select_generator.h | 2
-rw-r--r--  compiler/optimizing/sharpening.h | 2
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc | 71
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h | 26
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis_test.cc | 2
-rw-r--r--  compiler/optimizing/ssa_phi_elimination.h | 4
-rw-r--r--  compiler/optimizing/ssa_test.cc | 8
-rw-r--r--  compiler/optimizing/x86_memory_gen.cc | 2
-rw-r--r--  compiler/optimizing/x86_memory_gen.h | 2
-rw-r--r--  compiler/utils/arm/assembler_arm_vixl.h | 16
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.h | 94
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h | 12
-rw-r--r--  compiler/utils/arm64/jni_macro_assembler_arm64.h | 96
-rw-r--r--  compiler/utils/assembler.h | 4
-rw-r--r--  compiler/utils/assembler_test.h | 4
-rw-r--r--  compiler/utils/jni_macro_assembler.h | 10
-rw-r--r--  compiler/utils/jni_macro_assembler_test.h | 4
-rw-r--r--  compiler/utils/mips/assembler_mips.h | 102
-rw-r--r--  compiler/utils/mips/assembler_mips32r5_test.cc | 24
-rw-r--r--  compiler/utils/mips/assembler_mips32r6_test.cc | 26
-rw-r--r--  compiler/utils/mips/assembler_mips_test.cc | 18
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h | 100
-rw-r--r--  compiler/utils/mips64/assembler_mips64_test.cc | 26
-rw-r--r--  compiler/utils/swap_space.cc | 1
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 6
-rw-r--r--  compiler/utils/x86/assembler_x86_test.cc | 18
-rw-r--r--  compiler/utils/x86/jni_macro_assembler_x86.cc | 4
-rw-r--r--  compiler/utils/x86/jni_macro_assembler_x86.h | 94
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 6
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc | 24
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.cc | 4
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.h | 92
-rw-r--r--  compiler/verifier_deps_test.cc | 8
-rw-r--r--  dex2oat/dex2oat.cc | 39
-rw-r--r--  dex2oat/dex2oat_image_test.cc | 2
-rw-r--r--  dex2oat/dex2oat_test.cc | 106
-rw-r--r--  dex2oat/linker/arm/relative_patcher_arm_base.h | 8
-rw-r--r--  dex2oat/linker/arm/relative_patcher_thumb2.h | 12
-rw-r--r--  dex2oat/linker/arm64/relative_patcher_arm64.h | 18
-rw-r--r--  dex2oat/linker/elf_writer_quick.cc | 36
-rw-r--r--  dex2oat/linker/elf_writer_test.cc | 45
-rw-r--r--  dex2oat/linker/image_test.h | 10
-rw-r--r--  dex2oat/linker/image_writer.cc | 67
-rw-r--r--  dex2oat/linker/image_writer.h | 6
-rw-r--r--  dex2oat/linker/mips/relative_patcher_mips.h | 16
-rw-r--r--  dex2oat/linker/mips64/relative_patcher_mips64.h | 16
-rw-r--r--  dex2oat/linker/multi_oat_relative_patcher.h | 6
-rw-r--r--  dex2oat/linker/multi_oat_relative_patcher_test.cc | 10
-rw-r--r--  dex2oat/linker/oat_writer.cc | 67
-rw-r--r--  dex2oat/linker/oat_writer.h | 4
-rw-r--r--  dex2oat/linker/oat_writer_test.cc | 54
-rw-r--r--  dex2oat/linker/relative_patcher.cc | 14
-rw-r--r--  dex2oat/linker/relative_patcher_test.h | 10
-rw-r--r--  dex2oat/linker/x86/relative_patcher_x86.h | 6
-rw-r--r--  dex2oat/linker/x86/relative_patcher_x86_base.h | 10
-rw-r--r--  dex2oat/linker/x86_64/relative_patcher_x86_64.h | 6
-rw-r--r--  dexdump/dexdump.cc | 17
-rw-r--r--  dexlayout/compact_dex_writer.h | 20
-rw-r--r--  dexlayout/dex_container.h | 8
-rw-r--r--  dexlayout/dex_ir.h | 38
-rw-r--r--  dexlayout/dex_ir_builder.cc | 2
-rw-r--r--  dexlayout/dex_writer.h | 6
-rw-r--r--  dexlayout/dexdiag_test.cc | 10
-rw-r--r--  dexlayout/dexlayout.cc | 57
-rw-r--r--  dexlayout/dexlayout_main.cc | 6
-rw-r--r--  dexlayout/dexlayout_test.cc | 16
-rw-r--r--  dexoptanalyzer/dexoptanalyzer.cc | 2
-rw-r--r--  disassembler/disassembler_arm.cc | 14
-rw-r--r--  disassembler/disassembler_arm.h | 6
-rw-r--r--  disassembler/disassembler_arm64.h | 14
-rw-r--r--  disassembler/disassembler_mips.h | 6
-rw-r--r--  disassembler/disassembler_x86.h | 6
-rw-r--r--  imgdiag/imgdiag.cc | 13
-rw-r--r--  imgdiag/imgdiag_test.cc | 2
-rw-r--r--  libartbase/base/allocator.cc | 4
-rw-r--r--  libartbase/base/arena_bit_vector.cc | 2
-rw-r--r--  libartbase/base/bit_memory_region.h | 2
-rw-r--r--  libartbase/base/common_art_test.cc | 3
-rw-r--r--  libartbase/base/common_art_test.h | 4
-rw-r--r--  libartbase/base/dumpable.h | 2
-rw-r--r--  libartbase/base/globals.h | 8
-rw-r--r--  libartbase/base/hash_set.h | 3
-rw-r--r--  libartbase/base/indenter.h | 4
-rw-r--r--  libartbase/base/leb128.h | 2
-rw-r--r--  libartbase/base/macros.h | 3
-rw-r--r--  libartbase/base/malloc_arena_pool.cc | 2
-rw-r--r--  libartbase/base/malloc_arena_pool.h | 14
-rw-r--r--  libartbase/base/mem_map.cc | 451
-rw-r--r--  libartbase/base/mem_map.h | 198
-rw-r--r--  libartbase/base/mem_map_test.cc | 833
-rw-r--r--  libartbase/base/memory_region.h | 2
-rw-r--r--  libartbase/base/unix_file/fd_file.h | 12
-rw-r--r--  libartbase/base/zip_archive.cc | 75
-rw-r--r--  libartbase/base/zip_archive.h | 15
-rw-r--r--  libdexfile/dex/art_dex_file_loader.cc | 87
-rw-r--r--  libdexfile/dex/art_dex_file_loader.h | 6
-rw-r--r--  libdexfile/dex/art_dex_file_loader_test.cc | 2
-rw-r--r--  libdexfile/dex/compact_dex_file.h | 12
-rw-r--r--  libdexfile/dex/dex_file_loader.cc | 10
-rw-r--r--  libdexfile/dex/dex_instruction.h | 12
-rw-r--r--  libdexfile/dex/standard_dex_file.h | 10
-rw-r--r--  libprofile/profile/profile_compilation_info.cc | 16
-rw-r--r--  libprofile/profile/profile_compilation_info.h | 8
-rw-r--r--  libprofile/profile/profile_compilation_info_test.cc | 2
-rw-r--r--  oatdump/oatdump.cc | 63
-rw-r--r--  openjdkjvmti/deopt_manager.h | 6
-rw-r--r--  openjdkjvmti/events-inl.h | 2
-rw-r--r--  openjdkjvmti/events.cc | 50
-rw-r--r--  openjdkjvmti/fixed_up_dex_file.cc | 5
-rw-r--r--  openjdkjvmti/object_tagging.h | 10
-rw-r--r--  openjdkjvmti/ti_class.cc | 20
-rw-r--r--  openjdkjvmti/ti_class_definition.cc | 65
-rw-r--r--  openjdkjvmti/ti_class_definition.h | 20
-rw-r--r--  openjdkjvmti/ti_dump.cc | 2
-rw-r--r--  openjdkjvmti/ti_heap.cc | 10
-rw-r--r--  openjdkjvmti/ti_method.cc | 14
-rw-r--r--  openjdkjvmti/ti_phase.cc | 2
-rw-r--r--  openjdkjvmti/ti_redefine.cc | 45
-rw-r--r--  openjdkjvmti/ti_redefine.h | 6
-rw-r--r--  openjdkjvmti/ti_search.cc | 2
-rw-r--r--  openjdkjvmti/ti_stack.cc | 16
-rw-r--r--  openjdkjvmti/ti_thread.cc | 4
-rw-r--r--  openjdkjvmti/transform.cc | 4
-rw-r--r--  patchoat/patchoat.cc | 50
-rw-r--r--  patchoat/patchoat.h | 6
-rw-r--r--  profman/profile_assistant_test.cc | 2
-rw-r--r--  profman/profman.cc | 2
-rw-r--r--  runtime/aot_class_linker.h | 4
-rw-r--r--  runtime/arch/arch_test.cc | 4
-rw-r--r--  runtime/arch/arm/context_arm.h | 30
-rw-r--r--  runtime/arch/arm/instruction_set_features_arm.h | 14
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 21
-rw-r--r--  runtime/arch/arm64/context_arm64.cc | 8
-rw-r--r--  runtime/arch/arm64/context_arm64.h | 30
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64.h | 12
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 121
-rw-r--r--  runtime/arch/mips/context_mips.h | 28
-rw-r--r--  runtime/arch/mips/instruction_set_features_mips.h | 12
-rw-r--r--  runtime/arch/mips64/context_mips64.h | 28
-rw-r--r--  runtime/arch/mips64/instruction_set_features_mips64.h | 12
-rw-r--r--  runtime/arch/stub_test.cc | 4
-rw-r--r--  runtime/arch/x86/context_x86.h | 30
-rw-r--r--  runtime/arch/x86/instruction_set_features_x86.h | 14
-rw-r--r--  runtime/arch/x86_64/context_x86_64.h | 30
-rw-r--r--  runtime/arch/x86_64/instruction_set_features_x86_64.h | 6
-rw-r--r--  runtime/art_field.h | 2
-rw-r--r--  runtime/art_method.h | 2
-rw-r--r--  runtime/base/mem_map_arena_pool.cc | 37
-rw-r--r--  runtime/base/mem_map_arena_pool.h | 14
-rw-r--r--  runtime/base/mutex.cc | 14
-rw-r--r--  runtime/base/mutex.h | 16
-rw-r--r--  runtime/cha.cc | 82
-rw-r--r--  runtime/class_linker.cc | 24
-rw-r--r--  runtime/class_linker_test.cc | 4
-rw-r--r--  runtime/class_root.h | 7
-rw-r--r--  runtime/common_runtime_test.h | 4
-rw-r--r--  runtime/compiler_filter.h | 2
-rw-r--r--  runtime/debugger.cc | 34
-rw-r--r--  runtime/debugger.h | 22
-rw-r--r--  runtime/dex2oat_environment_test.h | 6
-rw-r--r--  runtime/dexopt_test.cc | 69
-rw-r--r--  runtime/dexopt_test.h | 8
-rw-r--r--  runtime/elf_file.cc | 218
-rw-r--r--  runtime/elf_file.h | 14
-rw-r--r--  runtime/elf_file_impl.h | 32
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 36
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc | 4
-rw-r--r--  runtime/fault_handler.h | 16
-rw-r--r--  runtime/gc/accounting/atomic_stack.h | 18
-rw-r--r--  runtime/gc/accounting/bitmap.cc | 46
-rw-r--r--  runtime/gc/accounting/bitmap.h | 26
-rw-r--r--  runtime/gc/accounting/card_table-inl.h | 12
-rw-r--r--  runtime/gc/accounting/card_table.cc | 25
-rw-r--r--  runtime/gc/accounting/card_table.h | 12
-rw-r--r--  runtime/gc/accounting/mod_union_table-inl.h | 2
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 4
-rw-r--r--  runtime/gc/accounting/mod_union_table.h | 32
-rw-r--r--  runtime/gc/accounting/mod_union_table_test.cc | 8
-rw-r--r--  runtime/gc/accounting/read_barrier_table.h | 27
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc | 28
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 12
-rw-r--r--  runtime/gc/allocation_record.cc | 2
-rw-r--r--  runtime/gc/allocator/rosalloc.cc | 17
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 5
-rw-r--r--  runtime/gc/collector/concurrent_copying-inl.h | 40
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 431
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 67
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc | 83
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 32
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 34
-rw-r--r--  runtime/gc/collector/partial_mark_sweep.h | 4
-rw-r--r--  runtime/gc/collector/semi_space.h | 25
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.h | 13
-rw-r--r--  runtime/gc/heap.cc | 346
-rw-r--r--  runtime/gc/heap.h | 21
-rw-r--r--  runtime/gc/heap_test.cc | 19
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 33
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 32
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 113
-rw-r--r--  runtime/gc/space/dlmalloc_space.h | 86
-rw-r--r--  runtime/gc/space/image_space.cc | 506
-rw-r--r--  runtime/gc/space/image_space.h | 15
-rw-r--r--  runtime/gc/space/image_space_test.cc | 56
-rw-r--r--  runtime/gc/space/large_object_space.cc | 77
-rw-r--r--  runtime/gc/space/large_object_space.h | 52
-rw-r--r--  runtime/gc/space/malloc_space.cc | 63
-rw-r--r--  runtime/gc/space/malloc_space.h | 36
-rw-r--r--  runtime/gc/space/memory_tool_malloc_space-inl.h | 4
-rw-r--r--  runtime/gc/space/memory_tool_malloc_space.h | 20
-rw-r--r--  runtime/gc/space/region_space-inl.h | 10
-rw-r--r--  runtime/gc/space/region_space.cc | 215
-rw-r--r--  runtime/gc/space/region_space.h | 100
-rw-r--r--  runtime/gc/space/rosalloc_space.cc | 135
-rw-r--r--  runtime/gc/space/rosalloc_space.h | 74
-rw-r--r--  runtime/gc/space/space.h | 36
-rw-r--r--  runtime/gc/space/zygote_space.cc | 17
-rw-r--r--  runtime/gc/space/zygote_space.h | 29
-rw-r--r--  runtime/gc/system_weak.h | 6
-rw-r--r--  runtime/gc/system_weak_test.cc | 9
-rw-r--r--  runtime/gc/task_processor_test.cc | 6
-rw-r--r--  runtime/gc/verification.cc | 4
-rw-r--r--  runtime/gc_root.h | 4
-rw-r--r--  runtime/generated/asm_support_gen.h | 4
-rw-r--r--  runtime/handle_scope.h | 2
-rw-r--r--  runtime/hidden_api_test.cc | 2
-rw-r--r--  runtime/hprof/hprof.cc | 24
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/indirect_reference_table.cc | 37
-rw-r--r--  runtime/indirect_reference_table.h | 5
-rw-r--r--  runtime/instrumentation.cc | 10
-rw-r--r--  runtime/instrumentation_test.cc | 28
-rw-r--r--  runtime/intern_table_test.cc | 2
-rw-r--r--  runtime/interpreter/interpreter_intrinsics.cc | 4
-rw-r--r--  runtime/interpreter/mterp/arm/instruction_end.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/instruction_end_alt.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/instruction_end_sister.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/instruction_start.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/instruction_start_alt.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/instruction_start_sister.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput.S | 6
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/arm64/instruction_end.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/instruction_end_alt.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/instruction_end_sister.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/instruction_start.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/instruction_start_alt.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/instruction_start_sister.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput.S | 6
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iget_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iput.S | 6
-rw-r--r--  runtime/interpreter/mterp/mips/op_iput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_iput_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/mips/op_sget.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sget_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/mips/op_sput.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips/op_sput_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iget_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iput.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iput_object.S | 4
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_iput_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sget.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sget_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sput.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sput_object.S | 4
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/mips64/op_sput_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/mterp.cc | 202
-rw-r--r--  runtime/interpreter/mterp/out/mterp_arm.S | 96
-rw-r--r--  runtime/interpreter/mterp/out/mterp_arm64.S | 94
-rw-r--r--  runtime/interpreter/mterp/out/mterp_mips.S | 84
-rw-r--r--  runtime/interpreter/mterp/out/mterp_mips64.S | 112
-rw-r--r--  runtime/interpreter/mterp/out/mterp_x86.S | 108
-rw-r--r--  runtime/interpreter/mterp/out/mterp_x86_64.S | 108
-rw-r--r--  runtime/interpreter/mterp/x86/header.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/instruction_end.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/instruction_end_alt.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/instruction_end_sister.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/instruction_start.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/instruction_start_alt.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/instruction_start_sister.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_sget.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sget_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_sput.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sput_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86_64/header.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/instruction_end.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/instruction_end_alt.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/instruction_end_sister.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/instruction_start.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/instruction_start_alt.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/instruction_start_sister.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iget_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iput.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_iput_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sget.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sget_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sget_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sget_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sget_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sget_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sget_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sput.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sput_boolean.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sput_byte.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sput_char.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sput_short.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_sput_wide.S | 4
-rw-r--r--  runtime/interpreter/shadow_frame.h | 8
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 29
-rw-r--r--  runtime/java_frame_root_info.h | 4
-rw-r--r--  runtime/jit/jit.cc | 8
-rw-r--r--  runtime/jit/jit_code_cache.cc | 190
-rw-r--r--  runtime/jit/jit_code_cache.h | 16
-rw-r--r--  runtime/jit/profile_saver.cc | 2
-rw-r--r--  runtime/jit/profiling_info_test.cc | 2
-rw-r--r--  runtime/jni/java_vm_ext_test.cc | 4
-rw-r--r--  runtime/jni/jni_internal_test.cc | 2
-rw-r--r--  runtime/mirror/class-inl.h | 2
-rw-r--r--  runtime/mirror/class.h | 2
-rw-r--r--  runtime/mirror/dex_cache.h | 6
-rw-r--r--  runtime/mirror/dex_cache_test.cc | 2
-rw-r--r--  runtime/mirror/iftable.h | 2
-rw-r--r--  runtime/mirror/proxy.h | 2
-rw-r--r--  runtime/mirror/stack_trace_element.h | 2
-rw-r--r--  runtime/mirror/string.h | 2
-rw-r--r--  runtime/mirror/var_handle.cc | 26
-rw-r--r--  runtime/monitor.cc | 12
-rw-r--r--  runtime/monitor_objects_stack_visitor.h | 2
-rw-r--r--  runtime/monitor_test.cc | 2
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 40
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc | 2
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 2
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 4
-rw-r--r--  runtime/noop_compiler_callbacks.h | 10
-rw-r--r--  runtime/oat_file.cc | 254
-rw-r--r--  runtime/oat_file.h | 14
-rw-r--r--  runtime/oat_file_assistant.cc | 346
-rw-r--r--  runtime/oat_file_assistant.h | 83
-rw-r--r--  runtime/oat_file_assistant_test.cc | 469
-rw-r--r--  runtime/oat_file_manager.cc | 46
-rw-r--r--  runtime/oat_file_test.cc | 28
-rw-r--r--  runtime/parsed_options.cc | 12
-rw-r--r--  runtime/proxy_test.cc | 2
-rw-r--r--  runtime/quick_exception_handler.cc | 12
-rw-r--r--  runtime/runtime.cc | 25
-rw-r--r--  runtime/runtime.h | 9
-rw-r--r--  runtime/runtime_callbacks_test.cc | 71
-rw-r--r--  runtime/runtime_options.def | 5
-rw-r--r--  runtime/stack.cc | 6
-rw-r--r--  runtime/thread.cc | 44
-rw-r--r--  runtime/thread_list.cc | 7
-rw-r--r--  runtime/thread_pool.cc | 16
-rw-r--r--  runtime/thread_pool.h | 6
-rw-r--r--  runtime/trace.h | 24
-rw-r--r--  runtime/transaction.h | 2
-rw-r--r--  runtime/vdex_file.cc | 19
-rw-r--r--  runtime/vdex_file.h | 12
-rw-r--r--  runtime/verifier/instruction_flags.h | 2
-rw-r--r--  runtime/verifier/reg_type.h | 280
-rw-r--r--  runtime/verifier/reg_type_test.cc | 2
-rw-r--r--  simulator/code_simulator_arm64.h | 8
-rwxr-xr-x  test/071-dexfile-map-clean/run | 10
-rw-r--r--  test/071-dexfile-map-clean/src/Main.java | 13
-rw-r--r--  test/1000-non-moving-space-stress/expected.txt | 1
-rw-r--r--  test/1000-non-moving-space-stress/info.txt | 5
-rw-r--r--  test/1000-non-moving-space-stress/src-art/Main.java | 53
-rw-r--r--  test/116-nodex2oat/expected.txt | 9
-rwxr-xr-x  test/116-nodex2oat/run | 16
-rw-r--r--  test/116-nodex2oat/src/Main.java | 11
-rw-r--r--  test/117-nopatchoat/expected.txt | 11
-rwxr-xr-x  test/117-nopatchoat/run | 16
-rw-r--r--  test/117-nopatchoat/src/Main.java | 11
-rw-r--r--  test/118-noimage-dex2oat/run | 4
-rwxr-xr-x  test/134-nodex2oat-nofallback/run | 2
-rw-r--r--  test/134-nodex2oat-nofallback/src/Main.java | 11
-rwxr-xr-x  test/138-duplicate-classes-check2/run | 19
-rwxr-xr-x  test/147-stripped-dex-fallback/run | 2
-rw-r--r--  test/167-visit-locks/visit_locks.cc | 2
-rw-r--r--  test/1945-proxy-method-arguments/get_args.cc | 4
-rw-r--r--  test/203-multi-checkpoint/multi_checkpoint.cc | 4
-rw-r--r--  test/305-other-fault-handler/fault_handler.cc | 21
-rw-r--r--  test/565-checker-condition-liveness/src/Main.java | 110
-rw-r--r--  test/616-cha-unloading/cha_unload.cc | 2
-rw-r--r--  test/638-checker-inline-cache-intrinsic/run | 2
-rwxr-xr-x  test/652-deopt-intrinsic/run | 6
-rwxr-xr-x  test/667-jit-jni-stub/run | 2
-rw-r--r--  test/667-jit-jni-stub/src/Main.java | 2
-rw-r--r--  test/677-fsi/expected.txt | 1
-rw-r--r--  test/677-fsi2/expected.txt | 3
-rw-r--r--  test/677-fsi2/run | 8
-rw-r--r--  test/718-zipfile-finalizer/expected.txt | 0
-rw-r--r--  test/718-zipfile-finalizer/info.txt | 2
-rw-r--r--  test/718-zipfile-finalizer/src/Main.java | 40
-rw-r--r--  test/906-iterate-heap/iterate_heap.cc | 6
-rw-r--r--  test/913-heaps/heaps.cc | 12
-rwxr-xr-x  test/916-obsolete-jit/run | 10
-rw-r--r--  test/988-method-trace/expected.txt | 82
-rw-r--r--  test/988-method-trace/src/art/Test988.java | 10
-rw-r--r--  test/988-method-trace/trace_fib.cc (renamed from libartbase/base/fuchsia_compat.h) | 35
-rw-r--r--  test/Android.bp | 1
-rw-r--r--  test/Android.run-test.mk | 1
-rw-r--r--  test/common/runtime_state.cc | 7
-rw-r--r--  test/common/stack_inspect.cc | 2
-rw-r--r--  test/dexdump/bytecodes.txt | 4
-rwxr-xr-x  test/dexdump/bytecodes.xml | 12
-rwxr-xr-x  test/dexdump/checkers.xml | 2
-rw-r--r--  test/dexdump/invoke-custom.txt | 10
-rwxr-xr-x  test/etc/run-test-jar | 18
-rw-r--r--  test/knownfailures.json | 28
-rwxr-xr-x  test/run-test | 13
-rw-r--r--  test/testrunner/target_config.py | 14
-rwxr-xr-x  test/testrunner/testrunner.py | 4
-rw-r--r--  test/ti-agent/ti_macros.h | 2
-rw-r--r--  tools/ahat/Android.mk | 1
-rw-r--r--  tools/ahat/ahat-tests.xml (renamed from tools/ahat/AndroidTest.xml) | 0
-rw-r--r--  tools/art | 213
-rw-r--r--  tools/art_verifier/art_verifier.cc | 11
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java | 66
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/AnnotationHandler.java | 11
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java | 119
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java | 165
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java | 89
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeMultiHandler.java | 72
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java | 66
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java | 146
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java | 20
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java | 47
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/Status.java | 8
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java | 18
-rw-r--r--  tools/class2greylist/test/src/com/android/class2greylist/AnnotationHandlerTestBase.java | 55
-rw-r--r--  tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java | 84
-rw-r--r--  tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java | 147
-rw-r--r--  tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java | 116
-rw-r--r--  tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java (renamed from tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java) | 225
-rw-r--r--  tools/dexanalyze/Android.bp | 1
-rw-r--r--  tools/dexanalyze/dexanalyze.cc | 1
-rw-r--r--  tools/dexanalyze/dexanalyze_bytecode.cc | 166
-rw-r--r--  tools/dexanalyze/dexanalyze_bytecode.h | 17
-rw-r--r--  tools/dexanalyze/dexanalyze_experiments.cc | 131
-rw-r--r--  tools/dexanalyze/dexanalyze_experiments.h | 31
-rw-r--r--  tools/dexanalyze/dexanalyze_strings.cc | 370
-rw-r--r--  tools/dexanalyze/dexanalyze_strings.h | 60
-rwxr-xr-x  tools/golem/build-target.sh | 2
-rw-r--r--  tools/hiddenapi/hiddenapi.cc | 8
-rw-r--r--  tools/jfuzz/Android.bp | 29
-rw-r--r--  tools/jfuzz/Android.mk | 25
-rw-r--r--  tools/libcore_failures.txt | 10
-rw-r--r--  tools/libcore_gcstress_debug_failures.txt | 14
-rw-r--r--  tools/libcore_gcstress_failures.txt | 10
-rwxr-xr-x  tools/run-jdwp-tests.sh | 8
-rwxr-xr-x  tools/run-libcore-tests.sh | 7
-rw-r--r--  tools/tracefast-plugin/tracefast.cc | 30
-rw-r--r--  tools/veridex/flow_analysis.h | 8
697 files changed, 10719 insertions(+), 8010 deletions(-)
diff --git a/Android.mk b/Android.mk
index 19c65a1e67..7852be519f 100644
--- a/Android.mk
+++ b/Android.mk
@@ -41,18 +41,18 @@ endif
.PHONY: clean-oat-target
clean-oat-target:
- adb root
- adb wait-for-device remount
- adb shell rm -rf $(ART_TARGET_NATIVETEST_DIR)
- adb shell rm -rf $(ART_TARGET_TEST_DIR)
- adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*/*
- adb shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$(DEX2OAT_TARGET_ARCH)
- adb shell rm -rf system/app/$(DEX2OAT_TARGET_ARCH)
+ $(ADB) root
+ $(ADB) wait-for-device remount
+ $(ADB) shell rm -rf $(ART_TARGET_NATIVETEST_DIR)
+ $(ADB) shell rm -rf $(ART_TARGET_TEST_DIR)
+ $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*/*
+ $(ADB) shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$(DEX2OAT_TARGET_ARCH)
+ $(ADB) shell rm -rf system/app/$(DEX2OAT_TARGET_ARCH)
ifdef TARGET_2ND_ARCH
- adb shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)
- adb shell rm -rf system/app/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)
+ $(ADB) shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)
+ $(ADB) shell rm -rf system/app/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)
endif
- adb shell rm -rf data/run-test/test-*/dalvik-cache/*
+ $(ADB) shell rm -rf data/run-test/test-*/dalvik-cache/*
########################################################################
# cpplint rules to style check art source files
@@ -92,7 +92,7 @@ endif
# test rules
# All the dependencies that must be built ahead of sync-ing them onto the target device.
-TEST_ART_TARGET_SYNC_DEPS :=
+TEST_ART_TARGET_SYNC_DEPS := $(ADB_EXECUTABLE)
include $(art_path)/build/Android.common_test.mk
include $(art_path)/build/Android.gtest.mk
@@ -100,14 +100,14 @@ include $(art_path)/test/Android.run-test.mk
# Make sure /system is writable on the device.
TEST_ART_ADB_ROOT_AND_REMOUNT := \
- (adb root && \
- adb wait-for-device remount && \
- ((adb shell touch /system/testfile && \
- (adb shell rm /system/testfile || true)) || \
- (adb disable-verity && \
- adb reboot && \
- adb wait-for-device root && \
- adb wait-for-device remount)))
+ ($(ADB) root && \
+ $(ADB) wait-for-device remount && \
+ (($(ADB) shell touch /system/testfile && \
+ ($(ADB) shell rm /system/testfile || true)) || \
+ ($(ADB) disable-verity && \
+ $(ADB) reboot && \
+ $(ADB) wait-for-device root && \
+ $(ADB) wait-for-device remount)))
# Sync test files to the target, depends upon all things that must be pushed to the target.
.PHONY: test-art-target-sync
@@ -121,25 +121,25 @@ ifeq ($(ART_TEST_ANDROID_ROOT),)
ifeq ($(ART_TEST_CHROOT),)
test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
$(TEST_ART_ADB_ROOT_AND_REMOUNT)
- adb sync system && adb sync data
+ $(ADB) sync system && $(ADB) sync data
else
# TEST_ART_ADB_ROOT_AND_REMOUNT is not needed here, as we are only
# pushing things to the chroot dir, which is expected to be under
# /data on the device.
test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
- adb wait-for-device
- adb push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)/
- adb push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
+ $(ADB) wait-for-device
+ $(ADB) push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)/
+ $(ADB) push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
endif
else
test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
$(TEST_ART_ADB_ROOT_AND_REMOUNT)
- adb wait-for-device
- adb push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)$(ART_TEST_ANDROID_ROOT)
+ $(ADB) wait-for-device
+ $(ADB) push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)$(ART_TEST_ANDROID_ROOT)
# Push the contents of the `data` dir into `$(ART_TEST_CHROOT)/data` on the device (note
# that $(ART_TEST_CHROOT) can be empty). If `$(ART_TEST_CHROOT)/data` already exists on
# the device, it is not overwritten, but its content is updated.
- adb push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
+ $(ADB) push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
endif
endif
@@ -493,90 +493,90 @@ build-art-target-tests: build-art-target $(TEST_ART_RUN_TEST_DEPENDENCIES) $(T
.PHONY: use-art
use-art:
- adb root
- adb wait-for-device shell stop
- adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
- adb shell start
+ $(ADB) root
+ $(ADB) wait-for-device shell stop
+ $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+ $(ADB) shell start
.PHONY: use-artd
use-artd:
- adb root
- adb wait-for-device shell stop
- adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
- adb shell start
+ $(ADB) root
+ $(ADB) wait-for-device shell stop
+ $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
+ $(ADB) shell start
.PHONY: use-dalvik
use-dalvik:
- adb root
- adb wait-for-device shell stop
- adb shell setprop persist.sys.dalvik.vm.lib.2 libdvm.so
- adb shell start
+ $(ADB) root
+ $(ADB) wait-for-device shell stop
+ $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libdvm.so
+ $(ADB) shell start
.PHONY: use-art-full
use-art-full:
- adb root
- adb wait-for-device shell stop
- adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-filter \"\"
- adb shell setprop dalvik.vm.image-dex2oat-filter \"\"
- adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
- adb shell setprop dalvik.vm.usejit false
- adb shell start
+ $(ADB) root
+ $(ADB) wait-for-device shell stop
+ $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+ $(ADB) shell setprop dalvik.vm.dex2oat-filter \"\"
+ $(ADB) shell setprop dalvik.vm.image-dex2oat-filter \"\"
+ $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+ $(ADB) shell setprop dalvik.vm.usejit false
+ $(ADB) shell start
.PHONY: use-artd-full
use-artd-full:
- adb root
- adb wait-for-device shell stop
- adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-filter \"\"
- adb shell setprop dalvik.vm.image-dex2oat-filter \"\"
- adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
- adb shell setprop dalvik.vm.usejit false
- adb shell start
+ $(ADB) root
+ $(ADB) wait-for-device shell stop
+ $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+ $(ADB) shell setprop dalvik.vm.dex2oat-filter \"\"
+ $(ADB) shell setprop dalvik.vm.image-dex2oat-filter \"\"
+ $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
+ $(ADB) shell setprop dalvik.vm.usejit false
+ $(ADB) shell start
.PHONY: use-art-jit
use-art-jit:
- adb root
- adb wait-for-device shell stop
- adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-filter "verify-at-runtime"
- adb shell setprop dalvik.vm.image-dex2oat-filter "verify-at-runtime"
- adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
- adb shell setprop dalvik.vm.usejit true
- adb shell start
+ $(ADB) root
+ $(ADB) wait-for-device shell stop
+ $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+ $(ADB) shell setprop dalvik.vm.dex2oat-filter "verify-at-runtime"
+ $(ADB) shell setprop dalvik.vm.image-dex2oat-filter "verify-at-runtime"
+ $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+ $(ADB) shell setprop dalvik.vm.usejit true
+ $(ADB) shell start
.PHONY: use-art-interpret-only
use-art-interpret-only:
- adb root
- adb wait-for-device shell stop
- adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
- adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
- adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
- adb shell setprop dalvik.vm.usejit false
- adb shell start
+ $(ADB) root
+ $(ADB) wait-for-device shell stop
+ $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+ $(ADB) shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+ $(ADB) shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
+ $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+ $(ADB) shell setprop dalvik.vm.usejit false
+ $(ADB) shell start
.PHONY: use-artd-interpret-only
use-artd-interpret-only:
- adb root
- adb wait-for-device shell stop
- adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
- adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
- adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
- adb shell setprop dalvik.vm.usejit false
- adb shell start
+ $(ADB) root
+ $(ADB) wait-for-device shell stop
+ $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+ $(ADB) shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+ $(ADB) shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
+ $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
+ $(ADB) shell setprop dalvik.vm.usejit false
+ $(ADB) shell start
.PHONY: use-art-verify-none
use-art-verify-none:
- adb root
- adb wait-for-device shell stop
- adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-filter "verify-none"
- adb shell setprop dalvik.vm.image-dex2oat-filter "verify-none"
- adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
- adb shell setprop dalvik.vm.usejit false
- adb shell start
+ $(ADB) root
+ $(ADB) wait-for-device shell stop
+ $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+ $(ADB) shell setprop dalvik.vm.dex2oat-filter "verify-none"
+ $(ADB) shell setprop dalvik.vm.image-dex2oat-filter "verify-none"
+ $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+ $(ADB) shell setprop dalvik.vm.usejit false
+ $(ADB) shell start
########################################################################
diff --git a/adbconnection/adbconnection.h b/adbconnection/adbconnection.h
index 04e39bf4ff..c51f981f40 100644
--- a/adbconnection/adbconnection.h
+++ b/adbconnection/adbconnection.h
@@ -46,12 +46,12 @@ struct AdbConnectionDebuggerController : public art::DebuggerControlCallback {
: connection_(connection) {}
// Begin running the debugger.
- void StartDebugger() OVERRIDE;
+ void StartDebugger() override;
// The debugger should begin shutting down since the runtime is ending.
- void StopDebugger() OVERRIDE;
+ void StopDebugger() override;
- bool IsDebuggerConfigured() OVERRIDE;
+ bool IsDebuggerConfigured() override;
private:
AdbConnectionState* connection_;
diff --git a/build/Android.common.mk b/build/Android.common.mk
index a6a9f0fc47..316ce646ab 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -92,4 +92,7 @@ else
2ND_ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES)
endif
+ADB_EXECUTABLE := $(HOST_OUT_EXECUTABLES)/adb
+ADB := $(ADB_EXECUTABLE)
+
endif # ART_ANDROID_COMMON_MK
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 81c82b7460..20f20c9f7b 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -447,7 +447,8 @@ define define-art-gtest-rule-target
$$($(3)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
$$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
$$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
+ $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \
+ $$(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
$$(gtest_rule): PRIVATE_TARGET_EXE := $$(gtest_target_exe)
$$(gtest_rule): PRIVATE_MAYBE_CHROOT_COMMAND := $$(maybe_chroot_command)
@@ -460,14 +461,14 @@ $$(gtest_rule): PRIVATE_GTEST_WITNESS := $$(gtest_witness)
.PHONY: $$(gtest_rule)
$$(gtest_rule): test-art-target-sync
- $(hide) adb shell touch $$(PRIVATE_GTEST_WITNESS)
- $(hide) adb shell rm $$(PRIVATE_GTEST_WITNESS)
- $(hide) adb shell $$(PRIVATE_MAYBE_CHROOT_COMMAND) chmod 755 $$(PRIVATE_TARGET_EXE)
+ $(hide) $(ADB) shell touch $$(PRIVATE_GTEST_WITNESS)
+ $(hide) $(ADB) shell rm $$(PRIVATE_GTEST_WITNESS)
+ $(hide) $(ADB) shell $$(PRIVATE_MAYBE_CHROOT_COMMAND) chmod 755 $$(PRIVATE_TARGET_EXE)
$(hide) $$(call ART_TEST_SKIP,$$@) && \
- (adb shell "$$(PRIVATE_MAYBE_CHROOT_COMMAND) env $(GCOV_ENV) LD_LIBRARY_PATH=$(4) \
+ ($(ADB) shell "$$(PRIVATE_MAYBE_CHROOT_COMMAND) env $(GCOV_ENV) LD_LIBRARY_PATH=$(4) \
ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) $$(PRIVATE_TARGET_EXE) \
&& touch $$(PRIVATE_GTEST_WITNESS)" \
- && (adb pull $$(PRIVATE_GTEST_WITNESS) /tmp/ && $$(call ART_TEST_PASSED,$$@)) \
+ && ($(ADB) pull $$(PRIVATE_GTEST_WITNESS) /tmp/ && $$(call ART_TEST_PASSED,$$@)) \
|| $$(call ART_TEST_FAILED,$$@))
$(hide) rm -f /tmp/$$@-$$$$PPID
diff --git a/build/art.go b/build/art.go
index 3dabce3975..61b1a4e501 100644
--- a/build/art.go
+++ b/build/art.go
@@ -66,8 +66,12 @@ func globalFlags(ctx android.BaseContext) ([]string, []string) {
"-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
}
- cdexLevel := envDefault(ctx, "ART_DEFAULT_COMPACT_DEX_LEVEL", "fast")
- cflags = append(cflags, "-DART_DEFAULT_COMPACT_DEX_LEVEL="+cdexLevel)
+ if envTrue(ctx, "ART_USE_GENERATIONAL_CC") {
+ cflags = append(cflags, "-DART_USE_GENERATIONAL_CC=1")
+ }
+
+ cdexLevel := envDefault(ctx, "ART_DEFAULT_COMPACT_DEX_LEVEL", "fast")
+ cflags = append(cflags, "-DART_DEFAULT_COMPACT_DEX_LEVEL="+cdexLevel)
// We need larger stack overflow guards for ASAN, as the compiled code will have
// larger frame sizes. For simplicity, just use global not-target-specific cflags.
@@ -312,19 +316,19 @@ func libartDefaultsFactory() android.Module {
codegen(ctx, c, true)
type props struct {
- Target struct {
- Android struct {
- Shared_libs []string
- }
- }
+ Target struct {
+ Android struct {
+ Shared_libs []string
+ }
+ }
}
p := &props{}
// TODO: express this in .bp instead b/79671158
if !envTrue(ctx, "ART_TARGET_LINUX") {
- p.Target.Android.Shared_libs = []string {
- "libmetricslogger",
- }
+ p.Target.Android.Shared_libs = []string{
+ "libmetricslogger",
+ }
}
ctx.AppendProperties(p)
})
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index a33d53741c..42c6a5ff59 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -558,13 +558,12 @@ TEST_F(CmdlineParserTest, TestIgnoredArguments) {
TEST_F(CmdlineParserTest, MultipleArguments) {
EXPECT_TRUE(IsResultSuccessful(parser_->Parse(
"-help -XX:ForegroundHeapGrowthMultiplier=0.5 "
- "-Xnodex2oat -Xmethod-trace -XX:LargeObjectSpace=map")));
+ "-Xmethod-trace -XX:LargeObjectSpace=map")));
auto&& map = parser_->ReleaseArgumentsMap();
- EXPECT_EQ(5u, map.Size());
+ EXPECT_EQ(4u, map.Size());
EXPECT_KEY_VALUE(map, M::Help, Unit{});
EXPECT_KEY_VALUE(map, M::ForegroundHeapGrowthMultiplier, 0.5);
- EXPECT_KEY_VALUE(map, M::Dex2Oat, false);
EXPECT_KEY_VALUE(map, M::MethodTrace, Unit{});
EXPECT_KEY_VALUE(map, M::LargeObjectSpace, gc::space::LargeObjectSpaceType::kMap);
} // TEST_F
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 87197becf9..d603d9673c 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -221,7 +221,7 @@ void CommonCompilerTest::TearDown() {
callbacks_.reset();
verification_results_.reset();
compiler_options_.reset();
- image_reservation_.reset();
+ image_reservation_.Reset();
CommonRuntimeTest::TearDown();
}
@@ -323,18 +323,17 @@ void CommonCompilerTest::ReserveImageSpace() {
// accidentally end up colliding with the fixed memory address when we need to load the image.
std::string error_msg;
MemMap::Init();
- image_reservation_.reset(MemMap::MapAnonymous("image reservation",
- reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
- (size_t)120 * 1024 * 1024, // 120MB
- PROT_NONE,
- false /* no need for 4gb flag with fixed mmap*/,
- false /* not reusing existing reservation */,
- &error_msg));
- CHECK(image_reservation_.get() != nullptr) << error_msg;
+ image_reservation_ = MemMap::MapAnonymous("image reservation",
+ reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
+ (size_t)120 * 1024 * 1024, // 120MB
+ PROT_NONE,
+ false /* no need for 4gb flag with fixed mmap */,
+ &error_msg);
+ CHECK(image_reservation_.IsValid()) << error_msg;
}
void CommonCompilerTest::UnreserveImageSpace() {
- image_reservation_.reset();
+ image_reservation_.Reset();
}
void CommonCompilerTest::SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files) {
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index db38110400..e6d1564621 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -57,9 +57,9 @@ class CommonCompilerTest : public CommonRuntimeTest {
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
- void SetUp() OVERRIDE;
+ void SetUp() override;
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE;
+ void SetUpRuntimeOptions(RuntimeOptions* options) override;
Compiler::Kind GetCompilerKind() const;
void SetCompilerKind(Compiler::Kind compiler_kind);
@@ -73,7 +73,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
return CompilerFilter::kDefaultCompilerFilter;
}
- void TearDown() OVERRIDE;
+ void TearDown() override;
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -115,7 +115,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
std::unique_ptr<CompilerDriver> compiler_driver_;
private:
- std::unique_ptr<MemMap> image_reservation_;
+ MemMap image_reservation_;
// Chunks must not move their storage after being created - use the node-based std::list.
std::list<std::vector<uint8_t>> header_code_and_maps_chunks_;
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index f88028034d..864ce585cf 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -104,7 +104,7 @@ class CompiledCode {
uint32_t packed_fields_;
};
-class CompiledMethod FINAL : public CompiledCode {
+class CompiledMethod final : public CompiledCode {
public:
// Constructs a CompiledMethod.
// Note: Consider using the static allocation methods below that will allocate the CompiledMethod
diff --git a/compiler/debug/dwarf/debug_abbrev_writer.h b/compiler/debug/dwarf/debug_abbrev_writer.h
index cccca255c1..63a049b2cc 100644
--- a/compiler/debug/dwarf/debug_abbrev_writer.h
+++ b/compiler/debug/dwarf/debug_abbrev_writer.h
@@ -37,7 +37,7 @@ namespace dwarf {
// determines all the attributes and their format.
// It is possible to think of them as type definitions.
template <typename Vector = std::vector<uint8_t>>
-class DebugAbbrevWriter FINAL : private Writer<Vector> {
+class DebugAbbrevWriter final : private Writer<Vector> {
static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
public:
diff --git a/compiler/debug/dwarf/debug_info_entry_writer.h b/compiler/debug/dwarf/debug_info_entry_writer.h
index 89d16f2b2a..b198178d5d 100644
--- a/compiler/debug/dwarf/debug_info_entry_writer.h
+++ b/compiler/debug/dwarf/debug_info_entry_writer.h
@@ -42,7 +42,7 @@ namespace dwarf {
* EndTag();
*/
template <typename Vector = std::vector<uint8_t>>
-class DebugInfoEntryWriter FINAL : private Writer<Vector> {
+class DebugInfoEntryWriter final : private Writer<Vector> {
static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
public:
diff --git a/compiler/debug/dwarf/debug_line_opcode_writer.h b/compiler/debug/dwarf/debug_line_opcode_writer.h
index b4a4d63f01..bb4e87ff7e 100644
--- a/compiler/debug/dwarf/debug_line_opcode_writer.h
+++ b/compiler/debug/dwarf/debug_line_opcode_writer.h
@@ -31,7 +31,7 @@ namespace dwarf {
// * Keep track of current state and convert absolute values to deltas.
// * Divide by header-defined factors as appropriate.
template<typename Vector = std::vector<uint8_t>>
-class DebugLineOpCodeWriter FINAL : private Writer<Vector> {
+class DebugLineOpCodeWriter final : private Writer<Vector> {
static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
public:
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 8a07e9c12c..b7117bd223 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -26,7 +26,7 @@ class CompilerDriver;
class DexFile;
class VerificationResults;
-class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
+class QuickCompilerCallbacks final : public CompilerCallbacks {
public:
explicit QuickCompilerCallbacks(CompilerCallbacks::CallbackMode mode)
: CompilerCallbacks(mode), dex_files_(nullptr) {}
@@ -34,20 +34,20 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
~QuickCompilerCallbacks() { }
void MethodVerified(verifier::MethodVerifier* verifier)
- REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) override;
- void ClassRejected(ClassReference ref) OVERRIDE;
+ void ClassRejected(ClassReference ref) override;
// We are running in an environment where we can call patchoat safely so we should.
- bool IsRelocationPossible() OVERRIDE {
+ bool IsRelocationPossible() override {
return true;
}
- verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE {
+ verifier::VerifierDeps* GetVerifierDeps() const override {
return verifier_deps_.get();
}
- void SetVerifierDeps(verifier::VerifierDeps* deps) OVERRIDE {
+ void SetVerifierDeps(verifier::VerifierDeps* deps) override {
verifier_deps_.reset(deps);
}
@@ -55,18 +55,18 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
verification_results_ = verification_results;
}
- ClassStatus GetPreviousClassState(ClassReference ref) OVERRIDE;
+ ClassStatus GetPreviousClassState(ClassReference ref) override;
void SetDoesClassUnloading(bool does_class_unloading, CompilerDriver* compiler_driver)
- OVERRIDE {
+ override {
does_class_unloading_ = does_class_unloading;
compiler_driver_ = compiler_driver;
DCHECK(!does_class_unloading || compiler_driver_ != nullptr);
}
- void UpdateClassState(ClassReference ref, ClassStatus state) OVERRIDE;
+ void UpdateClassState(ClassReference ref, ClassStatus state) override;
- bool CanUseOatStatusForVerification(mirror::Class* klass) OVERRIDE
+ bool CanUseOatStatusForVerification(mirror::Class* klass) override
REQUIRES_SHARED(Locks::mutator_lock_);
void SetDexFiles(const std::vector<const DexFile*>* dex_files) {
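
Annotation: the REQUIRES_SHARED(Locks::mutator_lock_) annotations riding along in this hunk come from Clang's thread-safety analysis. A hedged sketch of how such macros are typically layered over the Clang attributes (the macro spellings below are illustrative, not ART's; only the __attribute__s are Clang's, and the checks fire under -Wthread-safety):

#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define REQUIRES_SHARED(...)
#endif

// A capability type models the lock; functions declare what they need.
struct CAPABILITY("mutex") MutatorLock {};

MutatorLock mutator_lock;

void MethodVerified() REQUIRES_SHARED(mutator_lock) {}

int main() {
  // Calling MethodVerified() here without holding the lock in shared mode
  // draws a -Wthread-safety warning under Clang; the analysis is purely
  // compile-time and costs nothing at runtime.
}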
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6eca304223..21975dee68 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -971,7 +971,7 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
public:
ResolveCatchBlockExceptionsClassVisitor() : classes_() {}
- virtual bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
classes_.push_back(c);
return true;
}
@@ -1034,7 +1034,7 @@ class RecordImageClassesVisitor : public ClassVisitor {
explicit RecordImageClassesVisitor(HashSet<std::string>* image_classes)
: image_classes_(image_classes) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
image_classes_->insert(klass->GetDescriptor(&temp));
return true;
@@ -1210,7 +1210,7 @@ class ClinitImageUpdate {
: data_(data),
hs_(hs) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
StringPiece name(klass->GetDescriptor(&temp));
if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
@@ -1475,7 +1475,7 @@ class ParallelCompilationManager {
end_(end),
fn_(fn) {}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
while (true) {
const size_t index = manager_->NextIndex();
if (UNLIKELY(index >= end_)) {
@@ -1486,7 +1486,7 @@ class ParallelCompilationManager {
}
}
- void Finalize() OVERRIDE {
+ void Finalize() override {
delete this;
}
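
Annotation: the Run()/Finalize() pair above belongs to a worker-task protocol: Run() claims indices until the range is exhausted, and Finalize() lets heap-allocated tasks free themselves once the pool is done with them. A toy version under those assumptions (all names hypothetical):

#include <atomic>
#include <cstdio>

class Task {
 public:
  virtual ~Task() {}
  virtual void Run() = 0;
  virtual void Finalize() {}
};

class ForAllTask final : public Task {
 public:
  ForAllTask(std::atomic<size_t>* next_index, size_t end, void (*fn)(size_t))
      : next_index_(next_index), end_(end), fn_(fn) {}

  void Run() override {
    while (true) {
      // Each worker atomically claims the next unprocessed index.
      const size_t index = next_index_->fetch_add(1);
      if (index >= end_) {
        break;  // All work items claimed; other workers may still be running.
      }
      fn_(index);
    }
  }

  void Finalize() override { delete this; }  // The task owns itself.

 private:
  std::atomic<size_t>* const next_index_;
  const size_t end_;
  void (*const fn_)(size_t);
};

int main() {
  std::atomic<size_t> next{0};
  Task* task = new ForAllTask(&next, 3, [](size_t i) { std::printf("%zu\n", i); });
  task->Run();
  task->Finalize();  // Deletes the task.
}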
@@ -1568,7 +1568,7 @@ class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
: manager_(manager) {}
- void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+ void Visit(size_t class_def_index) override REQUIRES(!Locks::mutator_lock_) {
ScopedTrace trace(__FUNCTION__);
Thread* const self = Thread::Current();
jobject jclass_loader = manager_->GetClassLoader();
@@ -1667,7 +1667,7 @@ class ResolveTypeVisitor : public CompilationVisitor {
public:
explicit ResolveTypeVisitor(const ParallelCompilationManager* manager) : manager_(manager) {
}
- void Visit(size_t type_idx) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+ void Visit(size_t type_idx) override REQUIRES(!Locks::mutator_lock_) {
// Class derived values are more complicated, they require the linker and loader.
ScopedObjectAccess soa(Thread::Current());
ClassLinker* class_linker = manager_->GetClassLinker();
@@ -1888,7 +1888,7 @@ class VerifyClassVisitor : public CompilationVisitor {
VerifyClassVisitor(const ParallelCompilationManager* manager, verifier::HardFailLogMode log_level)
: manager_(manager), log_level_(log_level) {}
- virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) override {
ScopedTrace trace(__FUNCTION__);
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *manager_->GetDexFile();
@@ -2020,7 +2020,7 @@ class SetVerifiedClassVisitor : public CompilationVisitor {
public:
explicit SetVerifiedClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) override {
ScopedTrace trace(__FUNCTION__);
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *manager_->GetDexFile();
@@ -2085,7 +2085,7 @@ class InitializeClassVisitor : public CompilationVisitor {
public:
explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
- void Visit(size_t class_def_index) OVERRIDE {
+ void Visit(size_t class_def_index) override {
ScopedTrace trace(__FUNCTION__);
jobject jclass_loader = manager_->GetClassLoader();
const DexFile& dex_file = *manager_->GetDexFile();
@@ -2470,7 +2470,7 @@ class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor
explicit InitializeArrayClassesAndCreateConflictTablesVisitor(VariableSizedHandleScope& hs)
: hs_(hs) {}
- virtual bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE
+ bool operator()(ObjPtr<mirror::Class> klass) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
return true;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 2eeb4399db..fe1568da83 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -186,7 +186,7 @@ TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
class CompilerDriverProfileTest : public CompilerDriverTest {
protected:
- ProfileCompilationInfo* GetProfileCompilationInfo() OVERRIDE {
+ ProfileCompilationInfo* GetProfileCompilationInfo() override {
ScopedObjectAccess soa(Thread::Current());
std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
@@ -200,7 +200,7 @@ class CompilerDriverProfileTest : public CompilerDriverTest {
return &profile_info_;
}
- CompilerFilter::Filter GetCompilerFilter() const OVERRIDE {
+ CompilerFilter::Filter GetCompilerFilter() const override {
// Use a profile based filter.
return CompilerFilter::kSpeedProfile;
}
@@ -278,7 +278,7 @@ TEST_F(CompilerDriverProfileTest, ProfileGuidedCompilation) {
// which will be used for OatClass.
class CompilerDriverVerifyTest : public CompilerDriverTest {
protected:
- CompilerFilter::Filter GetCompilerFilter() const OVERRIDE {
+ CompilerFilter::Filter GetCompilerFilter() const override {
return CompilerFilter::kVerify;
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 601c9140dd..34aceba1c4 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -43,7 +43,7 @@ class DexFile;
enum class InstructionSet;
class InstructionSetFeatures;
-class CompilerOptions FINAL {
+class CompilerOptions final {
public:
// Guide heuristics to determine whether to compile method if profile data not available.
static const size_t kDefaultHugeMethodThreshold = 10000;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 3cb4a652ad..92b9543c27 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -221,12 +221,12 @@ struct jni_remove_extra_parameters : public remove_extra_parameters_helper<T, fn
class JniCompilerTest : public CommonCompilerTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonCompilerTest::SetUp();
check_generic_jni_ = false;
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
android::ResetNativeLoader();
CommonCompilerTest::TearDown();
}
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index 249f20225d..b327898483 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -25,24 +25,24 @@ namespace arm {
constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
-class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class ArmManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
is_synchronized,
shorty,
PointerSize::k32) {}
- ~ArmManagedRuntimeCallingConvention() OVERRIDE {}
+ ~ArmManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
ManagedRegisterEntrySpills entry_spills_;
@@ -50,37 +50,37 @@ class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCon
DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
};
-class ArmJniCallingConvention FINAL : public JniCallingConvention {
+class ArmJniCallingConvention final : public JniCallingConvention {
public:
ArmJniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~ArmJniCallingConvention() OVERRIDE {}
+ ~ArmJniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- void Next() OVERRIDE; // Override default behavior for AAPCS
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ void Next() override; // Override default behavior for AAPCS
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// AAPCS mandates return values are extended.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return false;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
// Padding to ensure longs and doubles are not split in AAPCS
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 56189427b6..ed0ddeb1b2 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -25,24 +25,24 @@ namespace arm64 {
constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
-class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class Arm64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
is_synchronized,
shorty,
PointerSize::k64) {}
- ~Arm64ManagedRuntimeCallingConvention() OVERRIDE {}
+ ~Arm64ManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
ManagedRegisterEntrySpills entry_spills_;
@@ -50,36 +50,36 @@ class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingC
DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention);
};
-class Arm64JniCallingConvention FINAL : public JniCallingConvention {
+class Arm64JniCallingConvention final : public JniCallingConvention {
public:
Arm64JniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~Arm64JniCallingConvention() OVERRIDE {}
+ ~Arm64JniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// aarch64 calling convention leaves upper bits undefined.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return true;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
DISALLOW_COPY_AND_ASSIGN(Arm64JniCallingConvention);
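
Annotation: the two RequiresSmallResultTypeExtension() answers here differ because 32-bit AAPCS mandates that callees extend small return values, while AAPCS64 leaves the upper bits of the result register undefined, so the caller must extend before using the value. A portable illustration of the extension the generated stub would perform (helper names invented):

#include <cstdint>
#include <cstdio>

// Keep only the low 8 bits, then sign-extend to 32 bits; on arm64 this is
// what an SXTB on the returned w-register accomplishes.
int32_t ExtendInt8Result(int32_t raw_register) {
  return static_cast<int32_t>(static_cast<int8_t>(raw_register & 0xff));
}

// Zero-extension for an unsigned 16-bit result (UXTH on arm64).
uint32_t ExtendUint16Result(uint32_t raw_register) {
  return static_cast<uint32_t>(static_cast<uint16_t>(raw_register & 0xffff));
}

int main() {
  // Pretend the callee returned 0x7fffff80 but the declared type is int8_t:
  // only the low byte (-128) is meaningful.
  std::printf("%d\n", ExtendInt8Result(0x7fffff80));    // -128
  std::printf("%u\n", ExtendUint16Result(0xdeadbeef));  // 48879
}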
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index ad3f118bad..165fc6056e 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -27,24 +27,24 @@ constexpr size_t kFramePointerSize = 4;
static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k32),
"Invalid frame pointer size");
-class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class MipsManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
is_synchronized,
shorty,
PointerSize::k32) {}
- ~MipsManagedRuntimeCallingConvention() OVERRIDE {}
+ ~MipsManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
ManagedRegisterEntrySpills entry_spills_;
@@ -52,37 +52,37 @@ class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCo
DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention);
};
-class MipsJniCallingConvention FINAL : public JniCallingConvention {
+class MipsJniCallingConvention final : public JniCallingConvention {
public:
MipsJniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~MipsJniCallingConvention() OVERRIDE {}
+ ~MipsJniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- void Next() OVERRIDE; // Override default behavior for o32.
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ void Next() override; // Override default behavior for o32.
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// Mips does not need to extend small return types.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return false;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
// Padding to ensure longs and doubles are not split in o32.
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
index faedaeff6c..d87f73a1ea 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.h
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -27,24 +27,24 @@ constexpr size_t kFramePointerSize = 8;
static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k64),
"Invalid frame pointer size");
-class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class Mips64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
is_synchronized,
shorty,
PointerSize::k64) {}
- ~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
+ ~Mips64ManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
ManagedRegisterEntrySpills entry_spills_;
@@ -52,36 +52,36 @@ class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCalling
DISALLOW_COPY_AND_ASSIGN(Mips64ManagedRuntimeCallingConvention);
};
-class Mips64JniCallingConvention FINAL : public JniCallingConvention {
+class Mips64JniCallingConvention final : public JniCallingConvention {
public:
Mips64JniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~Mips64JniCallingConvention() OVERRIDE {}
+ ~Mips64JniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// Mips64 does not need to extend small return types.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return false;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
DISALLOW_COPY_AND_ASSIGN(Mips64JniCallingConvention);
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index be83cdaad0..d0c6198e77 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -25,7 +25,7 @@ namespace x86 {
constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
-class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class X86ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
@@ -33,17 +33,17 @@ class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCon
shorty,
PointerSize::k32),
gpr_arg_count_(0) {}
- ~X86ManagedRuntimeCallingConvention() OVERRIDE {}
+ ~X86ManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
int gpr_arg_count_;
@@ -53,36 +53,36 @@ class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCon
};
// Implements the x86 cdecl calling convention.
-class X86JniCallingConvention FINAL : public JniCallingConvention {
+class X86JniCallingConvention final : public JniCallingConvention {
public:
X86JniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~X86JniCallingConvention() OVERRIDE {}
+ ~X86JniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// x86 needs to extend small return types.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return true;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
DISALLOW_COPY_AND_ASSIGN(X86JniCallingConvention);
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index cdba334d81..dfab41b154 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -23,59 +23,59 @@
namespace art {
namespace x86_64 {
-class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class X86_64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
public:
X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
: ManagedRuntimeCallingConvention(is_static,
is_synchronized,
shorty,
PointerSize::k64) {}
- ~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
+ ~X86_64ManagedRuntimeCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// Managed runtime calling convention
- ManagedRegister MethodRegister() OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
- const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ ManagedRegister MethodRegister() override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
+ const ManagedRegisterEntrySpills& EntrySpills() override;
private:
ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(X86_64ManagedRuntimeCallingConvention);
};
-class X86_64JniCallingConvention FINAL : public JniCallingConvention {
+class X86_64JniCallingConvention final : public JniCallingConvention {
public:
X86_64JniCallingConvention(bool is_static,
bool is_synchronized,
bool is_critical_native,
const char* shorty);
- ~X86_64JniCallingConvention() OVERRIDE {}
+ ~X86_64JniCallingConvention() override {}
// Calling convention
- ManagedRegister ReturnRegister() OVERRIDE;
- ManagedRegister IntReturnRegister() OVERRIDE;
- ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ ManagedRegister ReturnRegister() override;
+ ManagedRegister IntReturnRegister() override;
+ ManagedRegister InterproceduralScratchRegister() override;
// JNI calling convention
- size_t FrameSize() OVERRIDE;
- size_t OutArgSize() OVERRIDE;
- ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
- ManagedRegister ReturnScratchRegister() const OVERRIDE;
- uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE;
- bool IsCurrentParamInRegister() OVERRIDE;
- bool IsCurrentParamOnStack() OVERRIDE;
- ManagedRegister CurrentParamRegister() OVERRIDE;
- FrameOffset CurrentParamStackOffset() OVERRIDE;
+ size_t FrameSize() override;
+ size_t OutArgSize() override;
+ ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+ ManagedRegister ReturnScratchRegister() const override;
+ uint32_t CoreSpillMask() const override;
+ uint32_t FpSpillMask() const override;
+ bool IsCurrentParamInRegister() override;
+ bool IsCurrentParamOnStack() override;
+ ManagedRegister CurrentParamRegister() override;
+ FrameOffset CurrentParamStackOffset() override;
// x86-64 needs to extend small return types.
- bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ bool RequiresSmallResultTypeExtension() const override {
return true;
}
protected:
- size_t NumberOfOutgoingStackArgs() OVERRIDE;
+ size_t NumberOfOutgoingStackArgs() override;
private:
DISALLOW_COPY_AND_ASSIGN(X86_64JniCallingConvention);
diff --git a/compiler/linker/buffered_output_stream.h b/compiler/linker/buffered_output_stream.h
index 512409cb2f..cb1c44ba23 100644
--- a/compiler/linker/buffered_output_stream.h
+++ b/compiler/linker/buffered_output_stream.h
@@ -26,17 +26,17 @@
namespace art {
namespace linker {
-class BufferedOutputStream FINAL : public OutputStream {
+class BufferedOutputStream final : public OutputStream {
public:
explicit BufferedOutputStream(std::unique_ptr<OutputStream> out);
- ~BufferedOutputStream() OVERRIDE;
+ ~BufferedOutputStream() override;
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE;
+ bool WriteFully(const void* buffer, size_t byte_count) override;
- off_t Seek(off_t offset, Whence whence) OVERRIDE;
+ off_t Seek(off_t offset, Whence whence) override;
- bool Flush() OVERRIDE;
+ bool Flush() override;
private:
static const size_t kBufferSize = 8 * KB;
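
Annotation: BufferedOutputStream above wraps another OutputStream behind the same small interface and batches writes into an 8 KiB buffer. A hedged, self-contained sketch of that decorator shape (class names are illustrative, not ART's; Seek() is omitted for brevity):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

class OutputStream {
 public:
  virtual ~OutputStream() {}
  virtual bool WriteFully(const void* buffer, size_t byte_count) = 0;
  virtual bool Flush() = 0;
};

class StdoutStream final : public OutputStream {
 public:
  bool WriteFully(const void* buffer, size_t byte_count) override {
    return std::fwrite(buffer, 1, byte_count, stdout) == byte_count;
  }
  bool Flush() override { return std::fflush(stdout) == 0; }
};

class BufferedStream final : public OutputStream {
 public:
  explicit BufferedStream(OutputStream* out) : out_(out) {}

  bool WriteFully(const void* buffer, size_t byte_count) override {
    if (byte_count >= kBufferSize) {
      // Oversized writes bypass the buffer entirely.
      return Drain() && out_->WriteFully(buffer, byte_count);
    }
    if (buffer_.size() + byte_count > kBufferSize && !Drain()) {
      return false;
    }
    const uint8_t* bytes = static_cast<const uint8_t*>(buffer);
    buffer_.insert(buffer_.end(), bytes, bytes + byte_count);
    return true;
  }

  bool Flush() override { return Drain() && out_->Flush(); }

 private:
  static constexpr size_t kBufferSize = 8 * 1024;

  bool Drain() {
    if (buffer_.empty()) return true;
    const bool ok = out_->WriteFully(buffer_.data(), buffer_.size());
    buffer_.clear();
    return ok;
  }

  OutputStream* const out_;
  std::vector<uint8_t> buffer_;
};

int main() {
  StdoutStream sink;
  BufferedStream buffered(&sink);
  buffered.WriteFully("buffered hello\n", 15);
  return buffered.Flush() ? 0 : 1;
}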
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 974c590a65..81ecc175b5 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -75,7 +75,7 @@ namespace linker {
// The debug sections are written last for easier stripping.
//
template <typename ElfTypes>
-class ElfBuilder FINAL {
+class ElfBuilder final {
public:
static constexpr size_t kMaxProgramHeaders = 16;
// SHA-1 digest. Not using SHA_DIGEST_LENGTH from openssl/sha.h to avoid
@@ -173,21 +173,21 @@ class ElfBuilder FINAL {
// This function always succeeds to simplify code.
// Use builder's Good() to check the actual status.
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+ bool WriteFully(const void* buffer, size_t byte_count) override {
CHECK(owner_->current_section_ == this);
return owner_->stream_.WriteFully(buffer, byte_count);
}
// This function always succeeds to simplify code.
// Use builder's Good() to check the actual status.
- off_t Seek(off_t offset, Whence whence) OVERRIDE {
+ off_t Seek(off_t offset, Whence whence) override {
// Forward the seek as-is and trust the caller to use it reasonably.
return owner_->stream_.Seek(offset, whence);
}
// This function flushes the output and returns whether it succeeded.
// If there was a previous failure, this does nothing and returns false, i.e. failed.
- bool Flush() OVERRIDE {
+ bool Flush() override {
return owner_->stream_.Flush();
}
@@ -271,7 +271,7 @@ class ElfBuilder FINAL {
};
// Writer of .dynstr section.
- class CachedStringSection FINAL : public CachedSection {
+ class CachedStringSection final : public CachedSection {
public:
CachedStringSection(ElfBuilder<ElfTypes>* owner,
const std::string& name,
@@ -295,7 +295,7 @@ class ElfBuilder FINAL {
};
// Writer of .strtab and .shstrtab sections.
- class StringSection FINAL : public Section {
+ class StringSection final : public Section {
public:
StringSection(ElfBuilder<ElfTypes>* owner,
const std::string& name,
@@ -338,7 +338,7 @@ class ElfBuilder FINAL {
};
// Writer of .dynsym and .symtab sections.
- class SymbolSection FINAL : public Section {
+ class SymbolSection final : public Section {
public:
SymbolSection(ElfBuilder<ElfTypes>* owner,
const std::string& name,
@@ -410,7 +410,7 @@ class ElfBuilder FINAL {
std::vector<Elf_Sym> syms_; // Buffered/cached content of the whole section.
};
- class AbiflagsSection FINAL : public Section {
+ class AbiflagsSection final : public Section {
public:
// Section with Mips abiflag info.
static constexpr uint8_t MIPS_AFL_REG_NONE = 0; // no registers
@@ -480,7 +480,7 @@ class ElfBuilder FINAL {
} abiflags_;
};
- class BuildIdSection FINAL : public Section {
+ class BuildIdSection final : public Section {
public:
BuildIdSection(ElfBuilder<ElfTypes>* owner,
const std::string& name,
diff --git a/compiler/linker/error_delaying_output_stream.h b/compiler/linker/error_delaying_output_stream.h
index 659f1dc093..cadd71c3f0 100644
--- a/compiler/linker/error_delaying_output_stream.h
+++ b/compiler/linker/error_delaying_output_stream.h
@@ -27,7 +27,7 @@ namespace art {
namespace linker {
// OutputStream wrapper that delays reporting an error until Flush().
-class ErrorDelayingOutputStream FINAL : public OutputStream {
+class ErrorDelayingOutputStream final : public OutputStream {
public:
explicit ErrorDelayingOutputStream(OutputStream* output)
: OutputStream(output->GetLocation()),
@@ -37,7 +37,7 @@ class ErrorDelayingOutputStream FINAL : public OutputStream {
// This function always succeeds to simplify code.
// Use Good() to check the actual status of the output stream.
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+ bool WriteFully(const void* buffer, size_t byte_count) override {
if (output_good_) {
if (!output_->WriteFully(buffer, byte_count)) {
PLOG(ERROR) << "Failed to write " << byte_count
@@ -51,7 +51,7 @@ class ErrorDelayingOutputStream FINAL : public OutputStream {
// This function always succeeds to simplify code.
// Use Good() to check the actual status of the output stream.
- off_t Seek(off_t offset, Whence whence) OVERRIDE {
+ off_t Seek(off_t offset, Whence whence) override {
// We keep shadow copy of the offset so that we return
// the expected value even if the output stream failed.
off_t new_offset;
@@ -81,7 +81,7 @@ class ErrorDelayingOutputStream FINAL : public OutputStream {
// Flush the output and return whether all operations have succeeded.
// Do nothing if we already have a pending error.
- bool Flush() OVERRIDE {
+ bool Flush() override {
if (output_good_) {
output_good_ = output_->Flush();
}
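
Annotation: the error-delaying wrapper keeps call sites simple: writes always "succeed", a sticky good-bit latches the first failure, and Flush() is the single point where errors surface. A minimal standalone version of the same idea:

#include <cstddef>
#include <cstdio>

class ErrorDelayingWriter {
 public:
  explicit ErrorDelayingWriter(std::FILE* out) : out_(out), good_(true) {}

  // Never reports failure directly; the first failure is latched.
  void Write(const void* data, size_t size) {
    if (good_ && std::fwrite(data, 1, size, out_) != size) {
      good_ = false;
    }
  }

  // The one place errors surface.
  bool Flush() {
    if (good_ && std::fflush(out_) != 0) {
      good_ = false;
    }
    return good_;
  }

 private:
  std::FILE* out_;
  bool good_;
};

int main() {
  ErrorDelayingWriter writer(stdout);
  writer.Write("hello\n", 6);
  return writer.Flush() ? 0 : 1;
}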
diff --git a/compiler/linker/file_output_stream.h b/compiler/linker/file_output_stream.h
index deb051fca4..1417132981 100644
--- a/compiler/linker/file_output_stream.h
+++ b/compiler/linker/file_output_stream.h
@@ -24,17 +24,17 @@
namespace art {
namespace linker {
-class FileOutputStream FINAL : public OutputStream {
+class FileOutputStream final : public OutputStream {
public:
explicit FileOutputStream(File* file);
- ~FileOutputStream() OVERRIDE {}
+ ~FileOutputStream() override {}
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE;
+ bool WriteFully(const void* buffer, size_t byte_count) override;
- off_t Seek(off_t offset, Whence whence) OVERRIDE;
+ off_t Seek(off_t offset, Whence whence) override;
- bool Flush() OVERRIDE;
+ bool Flush() override;
private:
File* const file_;
diff --git a/compiler/linker/output_stream_test.cc b/compiler/linker/output_stream_test.cc
index f93ea7a709..bcb129c2da 100644
--- a/compiler/linker/output_stream_test.cc
+++ b/compiler/linker/output_stream_test.cc
@@ -106,20 +106,20 @@ TEST_F(OutputStreamTest, BufferedFlush) {
CheckingOutputStream()
: OutputStream("dummy"),
flush_called(false) { }
- ~CheckingOutputStream() OVERRIDE {}
+ ~CheckingOutputStream() override {}
bool WriteFully(const void* buffer ATTRIBUTE_UNUSED,
- size_t byte_count ATTRIBUTE_UNUSED) OVERRIDE {
+ size_t byte_count ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
- off_t Seek(off_t offset ATTRIBUTE_UNUSED, Whence whence ATTRIBUTE_UNUSED) OVERRIDE {
+ off_t Seek(off_t offset ATTRIBUTE_UNUSED, Whence whence ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
- bool Flush() OVERRIDE {
+ bool Flush() override {
flush_called = true;
return true;
}
diff --git a/compiler/linker/vector_output_stream.h b/compiler/linker/vector_output_stream.h
index 92caf596ab..0d34da6cba 100644
--- a/compiler/linker/vector_output_stream.h
+++ b/compiler/linker/vector_output_stream.h
@@ -26,13 +26,13 @@
namespace art {
namespace linker {
-class VectorOutputStream FINAL : public OutputStream {
+class VectorOutputStream final : public OutputStream {
public:
VectorOutputStream(const std::string& location, std::vector<uint8_t>* vector);
- ~VectorOutputStream() OVERRIDE {}
+ ~VectorOutputStream() override {}
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+ bool WriteFully(const void* buffer, size_t byte_count) override {
if (static_cast<size_t>(offset_) == vector_->size()) {
const uint8_t* start = reinterpret_cast<const uint8_t*>(buffer);
vector_->insert(vector_->end(), &start[0], &start[byte_count]);
@@ -46,9 +46,9 @@ class VectorOutputStream FINAL : public OutputStream {
return true;
}
- off_t Seek(off_t offset, Whence whence) OVERRIDE;
+ off_t Seek(off_t offset, Whence whence) override;
- bool Flush() OVERRIDE {
+ bool Flush() override {
return true;
}
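
Annotation: VectorOutputStream's WriteFully() above has a fast append path when the cursor sits at the end of the vector, and an overwrite-and-grow path after a backward seek. A compact sketch of the same logic (SeekTo() stands in for the real Seek(offset, whence)):

#include <cstdint>
#include <cstring>
#include <vector>

class VectorStream {
 public:
  explicit VectorStream(std::vector<uint8_t>* vector) : vector_(vector) {}

  bool WriteFully(const void* buffer, size_t byte_count) {
    const uint8_t* start = static_cast<const uint8_t*>(buffer);
    if (offset_ == vector_->size()) {
      // Fast path: cursor at the end, just append.
      vector_->insert(vector_->end(), start, start + byte_count);
    } else {
      // After a backward seek: overwrite in place, growing if needed.
      if (offset_ + byte_count > vector_->size()) {
        vector_->resize(offset_ + byte_count);
      }
      std::memcpy(vector_->data() + offset_, start, byte_count);
    }
    offset_ += byte_count;
    return true;
  }

  void SeekTo(size_t offset) { offset_ = offset; }

 private:
  std::vector<uint8_t>* vector_;
  size_t offset_ = 0;
};

int main() {
  std::vector<uint8_t> bytes;
  VectorStream stream(&bytes);
  stream.WriteFully("abcd", 4);
  stream.SeekTo(0);
  stream.WriteFully("X", 1);  // overwrites 'a'
}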
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index dfefa524bf..1c3660c0a7 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -388,10 +388,10 @@ class MonotonicValueRange : public ValueRange {
return induction_variable_->GetBlock();
}
- MonotonicValueRange* AsMonotonicValueRange() OVERRIDE { return this; }
+ MonotonicValueRange* AsMonotonicValueRange() override { return this; }
// If it's certain that this value range fits in other_range.
- bool FitsIn(ValueRange* other_range) const OVERRIDE {
+ bool FitsIn(ValueRange* other_range) const override {
if (other_range == nullptr) {
return true;
}
@@ -402,7 +402,7 @@ class MonotonicValueRange : public ValueRange {
// Try to narrow this MonotonicValueRange given another range.
// Ideally it will return a normal ValueRange. But due to
// possible overflow/underflow, that may not be possible.
- ValueRange* Narrow(ValueRange* range) OVERRIDE {
+ ValueRange* Narrow(ValueRange* range) override {
if (range == nullptr) {
return this;
}
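
Annotation: FitsIn() and Narrow() above implement a small value-range lattice: a dominating condition narrows a range by intersection, and a bounds check is removable once the index range fits in [0, length). A toy model of that reasoning (ART's real ranges also handle monotonic induction variables and overflow, which this sketch sidesteps by using 64-bit bounds):

#include <algorithm>
#include <cstdint>
#include <cstdio>

struct Range {
  int64_t lo;
  int64_t hi;

  // Intersect with what a dominating condition proves.
  Range Narrow(const Range& other) const {
    return Range{std::max(lo, other.lo), std::min(hi, other.hi)};
  }

  // True when every value in this range lies inside `other`.
  bool FitsIn(const Range& other) const {
    return other.lo <= lo && hi <= other.hi;
  }
};

int main() {
  Range index{0, 100};                   // e.g. a loop induction variable
  Range guarded = index.Narrow({0, 9});  // after a dominating `if (i < 10)`
  Range valid{0, 9};                     // array of length 10
  std::printf("check removable: %s\n", guarded.FitsIn(valid) ? "yes" : "no");
}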
@@ -530,7 +530,7 @@ class BCEVisitor : public HGraphVisitor {
induction_range_(induction_analysis),
next_(nullptr) {}
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
DCHECK(!IsAddedBlock(block));
first_index_bounds_check_map_.clear();
// Visit phis and instructions using a safe iterator. The iteration protects
@@ -820,7 +820,7 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
+ void VisitBoundsCheck(HBoundsCheck* bounds_check) override {
HBasicBlock* block = bounds_check->GetBlock();
HInstruction* index = bounds_check->InputAt(0);
HInstruction* array_length = bounds_check->InputAt(1);
@@ -945,7 +945,7 @@ class BCEVisitor : public HGraphVisitor {
return true;
}
- void VisitPhi(HPhi* phi) OVERRIDE {
+ void VisitPhi(HPhi* phi) override {
if (phi->IsLoopHeaderPhi()
&& (phi->GetType() == DataType::Type::kInt32)
&& HasSameInputAtBackEdges(phi)) {
@@ -992,14 +992,14 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitIf(HIf* instruction) OVERRIDE {
+ void VisitIf(HIf* instruction) override {
if (instruction->InputAt(0)->IsCondition()) {
HCondition* cond = instruction->InputAt(0)->AsCondition();
HandleIf(instruction, cond->GetLeft(), cond->GetRight(), cond->GetCondition());
}
}
- void VisitAdd(HAdd* add) OVERRIDE {
+ void VisitAdd(HAdd* add) override {
HInstruction* right = add->GetRight();
if (right->IsIntConstant()) {
ValueRange* left_range = LookupValueRange(add->GetLeft(), add->GetBlock());
@@ -1013,7 +1013,7 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitSub(HSub* sub) OVERRIDE {
+ void VisitSub(HSub* sub) override {
HInstruction* left = sub->GetLeft();
HInstruction* right = sub->GetRight();
if (right->IsIntConstant()) {
@@ -1115,19 +1115,19 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitDiv(HDiv* div) OVERRIDE {
+ void VisitDiv(HDiv* div) override {
FindAndHandlePartialArrayLength(div);
}
- void VisitShr(HShr* shr) OVERRIDE {
+ void VisitShr(HShr* shr) override {
FindAndHandlePartialArrayLength(shr);
}
- void VisitUShr(HUShr* ushr) OVERRIDE {
+ void VisitUShr(HUShr* ushr) override {
FindAndHandlePartialArrayLength(ushr);
}
- void VisitAnd(HAnd* instruction) OVERRIDE {
+ void VisitAnd(HAnd* instruction) override {
if (instruction->GetRight()->IsIntConstant()) {
int32_t constant = instruction->GetRight()->AsIntConstant()->GetValue();
if (constant > 0) {
@@ -1142,7 +1142,7 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitRem(HRem* instruction) OVERRIDE {
+ void VisitRem(HRem* instruction) override {
HInstruction* left = instruction->GetLeft();
HInstruction* right = instruction->GetRight();
@@ -1202,7 +1202,7 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void VisitNewArray(HNewArray* new_array) OVERRIDE {
+ void VisitNewArray(HNewArray* new_array) override {
HInstruction* len = new_array->GetLength();
if (!len->IsIntConstant()) {
HInstruction *left;
@@ -1240,7 +1240,7 @@ class BCEVisitor : public HGraphVisitor {
* has occurred (see AddCompareWithDeoptimization()), since in those cases it would be
* unsafe to hoist array references across their deoptimization instruction inside a loop.
*/
- void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
+ void VisitArrayGet(HArrayGet* array_get) override {
if (!has_dom_based_dynamic_bce_ && array_get->IsInLoop()) {
HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation();
if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) &&
diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h
index 92ab7984c8..ef08877daa 100644
--- a/compiler/optimizing/bounds_check_elimination.h
+++ b/compiler/optimizing/bounds_check_elimination.h
@@ -34,7 +34,7 @@ class BoundsCheckElimination : public HOptimization {
side_effects_(side_effects),
induction_analysis_(induction_analysis) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kBoundsCheckEliminationPassName = "BCE";
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index bdc395b52d..c6232ef661 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -44,9 +44,9 @@ class CHAGuardVisitor : HGraphVisitor {
GetGraph()->SetNumberOfCHAGuards(0);
}
- void VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) OVERRIDE;
+ void VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) override;
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+ void VisitBasicBlock(HBasicBlock* block) override;
private:
void RemoveGuard(HShouldDeoptimizeFlag* flag);
diff --git a/compiler/optimizing/cha_guard_optimization.h b/compiler/optimizing/cha_guard_optimization.h
index d2c5a344b7..440d51a969 100644
--- a/compiler/optimizing/cha_guard_optimization.h
+++ b/compiler/optimizing/cha_guard_optimization.h
@@ -30,7 +30,7 @@ class CHAGuardOptimization : public HOptimization {
const char* name = kCHAGuardOptimizationPassName)
: HOptimization(graph, name) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kCHAGuardOptimizationPassName = "cha_guard_optimization";
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a13efcaee2..a90ff3f885 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1394,37 +1394,12 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
}
bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
- HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
-
- return (first_next_not_move != nullptr)
- && first_next_not_move->CanDoImplicitNullCheckOn(null_check->InputAt(0));
+ return null_check->IsEmittedAtUseSite();
}
void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
- if (!compiler_options_.GetImplicitNullChecks()) {
- return;
- }
-
- // If we are from a static path don't record the pc as we can't throw NPE.
- // NB: having the checks here makes the code much less verbose in the arch
- // specific code generators.
- if (instr->IsStaticFieldSet() || instr->IsStaticFieldGet()) {
- return;
- }
-
- if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) {
- return;
- }
-
- // Find the first previous instruction which is not a move.
- HInstruction* first_prev_not_move = instr->GetPreviousDisregardingMoves();
-
- // If the instruction is a null check it means that `instr` is the first user
- // and needs to record the pc.
- if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
- HNullCheck* null_check = first_prev_not_move->AsNullCheck();
- // TODO: The parallel moves modify the environment. Their changes need to be
- // reverted otherwise the stack maps at the throw point will not be correct.
+ HNullCheck* null_check = instr->GetImplicitNullCheck();
+ if (null_check != nullptr) {
RecordPcInfo(null_check, null_check->GetDexPc());
}
}
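
Annotation: this hunk replaces the backward scan past parallel moves with a single query; presumably an earlier pass now links each memory instruction to the null check it subsumes and marks that check as emitted at its use site. A hypothetical sketch of the shape of the new path (names illustrative, not ART's):

#include <cstdio>

struct NullCheck {
  int dex_pc;
};

struct Instruction {
  NullCheck* implicit_null_check = nullptr;  // Set once by a prepass.
  NullCheck* GetImplicitNullCheck() const { return implicit_null_check; }
};

void MaybeRecordImplicitNullCheck(const Instruction& instr) {
  // After the refactor: no backward scan, no move-skipping, one lookup.
  if (NullCheck* check = instr.GetImplicitNullCheck()) {
    std::printf("record pc info at dex pc %d\n", check->dex_pc);
  }
}

int main() {
  NullCheck check{42};
  Instruction load;
  load.implicit_null_check = &check;  // Done by the hypothetical prepass.
  MaybeRecordImplicitNullCheck(load);
}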
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 462225aafb..d56f7aaca1 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -247,7 +247,7 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : SlowPathCodeARM64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -273,9 +273,9 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
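
Annotation: the slow paths being touched throughout this file all follow one shape: out-of-line code emitted behind an entry label, GetDescription() for diagnostics, and IsFatal() for paths that throw and never return to the fast path. A schematic stand-in (not ART's API):

#include <cstdio>
#include <vector>

class SlowPathCode {
 public:
  virtual ~SlowPathCode() {}
  virtual void EmitNativeCode() = 0;
  virtual bool IsFatal() const { return false; }  // true: throws, no fall-through
  virtual const char* GetDescription() const = 0;
};

class BoundsCheckSlowPath final : public SlowPathCode {
 public:
  void EmitNativeCode() override {
    std::puts("  call ThrowArrayBounds  ; never returns");
  }
  bool IsFatal() const override { return true; }
  const char* GetDescription() const override { return "BoundsCheckSlowPath"; }
};

int main() {
  BoundsCheckSlowPath bounds;
  std::vector<SlowPathCode*> paths{&bounds};
  // The code generator emits all collected slow paths after the fast code.
  for (SlowPathCode* path : paths) {
    std::printf("%s (fatal=%d):\n", path->GetDescription(), path->IsFatal());
    path->EmitNativeCode();
  }
}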
@@ -285,16 +285,16 @@ class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : SlowPathCodeARM64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
arm64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
@@ -308,7 +308,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -349,7 +349,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM64"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathARM64"; }
private:
// The class this slow path will load.
@@ -363,7 +363,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
explicit LoadStringSlowPathARM64(HLoadString* instruction)
: SlowPathCodeARM64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -384,7 +384,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
@@ -394,7 +394,7 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -408,9 +408,9 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
@@ -421,7 +421,7 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCodeARM64(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
@@ -445,7 +445,7 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
return successor_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM64"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathARM64"; }
private:
// If not null, the block to branch to after the suspend check.
@@ -462,7 +462,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal)
: SlowPathCodeARM64(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(instruction_->IsCheckCast()
@@ -503,8 +503,8 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathARM64"; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -517,7 +517,7 @@ class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction)
: SlowPathCodeARM64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -529,7 +529,7 @@ class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
@@ -539,7 +539,7 @@ class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit ArraySetSlowPathARM64(HInstruction* instruction) : SlowPathCodeARM64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -570,7 +570,7 @@ class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM64"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathARM64"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64);
@@ -628,7 +628,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
DataType::Type type = DataType::Type::kReference;
@@ -754,7 +754,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathARM64"; }
+ const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathARM64"; }
private:
Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -794,7 +794,7 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DataType::Type type = DataType::Type::kReference;
DCHECK(locations->CanCall());
@@ -831,7 +831,7 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARM64"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARM64"; }
private:
const Location out_;
@@ -5065,7 +5065,7 @@ void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
return;
}
{
- // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+ // Ensure that between load and RecordPcInfo there are no pools emitted.
EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
Location obj = instruction->GetLocations()->InAt(0);
__ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
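The reworded comment in the hunk above is load-bearing: the Ldr of wzr is itself the implicit null check, so the EmissionCheckScope must guarantee that VIXL emits no literal pool between the faulting load and the PC recording. A condensed sketch of that invariant, using only calls visible in the hunk (the RecordPcInfo signature is assumed from the renamed comment):

{
  // No pools inside this scope: the recorded PC must point exactly at the
  // load, which is the instruction that faults when obj is null.
  EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
  Location obj = instruction->GetLocations()->InAt(0);
  __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));  // the implicit null check
  RecordPcInfo(instruction, instruction->GetDexPc());  // must immediately follow the load
}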
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 4f6a44fe4d..2e7a20b553 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -125,8 +125,8 @@ class SlowPathCodeARM64 : public SlowPathCode {
vixl::aarch64::Label* GetEntryLabel() { return &entry_label_; }
vixl::aarch64::Label* GetExitLabel() { return &exit_label_; }
- void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
- void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+ void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
+ void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
private:
vixl::aarch64::Label entry_label_;
@@ -216,11 +216,11 @@ class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConvention
InvokeDexCallingConventionVisitorARM64() {}
virtual ~InvokeDexCallingConventionVisitorARM64() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type return_type) const OVERRIDE {
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type return_type) const override {
return calling_convention.GetReturnLocation(return_type);
}
- Location GetMethodLocation() const OVERRIDE;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConvention calling_convention;
@@ -232,22 +232,22 @@ class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
public:
FieldAccessCallingConventionARM64() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return helpers::LocationFrom(vixl::aarch64::x1);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return helpers::LocationFrom(vixl::aarch64::x0);
}
- Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return helpers::LocationFrom(vixl::aarch64::x0);
}
Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
- bool is_instance) const OVERRIDE {
+ bool is_instance) const override {
return is_instance
? helpers::LocationFrom(vixl::aarch64::x2)
: helpers::LocationFrom(vixl::aarch64::x1);
}
- Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return helpers::LocationFrom(vixl::aarch64::d0);
}
@@ -260,7 +260,7 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
@@ -268,7 +268,7 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -360,7 +360,7 @@ class LocationsBuilderARM64 : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
@@ -368,7 +368,7 @@ class LocationsBuilderARM64 : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -393,11 +393,11 @@ class ParallelMoveResolverARM64 : public ParallelMoveResolverNoSwap {
: ParallelMoveResolverNoSwap(allocator), codegen_(codegen), vixl_temps_() {}
protected:
- void PrepareForEmitNativeCode() OVERRIDE;
- void FinishEmitNativeCode() OVERRIDE;
- Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE;
- void FreeScratchLocation(Location loc) OVERRIDE;
- void EmitMove(size_t index) OVERRIDE;
+ void PrepareForEmitNativeCode() override;
+ void FinishEmitNativeCode() override;
+ Location AllocateScratchLocationFor(Location::Kind kind) override;
+ void FreeScratchLocation(Location loc) override;
+ void EmitMove(size_t index) override;
private:
Arm64Assembler* GetAssembler() const;
@@ -418,39 +418,39 @@ class CodeGeneratorARM64 : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorARM64() {}
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
vixl::aarch64::CPURegList GetFramePreservedCoreRegisters() const;
vixl::aarch64::CPURegList GetFramePreservedFPRegisters() const;
- void Bind(HBasicBlock* block) OVERRIDE;
+ void Bind(HBasicBlock* block) override;
vixl::aarch64::Label* GetLabelOf(HBasicBlock* block) {
block = FirstNonEmptyBlock(block);
return &(block_labels_[block->GetBlockId()]);
}
- size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const override {
return kArm64WordSize;
}
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+ size_t GetFloatingPointSpillSlotSize() const override {
return GetGraph()->HasSIMD()
? 2 * kArm64WordSize // 16 bytes == 2 arm64 words for each spill
        : 1 * kArm64WordSize;  // 8 bytes == 1 arm64 word for each spill
}
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
vixl::aarch64::Label* block_entry_label = GetLabelOf(block);
DCHECK(block_entry_label->IsBound());
return block_entry_label->GetLocation();
}
- HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
- HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
- Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
- const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
+ HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+ Arm64Assembler* GetAssembler() override { return &assembler_; }
+ const Arm64Assembler& GetAssembler() const override { return assembler_; }
vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
// Emit a write barrier.
@@ -462,12 +462,12 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Register allocation.
- void SetupBlockedRegisters() const OVERRIDE;
+ void SetupBlockedRegisters() const override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
// The number of registers that can be allocated. The register allocator may
// decide to reserve and not use a few of them.
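The SIMD-aware spill sizing earlier in this hunk reduces to a one-line rule; a hedged restatement (the 8-byte word size is an assumption inferred from the kArm64WordSize name):

// Sketch of GetFloatingPointSpillSlotSize() above: a spilled SIMD value
// occupies a 128-bit Q register, i.e. two 8-byte arm64 words; a scalar FP
// value fits in one word.
constexpr size_t kArm64WordSizeSketch = 8;  // assumption: kArm64WordSize == 8
constexpr size_t SpillSlotSize(bool graph_has_simd) {
  return (graph_has_simd ? 2 : 1) * kArm64WordSizeSketch;
}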
@@ -479,35 +479,35 @@ class CodeGeneratorARM64 : public CodeGenerator {
static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfFPRegisters;
static constexpr int kNumberOfAllocatableRegisterPairs = 0;
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kArm64;
}
const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const;
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_.resize(GetGraph()->GetBlocks().size());
}
// We want to use the STP and LDP instructions to spill and restore registers for slow paths.
// These instructions can only encode offsets that are multiples of the register size accessed.
- uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return vixl::aarch64::kXRegSizeInBytes; }
+ uint32_t GetPreferredSlotsAlignment() const override { return vixl::aarch64::kXRegSizeInBytes; }
JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
return jump_tables_.back().get();
}
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void Finalize(CodeAllocator* allocator) override;
// Code generation helpers.
void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant);
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void MoveConstant(Location destination, int32_t value) override;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
void Load(DataType::Type type,
vixl::aarch64::CPURegister dst,
@@ -529,7 +529,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
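The declarations around this hunk boundary split runtime calls into a recording and a non-recording flavor. A hedged usage sketch (the non-recording variant's name is not visible in this hunk and is an assumption; entrypoint and arguments are illustrative):

// Common case: emit the call and record a stack map so a stack walk can
// attribute this PC to the instruction's dex_pc.
codegen->InvokeRuntime(kQuickThrowDivZero, instruction, instruction->GetDexPc(), slow_path);

// Special case: emit the call without a stack map entry, per the comment above.
codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction, slow_path);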
@@ -537,35 +537,35 @@ class CodeGeneratorARM64 : public CodeGenerator {
HInstruction* instruction,
SlowPathCode* slow_path);
- ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+ ParallelMoveResolverARM64* GetMoveResolver() override { return &move_resolver_; }
- bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override {
return false;
}
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
- DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+ DataType::Type type ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL);
}
@@ -652,13 +652,13 @@ class CodeGeneratorARM64 : public CodeGenerator {
void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_reference);
void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
- bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+ bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
void EmitThunkCode(const linker::LinkerPatch& patch,
/*out*/ ArenaVector<uint8_t>* code,
- /*out*/ std::string* debug_name) OVERRIDE;
+ /*out*/ std::string* debug_name) override;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Generate a GC root reference load:
//
@@ -765,10 +765,10 @@ class CodeGeneratorARM64 : public CodeGenerator {
// artReadBarrierForRootSlow.
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
- void GenerateNop() OVERRIDE;
+ void GenerateNop() override;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
private:
// Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 8c5eafd0bb..3580975c62 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -383,7 +383,7 @@ class NullCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
explicit NullCheckSlowPathARMVIXL(HNullCheck* instruction) : SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -397,9 +397,9 @@ class NullCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARMVIXL);
@@ -410,16 +410,16 @@ class DivZeroCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
explicit DivZeroCheckSlowPathARMVIXL(HDivZeroCheck* instruction)
: SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
arm_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARMVIXL);
@@ -430,7 +430,7 @@ class SuspendCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
SuspendCheckSlowPathARMVIXL(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCodeARMVIXL(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
arm_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
@@ -451,7 +451,7 @@ class SuspendCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
return successor_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathARMVIXL"; }
private:
// If not null, the block to branch to after the suspend check.
@@ -468,7 +468,7 @@ class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
explicit BoundsCheckSlowPathARMVIXL(HBoundsCheck* instruction)
: SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
@@ -495,9 +495,9 @@ class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARMVIXL);
@@ -511,7 +511,7 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -549,7 +549,7 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathARMVIXL"; }
private:
// The class this slow path will load.
@@ -563,7 +563,7 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
explicit LoadStringSlowPathARMVIXL(HLoadString* instruction)
: SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
@@ -585,7 +585,7 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARMVIXL);
@@ -596,7 +596,7 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
TypeCheckSlowPathARMVIXL(HInstruction* instruction, bool is_fatal)
: SlowPathCodeARMVIXL(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -640,9 +640,9 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathARMVIXL"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -655,7 +655,7 @@ class DeoptimizationSlowPathARMVIXL : public SlowPathCodeARMVIXL {
explicit DeoptimizationSlowPathARMVIXL(HDeoptimize* instruction)
: SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -668,7 +668,7 @@ class DeoptimizationSlowPathARMVIXL : public SlowPathCodeARMVIXL {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARMVIXL);
@@ -678,7 +678,7 @@ class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
explicit ArraySetSlowPathARMVIXL(HInstruction* instruction) : SlowPathCodeARMVIXL(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -709,7 +709,7 @@ class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathARMVIXL"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARMVIXL);
@@ -744,7 +744,7 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
vixl32::Register reg_out = RegisterFrom(out_);
@@ -868,7 +868,7 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierForHeapReferenceSlowPathARMVIXL";
}
@@ -910,7 +910,7 @@ class ReadBarrierForRootSlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
vixl32::Register reg_out = RegisterFrom(out_);
DCHECK(locations->CanCall());
@@ -936,7 +936,7 @@ class ReadBarrierForRootSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARMVIXL"; }
private:
const Location out_;
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index cb131a7ac1..33502d4f68 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -178,9 +178,9 @@ class InvokeDexCallingConventionVisitorARMVIXL : public InvokeDexCallingConventi
InvokeDexCallingConventionVisitorARMVIXL() {}
virtual ~InvokeDexCallingConventionVisitorARMVIXL() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type type) const OVERRIDE;
- Location GetMethodLocation() const OVERRIDE;
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type type) const override;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConventionARMVIXL calling_convention;
@@ -193,25 +193,25 @@ class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention
public:
FieldAccessCallingConventionARMVIXL() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return helpers::LocationFrom(vixl::aarch32::r1);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return helpers::LocationFrom(vixl::aarch32::r0);
}
- Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type) const override {
return DataType::Is64BitType(type)
? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1)
: helpers::LocationFrom(vixl::aarch32::r0);
}
- Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+ Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
return DataType::Is64BitType(type)
? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
: (is_instance
? helpers::LocationFrom(vixl::aarch32::r2)
: helpers::LocationFrom(vixl::aarch32::r1));
}
- Location GetFpuLocation(DataType::Type type) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type) const override {
return DataType::Is64BitType(type)
? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1)
: helpers::LocationFrom(vixl::aarch32::s0);
@@ -229,8 +229,8 @@ class SlowPathCodeARMVIXL : public SlowPathCode {
vixl::aarch32::Label* GetEntryLabel() { return &entry_label_; }
vixl::aarch32::Label* GetExitLabel() { return &exit_label_; }
- void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
- void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+ void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
+ void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
private:
vixl::aarch32::Label entry_label_;
@@ -244,10 +244,10 @@ class ParallelMoveResolverARMVIXL : public ParallelMoveResolverWithSwap {
ParallelMoveResolverARMVIXL(ArenaAllocator* allocator, CodeGeneratorARMVIXL* codegen)
: ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
- void EmitMove(size_t index) OVERRIDE;
- void EmitSwap(size_t index) OVERRIDE;
- void SpillScratch(int reg) OVERRIDE;
- void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) override;
+ void EmitSwap(size_t index) override;
+ void SpillScratch(int reg) override;
+ void RestoreScratch(int reg) override;
ArmVIXLAssembler* GetAssembler() const;
@@ -266,7 +266,7 @@ class LocationsBuilderARMVIXL : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
@@ -274,7 +274,7 @@ class LocationsBuilderARMVIXL : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -304,7 +304,7 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
@@ -312,7 +312,7 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -432,48 +432,48 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorARMVIXL() {}
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
- void Bind(HBasicBlock* block) OVERRIDE;
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
+ void Bind(HBasicBlock* block) override;
+ void MoveConstant(Location destination, int32_t value) override;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
- size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const override {
return static_cast<size_t>(kArmPointerSize);
}
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return vixl::aarch32::kRegSizeInBytes; }
+ size_t GetFloatingPointSpillSlotSize() const override { return vixl::aarch32::kRegSizeInBytes; }
- HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+ HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
- HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+ HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
- ArmVIXLAssembler* GetAssembler() OVERRIDE { return &assembler_; }
+ ArmVIXLAssembler* GetAssembler() override { return &assembler_; }
- const ArmVIXLAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+ const ArmVIXLAssembler& GetAssembler() const override { return assembler_; }
ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
vixl::aarch32::Label* block_entry_label = GetLabelOf(block);
DCHECK(block_entry_label->IsBound());
return block_entry_label->GetLocation();
}
void FixJumpTables();
- void SetupBlockedRegisters() const OVERRIDE;
+ void SetupBlockedRegisters() const override;
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
- ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
- InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kThumb2; }
+ ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
+ InstructionSet GetInstructionSet() const override { return InstructionSet::kThumb2; }
const ArmInstructionSetFeatures& GetInstructionSetFeatures() const;
@@ -495,7 +495,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -519,42 +519,42 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
vixl32::Label* GetFinalLabel(HInstruction* instruction, vixl32::Label* final_label);
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_.resize(GetGraph()->GetBlocks().size());
}
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void Finalize(CodeAllocator* allocator) override;
- bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+ bool NeedsTwoRegisters(DataType::Type type) const override {
return type == DataType::Type::kFloat64 || type == DataType::Type::kInt64;
}
- void ComputeSpillMask() OVERRIDE;
+ void ComputeSpillMask() override;
vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
- void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+ void MoveFromReturnRegister(Location trg, DataType::Type type) override;
// The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
// whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
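The PcRelativePatchInfo comment above describes link-time patching of PC-relative code. Purely as illustration, a hypothetical shape for such a record (field names are assumptions, not ART's actual layout):

// Minimal bookkeeping a linker fixup needs: which entity is referenced and
// where in the emitted code the placeholder instructions sit. The target may
// live in .data.bimg.rel.ro, .bss, or the boot image, per the comment above.
struct PcRelativePatchSketch {
  uint32_t target_index;    // method/string/type index being referenced
  uint32_t label_position;  // code offset of the instruction pair to patch
};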
@@ -604,13 +604,13 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_reference);
void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
- bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+ bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
void EmitThunkCode(const linker::LinkerPatch& patch,
/*out*/ ArenaVector<uint8_t>* code,
- /*out*/ std::string* debug_name) OVERRIDE;
+ /*out*/ std::string* debug_name) override;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Generate a GC root reference load:
//
@@ -722,10 +722,10 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
// artReadBarrierForRootSlow.
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
- void GenerateNop() OVERRIDE;
+ void GenerateNop() override;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr));
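CreateJumpTable above is a small arena-owned factory; a hedged usage sketch for lowering a packed switch (variable names illustrative):

// In the HPackedSwitch visitor: get a table owned by jump_tables_, emit an
// indexed branch through it, and rely on FixJumpTables() (declared earlier
// in this header) to resolve per-case label offsets after code layout.
JumpTableARMVIXL* table = codegen->CreateJumpTable(switch_instr);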
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index aed334b024..d74a7a760f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -176,7 +176,7 @@ class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
public:
explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : SlowPathCodeMIPS(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
@@ -201,9 +201,9 @@ class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
@@ -213,16 +213,16 @@ class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
public:
explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : SlowPathCodeMIPS(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
@@ -236,7 +236,7 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -280,7 +280,7 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathMIPS"; }
private:
// The class this slow path will load.
@@ -294,7 +294,7 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
explicit LoadStringSlowPathMIPS(HLoadString* instruction)
: SlowPathCodeMIPS(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
@@ -318,7 +318,7 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
@@ -328,7 +328,7 @@ class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
public:
explicit NullCheckSlowPathMIPS(HNullCheck* instr) : SlowPathCodeMIPS(instr) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -342,9 +342,9 @@ class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
@@ -355,7 +355,7 @@ class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCodeMIPS(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
@@ -375,7 +375,7 @@ class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
return &return_label_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS"; }
HBasicBlock* GetSuccessor() const {
return successor_;
@@ -396,7 +396,7 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
explicit TypeCheckSlowPathMIPS(HInstruction* instruction, bool is_fatal)
: SlowPathCodeMIPS(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
@@ -435,9 +435,9 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathMIPS"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -450,7 +450,7 @@ class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
: SlowPathCodeMIPS(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -462,7 +462,7 @@ class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
@@ -472,7 +472,7 @@ class ArraySetSlowPathMIPS : public SlowPathCodeMIPS {
public:
explicit ArraySetSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -503,7 +503,7 @@ class ArraySetSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathMIPS"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS);
@@ -533,9 +533,9 @@ class ReadBarrierMarkSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }
+ const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register ref_reg = ref_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -627,11 +627,11 @@ class ReadBarrierMarkAndUpdateFieldSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS";
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register ref_reg = ref_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -798,7 +798,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
@@ -922,7 +922,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }
+ const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }
private:
Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -965,7 +965,7 @@ class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -995,7 +995,7 @@ class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS"; }
private:
const Location out_;
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 4830ac9bc6..bf9589331b 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -81,9 +81,9 @@ class InvokeDexCallingConventionVisitorMIPS : public InvokeDexCallingConventionV
InvokeDexCallingConventionVisitorMIPS() {}
virtual ~InvokeDexCallingConventionVisitorMIPS() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type type) const OVERRIDE;
- Location GetMethodLocation() const OVERRIDE;
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type type) const override;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConvention calling_convention;
@@ -110,23 +110,23 @@ class FieldAccessCallingConventionMIPS : public FieldAccessCallingConvention {
public:
FieldAccessCallingConventionMIPS() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return Location::RegisterLocation(A1);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return Location::RegisterLocation(A0);
}
- Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type) const override {
return DataType::Is64BitType(type)
? Location::RegisterPairLocation(V0, V1)
: Location::RegisterLocation(V0);
}
- Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+ Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
return DataType::Is64BitType(type)
? Location::RegisterPairLocation(A2, A3)
: (is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1));
}
- Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::FpuRegisterLocation(F0);
}
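The MIPS field-access convention above routes 64-bit values into register pairs; restated as the concrete locations returned above:

// 64-bit set value: the A2/A3 pair.
Location v64 = Location::RegisterPairLocation(A2, A3);
// 32-bit set value: A2 for instance fields; A1 for static fields, since no
// object pointer occupies A1 in that case.
Location v32_instance = Location::RegisterLocation(A2);
Location v32_static = Location::RegisterLocation(A1);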
@@ -139,10 +139,10 @@ class ParallelMoveResolverMIPS : public ParallelMoveResolverWithSwap {
ParallelMoveResolverMIPS(ArenaAllocator* allocator, CodeGeneratorMIPS* codegen)
: ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
- void EmitMove(size_t index) OVERRIDE;
- void EmitSwap(size_t index) OVERRIDE;
- void SpillScratch(int reg) OVERRIDE;
- void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) override;
+ void EmitSwap(size_t index) override;
+ void SpillScratch(int reg) override;
+ void RestoreScratch(int reg) override;
void Exchange(int index1, int index2, bool double_slot);
void ExchangeQuadSlots(int index1, int index2);
@@ -176,14 +176,14 @@ class LocationsBuilderMIPS : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -210,14 +210,14 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -374,35 +374,35 @@ class CodeGeneratorMIPS : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorMIPS() {}
- void ComputeSpillMask() OVERRIDE;
- bool HasAllocatedCalleeSaveRegisters() const OVERRIDE;
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
+ void ComputeSpillMask() override;
+ bool HasAllocatedCalleeSaveRegisters() const override;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
- void Bind(HBasicBlock* block) OVERRIDE;
+ void Bind(HBasicBlock* block) override;
void MoveConstant(Location location, HConstant* c);
- size_t GetWordSize() const OVERRIDE { return kMipsWordSize; }
+ size_t GetWordSize() const override { return kMipsWordSize; }
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+ size_t GetFloatingPointSpillSlotSize() const override {
return GetGraph()->HasSIMD()
? 2 * kMipsDoublewordSize // 16 bytes for each spill.
: 1 * kMipsDoublewordSize; // 8 bytes for each spill.
}
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
return assembler_.GetLabelLocation(GetLabelOf(block));
}
- HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
- HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
- MipsAssembler* GetAssembler() OVERRIDE { return &assembler_; }
- const MipsAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+ HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+ MipsAssembler* GetAssembler() override { return &assembler_; }
+ const MipsAssembler& GetAssembler() const override { return assembler_; }
// Emit linker patches.
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
@@ -493,20 +493,20 @@ class CodeGeneratorMIPS : public CodeGenerator {
// Register allocation.
- void SetupBlockedRegisters() const OVERRIDE;
+ void SetupBlockedRegisters() const override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
void ClobberRA() {
clobbered_ra_ = true;
}
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
- InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips; }
+ InstructionSet GetInstructionSet() const override { return InstructionSet::kMips; }
const MipsInstructionSetFeatures& GetInstructionSetFeatures() const;
@@ -514,25 +514,25 @@ class CodeGeneratorMIPS : public CodeGenerator {
return CommonGetLabelOf<MipsLabel>(block_labels_, block);
}
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_ = CommonInitializeLabels<MipsLabel>();
}
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void Finalize(CodeAllocator* allocator) override;
// Code generation helpers.
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveConstant(Location destination, int32_t value) override;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
// Generate code to invoke a runtime entry point.
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -543,41 +543,41 @@ class CodeGeneratorMIPS : public CodeGenerator {
void GenerateInvokeRuntime(int32_t entry_point_offset, bool direct);
- ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+ ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
- bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+ bool NeedsTwoRegisters(DataType::Type type) const override {
return type == DataType::Type::kInt64;
}
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
- DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+ DataType::Type type ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
}
- void GenerateNop() OVERRIDE;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateNop() override;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
// The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
// whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
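
The hunks above are mechanical: every OVERRIDE on a virtual-method declaration is respelled with the C++11 keyword. A minimal sketch of the pattern being retired, with hypothetical types rather than the ART classes, and assuming the macro was a plain alias for the keyword (its conventional definition in ART's base/macros.h once C++11 became mandatory):

    #define OVERRIDE override  // legacy alias from the pre-C++11 era

    struct SlowPathBase {
      virtual ~SlowPathBase() {}
      virtual const char* GetDescription() const = 0;
    };

    struct ExampleSlowPath : SlowPathBase {
      // Old spelling: the macro expands to the keyword, so the
      // generated code is identical either way.
      const char* GetDescription() const OVERRIDE { return "ExampleSlowPath"; }
    };

Because the macro expanded to the keyword, only the source spelling changes; the semantics of every declaration in these hunks are untouched.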
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 72318e98b0..7c89808d54 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -128,7 +128,7 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : SlowPathCodeMIPS64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
@@ -153,9 +153,9 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
@@ -166,16 +166,16 @@ class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction)
: SlowPathCodeMIPS64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
mips64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
@@ -189,7 +189,7 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -233,7 +233,7 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathMIPS64"; }
private:
// The class this slow path will load.
@@ -247,7 +247,7 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
explicit LoadStringSlowPathMIPS64(HLoadString* instruction)
: SlowPathCodeMIPS64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
@@ -274,7 +274,7 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
@@ -284,7 +284,7 @@ class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : SlowPathCodeMIPS64(instr) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -298,9 +298,9 @@ class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
@@ -311,7 +311,7 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCodeMIPS64(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
@@ -331,7 +331,7 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
return &return_label_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS64"; }
HBasicBlock* GetSuccessor() const {
return successor_;
@@ -352,7 +352,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
explicit TypeCheckSlowPathMIPS64(HInstruction* instruction, bool is_fatal)
: SlowPathCodeMIPS64(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
uint32_t dex_pc = instruction_->GetDexPc();
@@ -392,9 +392,9 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathMIPS64"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -407,7 +407,7 @@ class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
: SlowPathCodeMIPS64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -419,7 +419,7 @@ class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
@@ -429,7 +429,7 @@ class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
explicit ArraySetSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -460,7 +460,7 @@ class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathMIPS64"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS64);
@@ -490,9 +490,9 @@ class ReadBarrierMarkSlowPathMIPS64 : public SlowPathCodeMIPS64 {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }
+ const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
DCHECK(locations->CanCall());
@@ -583,11 +583,11 @@ class ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 : public SlowPathCodeMIPS64 {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS64";
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
DCHECK(locations->CanCall());
@@ -744,7 +744,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
DataType::Type type = DataType::Type::kReference;
@@ -864,7 +864,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierForHeapReferenceSlowPathMIPS64";
}
@@ -909,7 +909,7 @@ class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DataType::Type type = DataType::Type::kReference;
GpuRegister reg_out = out_.AsRegister<GpuRegister>();
@@ -938,7 +938,7 @@ class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS64"; }
private:
const Location out_;
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index fc0908b2cb..ddc154d40f 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -79,9 +79,9 @@ class InvokeDexCallingConventionVisitorMIPS64 : public InvokeDexCallingConventio
InvokeDexCallingConventionVisitorMIPS64() {}
virtual ~InvokeDexCallingConventionVisitorMIPS64() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type type) const OVERRIDE;
- Location GetMethodLocation() const OVERRIDE;
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type type) const override;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConvention calling_convention;
@@ -108,22 +108,22 @@ class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
public:
FieldAccessCallingConventionMIPS64() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return Location::RegisterLocation(A1);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return Location::RegisterLocation(A0);
}
- Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::RegisterLocation(V0);
}
Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
- bool is_instance) const OVERRIDE {
+ bool is_instance) const override {
return is_instance
? Location::RegisterLocation(A2)
: Location::RegisterLocation(A1);
}
- Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::FpuRegisterLocation(F0);
}
@@ -136,10 +136,10 @@ class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
: ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
- void EmitMove(size_t index) OVERRIDE;
- void EmitSwap(size_t index) OVERRIDE;
- void SpillScratch(int reg) OVERRIDE;
- void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) override;
+ void EmitSwap(size_t index) override;
+ void SpillScratch(int reg) override;
+ void RestoreScratch(int reg) override;
void Exchange(int index1, int index2, bool double_slot);
void ExchangeQuadSlots(int index1, int index2);
@@ -173,14 +173,14 @@ class LocationsBuilderMIPS64 : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -207,14 +207,14 @@ class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -356,31 +356,31 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorMIPS64() {}
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
- void Bind(HBasicBlock* block) OVERRIDE;
+ void Bind(HBasicBlock* block) override;
- size_t GetWordSize() const OVERRIDE { return kMips64DoublewordSize; }
+ size_t GetWordSize() const override { return kMips64DoublewordSize; }
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+ size_t GetFloatingPointSpillSlotSize() const override {
return GetGraph()->HasSIMD()
? 2 * kMips64DoublewordSize // 16 bytes for each spill.
: 1 * kMips64DoublewordSize; // 8 bytes for each spill.
}
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
return assembler_.GetLabelLocation(GetLabelOf(block));
}
- HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
- HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
- Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
- const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
+ HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+ Mips64Assembler* GetAssembler() override { return &assembler_; }
+ const Mips64Assembler& GetAssembler() const override { return assembler_; }
// Emit linker patches.
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
@@ -471,17 +471,17 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
// Register allocation.
- void SetupBlockedRegisters() const OVERRIDE;
+ void SetupBlockedRegisters() const override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
- InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips64; }
+ InstructionSet GetInstructionSet() const override { return InstructionSet::kMips64; }
const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const;
@@ -489,22 +489,22 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
return CommonGetLabelOf<Mips64Label>(block_labels_, block);
}
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_ = CommonInitializeLabels<Mips64Label>();
}
// We prefer aligned loads and stores (less code), so spill and restore registers in slow paths
// at aligned locations.
- uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return kMips64DoublewordSize; }
+ uint32_t GetPreferredSlotsAlignment() const override { return kMips64DoublewordSize; }
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void Finalize(CodeAllocator* allocator) override;
// Code generation helpers.
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveConstant(Location destination, int32_t value) override;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
void SwapLocations(Location loc1, Location loc2, DataType::Type type);
@@ -513,7 +513,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -523,39 +523,39 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
void GenerateInvokeRuntime(int32_t entry_point_offset);
- ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+ ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
- bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; }
+ bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override { return false; }
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
- DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+ DataType::Type type ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
}
- void GenerateNop() OVERRIDE;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateNop() override;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
// The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
// whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
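
One practical motivation for preferring the keyword over a project-local macro is tooling: compilers, IDEs, and clang-tidy's modernize-use-override check all key on the literal keyword, and it is the specifier that catches signature drift at compile time. A self-contained illustration, using hypothetical types rather than ART code:

    struct CodeGen {
      virtual ~CodeGen() {}
      virtual void Emit(int pos) {}
    };

    struct CodeGenX86 : CodeGen {
      // void Emit(long pos) override;  // would not compile: no base virtual
      //                                // has this signature, so nothing is
      //                                // overridden and the compiler rejects it.
      void Emit(int pos) override {}    // matches CodeGen::Emit(int), so it overrides
    };

Without the specifier, the mistyped declaration would silently introduce a new virtual function instead of overriding the base one.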
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index df00ec7d30..6a27081dab 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -72,7 +72,7 @@ class NullCheckSlowPathX86 : public SlowPathCode {
public:
explicit NullCheckSlowPathX86(HNullCheck* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -86,9 +86,9 @@ class NullCheckSlowPathX86 : public SlowPathCode {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
@@ -98,16 +98,16 @@ class DivZeroCheckSlowPathX86 : public SlowPathCode {
public:
explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
x86_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86);
@@ -118,7 +118,7 @@ class DivRemMinusOneSlowPathX86 : public SlowPathCode {
DivRemMinusOneSlowPathX86(HInstruction* instruction, Register reg, bool is_div)
: SlowPathCode(instruction), reg_(reg), is_div_(is_div) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
__ Bind(GetEntryLabel());
if (is_div_) {
__ negl(reg_);
@@ -128,7 +128,7 @@ class DivRemMinusOneSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "DivRemMinusOneSlowPathX86"; }
+ const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86"; }
private:
Register reg_;
@@ -140,7 +140,7 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
public:
explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
@@ -187,9 +187,9 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
@@ -200,7 +200,7 @@ class SuspendCheckSlowPathX86 : public SlowPathCode {
SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCode(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
@@ -224,7 +224,7 @@ class SuspendCheckSlowPathX86 : public SlowPathCode {
return successor_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathX86"; }
private:
HBasicBlock* const successor_;
@@ -237,7 +237,7 @@ class LoadStringSlowPathX86 : public SlowPathCode {
public:
explicit LoadStringSlowPathX86(HLoadString* instruction): SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -256,7 +256,7 @@ class LoadStringSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
@@ -270,7 +270,7 @@ class LoadClassSlowPathX86 : public SlowPathCode {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -308,7 +308,7 @@ class LoadClassSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathX86"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathX86"; }
private:
// The class this slow path will load.
@@ -322,7 +322,7 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
TypeCheckSlowPathX86(HInstruction* instruction, bool is_fatal)
: SlowPathCode(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -375,8 +375,8 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathX86"; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -389,7 +389,7 @@ class DeoptimizationSlowPathX86 : public SlowPathCode {
explicit DeoptimizationSlowPathX86(HDeoptimize* instruction)
: SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -402,7 +402,7 @@ class DeoptimizationSlowPathX86 : public SlowPathCode {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86);
@@ -412,7 +412,7 @@ class ArraySetSlowPathX86 : public SlowPathCode {
public:
explicit ArraySetSlowPathX86(HInstruction* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -443,7 +443,7 @@ class ArraySetSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
@@ -471,9 +471,9 @@ class ReadBarrierMarkSlowPathX86 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86"; }
+ const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register ref_reg = ref_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -558,9 +558,9 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
+ const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register ref_reg = ref_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -724,7 +724,7 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
@@ -843,7 +843,7 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathX86"; }
+ const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathX86"; }
private:
Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -883,7 +883,7 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -909,7 +909,7 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86"; }
private:
const Location out_;
@@ -8100,7 +8100,7 @@ class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenera
HX86ComputeBaseMethodAddress* base_method_address_;
private:
- void Process(const MemoryRegion& region, int pos) OVERRIDE {
+ void Process(const MemoryRegion& region, int pos) override {
// Patch the correct offset for the instruction. The place to patch is the
// last 4 bytes of the instruction.
// The value to patch is the distance from the offset in the constant area
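
This last x86 hunk touches RIPFixup::Process, whose comment describes the patching scheme: the displacement sits in the final 4 bytes of the emitted instruction, measured against the constant area. A simplified sketch of that style of byte-level patch, with a hypothetical helper rather than the ART implementation:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Overwrite the 4 displacement bytes that end an instruction.
    // 'pos' is the offset just past the instruction in the code buffer,
    // so the bytes to rewrite are the 4 immediately before it.
    void PatchLast4Bytes(std::vector<uint8_t>* code, int pos, int32_t displacement) {
      std::memcpy(code->data() + pos - 4, &displacement, sizeof(displacement));
    }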
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index cb58e920ea..615477171b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -83,9 +83,9 @@ class InvokeDexCallingConventionVisitorX86 : public InvokeDexCallingConventionVi
InvokeDexCallingConventionVisitorX86() {}
virtual ~InvokeDexCallingConventionVisitorX86() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type type) const OVERRIDE;
- Location GetMethodLocation() const OVERRIDE;
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type type) const override;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConvention calling_convention;
@@ -97,18 +97,18 @@ class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention {
public:
FieldAccessCallingConventionX86() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return Location::RegisterLocation(ECX);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return Location::RegisterLocation(EAX);
}
- Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type) const override {
return DataType::Is64BitType(type)
? Location::RegisterPairLocation(EAX, EDX)
: Location::RegisterLocation(EAX);
}
- Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+ Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
return DataType::Is64BitType(type)
? (is_instance
? Location::RegisterPairLocation(EDX, EBX)
@@ -117,7 +117,7 @@ class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention {
? Location::RegisterLocation(EDX)
: Location::RegisterLocation(ECX));
}
- Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::FpuRegisterLocation(XMM0);
}
@@ -130,10 +130,10 @@ class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap {
ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
: ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
- void EmitMove(size_t index) OVERRIDE;
- void EmitSwap(size_t index) OVERRIDE;
- void SpillScratch(int reg) OVERRIDE;
- void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) override;
+ void EmitSwap(size_t index) override;
+ void SpillScratch(int reg) override;
+ void RestoreScratch(int reg) override;
X86Assembler* GetAssembler() const;
@@ -155,14 +155,14 @@ class LocationsBuilderX86 : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -186,14 +186,14 @@ class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -320,23 +320,23 @@ class CodeGeneratorX86 : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorX86() {}
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
- void Bind(HBasicBlock* block) OVERRIDE;
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
+ void Bind(HBasicBlock* block) override;
+ void MoveConstant(Location destination, int32_t value) override;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
// Generate code to invoke a runtime entry point.
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -346,46 +346,46 @@ class CodeGeneratorX86 : public CodeGenerator {
void GenerateInvokeRuntime(int32_t entry_point_offset);
- size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const override {
return kX86WordSize;
}
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+ size_t GetFloatingPointSpillSlotSize() const override {
return GetGraph()->HasSIMD()
? 4 * kX86WordSize // 16 bytes == 4 words for each spill
: 2 * kX86WordSize; // 8 bytes == 2 words for each spill
}
- HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ HGraphVisitor* GetLocationBuilder() override {
return &location_builder_;
}
- HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+ HGraphVisitor* GetInstructionVisitor() override {
return &instruction_visitor_;
}
- X86Assembler* GetAssembler() OVERRIDE {
+ X86Assembler* GetAssembler() override {
return &assembler_;
}
- const X86Assembler& GetAssembler() const OVERRIDE {
+ const X86Assembler& GetAssembler() const override {
return assembler_;
}
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
return GetLabelOf(block)->Position();
}
- void SetupBlockedRegisters() const OVERRIDE;
+ void SetupBlockedRegisters() const override;
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
- ParallelMoveResolverX86* GetMoveResolver() OVERRIDE {
+ ParallelMoveResolverX86* GetMoveResolver() override {
return &move_resolver_;
}
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kX86;
}
@@ -399,25 +399,25 @@ class CodeGeneratorX86 : public CodeGenerator {
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
// Generate a call to a static or direct method.
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
// Generate a call to a virtual method.
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t intrinsic_data);
@@ -442,16 +442,16 @@ class CodeGeneratorX86 : public CodeGenerator {
dex::TypeIndex type_index,
Handle<mirror::Class> handle);
- void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+ void MoveFromReturnRegister(Location trg, DataType::Type type) override;
// Emit linker patches.
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
void PatchJitRootUse(uint8_t* code,
const uint8_t* roots_data,
const PatchInfo<Label>& info,
uint64_t index_in_table) const;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Emit a write barrier.
void MarkGCCard(Register temp,
@@ -466,15 +466,15 @@ class CodeGeneratorX86 : public CodeGenerator {
return CommonGetLabelOf<Label>(block_labels_, block);
}
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_ = CommonInitializeLabels<Label>();
}
- bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+ bool NeedsTwoRegisters(DataType::Type type) const override {
return type == DataType::Type::kInt64;
}
- bool ShouldSplitLongMoves() const OVERRIDE { return true; }
+ bool ShouldSplitLongMoves() const override { return true; }
Label* GetFrameEntryLabel() { return &frame_entry_label_; }
@@ -513,7 +513,7 @@ class CodeGeneratorX86 : public CodeGenerator {
Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void Finalize(CodeAllocator* allocator) override;
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
@@ -609,9 +609,9 @@ class CodeGeneratorX86 : public CodeGenerator {
}
}
- void GenerateNop() OVERRIDE;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateNop() override;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
// When we don't know the proper offset for the value, we use kDummy32BitOffset.
// The correct value will be inserted when processing Assembler fixups.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ae2a000d07..489652b85b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -71,7 +71,7 @@ class NullCheckSlowPathX86_64 : public SlowPathCode {
public:
explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
@@ -85,9 +85,9 @@ class NullCheckSlowPathX86_64 : public SlowPathCode {
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86_64"; }
+ const char* GetDescription() const override { return "NullCheckSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
@@ -97,16 +97,16 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCode {
public:
explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
x86_64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86_64"; }
+ const char* GetDescription() const override { return "DivZeroCheckSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86_64);
@@ -117,7 +117,7 @@ class DivRemMinusOneSlowPathX86_64 : public SlowPathCode {
DivRemMinusOneSlowPathX86_64(HInstruction* at, Register reg, DataType::Type type, bool is_div)
: SlowPathCode(at), cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
__ Bind(GetEntryLabel());
if (type_ == DataType::Type::kInt32) {
if (is_div_) {
@@ -137,7 +137,7 @@ class DivRemMinusOneSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "DivRemMinusOneSlowPathX86_64"; }
+ const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86_64"; }
private:
const CpuRegister cpu_reg_;
@@ -151,7 +151,7 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode {
SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCode(instruction), successor_(successor) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
@@ -175,7 +175,7 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode {
return successor_;
}
- const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86_64"; }
+ const char* GetDescription() const override { return "SuspendCheckSlowPathX86_64"; }
private:
HBasicBlock* const successor_;
@@ -189,7 +189,7 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction)
: SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
@@ -236,9 +236,9 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
- bool IsFatal() const OVERRIDE { return true; }
+ bool IsFatal() const override { return true; }
- const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86_64"; }
+ const char* GetDescription() const override { return "BoundsCheckSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
@@ -252,7 +252,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
const uint32_t dex_pc = instruction_->GetDexPc();
@@ -291,7 +291,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathX86_64"; }
+ const char* GetDescription() const override { return "LoadClassSlowPathX86_64"; }
private:
// The class this slow path will load.
@@ -304,7 +304,7 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
public:
explicit LoadStringSlowPathX86_64(HLoadString* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -326,7 +326,7 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86_64"; }
+ const char* GetDescription() const override { return "LoadStringSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
@@ -337,7 +337,7 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal)
: SlowPathCode(instruction), is_fatal_(is_fatal) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
@@ -385,9 +385,9 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
}
}
- const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86_64"; }
+ const char* GetDescription() const override { return "TypeCheckSlowPathX86_64"; }
- bool IsFatal() const OVERRIDE { return is_fatal_; }
+ bool IsFatal() const override { return is_fatal_; }
private:
const bool is_fatal_;
@@ -400,7 +400,7 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCode {
explicit DeoptimizationSlowPathX86_64(HDeoptimize* instruction)
: SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
LocationSummary* locations = instruction_->GetLocations();
@@ -413,7 +413,7 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCode {
CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
- const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
+ const char* GetDescription() const override { return "DeoptimizationSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64);
@@ -423,7 +423,7 @@ class ArraySetSlowPathX86_64 : public SlowPathCode {
public:
explicit ArraySetSlowPathX86_64(HInstruction* instruction) : SlowPathCode(instruction) {}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -454,7 +454,7 @@ class ArraySetSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86_64"; }
+ const char* GetDescription() const override { return "ArraySetSlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64);
@@ -482,9 +482,9 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86_64"; }
+ const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86_64"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
Register ref_reg = ref_cpu_reg.AsRegister();
@@ -573,11 +573,11 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86_64 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierMarkAndUpdateFieldSlowPathX86_64";
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
Register ref_reg = ref_cpu_reg.AsRegister();
@@ -745,7 +745,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode {
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
CpuRegister reg_out = out_.AsRegister<CpuRegister>();
@@ -864,7 +864,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierForHeapReferenceSlowPathX86_64";
}
@@ -906,7 +906,7 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode {
DCHECK(kEmitCompilerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
@@ -931,7 +931,7 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86_64"; }
+ const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86_64"; }
private:
const Location out_;
@@ -7395,7 +7395,7 @@ class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenera
CodeGeneratorX86_64* codegen_;
private:
- void Process(const MemoryRegion& region, int pos) OVERRIDE {
+ void Process(const MemoryRegion& region, int pos) override {
// Patch the correct offset for the instruction. We use the address of the
// 'next' instruction, which is 'pos' (patch the 4 bytes before).
int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
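
The hunks above are purely mechanical: every OVERRIDE becomes the `override` keyword. A minimal sketch of why this matters, assuming the legacy ART spellings were simple compatibility macros for the C++11 keywords (the macro definitions below are illustrative, not ART's real headers):

// Legacy compatibility macros, now replaced by the keywords themselves.
#define OVERRIDE override
#define FINAL final

struct SlowPathBase {
  virtual ~SlowPathBase() {}
  virtual const char* GetDescription() const { return "base"; }
};

struct TypeCheckSlowPathSketch final : SlowPathBase {
  // 'override' makes the compiler reject this method if the base
  // signature ever drifts (e.g. loses 'const'); a bare virtual
  // redeclaration would silently become a new overload instead.
  const char* GetDescription() const override { return "TypeCheckSlowPathSketch"; }
};
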
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 5ba7f9cb71..f77a5c84b4 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -83,22 +83,22 @@ class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention {
public:
FieldAccessCallingConventionX86_64() {}
- Location GetObjectLocation() const OVERRIDE {
+ Location GetObjectLocation() const override {
return Location::RegisterLocation(RSI);
}
- Location GetFieldIndexLocation() const OVERRIDE {
+ Location GetFieldIndexLocation() const override {
return Location::RegisterLocation(RDI);
}
- Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::RegisterLocation(RAX);
}
Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED, bool is_instance)
- const OVERRIDE {
+ const override {
return is_instance
? Location::RegisterLocation(RDX)
: Location::RegisterLocation(RSI);
}
- Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
return Location::FpuRegisterLocation(XMM0);
}
@@ -112,9 +112,9 @@ class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventio
InvokeDexCallingConventionVisitorX86_64() {}
virtual ~InvokeDexCallingConventionVisitorX86_64() {}
- Location GetNextLocation(DataType::Type type) OVERRIDE;
- Location GetReturnLocation(DataType::Type type) const OVERRIDE;
- Location GetMethodLocation() const OVERRIDE;
+ Location GetNextLocation(DataType::Type type) override;
+ Location GetReturnLocation(DataType::Type type) const override;
+ Location GetMethodLocation() const override;
private:
InvokeDexCallingConvention calling_convention;
@@ -129,10 +129,10 @@ class ParallelMoveResolverX86_64 : public ParallelMoveResolverWithSwap {
ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
: ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
- void EmitMove(size_t index) OVERRIDE;
- void EmitSwap(size_t index) OVERRIDE;
- void SpillScratch(int reg) OVERRIDE;
- void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) override;
+ void EmitSwap(size_t index) override;
+ void SpillScratch(int reg) override;
+ void RestoreScratch(int reg) override;
X86_64Assembler* GetAssembler() const;
@@ -157,14 +157,14 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -188,14 +188,14 @@ class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE;
+ void Visit##name(H##name* instr) override;
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
<< " (id " << instruction->GetId() << ")";
}
@@ -300,23 +300,23 @@ class CodeGeneratorX86_64 : public CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorX86_64() {}
- void GenerateFrameEntry() OVERRIDE;
- void GenerateFrameExit() OVERRIDE;
- void Bind(HBasicBlock* block) OVERRIDE;
- void MoveConstant(Location destination, int32_t value) OVERRIDE;
- void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
- void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ void GenerateFrameEntry() override;
+ void GenerateFrameExit() override;
+ void Bind(HBasicBlock* block) override;
+ void MoveConstant(Location destination, int32_t value) override;
+ void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) override;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
// Generate code to invoke a runtime entry point.
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path = nullptr) OVERRIDE;
+ SlowPathCode* slow_path = nullptr) override;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -326,46 +326,46 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void GenerateInvokeRuntime(int32_t entry_point_offset);
- size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const override {
return kX86_64WordSize;
}
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+ size_t GetFloatingPointSpillSlotSize() const override {
return GetGraph()->HasSIMD()
? 2 * kX86_64WordSize // 16 bytes == 2 x86_64 words for each spill
        : 1 * kX86_64WordSize;  // 8 bytes == 1 x86_64 word for each spill
}
- HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ HGraphVisitor* GetLocationBuilder() override {
return &location_builder_;
}
- HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+ HGraphVisitor* GetInstructionVisitor() override {
return &instruction_visitor_;
}
- X86_64Assembler* GetAssembler() OVERRIDE {
+ X86_64Assembler* GetAssembler() override {
return &assembler_;
}
- const X86_64Assembler& GetAssembler() const OVERRIDE {
+ const X86_64Assembler& GetAssembler() const override {
return assembler_;
}
- ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
+ ParallelMoveResolverX86_64* GetMoveResolver() override {
return &move_resolver_;
}
- uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+ uintptr_t GetAddressOf(HBasicBlock* block) override {
return GetLabelOf(block)->Position();
}
- void SetupBlockedRegisters() const OVERRIDE;
- void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
- void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void SetupBlockedRegisters() const override;
+ void DumpCoreRegister(std::ostream& stream, int reg) const override;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
+ void Finalize(CodeAllocator* allocator) override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kX86_64;
}
@@ -387,34 +387,34 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return CommonGetLabelOf<Label>(block_labels_, block);
}
- void Initialize() OVERRIDE {
+ void Initialize() override {
block_labels_ = CommonInitializeLabels<Label>();
}
- bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override {
return false;
}
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+ HLoadString::LoadKind desired_string_load_kind) override;
// Check if the desired_class_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+ HLoadClass::LoadKind desired_class_load_kind) override;
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) override;
void GenerateStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void GenerateVirtualCall(
- HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
void RecordBootImageIntrinsicPatch(uint32_t intrinsic_data);
void RecordBootImageRelRoPatch(uint32_t boot_image_offset);
@@ -434,14 +434,14 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference);
void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
- void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
+ void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
void PatchJitRootUse(uint8_t* code,
const uint8_t* roots_data,
const PatchInfo<Label>& info,
uint64_t index_in_table) const;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+ void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
@@ -565,7 +565,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
void Store64BitValueToStack(Location dest, int64_t value);
- void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+ void MoveFromReturnRegister(Location trg, DataType::Type type) override;
// Assign a 64 bit constant to an address.
void MoveInt64ToAddress(const Address& addr_low,
@@ -585,9 +585,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
}
}
- void GenerateNop() OVERRIDE;
- void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateNop() override;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) override;
// When we don't know the proper offset for the value, we use kDummy32BitOffset.
// We will fix this up in the linker later to have the right value.
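
The DECLARE_VISIT_INSTRUCTION hunks in this header show the keyword moving inside a macro body. A minimal sketch of that pattern with simplified stand-in types (not ART's real HGraphVisitor):

struct HAdd {};
struct HMul {};

struct HGraphVisitorSketch {
  virtual ~HGraphVisitorSketch() {}
  virtual void VisitAdd(HAdd* instr) {}
  virtual void VisitMul(HMul* instr) {}
};

// 'override' lives inside the macro, so every stamped-out Visit
// method is checked against the base visitor's declaration.
#define DECLARE_VISIT_INSTRUCTION(name) \
  void Visit##name(H##name* instr) override;

struct LocationsBuilderSketch : HGraphVisitorSketch {
  DECLARE_VISIT_INSTRUCTION(Add)
  DECLARE_VISIT_INSTRUCTION(Mul)
};

#undef DECLARE_VISIT_INSTRUCTION

void LocationsBuilderSketch::VisitAdd(HAdd* instr) {}
void LocationsBuilderSketch::VisitMul(HMul* instr) {}
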
diff --git a/compiler/optimizing/code_sinking.h b/compiler/optimizing/code_sinking.h
index 5db0b6dcc5..8eb3a520c3 100644
--- a/compiler/optimizing/code_sinking.h
+++ b/compiler/optimizing/code_sinking.h
@@ -33,7 +33,7 @@ class CodeSinking : public HOptimization {
const char* name = kCodeSinkingPassName)
: HOptimization(graph, name, stats) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kCodeSinkingPassName = "code_sinking";
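
This and the following optimization-pass headers all override the same entry point. A minimal sketch of that interface (simplified; the real HOptimization base also carries the graph, a pass name, and optional stats):

struct HOptimizationSketch {
  virtual ~HOptimizationSketch() {}
  // Returns true if the pass changed the graph.
  virtual bool Run() = 0;
};

struct CodeSinkingSketch final : HOptimizationSketch {
  bool Run() override {
    // ... move instructions closer to their uses ...
    return false;  // report whether anything was sunk
  }
};
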
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 86687e60a9..f186191a0f 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -453,7 +453,7 @@ TEST_F(CodegenTest, NonMaterializedCondition) {
ASSERT_FALSE(equal->IsEmittedAtUseSite());
graph->BuildDominatorTree();
- PrepareForRegisterAllocation(graph).Run();
+ PrepareForRegisterAllocation(graph, *compiler_options_).Run();
ASSERT_TRUE(equal->IsEmittedAtUseSite());
auto hook_before_codegen = [](HGraph* graph_in) {
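
Beyond the keyword swap, this test update reflects PrepareForRegisterAllocation gaining a CompilerOptions argument. A hedged sketch of the implied shape (the exact member layout in ART may differ):

struct HGraph;
struct CompilerOptions;

class PrepareForRegisterAllocationSketch {
 public:
  PrepareForRegisterAllocationSketch(HGraph* graph, const CompilerOptions& options)
      : graph_(graph), options_(options) {}
  void Run() { /* lower checks, mark instructions emitted at use site, ... */ }

 private:
  HGraph* graph_;
  const CompilerOptions& options_;
};
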
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 91811262de..0289e9c4a7 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -101,7 +101,7 @@ class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL {
AddAllocatedRegister(Location::RegisterLocation(arm::R7));
}
- void SetupBlockedRegisters() const OVERRIDE {
+ void SetupBlockedRegisters() const override {
arm::CodeGeneratorARMVIXL::SetupBlockedRegisters();
blocked_core_registers_[arm::R4] = true;
blocked_core_registers_[arm::R6] = false;
@@ -109,7 +109,7 @@ class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL {
}
void MaybeGenerateMarkingRegisterCheck(int code ATTRIBUTE_UNUSED,
- Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE {
+ Location temp_loc ATTRIBUTE_UNUSED) override {
// When turned on, the marking register checks in
// CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck expects the
// Thread Register and the Marking Register to be set to
@@ -141,7 +141,7 @@ class TestCodeGeneratorARM64 : public arm64::CodeGeneratorARM64 {
: arm64::CodeGeneratorARM64(graph, compiler_options) {}
void MaybeGenerateMarkingRegisterCheck(int codem ATTRIBUTE_UNUSED,
- Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE {
+ Location temp_loc ATTRIBUTE_UNUSED) override {
// When turned on, the marking register checks in
// CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck expect the
// Thread Register and the Marking Register to be set to
@@ -161,7 +161,7 @@ class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 {
AddAllocatedRegister(Location::RegisterLocation(x86::EDI));
}
- void SetupBlockedRegisters() const OVERRIDE {
+ void SetupBlockedRegisters() const override {
x86::CodeGeneratorX86::SetupBlockedRegisters();
// ebx is a callee-save register in C, but caller-save for ART.
blocked_core_registers_[x86::EBX] = true;
@@ -183,7 +183,7 @@ class InternalCodeAllocator : public CodeAllocator {
}
size_t GetSize() const { return size_; }
- ArrayRef<const uint8_t> GetMemory() const OVERRIDE {
+ ArrayRef<const uint8_t> GetMemory() const override {
return ArrayRef<const uint8_t>(memory_.get(), size_);
}
@@ -288,7 +288,7 @@ static void RunCodeNoCheck(CodeGenerator* codegen,
{
ScopedArenaAllocator local_allocator(graph->GetArenaStack());
SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
- PrepareForRegisterAllocation(graph).Run();
+ PrepareForRegisterAllocation(graph, codegen->GetCompilerOptions()).Run();
liveness.Analyze();
std::unique_ptr<RegisterAllocator> register_allocator =
RegisterAllocator::Create(&local_allocator, codegen, liveness);
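
The test codegens above override SetupBlockedRegisters() to pin specific physical registers in or out of the allocator's pool, keeping register assignment deterministic in tests. A minimal sketch of the trick (register indices are illustrative):

#include <array>

struct CodeGenSketch {
  virtual ~CodeGenSketch() {}
  virtual void SetupBlockedRegisters() const {}  // default: block nothing
  mutable std::array<bool, 16> blocked_core_registers_{};
};

struct TestCodeGenSketch final : CodeGenSketch {
  void SetupBlockedRegisters() const override {
    CodeGenSketch::SetupBlockedRegisters();
    blocked_core_registers_[4] = true;   // reserve a register for the test
    blocked_core_registers_[6] = false;  // force another to stay allocatable
  }
};
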
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index bb78c2357e..09e7cabfa4 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -26,13 +26,13 @@ class HConstantFoldingVisitor : public HGraphDelegateVisitor {
: HGraphDelegateVisitor(graph) {}
private:
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+ void VisitBasicBlock(HBasicBlock* block) override;
- void VisitUnaryOperation(HUnaryOperation* inst) OVERRIDE;
- void VisitBinaryOperation(HBinaryOperation* inst) OVERRIDE;
+ void VisitUnaryOperation(HUnaryOperation* inst) override;
+ void VisitBinaryOperation(HBinaryOperation* inst) override;
- void VisitTypeConversion(HTypeConversion* inst) OVERRIDE;
- void VisitDivZeroCheck(HDivZeroCheck* inst) OVERRIDE;
+ void VisitTypeConversion(HTypeConversion* inst) override;
+ void VisitDivZeroCheck(HDivZeroCheck* inst) override;
DISALLOW_COPY_AND_ASSIGN(HConstantFoldingVisitor);
};
@@ -47,24 +47,24 @@ class InstructionWithAbsorbingInputSimplifier : public HGraphVisitor {
private:
void VisitShift(HBinaryOperation* shift);
- void VisitEqual(HEqual* instruction) OVERRIDE;
- void VisitNotEqual(HNotEqual* instruction) OVERRIDE;
-
- void VisitAbove(HAbove* instruction) OVERRIDE;
- void VisitAboveOrEqual(HAboveOrEqual* instruction) OVERRIDE;
- void VisitBelow(HBelow* instruction) OVERRIDE;
- void VisitBelowOrEqual(HBelowOrEqual* instruction) OVERRIDE;
-
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitCompare(HCompare* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitRem(HRem* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitSub(HSub* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
- void VisitXor(HXor* instruction) OVERRIDE;
+ void VisitEqual(HEqual* instruction) override;
+ void VisitNotEqual(HNotEqual* instruction) override;
+
+ void VisitAbove(HAbove* instruction) override;
+ void VisitAboveOrEqual(HAboveOrEqual* instruction) override;
+ void VisitBelow(HBelow* instruction) override;
+ void VisitBelowOrEqual(HBelowOrEqual* instruction) override;
+
+ void VisitAnd(HAnd* instruction) override;
+ void VisitCompare(HCompare* instruction) override;
+ void VisitMul(HMul* instruction) override;
+ void VisitOr(HOr* instruction) override;
+ void VisitRem(HRem* instruction) override;
+ void VisitShl(HShl* instruction) override;
+ void VisitShr(HShr* instruction) override;
+ void VisitSub(HSub* instruction) override;
+ void VisitUShr(HUShr* instruction) override;
+ void VisitXor(HXor* instruction) override;
};
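
The second visitor above folds operations with an "absorbing" input: an operand value that fixes the result regardless of the other side. An illustrative integer-only model of the idea (not the real IR rewrite):

#include <cstdint>
#include <optional>

enum class OpKind { kMul, kAnd, kOr };

// Returns the folded constant when 'rhs' absorbs the operation.
std::optional<int32_t> FoldAbsorbing(OpKind op, int32_t rhs) {
  switch (op) {
    case OpKind::kMul:
    case OpKind::kAnd:
      if (rhs == 0) return 0;    // x * 0 == 0 and x & 0 == 0
      break;
    case OpKind::kOr:
      if (rhs == -1) return -1;  // x | ~0 == ~0
      break;
  }
  return std::nullopt;           // not absorbing; keep the instruction
}
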
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index f4dbc805c4..72bd95b3cb 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -41,7 +41,7 @@ class HConstantFolding : public HOptimization {
public:
HConstantFolding(HGraph* graph, const char* name) : HOptimization(graph, name) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kConstantFoldingPassName = "constant_folding";
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 54bff22e98..3cb8bf2f47 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -34,7 +34,7 @@ class CFREVisitor : public HGraphVisitor {
candidate_fence_targets_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
stats_(stats) {}
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
// Visit all instructions in block.
HGraphVisitor::VisitBasicBlock(block);
@@ -43,7 +43,7 @@ class CFREVisitor : public HGraphVisitor {
MergeCandidateFences();
}
- void VisitConstructorFence(HConstructorFence* constructor_fence) OVERRIDE {
+ void VisitConstructorFence(HConstructorFence* constructor_fence) override {
candidate_fences_.push_back(constructor_fence);
for (size_t input_idx = 0; input_idx < constructor_fence->InputCount(); ++input_idx) {
@@ -51,29 +51,29 @@ class CFREVisitor : public HGraphVisitor {
}
}
- void VisitBoundType(HBoundType* bound_type) OVERRIDE {
+ void VisitBoundType(HBoundType* bound_type) override {
VisitAlias(bound_type);
}
- void VisitNullCheck(HNullCheck* null_check) OVERRIDE {
+ void VisitNullCheck(HNullCheck* null_check) override {
VisitAlias(null_check);
}
- void VisitSelect(HSelect* select) OVERRIDE {
+ void VisitSelect(HSelect* select) override {
VisitAlias(select);
}
- void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+ void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
HInstruction* value = instruction->InputAt(1);
VisitSetLocation(instruction, value);
}
- void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+ void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
HInstruction* value = instruction->InputAt(1);
VisitSetLocation(instruction, value);
}
- void VisitArraySet(HArraySet* instruction) OVERRIDE {
+ void VisitArraySet(HArraySet* instruction) override {
HInstruction* value = instruction->InputAt(2);
VisitSetLocation(instruction, value);
}
@@ -83,46 +83,46 @@ class CFREVisitor : public HGraphVisitor {
MergeCandidateFences();
}
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+ void VisitInvokeInterface(HInvokeInterface* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
+ void VisitInvokeUnresolved(HInvokeUnresolved* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ void VisitInvokePolymorphic(HInvokePolymorphic* invoke) override {
HandleInvoke(invoke);
}
- void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
+ void VisitClinitCheck(HClinitCheck* clinit) override {
HandleInvoke(clinit);
}
- void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE {
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE {
+ void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE {
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE {
+ void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
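
The CFRE visitor above accumulates constructor fences as candidates until an instruction that may publish an object (conservatively, any invoke or unresolved field access) forces a merge. A minimal sketch of that control flow with simplified stand-in types:

#include <vector>

struct HConstructorFenceSketch {};

class CFRESketch {
 public:
  void VisitConstructorFence(HConstructorFenceSketch* fence) {
    candidate_fences_.push_back(fence);
  }
  void HandleInvoke() {
    // An invocation may let the object escape, so pending fences must
    // be kept as-is and the candidate window reset.
    MergeCandidateFences();
  }

 private:
  void MergeCandidateFences() {
    // The real pass coalesces redundant fences into one survivor;
    // this sketch only models dropping the tracking state.
    candidate_fences_.clear();
  }
  std::vector<HConstructorFenceSketch*> candidate_fences_;
};
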
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.h b/compiler/optimizing/constructor_fence_redundancy_elimination.h
index 367d9f21a0..014b342258 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.h
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.h
@@ -52,7 +52,7 @@ class ConstructorFenceRedundancyElimination : public HOptimization {
const char* name = kCFREPassName)
: HOptimization(graph, name, stats) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kCFREPassName = "constructor_fence_redundancy_elimination";
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 90caa53764..799721acf2 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -32,7 +32,7 @@ class HDeadCodeElimination : public HOptimization {
HDeadCodeElimination(HGraph* graph, OptimizingCompilerStats* stats, const char* name)
: HOptimization(graph, name, stats) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kDeadCodeEliminationPassName = "dead_code_elimination";
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
index 293c1ab3f3..63a370a47b 100644
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -27,7 +27,7 @@ namespace art {
class EmitSwapMipsTest : public OptimizingUnitTest {
public:
- void SetUp() OVERRIDE {
+ void SetUp() override {
instruction_set_ = InstructionSet::kMips;
instruction_set_features_ = MipsInstructionSetFeatures::FromCppDefines();
OptimizingUnitTest::SetUp();
@@ -46,7 +46,7 @@ class EmitSwapMipsTest : public OptimizingUnitTest {
GetAssemblyHeader()));
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
test_helper_.reset();
codegen_.reset();
graph_ = nullptr;
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 3a2bb7a00c..d085609197 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -44,30 +44,30 @@ class GraphChecker : public HGraphDelegateVisitor {
// and return value pass along the observed graph sizes.
size_t Run(bool pass_change = true, size_t last_size = 0);
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
-
- void VisitInstruction(HInstruction* instruction) OVERRIDE;
- void VisitPhi(HPhi* phi) OVERRIDE;
-
- void VisitBinaryOperation(HBinaryOperation* op) OVERRIDE;
- void VisitBooleanNot(HBooleanNot* instruction) OVERRIDE;
- void VisitBoundType(HBoundType* instruction) OVERRIDE;
- void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
- void VisitCheckCast(HCheckCast* check) OVERRIDE;
- void VisitCondition(HCondition* op) OVERRIDE;
- void VisitConstant(HConstant* instruction) OVERRIDE;
- void VisitDeoptimize(HDeoptimize* instruction) OVERRIDE;
- void VisitIf(HIf* instruction) OVERRIDE;
- void VisitInstanceOf(HInstanceOf* check) OVERRIDE;
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
- void VisitLoadException(HLoadException* load) OVERRIDE;
- void VisitNeg(HNeg* instruction) OVERRIDE;
- void VisitPackedSwitch(HPackedSwitch* instruction) OVERRIDE;
- void VisitReturn(HReturn* ret) OVERRIDE;
- void VisitReturnVoid(HReturnVoid* ret) OVERRIDE;
- void VisitSelect(HSelect* instruction) OVERRIDE;
- void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
+ void VisitBasicBlock(HBasicBlock* block) override;
+
+ void VisitInstruction(HInstruction* instruction) override;
+ void VisitPhi(HPhi* phi) override;
+
+ void VisitBinaryOperation(HBinaryOperation* op) override;
+ void VisitBooleanNot(HBooleanNot* instruction) override;
+ void VisitBoundType(HBoundType* instruction) override;
+ void VisitBoundsCheck(HBoundsCheck* check) override;
+ void VisitCheckCast(HCheckCast* check) override;
+ void VisitCondition(HCondition* op) override;
+ void VisitConstant(HConstant* instruction) override;
+ void VisitDeoptimize(HDeoptimize* instruction) override;
+ void VisitIf(HIf* instruction) override;
+ void VisitInstanceOf(HInstanceOf* check) override;
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
+ void VisitLoadException(HLoadException* load) override;
+ void VisitNeg(HNeg* instruction) override;
+ void VisitPackedSwitch(HPackedSwitch* instruction) override;
+ void VisitReturn(HReturn* ret) override;
+ void VisitReturnVoid(HReturnVoid* ret) override;
+ void VisitSelect(HSelect* instruction) override;
+ void VisitTryBoundary(HTryBoundary* try_boundary) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
void CheckTypeCheckBitstringInput(HTypeCheckInstruction* check,
size_t input_pos,
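
A hedged sketch of the checker driver protocol implied by the Run() declaration above: the checker receives whether the preceding pass reported a change plus the last observed graph size, and returns the new size, so a pass that claims "no change" can be caught mutating the graph. The exact bookkeeping in ART may differ:

#include <cstddef>

struct GraphCheckerSketch {
  size_t Run(bool pass_change, size_t last_size, size_t current_size) {
    if (!pass_change && current_size != last_size) {
      // The real checker records an error string here.
    }
    return current_size;
  }
};
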
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index d65ad40565..31db8c205f 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -333,7 +333,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
return output_;
}
- void VisitParallelMove(HParallelMove* instruction) OVERRIDE {
+ void VisitParallelMove(HParallelMove* instruction) override {
StartAttributeStream("liveness") << instruction->GetLifetimePosition();
StringList moves;
for (size_t i = 0, e = instruction->NumMoves(); i < e; ++i) {
@@ -346,36 +346,36 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("moves") << moves;
}
- void VisitIntConstant(HIntConstant* instruction) OVERRIDE {
+ void VisitIntConstant(HIntConstant* instruction) override {
StartAttributeStream() << instruction->GetValue();
}
- void VisitLongConstant(HLongConstant* instruction) OVERRIDE {
+ void VisitLongConstant(HLongConstant* instruction) override {
StartAttributeStream() << instruction->GetValue();
}
- void VisitFloatConstant(HFloatConstant* instruction) OVERRIDE {
+ void VisitFloatConstant(HFloatConstant* instruction) override {
StartAttributeStream() << instruction->GetValue();
}
- void VisitDoubleConstant(HDoubleConstant* instruction) OVERRIDE {
+ void VisitDoubleConstant(HDoubleConstant* instruction) override {
StartAttributeStream() << instruction->GetValue();
}
- void VisitPhi(HPhi* phi) OVERRIDE {
+ void VisitPhi(HPhi* phi) override {
StartAttributeStream("reg") << phi->GetRegNumber();
StartAttributeStream("is_catch_phi") << std::boolalpha << phi->IsCatchPhi() << std::noboolalpha;
}
- void VisitMemoryBarrier(HMemoryBarrier* barrier) OVERRIDE {
+ void VisitMemoryBarrier(HMemoryBarrier* barrier) override {
StartAttributeStream("kind") << barrier->GetBarrierKind();
}
- void VisitMonitorOperation(HMonitorOperation* monitor) OVERRIDE {
+ void VisitMonitorOperation(HMonitorOperation* monitor) override {
StartAttributeStream("kind") << (monitor->IsEnter() ? "enter" : "exit");
}
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+ void VisitLoadClass(HLoadClass* load_class) override {
StartAttributeStream("load_kind") << load_class->GetLoadKind();
const char* descriptor = load_class->GetDexFile().GetTypeDescriptor(
load_class->GetDexFile().GetTypeId(load_class->GetTypeIndex()));
@@ -386,19 +386,19 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
<< load_class->NeedsAccessCheck() << std::noboolalpha;
}
- void VisitLoadMethodHandle(HLoadMethodHandle* load_method_handle) OVERRIDE {
+ void VisitLoadMethodHandle(HLoadMethodHandle* load_method_handle) override {
StartAttributeStream("load_kind") << "RuntimeCall";
StartAttributeStream("method_handle_index") << load_method_handle->GetMethodHandleIndex();
}
- void VisitLoadMethodType(HLoadMethodType* load_method_type) OVERRIDE {
+ void VisitLoadMethodType(HLoadMethodType* load_method_type) override {
StartAttributeStream("load_kind") << "RuntimeCall";
const DexFile& dex_file = load_method_type->GetDexFile();
const DexFile::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
StartAttributeStream("method_type") << dex_file.GetProtoSignature(proto_id);
}
- void VisitLoadString(HLoadString* load_string) OVERRIDE {
+ void VisitLoadString(HLoadString* load_string) override {
StartAttributeStream("load_kind") << load_string->GetLoadKind();
}
@@ -413,15 +413,15 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
}
}
- void VisitCheckCast(HCheckCast* check_cast) OVERRIDE {
+ void VisitCheckCast(HCheckCast* check_cast) override {
HandleTypeCheckInstruction(check_cast);
}
- void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE {
+ void VisitInstanceOf(HInstanceOf* instance_of) override {
HandleTypeCheckInstruction(instance_of);
}
- void VisitArrayLength(HArrayLength* array_length) OVERRIDE {
+ void VisitArrayLength(HArrayLength* array_length) override {
StartAttributeStream("is_string_length") << std::boolalpha
<< array_length->IsStringLength() << std::noboolalpha;
if (array_length->IsEmittedAtUseSite()) {
@@ -429,31 +429,31 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
}
}
- void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
+ void VisitBoundsCheck(HBoundsCheck* bounds_check) override {
StartAttributeStream("is_string_char_at") << std::boolalpha
<< bounds_check->IsStringCharAt() << std::noboolalpha;
}
- void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
+ void VisitArrayGet(HArrayGet* array_get) override {
StartAttributeStream("is_string_char_at") << std::boolalpha
<< array_get->IsStringCharAt() << std::noboolalpha;
}
- void VisitArraySet(HArraySet* array_set) OVERRIDE {
+ void VisitArraySet(HArraySet* array_set) override {
StartAttributeStream("value_can_be_null") << std::boolalpha
<< array_set->GetValueCanBeNull() << std::noboolalpha;
StartAttributeStream("needs_type_check") << std::boolalpha
<< array_set->NeedsTypeCheck() << std::noboolalpha;
}
- void VisitCompare(HCompare* compare) OVERRIDE {
+ void VisitCompare(HCompare* compare) override {
ComparisonBias bias = compare->GetBias();
StartAttributeStream("bias") << (bias == ComparisonBias::kGtBias
? "gt"
: (bias == ComparisonBias::kLtBias ? "lt" : "none"));
}
- void VisitInvoke(HInvoke* invoke) OVERRIDE {
+ void VisitInvoke(HInvoke* invoke) override {
StartAttributeStream("dex_file_index") << invoke->GetDexMethodIndex();
ArtMethod* method = invoke->GetResolvedMethod();
// We don't print signatures, which conflict with c1visualizer format.
@@ -470,12 +470,12 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
<< std::noboolalpha;
}
- void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
+ void VisitInvokeUnresolved(HInvokeUnresolved* invoke) override {
VisitInvoke(invoke);
StartAttributeStream("invoke_type") << invoke->GetInvokeType();
}
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
VisitInvoke(invoke);
StartAttributeStream("method_load_kind") << invoke->GetMethodLoadKind();
StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
@@ -484,96 +484,96 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
}
}
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
VisitInvoke(invoke);
StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
}
- void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ void VisitInvokePolymorphic(HInvokePolymorphic* invoke) override {
VisitInvoke(invoke);
StartAttributeStream("invoke_type") << "InvokePolymorphic";
}
- void VisitInstanceFieldGet(HInstanceFieldGet* iget) OVERRIDE {
+ void VisitInstanceFieldGet(HInstanceFieldGet* iget) override {
StartAttributeStream("field_name") <<
iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(),
/* with type */ false);
StartAttributeStream("field_type") << iget->GetFieldType();
}
- void VisitInstanceFieldSet(HInstanceFieldSet* iset) OVERRIDE {
+ void VisitInstanceFieldSet(HInstanceFieldSet* iset) override {
StartAttributeStream("field_name") <<
iset->GetFieldInfo().GetDexFile().PrettyField(iset->GetFieldInfo().GetFieldIndex(),
/* with type */ false);
StartAttributeStream("field_type") << iset->GetFieldType();
}
- void VisitStaticFieldGet(HStaticFieldGet* sget) OVERRIDE {
+ void VisitStaticFieldGet(HStaticFieldGet* sget) override {
StartAttributeStream("field_name") <<
sget->GetFieldInfo().GetDexFile().PrettyField(sget->GetFieldInfo().GetFieldIndex(),
/* with type */ false);
StartAttributeStream("field_type") << sget->GetFieldType();
}
- void VisitStaticFieldSet(HStaticFieldSet* sset) OVERRIDE {
+ void VisitStaticFieldSet(HStaticFieldSet* sset) override {
StartAttributeStream("field_name") <<
sset->GetFieldInfo().GetDexFile().PrettyField(sset->GetFieldInfo().GetFieldIndex(),
/* with type */ false);
StartAttributeStream("field_type") << sset->GetFieldType();
}
- void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) OVERRIDE {
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) override {
StartAttributeStream("field_type") << field_access->GetFieldType();
}
- void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) OVERRIDE {
+ void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) override {
StartAttributeStream("field_type") << field_access->GetFieldType();
}
- void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) OVERRIDE {
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) override {
StartAttributeStream("field_type") << field_access->GetFieldType();
}
- void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) OVERRIDE {
+ void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) override {
StartAttributeStream("field_type") << field_access->GetFieldType();
}
- void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE {
+ void VisitTryBoundary(HTryBoundary* try_boundary) override {
StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
}
- void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE {
+ void VisitDeoptimize(HDeoptimize* deoptimize) override {
StartAttributeStream("kind") << deoptimize->GetKind();
}
- void VisitVecOperation(HVecOperation* vec_operation) OVERRIDE {
+ void VisitVecOperation(HVecOperation* vec_operation) override {
StartAttributeStream("packed_type") << vec_operation->GetPackedType();
}
- void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) OVERRIDE {
+ void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) override {
StartAttributeStream("alignment") << vec_mem_operation->GetAlignment().ToString();
}
- void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
+ void VisitVecHalvingAdd(HVecHalvingAdd* hadd) override {
VisitVecBinaryOperation(hadd);
StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha;
}
- void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE {
+ void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) override {
VisitVecOperation(instruction);
StartAttributeStream("kind") << instruction->GetOpKind();
}
#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
- void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) OVERRIDE {
+ void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) override {
StartAttributeStream("kind") << instruction->GetOpKind();
}
- void VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) OVERRIDE {
+ void VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) override {
StartAttributeStream("kind") << instruction->GetOpKind();
}
- void VisitDataProcWithShifterOp(HDataProcWithShifterOp* instruction) OVERRIDE {
+ void VisitDataProcWithShifterOp(HDataProcWithShifterOp* instruction) override {
StartAttributeStream("kind") << instruction->GetInstrKind() << "+" << instruction->GetOpKind();
if (HDataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())) {
StartAttributeStream("shift") << instruction->GetShiftAmount();
@@ -814,7 +814,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
Flush();
}
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
StartTag("block");
PrintProperty("name", "B", block->GetBlockId());
if (block->GetLifetimeStart() != kNoLifetime) {
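
The visualizer overrides above all rely on the same attribute-stream idiom: each Visit method appends "key:value" pairs to the current line of c1visualizer output. A minimal, runnable sketch of that API shape:

#include <iostream>
#include <sstream>
#include <string>

class AttributePrinterSketch {
 public:
  std::ostream& StartAttributeStream(const char* name) {
    out_ << ' ' << name << ':';
    return out_;  // caller streams the value, e.g. << "enter"
  }
  std::string Str() const { return out_.str(); }

 private:
  std::ostringstream out_;
};

int main() {
  AttributePrinterSketch printer;
  printer.StartAttributeStream("kind") << "enter";
  printer.StartAttributeStream("liveness") << 42;
  std::cout << printer.Str() << '\n';  // " kind:enter liveness:42"
}
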
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index 75cfff2140..bbf2265e98 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -31,7 +31,7 @@ class GVNOptimization : public HOptimization {
const char* pass_name = kGlobalValueNumberingPassName)
: HOptimization(graph, pass_name), side_effects_(side_effects) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kGlobalValueNumberingPassName = "GVN";
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 89fed2ec64..a48aa90059 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -37,7 +37,7 @@ class HInductionVarAnalysis : public HOptimization {
public:
explicit HInductionVarAnalysis(HGraph* graph, const char* name = kInductionPassName);
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kInductionPassName = "induction_var_analysis";
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 2fdf6a1306..6fd0c204b2 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -60,7 +60,7 @@ class HInliner : public HOptimization {
handles_(handles),
inline_stats_(nullptr) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kInlinerPassName = "inliner";
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index f493b66cfd..2757f7b719 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -66,44 +66,44 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
bool TryCombineVecMultiplyAccumulate(HVecMul* mul);
void VisitShift(HBinaryOperation* shift);
- void VisitEqual(HEqual* equal) OVERRIDE;
- void VisitNotEqual(HNotEqual* equal) OVERRIDE;
- void VisitBooleanNot(HBooleanNot* bool_not) OVERRIDE;
- void VisitInstanceFieldSet(HInstanceFieldSet* equal) OVERRIDE;
- void VisitStaticFieldSet(HStaticFieldSet* equal) OVERRIDE;
- void VisitArraySet(HArraySet* equal) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
- void VisitNullCheck(HNullCheck* instruction) OVERRIDE;
- void VisitArrayLength(HArrayLength* instruction) OVERRIDE;
- void VisitCheckCast(HCheckCast* instruction) OVERRIDE;
- void VisitAbs(HAbs* instruction) OVERRIDE;
- void VisitAdd(HAdd* instruction) OVERRIDE;
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitCondition(HCondition* instruction) OVERRIDE;
- void VisitGreaterThan(HGreaterThan* condition) OVERRIDE;
- void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) OVERRIDE;
- void VisitLessThan(HLessThan* condition) OVERRIDE;
- void VisitLessThanOrEqual(HLessThanOrEqual* condition) OVERRIDE;
- void VisitBelow(HBelow* condition) OVERRIDE;
- void VisitBelowOrEqual(HBelowOrEqual* condition) OVERRIDE;
- void VisitAbove(HAbove* condition) OVERRIDE;
- void VisitAboveOrEqual(HAboveOrEqual* condition) OVERRIDE;
- void VisitDiv(HDiv* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitNeg(HNeg* instruction) OVERRIDE;
- void VisitNot(HNot* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitSub(HSub* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
- void VisitXor(HXor* instruction) OVERRIDE;
- void VisitSelect(HSelect* select) OVERRIDE;
- void VisitIf(HIf* instruction) OVERRIDE;
- void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
- void VisitInvoke(HInvoke* invoke) OVERRIDE;
- void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE;
- void VisitVecMul(HVecMul* instruction) OVERRIDE;
+ void VisitEqual(HEqual* equal) override;
+ void VisitNotEqual(HNotEqual* equal) override;
+ void VisitBooleanNot(HBooleanNot* bool_not) override;
+ void VisitInstanceFieldSet(HInstanceFieldSet* equal) override;
+ void VisitStaticFieldSet(HStaticFieldSet* equal) override;
+ void VisitArraySet(HArraySet* equal) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
+ void VisitNullCheck(HNullCheck* instruction) override;
+ void VisitArrayLength(HArrayLength* instruction) override;
+ void VisitCheckCast(HCheckCast* instruction) override;
+ void VisitAbs(HAbs* instruction) override;
+ void VisitAdd(HAdd* instruction) override;
+ void VisitAnd(HAnd* instruction) override;
+ void VisitCondition(HCondition* instruction) override;
+ void VisitGreaterThan(HGreaterThan* condition) override;
+ void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) override;
+ void VisitLessThan(HLessThan* condition) override;
+ void VisitLessThanOrEqual(HLessThanOrEqual* condition) override;
+ void VisitBelow(HBelow* condition) override;
+ void VisitBelowOrEqual(HBelowOrEqual* condition) override;
+ void VisitAbove(HAbove* condition) override;
+ void VisitAboveOrEqual(HAboveOrEqual* condition) override;
+ void VisitDiv(HDiv* instruction) override;
+ void VisitMul(HMul* instruction) override;
+ void VisitNeg(HNeg* instruction) override;
+ void VisitNot(HNot* instruction) override;
+ void VisitOr(HOr* instruction) override;
+ void VisitShl(HShl* instruction) override;
+ void VisitShr(HShr* instruction) override;
+ void VisitSub(HSub* instruction) override;
+ void VisitUShr(HUShr* instruction) override;
+ void VisitXor(HXor* instruction) override;
+ void VisitSelect(HSelect* select) override;
+ void VisitIf(HIf* instruction) override;
+ void VisitInstanceOf(HInstanceOf* instruction) override;
+ void VisitInvoke(HInvoke* invoke) override;
+ void VisitDeoptimize(HDeoptimize* deoptimize) override;
+ void VisitVecMul(HVecMul* instruction) override;
bool CanEnsureNotNullAt(HInstruction* instr, HInstruction* at) const;
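
An illustrative example of the algebraic rewrites behind these Visit declarations: VisitMul-style strength reduction of a multiplication by a power of two into a shift. This models the value semantics of the rewrite, not the real IR surgery:

#include <cstdint>

bool IsPowerOfTwo(uint32_t v) { return v != 0 && (v & (v - 1)) == 0; }

uint32_t WhichPowerOfTwo(uint32_t v) {
  uint32_t shift = 0;
  while ((v >>= 1) != 0) ++shift;
  return shift;
}

uint32_t SimplifiedMul(uint32_t x, uint32_t c) {
  if (IsPowerOfTwo(c)) {
    return x << WhichPowerOfTwo(c);  // x * c == x << log2(c)
  }
  return x * c;  // not simplifiable this way; keep the multiply
}
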
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index 2d134e0067..982a24a6f0 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -46,7 +46,7 @@ class InstructionSimplifier : public HOptimization {
static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
- bool Run() OVERRIDE;
+ bool Run() override;
private:
CodeGenerator* codegen_;
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 37fcdb9d5c..24fbb6cb4c 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -56,7 +56,7 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
* (2) Since statements can be removed in a "forward" fashion,
* the visitor should test if each statement is still there.
*/
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
// TODO: fragile iteration, provide more robust iterators?
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
@@ -66,15 +66,15 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
}
}
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
+ void VisitAnd(HAnd* instruction) override;
+ void VisitArrayGet(HArrayGet* instruction) override;
+ void VisitArraySet(HArraySet* instruction) override;
+ void VisitMul(HMul* instruction) override;
+ void VisitOr(HOr* instruction) override;
+ void VisitShl(HShl* instruction) override;
+ void VisitShr(HShr* instruction) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
+ void VisitUShr(HUShr* instruction) override;
OptimizingCompilerStats* stats_;
};
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index f1a16efc61..fca9341d59 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -30,7 +30,7 @@ class InstructionSimplifierArm : public HOptimization {
static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
- bool Run() OVERRIDE;
+ bool Run() override;
};
} // namespace arm
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index e0a627994d..b536cb4dc4 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -58,7 +58,7 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
* (2) Since statements can be removed in a "forward" fashion,
* the visitor should test if each statement is still there.
*/
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
// TODO: fragile iteration, provide more robust iterators?
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
@@ -69,18 +69,18 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
}
// HInstruction visitors, sorted alphabetically.
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
- void VisitXor(HXor* instruction) OVERRIDE;
- void VisitVecLoad(HVecLoad* instruction) OVERRIDE;
- void VisitVecStore(HVecStore* instruction) OVERRIDE;
+ void VisitAnd(HAnd* instruction) override;
+ void VisitArrayGet(HArrayGet* instruction) override;
+ void VisitArraySet(HArraySet* instruction) override;
+ void VisitMul(HMul* instruction) override;
+ void VisitOr(HOr* instruction) override;
+ void VisitShl(HShl* instruction) override;
+ void VisitShr(HShr* instruction) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
+ void VisitUShr(HUShr* instruction) override;
+ void VisitXor(HXor* instruction) override;
+ void VisitVecLoad(HVecLoad* instruction) override;
+ void VisitVecStore(HVecStore* instruction) override;
OptimizingCompilerStats* stats_;
};
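
Both ARM simplifiers carry the same "forward iteration is fragile" comment: visiting an instruction may remove it, so the loop must advance before dispatching. A minimal sketch of that caveat on a simplified list (not ART's intrusive instruction list):

#include <list>

struct InstrSketch {
  bool removed = false;
};

void VisitAllForward(std::list<InstrSketch>& instructions) {
  for (auto it = instructions.begin(); it != instructions.end();) {
    auto current = it++;  // advance first: the visit may erase 'current'
    if (current->removed) {
      instructions.erase(current);
      continue;
    }
    // ... dispatch to the matching Visit method here ...
  }
}
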
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 8659c1f5f4..8d93c01ebf 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -30,7 +30,7 @@ class InstructionSimplifierArm64 : public HOptimization {
static constexpr const char* kInstructionSimplifierArm64PassName = "instruction_simplifier_arm64";
- bool Run() OVERRIDE;
+ bool Run() override;
};
} // namespace arm64
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
index 3bdf90f652..5d0c63b76b 100644
--- a/compiler/optimizing/instruction_simplifier_mips.cc
+++ b/compiler/optimizing/instruction_simplifier_mips.cc
@@ -39,8 +39,8 @@ class InstructionSimplifierMipsVisitor : public HGraphVisitor {
bool TryExtractArrayAccessIndex(HInstruction* access,
HInstruction* index,
DataType::Type packed_type);
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
+ void VisitArrayGet(HArrayGet* instruction) override;
+ void VisitArraySet(HArraySet* instruction) override;
OptimizingCompilerStats* stats_;
CodeGeneratorMIPS* codegen_;
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
index 94ef73d425..b431334811 100644
--- a/compiler/optimizing/instruction_simplifier_mips.h
+++ b/compiler/optimizing/instruction_simplifier_mips.h
@@ -35,7 +35,7 @@ class InstructionSimplifierMips : public HOptimization {
static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
- bool Run() OVERRIDE;
+ bool Run() override;
private:
CodeGeneratorMIPS* codegen_;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 993648f765..06e2fbb355 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -42,7 +42,7 @@ class IntrinsicsRecognizer : public HOptimization {
const char* name = kIntrinsicsRecognizerPassName)
: HOptimization(graph, name, stats) {}
- bool Run() OVERRIDE;
+ bool Run() override;
// Static helper that recognizes intrinsic call. Returns true on success.
// If it fails due to invoke type mismatch, wrong_invoke_type is set.
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index a657b5818f..1abfcb022b 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -112,7 +112,7 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
explicit IntrinsicSlowPathARM64(HInvoke* invoke)
: SlowPathCodeARM64(invoke), invoke_(invoke) { }
- void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen_in) override {
CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
__ Bind(GetEntryLabel());
@@ -145,7 +145,7 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathARM64"; }
+ const char* GetDescription() const override { return "IntrinsicSlowPathARM64"; }
private:
// The instruction where this slow path is happening.
@@ -163,7 +163,7 @@ class ReadBarrierSystemArrayCopySlowPathARM64 : public SlowPathCodeARM64 {
DCHECK(kUseBakerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen_in) override {
CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
@@ -216,7 +216,7 @@ class ReadBarrierSystemArrayCopySlowPathARM64 : public SlowPathCodeARM64 {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathARM64"; }
+ const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathARM64"; }
private:
Location tmp_;
@@ -1006,9 +1006,9 @@ class BakerReadBarrierCasSlowPathARM64 : public SlowPathCodeARM64 {
explicit BakerReadBarrierCasSlowPathARM64(HInvoke* invoke)
: SlowPathCodeARM64(invoke) {}
- const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARM64"; }
+ const char* GetDescription() const override { return "BakerReadBarrierCasSlowPathARM64"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
Arm64Assembler* assembler = arm64_codegen->GetAssembler();
MacroAssembler* masm = assembler->GetVIXLAssembler();
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 033a644f34..9c46efddec 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -37,7 +37,7 @@ namespace arm64 {
class CodeGeneratorARM64;
-class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderARM64 final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
: allocator_(allocator), codegen_(codegen) {}
@@ -45,7 +45,7 @@ class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -63,14 +63,14 @@ class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
};
-class IntrinsicCodeGeneratorARM64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorARM64 final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorARM64(CodeGeneratorARM64* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
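The visitor declarations here are stamped out by an X-macro: intrinsics_list.h supplies the list and OPTIMIZING_INTRINSICS expands once per entry, so the OVERRIDE-to-override switch reaches every generated method through a single macro body. A compilable sketch of the pattern, using a hypothetical stand-in list:

#define SKETCH_INTRINSICS_LIST(V) \
  V(MathAbsInt)                   \
  V(StringLength)

class IntrinsicVisitorSketch {
 public:
  virtual ~IntrinsicVisitorSketch() = default;
#define DECLARE_VISIT(Name) virtual void Visit##Name() {}
  SKETCH_INTRINSICS_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
};

class BackendVisitorSketch final : public IntrinsicVisitorSketch {
 public:
#define DECLARE_VISIT(Name) void Visit##Name() override {}
  SKETCH_INTRINSICS_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
};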
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 74a779d9e2..1127fb8191 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -85,7 +85,7 @@ class IntrinsicSlowPathARMVIXL : public SlowPathCodeARMVIXL {
return calling_convention_visitor.GetMethodLocation();
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
ArmVIXLAssembler* assembler = down_cast<ArmVIXLAssembler*>(codegen->GetAssembler());
__ Bind(GetEntryLabel());
@@ -111,7 +111,7 @@ class IntrinsicSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; }
+ const char* GetDescription() const override { return "IntrinsicSlowPath"; }
private:
// The instruction where this slow path is happening.
@@ -173,7 +173,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK(kUseBakerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
LocationSummary* locations = instruction_->GetLocations();
@@ -233,7 +233,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE {
+ const char* GetDescription() const override {
return "ReadBarrierSystemArrayCopySlowPathARMVIXL";
}
@@ -969,9 +969,9 @@ class BakerReadBarrierCasSlowPathARMVIXL : public SlowPathCodeARMVIXL {
explicit BakerReadBarrierCasSlowPathARMVIXL(HInvoke* invoke)
: SlowPathCodeARMVIXL(invoke) {}
- const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARMVIXL"; }
+ const char* GetDescription() const override { return "BakerReadBarrierCasSlowPathARMVIXL"; }
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
__ Bind(GetEntryLabel());
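Each EmitNativeCode body above first narrows the generic CodeGenerator* to the concrete backend with down_cast. A hedged stand-in for the idiom; ART's helper is essentially a static_cast plus a debug-mode check, and this is not its real implementation:

#include <cassert>

template <typename To, typename From>
To down_cast_sketch(From* from) {
  // Debug-only sanity check that the static narrowing is actually valid.
  assert(from == nullptr || dynamic_cast<To>(from) != nullptr);
  return static_cast<To>(from);
}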
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 9c02d0a4ad..1fea776f0d 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -27,14 +27,14 @@ namespace arm {
class ArmVIXLAssembler;
class CodeGeneratorARMVIXL;
-class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderARMVIXL final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen);
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -54,14 +54,14 @@ class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARMVIXL);
};
-class IntrinsicCodeGeneratorARMVIXL FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorARMVIXL final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorARMVIXL(CodeGeneratorARMVIXL* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 01d9f962f2..771714bf41 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -108,7 +108,7 @@ class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS {
public:
explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : SlowPathCodeMIPS(invoke), invoke_(invoke) { }
- void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen_in) override {
CodeGeneratorMIPS* codegen = down_cast<CodeGeneratorMIPS*>(codegen_in);
__ Bind(GetEntryLabel());
@@ -137,7 +137,7 @@ class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS {
__ B(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS"; }
+ const char* GetDescription() const override { return "IntrinsicSlowPathMIPS"; }
private:
// The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index 1c1ba40132..08d4e82139 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -30,14 +30,14 @@ namespace mips {
class CodeGeneratorMIPS;
class MipsAssembler;
-class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderMIPS final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen);
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
};
-class IntrinsicCodeGeneratorMIPS FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorMIPS final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorMIPS(CodeGeneratorMIPS* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 0bd69c6ec8..4a1bd5b7b2 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -97,7 +97,7 @@ class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
explicit IntrinsicSlowPathMIPS64(HInvoke* invoke)
: SlowPathCodeMIPS64(invoke), invoke_(invoke) { }
- void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen_in) override {
CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);
__ Bind(GetEntryLabel());
@@ -126,7 +126,7 @@ class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
__ Bc(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS64"; }
+ const char* GetDescription() const override { return "IntrinsicSlowPathMIPS64"; }
private:
// The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 748b0b02b2..ca8bc8f55a 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -30,14 +30,14 @@ namespace mips64 {
class CodeGeneratorMIPS64;
class Mips64Assembler;
-class IntrinsicLocationsBuilderMIPS64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderMIPS64 final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen);
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderMIPS64 FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
};
-class IntrinsicCodeGeneratorMIPS64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorMIPS64 final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorMIPS64(CodeGeneratorMIPS64* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h
index 8c69d9b643..41947f1ccd 100644
--- a/compiler/optimizing/intrinsics_utils.h
+++ b/compiler/optimizing/intrinsics_utils.h
@@ -47,7 +47,7 @@ class IntrinsicSlowPath : public SlowPathCode {
return calling_convention_visitor.GetMethodLocation();
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
Assembler* assembler = codegen->GetAssembler();
assembler->Bind(GetEntryLabel());
@@ -73,7 +73,7 @@ class IntrinsicSlowPath : public SlowPathCode {
assembler->Jump(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; }
+ const char* GetDescription() const override { return "IntrinsicSlowPath"; }
private:
// The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 5c7be54037..d33c0c344e 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -82,7 +82,7 @@ class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode {
DCHECK(kUseBakerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
@@ -160,7 +160,7 @@ class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86"; }
+ const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathX86"; }
private:
DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86);
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index e3555e78fc..ae150dad43 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -30,14 +30,14 @@ namespace x86 {
class CodeGeneratorX86;
class X86Assembler;
-class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderX86 final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen);
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
};
-class IntrinsicCodeGeneratorX86 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorX86 final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorX86(CodeGeneratorX86* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index b5afe931ff..ae889744ad 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -80,7 +80,7 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode {
DCHECK(kUseBakerReadBarrier);
}
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) override {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
@@ -118,7 +118,7 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode {
__ jmp(GetExitLabel());
}
- const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86_64"; }
+ const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathX86_64"; }
private:
DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86_64);
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index 5cb601edfe..199cfede1a 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -30,14 +30,14 @@ namespace x86_64 {
class CodeGeneratorX86_64;
class X86_64Assembler;
-class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderX86_64 final : public IntrinsicVisitor {
public:
explicit IntrinsicLocationsBuilderX86_64(CodeGeneratorX86_64* codegen);
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
@@ -55,14 +55,14 @@ class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor {
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64);
};
-class IntrinsicCodeGeneratorX86_64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorX86_64 final : public IntrinsicVisitor {
public:
explicit IntrinsicCodeGeneratorX86_64(CodeGeneratorX86_64* codegen) : codegen_(codegen) {}
// Define visitor methods.
#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- void Visit ## Name(HInvoke* invoke) OVERRIDE;
+ void Visit ## Name(HInvoke* invoke) override;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
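Across all six backends the locations builders and code generators are marked final. A small sketch of what that enables beyond documentation: calls through a pointer of the exact final type can be devirtualized, since no further override can exist.

struct PassSketch {
  virtual ~PassSketch() = default;
  virtual bool Run() { return false; }
};

struct LeafPass final : PassSketch {
  bool Run() override { return true; }
};

bool RunLeaf(LeafPass* pass) {
  // Resolvable statically: LeafPass is final, so this must be LeafPass::Run.
  return pass->Run();
}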
diff --git a/compiler/optimizing/licm.h b/compiler/optimizing/licm.h
index f72d195ab2..9cafddb05a 100644
--- a/compiler/optimizing/licm.h
+++ b/compiler/optimizing/licm.h
@@ -33,7 +33,7 @@ class LICM : public HOptimization {
: HOptimization(graph, name, stats),
side_effects_(side_effects) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kLoopInvariantCodeMotionPassName = "licm";
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 0fb90fb370..60f513ca48 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -38,7 +38,7 @@ HGraph* LiveRangesTest::BuildGraph(const std::vector<uint16_t>& data) {
// on how instructions are ordered.
RemoveSuspendChecks(graph);
// `Inline` conditions into ifs.
- PrepareForRegisterAllocation(graph).Run();
+ PrepareForRegisterAllocation(graph, *compiler_options_).Run();
return graph;
}
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 72f995e773..f11f7a9779 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -47,7 +47,7 @@ static void DumpBitVector(BitVector* vector,
void LivenessTest::TestCode(const std::vector<uint16_t>& data, const char* expected) {
HGraph* graph = CreateCFG(data);
// `Inline` conditions into ifs.
- PrepareForRegisterAllocation(graph).Run();
+ PrepareForRegisterAllocation(graph, *compiler_options_).Run();
std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
liveness.Analyze();
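Both test fixes above track the same interface change: PrepareForRegisterAllocation now receives the compiler options explicitly. A hedged sketch, with hypothetical names, of the pattern the tests now follow:

struct CompilerOptionsSketch {
  bool implicit_null_checks = true;  // illustrative field, not the real API
};

class PrepareForRegAllocSketch {
 public:
  explicit PrepareForRegAllocSketch(const CompilerOptionsSketch& options)
      : options_(options) {}
  void Run() {
    if (options_.implicit_null_checks) {
      // e.g. mark null checks as emitted at their use sites
    }
  }
 private:
  const CompilerOptionsSketch& options_;
};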
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 769a3f1b59..08d9309a3e 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -492,12 +492,12 @@ class HeapLocationCollector : public HGraphVisitor {
HeapLocation::kDeclaringClassDefIndexForArrays);
}
- void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+ void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override {
VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
CreateReferenceInfoForReferenceType(instruction);
}
- void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+ void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
HeapLocation* location = VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
has_heap_stores_ = true;
if (location->GetReferenceInfo()->IsSingleton()) {
@@ -523,12 +523,12 @@ class HeapLocationCollector : public HGraphVisitor {
}
}
- void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+ void VisitStaticFieldGet(HStaticFieldGet* instruction) override {
VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
CreateReferenceInfoForReferenceType(instruction);
}
- void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+ void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
has_heap_stores_ = true;
}
@@ -536,7 +536,7 @@ class HeapLocationCollector : public HGraphVisitor {
// We intentionally don't collect HUnresolvedInstanceField/HUnresolvedStaticField accesses
// since we cannot accurately track the fields.
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+ void VisitArrayGet(HArrayGet* instruction) override {
HInstruction* array = instruction->InputAt(0);
HInstruction* index = instruction->InputAt(1);
DataType::Type type = instruction->GetType();
@@ -544,7 +544,7 @@ class HeapLocationCollector : public HGraphVisitor {
CreateReferenceInfoForReferenceType(instruction);
}
- void VisitArraySet(HArraySet* instruction) OVERRIDE {
+ void VisitArraySet(HArraySet* instruction) override {
HInstruction* array = instruction->InputAt(0);
HInstruction* index = instruction->InputAt(1);
DataType::Type type = instruction->GetComponentType();
@@ -552,7 +552,7 @@ class HeapLocationCollector : public HGraphVisitor {
has_heap_stores_ = true;
}
- void VisitVecLoad(HVecLoad* instruction) OVERRIDE {
+ void VisitVecLoad(HVecLoad* instruction) override {
HInstruction* array = instruction->InputAt(0);
HInstruction* index = instruction->InputAt(1);
DataType::Type type = instruction->GetPackedType();
@@ -560,7 +560,7 @@ class HeapLocationCollector : public HGraphVisitor {
CreateReferenceInfoForReferenceType(instruction);
}
- void VisitVecStore(HVecStore* instruction) OVERRIDE {
+ void VisitVecStore(HVecStore* instruction) override {
HInstruction* array = instruction->InputAt(0);
HInstruction* index = instruction->InputAt(1);
DataType::Type type = instruction->GetPackedType();
@@ -568,7 +568,7 @@ class HeapLocationCollector : public HGraphVisitor {
has_heap_stores_ = true;
}
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
// Any new-instance or new-array cannot alias with references that
// pre-exist the new-instance/new-array. We append entries into
// ref_info_array_ which keeps track of the order of creation
@@ -580,7 +580,7 @@ class HeapLocationCollector : public HGraphVisitor {
CreateReferenceInfoForReferenceType(instruction);
}
- void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE {
+ void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) override {
has_monitor_operations_ = true;
}
@@ -605,7 +605,7 @@ class LoadStoreAnalysis : public HOptimization {
return heap_location_collector_;
}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kLoadStoreAnalysisPassName = "load_store_analysis";
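The collector above maps every field and array access to a HeapLocation slot keyed by the base reference plus the accessed offset or index. A hedged, simplified sketch of that bookkeeping, with illustrative names rather than the real API:

#include <cstddef>
#include <map>
#include <utility>

class HeapLocationTable {
 public:
  // Returns a stable index for the (reference, offset) pair, creating one
  // on first sight, much like the collector's heap-location slots.
  size_t GetOrCreate(const void* ref, size_t offset) {
    auto key = std::make_pair(ref, offset);
    auto it = index_.find(key);
    if (it != index_.end()) return it->second;
    size_t id = index_.size();
    index_.emplace(key, id);
    return id;
  }

 private:
  std::map<std::pair<const void*, size_t>, size_t> index_;
};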
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 28ac94273c..7f71745a43 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -107,7 +107,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)) {
}
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
// Populate the heap_values array for this block.
// TODO: try to reuse the heap_values array from one predecessor if possible.
if (block->IsLoopHeader()) {
@@ -656,13 +656,13 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
- void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+ void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override {
HInstruction* object = instruction->InputAt(0);
const FieldInfo& field = instruction->GetFieldInfo();
VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(object, &field));
}
- void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+ void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
HInstruction* object = instruction->InputAt(0);
const FieldInfo& field = instruction->GetFieldInfo();
HInstruction* value = instruction->InputAt(1);
@@ -670,24 +670,24 @@ class LSEVisitor : public HGraphDelegateVisitor {
VisitSetLocation(instruction, idx, value);
}
- void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+ void VisitStaticFieldGet(HStaticFieldGet* instruction) override {
HInstruction* cls = instruction->InputAt(0);
const FieldInfo& field = instruction->GetFieldInfo();
VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(cls, &field));
}
- void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+ void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
HInstruction* cls = instruction->InputAt(0);
const FieldInfo& field = instruction->GetFieldInfo();
size_t idx = heap_location_collector_.GetFieldHeapLocation(cls, &field);
VisitSetLocation(instruction, idx, instruction->InputAt(1));
}
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+ void VisitArrayGet(HArrayGet* instruction) override {
VisitGetLocation(instruction, heap_location_collector_.GetArrayHeapLocation(instruction));
}
- void VisitArraySet(HArraySet* instruction) OVERRIDE {
+ void VisitArraySet(HArraySet* instruction) override {
size_t idx = heap_location_collector_.GetArrayHeapLocation(instruction);
VisitSetLocation(instruction, idx, instruction->InputAt(2));
}
@@ -743,15 +743,15 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
- void VisitReturn(HReturn* instruction) OVERRIDE {
+ void VisitReturn(HReturn* instruction) override {
HandleExit(instruction->GetBlock());
}
- void VisitReturnVoid(HReturnVoid* return_void) OVERRIDE {
+ void VisitReturnVoid(HReturnVoid* return_void) override {
HandleExit(return_void->GetBlock());
}
- void VisitThrow(HThrow* throw_instruction) OVERRIDE {
+ void VisitThrow(HThrow* throw_instruction) override {
HandleExit(throw_instruction->GetBlock());
}
@@ -777,35 +777,35 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
- void VisitInvoke(HInvoke* invoke) OVERRIDE {
+ void VisitInvoke(HInvoke* invoke) override {
HandleInvoke(invoke);
}
- void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
+ void VisitClinitCheck(HClinitCheck* clinit) override {
HandleInvoke(clinit);
}
- void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE {
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE {
+ void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE {
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE {
+ void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) override {
// Conservatively treat it as an invocation.
HandleInvoke(instruction);
}
- void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
+ void VisitNewInstance(HNewInstance* new_instance) override {
ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_instance);
if (ref_info == nullptr) {
// new_instance isn't used for field accesses. No need to process it.
@@ -829,7 +829,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
- void VisitNewArray(HNewArray* new_array) OVERRIDE {
+ void VisitNewArray(HNewArray* new_array) override {
ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_array);
if (ref_info == nullptr) {
// new_array isn't used for array accesses. No need to process it.
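Note how VisitInvoke, VisitClinitCheck, and all four unresolved-field visitors above funnel into HandleInvoke: anything that may read or write arbitrary memory is treated like a call. A deliberately oversimplified sketch of that invalidation step; the real pass is smarter and keeps values for non-escaping singletons:

#include <vector>

constexpr int kUnknownValue = -1;  // stand-in for "no cached heap value"

void HandleInvokeSketch(std::vector<int>& heap_values) {
  for (int& value : heap_values) {
    value = kUnknownValue;  // a call may clobber any tracked location
  }
}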
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
index 408386bd82..f7ba41a1af 100644
--- a/compiler/optimizing/load_store_elimination.h
+++ b/compiler/optimizing/load_store_elimination.h
@@ -35,7 +35,7 @@ class LoadStoreElimination : public HOptimization {
side_effects_(side_effects),
lsa_(lsa) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination";
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index d355cedb35..2ae3683ffa 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -87,14 +87,14 @@ class ArchDefaultLoopHelper : public ArchNoOptsLoopHelper {
// Maximum number of instructions to be created as a result of full unrolling.
static constexpr uint32_t kScalarHeuristicFullyUnrolledMaxInstrThreshold = 35;
- bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+ bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const override {
return analysis_info->HasLongTypeInstructions() ||
IsLoopTooBig(analysis_info,
kScalarHeuristicMaxBodySizeInstr,
kScalarHeuristicMaxBodySizeBlocks);
}
- uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const OVERRIDE {
+ uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const override {
int64_t trip_count = analysis_info->GetTripCount();
// Unroll only loops with known trip count.
if (trip_count == LoopAnalysisInfo::kUnknownTripCount) {
@@ -108,9 +108,9 @@ class ArchDefaultLoopHelper : public ArchNoOptsLoopHelper {
return desired_unrolling_factor;
}
- bool IsLoopPeelingEnabled() const OVERRIDE { return true; }
+ bool IsLoopPeelingEnabled() const override { return true; }
- bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+ bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const override {
int64_t trip_count = analysis_info->GetTripCount();
// We assume that trip count is known.
DCHECK_NE(trip_count, LoopAnalysisInfo::kUnknownTripCount);
@@ -144,7 +144,7 @@ class Arm64LoopHelper : public ArchDefaultLoopHelper {
// Loop's maximum basic block count. Loops with higher count will not be peeled/unrolled.
static constexpr uint32_t kArm64ScalarHeuristicMaxBodySizeBlocks = 8;
- bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const OVERRIDE {
+ bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const override {
return IsLoopTooBig(loop_analysis_info,
kArm64ScalarHeuristicMaxBodySizeInstr,
kArm64ScalarHeuristicMaxBodySizeBlocks);
@@ -153,7 +153,7 @@ class Arm64LoopHelper : public ArchDefaultLoopHelper {
uint32_t GetSIMDUnrollingFactor(HBasicBlock* block,
int64_t trip_count,
uint32_t max_peel,
- uint32_t vector_length) const OVERRIDE {
+ uint32_t vector_length) const override {
// Don't unroll with insufficient iterations.
// TODO: Unroll loops with unknown trip count.
DCHECK_NE(vector_length, 0u);
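The heuristics above share one shape: no unrolling without a known trip count, and a factor clamped so the unrolled body stays within an instruction budget. A hedged sketch with illustrative constants, not the exact ART arithmetic:

#include <algorithm>
#include <cstdint>

constexpr int64_t kUnknownTripCountSketch = -1;
constexpr uint32_t kMaxUnrolledInstructionsSketch = 35;

uint32_t GetScalarUnrollingFactorSketch(int64_t trip_count, uint32_t body_instructions) {
  if (trip_count == kUnknownTripCountSketch || body_instructions == 0) {
    return 1;  // no unrolling without a known trip count
  }
  // Keep (factor * body size) under the budget, and never exceed the trip count.
  uint32_t budget = std::max<uint32_t>(1, kMaxUnrolledInstructionsSketch / body_instructions);
  return static_cast<uint32_t>(std::min<int64_t>(budget, trip_count));
}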
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 644b740ed4..2b202fda75 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -43,7 +43,7 @@ class HLoopOptimization : public HOptimization {
OptimizingCompilerStats* stats,
const char* name = kLoopOptimizationPassName);
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kLoopOptimizationPassName = "loop_optimization";
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8b9e1da0d3..5feffa0511 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1529,12 +1529,12 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
private: \
H##type& operator=(const H##type&) = delete; \
public: \
- const char* DebugName() const OVERRIDE { return #type; } \
- HInstruction* Clone(ArenaAllocator* arena) const OVERRIDE { \
+ const char* DebugName() const override { return #type; } \
+ HInstruction* Clone(ArenaAllocator* arena) const override { \
DCHECK(IsClonable()); \
return new (arena) H##type(*this->As##type()); \
} \
- void Accept(HGraphVisitor* visitor) OVERRIDE
+ void Accept(HGraphVisitor* visitor) override
#define DECLARE_ABSTRACT_INSTRUCTION(type) \
private: \
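DECLARE_INSTRUCTION is another X-macro: each concrete node gets a DebugName(), a Clone() built on its copy constructor, and an Accept() hook. A simplified, compilable sketch of the generated surface; the real Clone() placement-news into an arena allocator rather than using plain new:

struct HInstructionSketch {
  virtual ~HInstructionSketch() = default;
  virtual const char* DebugName() const = 0;
  virtual HInstructionSketch* Clone() const = 0;
};

struct HAddSketch final : HInstructionSketch {
  const char* DebugName() const override { return "Add"; }
  HInstructionSketch* Clone() const override { return new HAddSketch(*this); }
};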
@@ -2079,6 +2079,19 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
return false;
}
+ // If this instruction will do an implicit null check, return the `HNullCheck` associated
+ // with it. Otherwise return null.
+ HNullCheck* GetImplicitNullCheck() const {
+ // Find the first previous instruction which is not a move.
+ HInstruction* first_prev_not_move = GetPreviousDisregardingMoves();
+ if (first_prev_not_move != nullptr &&
+ first_prev_not_move->IsNullCheck() &&
+ first_prev_not_move->IsEmittedAtUseSite()) {
+ return first_prev_not_move->AsNullCheck();
+ }
+ return nullptr;
+ }
+
virtual bool IsActualObject() const {
return GetType() == DataType::Type::kReference;
}
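The one behavioral addition in this file is GetImplicitNullCheck() above: it looks back past moves for a null check emitted at its use site. A hedged sketch, with stand-in types rather than the real API, of how a backend can consume such a helper:

struct NullCheckSketch {};

struct InstructionSketch {
  NullCheckSketch* implicit_null_check = nullptr;  // stand-in lookup result
  NullCheckSketch* GetImplicitNullCheck() const { return implicit_null_check; }
};

void EmitFieldLoadSketch(const InstructionSketch& load) {
  if (load.GetImplicitNullCheck() != nullptr) {
    // Rely on the hardware fault: record a mapping from this load's PC to
    // the null check's environment instead of emitting a compare.
  } else {
    // Emit an explicit "compare against null, branch to slow path".
  }
}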
@@ -2582,7 +2595,7 @@ class HBackwardInstructionIterator : public ValueObject {
class HVariableInputSizeInstruction : public HInstruction {
public:
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
}
@@ -2632,7 +2645,7 @@ class HExpression : public HInstruction {
virtual ~HExpression() {}
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
}
@@ -2654,7 +2667,7 @@ class HExpression<0> : public HInstruction {
virtual ~HExpression() {}
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
return ArrayRef<HUserRecord<HInstruction*>>();
}
@@ -2667,13 +2680,13 @@ class HExpression<0> : public HInstruction {
// Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
// instruction that branches to the exit block.
-class HReturnVoid FINAL : public HExpression<0> {
+class HReturnVoid final : public HExpression<0> {
public:
explicit HReturnVoid(uint32_t dex_pc = kNoDexPc)
: HExpression(kReturnVoid, SideEffects::None(), dex_pc) {
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
DECLARE_INSTRUCTION(ReturnVoid);
@@ -2683,14 +2696,14 @@ class HReturnVoid FINAL : public HExpression<0> {
// Represents dex's RETURN opcodes. A HReturn is a control flow
// instruction that branches to the exit block.
-class HReturn FINAL : public HExpression<1> {
+class HReturn final : public HExpression<1> {
public:
explicit HReturn(HInstruction* value, uint32_t dex_pc = kNoDexPc)
: HExpression(kReturn, SideEffects::None(), dex_pc) {
SetRawInputAt(0, value);
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
DECLARE_INSTRUCTION(Return);
@@ -2698,7 +2711,7 @@ class HReturn FINAL : public HExpression<1> {
DEFAULT_COPY_CONSTRUCTOR(Return);
};
-class HPhi FINAL : public HVariableInputSizeInstruction {
+class HPhi final : public HVariableInputSizeInstruction {
public:
HPhi(ArenaAllocator* allocator,
uint32_t reg_number,
@@ -2722,7 +2735,7 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
SetPackedFlag<kFlagCanBeNull>(true);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
// Returns a type equivalent to the given `type`, but that a `HPhi` can hold.
static DataType::Type ToPhiType(DataType::Type type) {
@@ -2742,7 +2755,7 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
SetPackedField<TypeField>(new_type);
}
- bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+ bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
uint32_t GetRegNumber() const { return reg_number_; }
@@ -2800,13 +2813,13 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
// The exit instruction is the only instruction of the exit block.
// Instructions aborting the method (HThrow and HReturn) must branch to the
// exit block.
-class HExit FINAL : public HExpression<0> {
+class HExit final : public HExpression<0> {
public:
explicit HExit(uint32_t dex_pc = kNoDexPc)
: HExpression(kExit, SideEffects::None(), dex_pc) {
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
DECLARE_INSTRUCTION(Exit);
@@ -2815,14 +2828,14 @@ class HExit FINAL : public HExpression<0> {
};
// Jumps from one block to another.
-class HGoto FINAL : public HExpression<0> {
+class HGoto final : public HExpression<0> {
public:
explicit HGoto(uint32_t dex_pc = kNoDexPc)
: HExpression(kGoto, SideEffects::None(), dex_pc) {
}
- bool IsClonable() const OVERRIDE { return true; }
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool IsControlFlow() const override { return true; }
HBasicBlock* GetSuccessor() const {
return GetBlock()->GetSingleSuccessor();
@@ -2840,7 +2853,7 @@ class HConstant : public HExpression<0> {
: HExpression(kind, type, SideEffects::None(), dex_pc) {
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
// Is this constant -1 in the arithmetic sense?
virtual bool IsMinusOne() const { return false; }
@@ -2859,15 +2872,15 @@ class HConstant : public HExpression<0> {
DEFAULT_COPY_CONSTRUCTOR(Constant);
};
-class HNullConstant FINAL : public HConstant {
+class HNullConstant final : public HConstant {
public:
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- uint64_t GetValueAsUint64() const OVERRIDE { return 0; }
+ uint64_t GetValueAsUint64() const override { return 0; }
- size_t ComputeHashCode() const OVERRIDE { return 0; }
+ size_t ComputeHashCode() const override { return 0; }
// The null constant representation is a 0-bit pattern.
virtual bool IsZeroBitPattern() const { return true; }
@@ -2887,25 +2900,25 @@ class HNullConstant FINAL : public HConstant {
// Constants of the type int. Those can be from Dex instructions, or
// synthesized (for example with the if-eqz instruction).
-class HIntConstant FINAL : public HConstant {
+class HIntConstant final : public HConstant {
public:
int32_t GetValue() const { return value_; }
- uint64_t GetValueAsUint64() const OVERRIDE {
+ uint64_t GetValueAsUint64() const override {
return static_cast<uint64_t>(static_cast<uint32_t>(value_));
}
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsIntConstant()) << other->DebugName();
return other->AsIntConstant()->value_ == value_;
}
- size_t ComputeHashCode() const OVERRIDE { return GetValue(); }
+ size_t ComputeHashCode() const override { return GetValue(); }
- bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
- bool IsArithmeticZero() const OVERRIDE { return GetValue() == 0; }
- bool IsZeroBitPattern() const OVERRIDE { return GetValue() == 0; }
- bool IsOne() const OVERRIDE { return GetValue() == 1; }
+ bool IsMinusOne() const override { return GetValue() == -1; }
+ bool IsArithmeticZero() const override { return GetValue() == 0; }
+ bool IsZeroBitPattern() const override { return GetValue() == 0; }
+ bool IsOne() const override { return GetValue() == 1; }
// Integer constants are used to encode Boolean values as well,
// where 1 means true and 0 means false.
@@ -2933,23 +2946,23 @@ class HIntConstant FINAL : public HConstant {
ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast);
};
-class HLongConstant FINAL : public HConstant {
+class HLongConstant final : public HConstant {
public:
int64_t GetValue() const { return value_; }
- uint64_t GetValueAsUint64() const OVERRIDE { return value_; }
+ uint64_t GetValueAsUint64() const override { return value_; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsLongConstant()) << other->DebugName();
return other->AsLongConstant()->value_ == value_;
}
- size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+ size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
- bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
- bool IsArithmeticZero() const OVERRIDE { return GetValue() == 0; }
- bool IsZeroBitPattern() const OVERRIDE { return GetValue() == 0; }
- bool IsOne() const OVERRIDE { return GetValue() == 1; }
+ bool IsMinusOne() const override { return GetValue() == -1; }
+ bool IsArithmeticZero() const override { return GetValue() == 0; }
+ bool IsZeroBitPattern() const override { return GetValue() == 0; }
+ bool IsOne() const override { return GetValue() == 1; }
DECLARE_INSTRUCTION(LongConstant);
@@ -2967,25 +2980,25 @@ class HLongConstant FINAL : public HConstant {
friend class HGraph;
};
-class HFloatConstant FINAL : public HConstant {
+class HFloatConstant final : public HConstant {
public:
float GetValue() const { return value_; }
- uint64_t GetValueAsUint64() const OVERRIDE {
+ uint64_t GetValueAsUint64() const override {
return static_cast<uint64_t>(bit_cast<uint32_t, float>(value_));
}
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsFloatConstant()) << other->DebugName();
return other->AsFloatConstant()->GetValueAsUint64() == GetValueAsUint64();
}
- size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+ size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
- bool IsMinusOne() const OVERRIDE {
+ bool IsMinusOne() const override {
return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>((-1.0f));
}
- bool IsArithmeticZero() const OVERRIDE {
+ bool IsArithmeticZero() const override {
return std::fpclassify(value_) == FP_ZERO;
}
bool IsArithmeticPositiveZero() const {
@@ -2994,10 +3007,10 @@ class HFloatConstant FINAL : public HConstant {
bool IsArithmeticNegativeZero() const {
return IsArithmeticZero() && std::signbit(value_);
}
- bool IsZeroBitPattern() const OVERRIDE {
+ bool IsZeroBitPattern() const override {
return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(0.0f);
}
- bool IsOne() const OVERRIDE {
+ bool IsOne() const override {
return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(1.0f);
}
bool IsNaN() const {
@@ -3026,23 +3039,23 @@ class HFloatConstant FINAL : public HConstant {
friend class HGraph;
};
-class HDoubleConstant FINAL : public HConstant {
+class HDoubleConstant final : public HConstant {
public:
double GetValue() const { return value_; }
- uint64_t GetValueAsUint64() const OVERRIDE { return bit_cast<uint64_t, double>(value_); }
+ uint64_t GetValueAsUint64() const override { return bit_cast<uint64_t, double>(value_); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsDoubleConstant()) << other->DebugName();
return other->AsDoubleConstant()->GetValueAsUint64() == GetValueAsUint64();
}
- size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+ size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
- bool IsMinusOne() const OVERRIDE {
+ bool IsMinusOne() const override {
return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((-1.0));
}
- bool IsArithmeticZero() const OVERRIDE {
+ bool IsArithmeticZero() const override {
return std::fpclassify(value_) == FP_ZERO;
}
bool IsArithmeticPositiveZero() const {
@@ -3051,10 +3064,10 @@ class HDoubleConstant FINAL : public HConstant {
bool IsArithmeticNegativeZero() const {
return IsArithmeticZero() && std::signbit(value_);
}
- bool IsZeroBitPattern() const OVERRIDE {
+ bool IsZeroBitPattern() const override {
return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((0.0));
}
- bool IsOne() const OVERRIDE {
+ bool IsOne() const override {
return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>(1.0);
}
bool IsNaN() const {
@@ -3085,15 +3098,15 @@ class HDoubleConstant FINAL : public HConstant {
// Conditional branch. A block ending with an HIf instruction must have
// two successors.
-class HIf FINAL : public HExpression<1> {
+class HIf final : public HExpression<1> {
public:
explicit HIf(HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HExpression(kIf, SideEffects::None(), dex_pc) {
SetRawInputAt(0, input);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool IsControlFlow() const override { return true; }
HBasicBlock* IfTrueSuccessor() const {
return GetBlock()->GetSuccessors()[0];
@@ -3115,7 +3128,7 @@ class HIf FINAL : public HExpression<1> {
// non-exceptional control flow.
// Normal-flow successor is stored at index zero, exception handlers under
// higher indices in no particular order.
-class HTryBoundary FINAL : public HExpression<0> {
+class HTryBoundary final : public HExpression<0> {
public:
enum class BoundaryKind {
kEntry,
@@ -3128,7 +3141,7 @@ class HTryBoundary FINAL : public HExpression<0> {
SetPackedField<BoundaryKindField>(kind);
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
// Returns the block's non-exceptional successor (index zero).
HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessors()[0]; }
@@ -3174,7 +3187,7 @@ class HTryBoundary FINAL : public HExpression<0> {
};
// Deoptimize to interpreter, upon checking a condition.
-class HDeoptimize FINAL : public HVariableInputSizeInstruction {
+class HDeoptimize final : public HVariableInputSizeInstruction {
public:
// Use this constructor when the `HDeoptimize` acts as a barrier, where no code can move
// across.
@@ -3194,7 +3207,7 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
SetRawInputAt(0, cond);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
// Use this constructor when the `HDeoptimize` guards an instruction, and any user
// that relies on the deoptimization to pass should have its input be the `HDeoptimize`
@@ -3220,15 +3233,15 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
SetRawInputAt(1, guard);
}
- bool CanBeMoved() const OVERRIDE { return GetPackedFlag<kFieldCanBeMoved>(); }
+ bool CanBeMoved() const override { return GetPackedFlag<kFieldCanBeMoved>(); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
return (other->CanBeMoved() == CanBeMoved()) && (other->AsDeoptimize()->GetKind() == GetKind());
}
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
DeoptimizationKind GetDeoptimizationKind() const { return GetPackedField<DeoptimizeKindField>(); }
@@ -3268,7 +3281,7 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
// if it's true, starts to do deoptimization.
// It has a 4-byte slot on stack.
// TODO: allocate a register for this flag.
-class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction {
+class HShouldDeoptimizeFlag final : public HVariableInputSizeInstruction {
public:
// CHA guards are only optimized in a separate pass and it has no side effects
// with regard to other passes.
@@ -3286,7 +3299,7 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction {
// further guard elimination/motion since a guard might have been used for justification
// of the elimination of another guard. Therefore, we pretend this guard cannot be moved
// to avoid other optimizations trying to move it.
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
DECLARE_INSTRUCTION(ShouldDeoptimizeFlag);
@@ -3297,7 +3310,7 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction {
// Represents the ArtMethod that was passed as a first argument to
// the method. It is used by instructions that depend on it, like
// instructions that work with the dex cache.
-class HCurrentMethod FINAL : public HExpression<0> {
+class HCurrentMethod final : public HExpression<0> {
public:
explicit HCurrentMethod(DataType::Type type, uint32_t dex_pc = kNoDexPc)
: HExpression(kCurrentMethod, type, SideEffects::None(), dex_pc) {
@@ -3311,7 +3324,7 @@ class HCurrentMethod FINAL : public HExpression<0> {
// Fetches an ArtMethod from the virtual table or the interface method table
// of a class.
-class HClassTableGet FINAL : public HExpression<1> {
+class HClassTableGet final : public HExpression<1> {
public:
enum class TableKind {
kVTable,
@@ -3329,9 +3342,9 @@ class HClassTableGet FINAL : public HExpression<1> {
SetRawInputAt(0, cls);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other) const override {
return other->AsClassTableGet()->GetIndex() == index_ &&
other->AsClassTableGet()->GetPackedFields() == GetPackedFields();
}
@@ -3360,7 +3373,7 @@ class HClassTableGet FINAL : public HExpression<1> {
// PackedSwitch (jump table). A block ending with a PackedSwitch instruction will
// have one successor for each entry in the switch table, and the final successor
// will be the block containing the next Dex opcode.
-class HPackedSwitch FINAL : public HExpression<1> {
+class HPackedSwitch final : public HExpression<1> {
public:
HPackedSwitch(int32_t start_value,
uint32_t num_entries,
@@ -3372,9 +3385,9 @@ class HPackedSwitch FINAL : public HExpression<1> {
SetRawInputAt(0, input);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
int32_t GetStartValue() const { return start_value_; }
@@ -3405,13 +3418,13 @@ class HUnaryOperation : public HExpression<1> {
}
// All of the UnaryOperation instructions are clonable.
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
HInstruction* GetInput() const { return InputAt(0); }
DataType::Type GetResultType() const { return GetType(); }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
@@ -3446,7 +3459,7 @@ class HBinaryOperation : public HExpression<2> {
}
// All of the BinaryOperation instructions are clonable.
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
HInstruction* GetLeft() const { return InputAt(0); }
HInstruction* GetRight() const { return InputAt(1); }
@@ -3486,8 +3499,8 @@ class HBinaryOperation : public HExpression<2> {
}
}
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
@@ -3568,7 +3581,7 @@ class HCondition : public HBinaryOperation {
ComparisonBias GetBias() const { return GetPackedField<ComparisonBiasField>(); }
void SetBias(ComparisonBias bias) { SetPackedField<ComparisonBiasField>(bias); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
return GetPackedFields() == other->AsCondition()->GetPackedFields();
}
@@ -3625,42 +3638,42 @@ class HCondition : public HBinaryOperation {
};
// Instruction to check if two inputs are equal to each other.
-class HEqual FINAL : public HCondition {
+class HEqual final : public HCondition {
public:
HEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kEqual, first, second, dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
- HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HNullConstant* y ATTRIBUTE_UNUSED) const override {
return MakeConstantCondition(true, GetDexPc());
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, a HCompare instruction has
// been merged into this HEqual instruction; evaluate it as
// `Compare(x, y) == 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0),
GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(Equal);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondEQ;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondNE;
}
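The Evaluate() overloads above encode one rule: once an HCompare has been merged into the condition, folding x == y over long or floating-point constants becomes Compare(x, y) == 0. A small free-function sketch of the same arithmetic:

#include <cstdint>

// Three-way compare, mirroring the merged HCompare semantics for integers.
int32_t CompareSketch(int64_t x, int64_t y) {
  return x == y ? 0 : (x < y ? -1 : 1);
}

bool EvaluateEqualSketch(int64_t x, int64_t y) {
  return CompareSketch(x, y) == 0;  // identical result to x == y, via the merge
}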
@@ -3671,42 +3684,42 @@ class HEqual FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x == y; }
};
-class HNotEqual FINAL : public HCondition {
+class HNotEqual final : public HCondition {
public:
HNotEqual(HInstruction* first, HInstruction* second,
uint32_t dex_pc = kNoDexPc)
: HCondition(kNotEqual, first, second, dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
- HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HNullConstant* y ATTRIBUTE_UNUSED) const override {
return MakeConstantCondition(false, GetDexPc());
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, a HCompare instruction has
// been merged into this HNotEqual instruction; evaluate it as
// `Compare(x, y) != 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(NotEqual);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondNE;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondEQ;
}
@@ -3717,36 +3730,36 @@ class HNotEqual FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x != y; }
};
-class HLessThan FINAL : public HCondition {
+class HLessThan final : public HCondition {
public:
HLessThan(HInstruction* first, HInstruction* second,
uint32_t dex_pc = kNoDexPc)
: HCondition(kLessThan, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, a HCompare instruction has
// been merged into this HLessThan instruction; evaluate it as
// `Compare(x, y) < 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(LessThan);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondLT;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondGE;
}
@@ -3757,36 +3770,36 @@ class HLessThan FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x < y; }
};
-class HLessThanOrEqual FINAL : public HCondition {
+class HLessThanOrEqual final : public HCondition {
public:
HLessThanOrEqual(HInstruction* first, HInstruction* second,
uint32_t dex_pc = kNoDexPc)
: HCondition(kLessThanOrEqual, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, a HCompare instruction has
// been merged into this HLessThanOrEqual instruction; evaluate it as
// `Compare(x, y) <= 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(LessThanOrEqual);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondLE;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondGT;
}
@@ -3797,35 +3810,35 @@ class HLessThanOrEqual FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x <= y; }
};
-class HGreaterThan FINAL : public HCondition {
+class HGreaterThan final : public HCondition {
public:
HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kGreaterThan, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, a HCompare instruction has
// been merged into this HGreaterThan instruction; evaluate it as
// `Compare(x, y) > 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(GreaterThan);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondGT;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondLE;
}
@@ -3836,35 +3849,35 @@ class HGreaterThan FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x > y; }
};
-class HGreaterThanOrEqual FINAL : public HCondition {
+class HGreaterThanOrEqual final : public HCondition {
public:
HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kGreaterThanOrEqual, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
// In the following Evaluate methods, a HCompare instruction has
// been merged into this HGreaterThanOrEqual instruction; evaluate it as
// `Compare(x, y) >= 0`.
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(GreaterThanOrEqual);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondGE;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondLT;
}
@@ -3875,36 +3888,36 @@ class HGreaterThanOrEqual FINAL : public HCondition {
template <typename T> static bool Compute(T x, T y) { return x >= y; }
};
-class HBelow FINAL : public HCondition {
+class HBelow final : public HCondition {
public:
HBelow(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kBelow, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
DECLARE_INSTRUCTION(Below);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondB;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondAE;
}
@@ -3917,36 +3930,36 @@ class HBelow FINAL : public HCondition {
}
};
-class HBelowOrEqual FINAL : public HCondition {
+class HBelowOrEqual final : public HCondition {
public:
HBelowOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kBelowOrEqual, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
DECLARE_INSTRUCTION(BelowOrEqual);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondBE;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondA;
}
@@ -3959,36 +3972,36 @@ class HBelowOrEqual FINAL : public HCondition {
}
};
-class HAbove FINAL : public HCondition {
+class HAbove final : public HCondition {
public:
HAbove(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kAbove, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
DECLARE_INSTRUCTION(Above);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondA;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondBE;
}
@@ -4001,36 +4014,36 @@ class HAbove FINAL : public HCondition {
}
};
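// [Editor's sketch; illustration only.] The Compute() helpers of HBelow,
// HBelowOrEqual, HAbove and HAboveOrEqual (their bodies are elided by the hunks
// above) perform the unsigned counterparts of <, <=, > and >=: the operands are
// compared after a cast to the matching unsigned type, so for instance -1 is
// "above" every other 32-bit value.
static bool AboveSketch(int32_t x, int32_t y) {
  return static_cast<uint32_t>(x) > static_cast<uint32_t>(y);  // -1 beats all
}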
-class HAboveOrEqual FINAL : public HCondition {
+class HAboveOrEqual final : public HCondition {
public:
HAboveOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(kAboveOrEqual, first, second, dex_pc) {
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
DECLARE_INSTRUCTION(AboveOrEqual);
- IfCondition GetCondition() const OVERRIDE {
+ IfCondition GetCondition() const override {
return kCondAE;
}
- IfCondition GetOppositeCondition() const OVERRIDE {
+ IfCondition GetOppositeCondition() const override {
return kCondB;
}
@@ -4045,7 +4058,7 @@ class HAboveOrEqual FINAL : public HCondition {
// Instruction to check how two inputs compare to each other.
// Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1.
-class HCompare FINAL : public HBinaryOperation {
+class HCompare final : public HBinaryOperation {
public:
// Note that `comparison_type` is the type of comparison performed
// between the comparison's inputs, not the type of the instantiated
@@ -4077,7 +4090,7 @@ class HCompare FINAL : public HBinaryOperation {
return std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compute(x, y);
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
// Note that there is no "cmp-int" Dex instruction, so we shouldn't
// reach this code path when processing a freshly built HIR
// graph. However, HCompare integer instructions can be synthesized
@@ -4085,17 +4098,17 @@ class HCompare FINAL : public HBinaryOperation {
// IntegerSignum intrinsics, so we have to handle this case.
return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
return GetPackedFields() == other->AsCompare()->GetPackedFields();
}
@@ -4134,7 +4147,7 @@ class HCompare FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Compare);
};
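// [Editor's sketch; illustration only.] ComputeFP() above implements the Dex
// cmpg-/cmpl- semantics: an unordered comparison (either operand is NaN) yields
// the bias value instead of a regular result, +1 under gt-bias and -1 under
// lt-bias. Assumes <cmath> for std::isunordered.
static int32_t ComputeFPSketch(float x, float y, bool is_gt_bias) {
  if (std::isunordered(x, y)) {
    return is_gt_bias ? 1 : -1;              // NaN involved: return the bias
  }
  return (x > y) ? 1 : ((x == y) ? 0 : -1);  // ordinary three-way compare
}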
-class HNewInstance FINAL : public HExpression<1> {
+class HNewInstance final : public HExpression<1> {
public:
HNewInstance(HInstruction* cls,
uint32_t dex_pc,
@@ -4153,16 +4166,16 @@ class HNewInstance FINAL : public HExpression<1> {
SetRawInputAt(0, cls);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
// Calls runtime so needs an environment.
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
// Can throw an error when out of memory or when the class is not instantiable/accessible.
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
bool NeedsChecks() const {
return entrypoint_ == kQuickAllocObjectWithChecks;
@@ -4170,7 +4183,7 @@ class HNewInstance FINAL : public HExpression<1> {
bool IsFinalizable() const { return GetPackedFlag<kFlagFinalizable>(); }
- bool CanBeNull() const OVERRIDE { return false; }
+ bool CanBeNull() const override { return false; }
QuickEntrypointEnum GetEntrypoint() const { return entrypoint_; }
@@ -4224,7 +4237,7 @@ enum IntrinsicExceptions {
class HInvoke : public HVariableInputSizeInstruction {
public:
- bool NeedsEnvironment() const OVERRIDE;
+ bool NeedsEnvironment() const override;
void SetArgumentAt(size_t index, HInstruction* argument) {
SetRawInputAt(index, argument);
@@ -4257,15 +4270,15 @@ class HInvoke : public HVariableInputSizeInstruction {
void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); }
- bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); }
+ bool CanThrow() const override { return GetPackedFlag<kFlagCanThrow>(); }
void SetAlwaysThrows(bool always_throws) { SetPackedFlag<kFlagAlwaysThrows>(always_throws); }
- bool AlwaysThrows() const OVERRIDE { return GetPackedFlag<kFlagAlwaysThrows>(); }
+ bool AlwaysThrows() const override { return GetPackedFlag<kFlagAlwaysThrows>(); }
- bool CanBeMoved() const OVERRIDE { return IsIntrinsic() && !DoesAnyWrite(); }
+ bool CanBeMoved() const override { return IsIntrinsic() && !DoesAnyWrite(); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
return intrinsic_ != Intrinsics::kNone && intrinsic_ == other->AsInvoke()->intrinsic_;
}
@@ -4331,7 +4344,7 @@ class HInvoke : public HVariableInputSizeInstruction {
uint32_t intrinsic_optimizations_;
};
-class HInvokeUnresolved FINAL : public HInvoke {
+class HInvokeUnresolved final : public HInvoke {
public:
HInvokeUnresolved(ArenaAllocator* allocator,
uint32_t number_of_arguments,
@@ -4350,7 +4363,7 @@ class HInvokeUnresolved FINAL : public HInvoke {
invoke_type) {
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
DECLARE_INSTRUCTION(InvokeUnresolved);
@@ -4358,7 +4371,7 @@ class HInvokeUnresolved FINAL : public HInvoke {
DEFAULT_COPY_CONSTRUCTOR(InvokeUnresolved);
};
-class HInvokePolymorphic FINAL : public HInvoke {
+class HInvokePolymorphic final : public HInvoke {
public:
HInvokePolymorphic(ArenaAllocator* allocator,
uint32_t number_of_arguments,
@@ -4376,7 +4389,7 @@ class HInvokePolymorphic FINAL : public HInvoke {
kVirtual) {
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
DECLARE_INSTRUCTION(InvokePolymorphic);
@@ -4384,7 +4397,7 @@ class HInvokePolymorphic FINAL : public HInvoke {
DEFAULT_COPY_CONSTRUCTOR(InvokePolymorphic);
};
-class HInvokeCustom FINAL : public HInvoke {
+class HInvokeCustom final : public HInvoke {
public:
HInvokeCustom(ArenaAllocator* allocator,
uint32_t number_of_arguments,
@@ -4405,7 +4418,7 @@ class HInvokeCustom FINAL : public HInvoke {
uint32_t GetCallSiteIndex() const { return call_site_index_; }
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
DECLARE_INSTRUCTION(InvokeCustom);
@@ -4416,7 +4429,7 @@ class HInvokeCustom FINAL : public HInvoke {
uint32_t call_site_index_;
};
-class HInvokeStaticOrDirect FINAL : public HInvoke {
+class HInvokeStaticOrDirect final : public HInvoke {
public:
// Requirements of this method call regarding the class
// initialization (clinit) check of its declaring class.
@@ -4505,7 +4518,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
void SetDispatchInfo(const DispatchInfo& dispatch_info) {
bool had_current_method_input = HasCurrentMethodInput();
@@ -4535,7 +4548,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
}
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
ArrayRef<HUserRecord<HInstruction*>> input_records = HInvoke::GetInputRecords();
if (kIsDebugBuild && IsStaticWithExplicitClinitCheck()) {
DCHECK(!input_records.empty());
@@ -4553,13 +4566,13 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
return input_records;
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
// We access the method via the dex cache, so we can't do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
return false;
}
- bool CanBeNull() const OVERRIDE {
+ bool CanBeNull() const override {
return GetType() == DataType::Type::kReference && !IsStringInit();
}
@@ -4574,7 +4587,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
- bool NeedsDexCacheOfDeclaringClass() const OVERRIDE;
+ bool NeedsDexCacheOfDeclaringClass() const override;
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; }
bool HasPcRelativeMethodLoadKind() const {
@@ -4675,7 +4688,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs);
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs);
-class HInvokeVirtual FINAL : public HInvoke {
+class HInvokeVirtual final : public HInvoke {
public:
HInvokeVirtual(ArenaAllocator* allocator,
uint32_t number_of_arguments,
@@ -4696,9 +4709,9 @@ class HInvokeVirtual FINAL : public HInvoke {
vtable_index_(vtable_index) {
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool CanBeNull() const OVERRIDE {
+ bool CanBeNull() const override {
switch (GetIntrinsic()) {
case Intrinsics::kThreadCurrentThread:
case Intrinsics::kStringBufferAppend:
@@ -4711,9 +4724,9 @@ class HInvokeVirtual FINAL : public HInvoke {
}
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
// TODO: Add implicit null checks in intrinsics.
- return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
+ return (obj == InputAt(0)) && !IsIntrinsic();
}
uint32_t GetVTableIndex() const { return vtable_index_; }
@@ -4728,7 +4741,7 @@ class HInvokeVirtual FINAL : public HInvoke {
const uint32_t vtable_index_;
};
-class HInvokeInterface FINAL : public HInvoke {
+class HInvokeInterface final : public HInvoke {
public:
HInvokeInterface(ArenaAllocator* allocator,
uint32_t number_of_arguments,
@@ -4749,14 +4762,14 @@ class HInvokeInterface FINAL : public HInvoke {
imt_index_(imt_index) {
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
// TODO: Add implicit null checks in intrinsics.
- return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
+ return (obj == InputAt(0)) && !IsIntrinsic();
}
- bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+ bool NeedsDexCacheOfDeclaringClass() const override {
// The assembly stub currently needs it.
return true;
}
@@ -4773,7 +4786,7 @@ class HInvokeInterface FINAL : public HInvoke {
const uint32_t imt_index_;
};
-class HNeg FINAL : public HUnaryOperation {
+class HNeg final : public HUnaryOperation {
public:
HNeg(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HUnaryOperation(kNeg, result_type, input, dex_pc) {
@@ -4782,16 +4795,16 @@ class HNeg FINAL : public HUnaryOperation {
template <typename T> static T Compute(T x) { return -x; }
- HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x) const override {
return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x) const override {
return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x) const override {
return GetBlock()->GetGraph()->GetFloatConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(Compute(x->GetValue()), GetDexPc());
}
@@ -4801,7 +4814,7 @@ class HNeg FINAL : public HUnaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Neg);
};
-class HNewArray FINAL : public HExpression<2> {
+class HNewArray final : public HExpression<2> {
public:
HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc)
: HExpression(kNewArray, DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) {
@@ -4809,15 +4822,15 @@ class HNewArray FINAL : public HExpression<2> {
SetRawInputAt(1, length);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
// Calls runtime so needs an environment.
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
// May throw NegativeArraySizeException, OutOfMemoryError, etc.
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
- bool CanBeNull() const OVERRIDE { return false; }
+ bool CanBeNull() const override { return false; }
HLoadClass* GetLoadClass() const {
DCHECK(InputAt(0)->IsLoadClass());
@@ -4834,7 +4847,7 @@ class HNewArray FINAL : public HExpression<2> {
DEFAULT_COPY_CONSTRUCTOR(NewArray);
};
-class HAdd FINAL : public HBinaryOperation {
+class HAdd final : public HBinaryOperation {
public:
HAdd(DataType::Type result_type,
HInstruction* left,
@@ -4843,23 +4856,23 @@ class HAdd FINAL : public HBinaryOperation {
: HBinaryOperation(kAdd, result_type, left, right, SideEffects::None(), dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
template <typename T> static T Compute(T x, T y) { return x + y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
@@ -4870,7 +4883,7 @@ class HAdd FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Add);
};
-class HSub FINAL : public HBinaryOperation {
+class HSub final : public HBinaryOperation {
public:
HSub(DataType::Type result_type,
HInstruction* left,
@@ -4881,19 +4894,19 @@ class HSub FINAL : public HBinaryOperation {
template <typename T> static T Compute(T x, T y) { return x - y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
@@ -4904,7 +4917,7 @@ class HSub FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Sub);
};
-class HMul FINAL : public HBinaryOperation {
+class HMul final : public HBinaryOperation {
public:
HMul(DataType::Type result_type,
HInstruction* left,
@@ -4913,23 +4926,23 @@ class HMul FINAL : public HBinaryOperation {
: HBinaryOperation(kMul, result_type, left, right, SideEffects::None(), dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
template <typename T> static T Compute(T x, T y) { return x * y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
@@ -4940,7 +4953,7 @@ class HMul FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Mul);
};
-class HDiv FINAL : public HBinaryOperation {
+class HDiv final : public HBinaryOperation {
public:
HDiv(DataType::Type result_type,
HInstruction* left,
@@ -4965,19 +4978,19 @@ class HDiv FINAL : public HBinaryOperation {
return x / y;
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
@@ -4988,7 +5001,7 @@ class HDiv FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Div);
};
-class HRem FINAL : public HBinaryOperation {
+class HRem final : public HBinaryOperation {
public:
HRem(DataType::Type result_type,
HInstruction* left,
@@ -5013,19 +5026,19 @@ class HRem FINAL : public HBinaryOperation {
return std::fmod(x, y);
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
@@ -5036,7 +5049,7 @@ class HRem FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Rem);
};
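// [Editor's sketch; illustration only.] The ComputeIntegral() guards elided by
// the hunks above must not evaluate INT32_MIN / -1 (or % -1), which is
// undefined behavior in C++ even though Dex defines it: div wraps back to
// INT32_MIN and rem yields 0. A safe constant fold special-cases y == -1:
static int32_t DivFoldSketch(int32_t x, int32_t y) {
  // y == 0 is assumed to have been ruled out already (HDivZeroCheck's job).
  if (y == -1) {
    // Negate via unsigned arithmetic so INT32_MIN wraps instead of overflowing.
    return static_cast<int32_t>(0u - static_cast<uint32_t>(x));
  }
  return x / y;
}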
-class HMin FINAL : public HBinaryOperation {
+class HMin final : public HBinaryOperation {
public:
HMin(DataType::Type result_type,
HInstruction* left,
@@ -5044,26 +5057,26 @@ class HMin FINAL : public HBinaryOperation {
uint32_t dex_pc)
: HBinaryOperation(kMin, result_type, left, right, SideEffects::None(), dex_pc) {}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
// Evaluation for integral values.
template <typename T> static T ComputeIntegral(T x, T y) {
return (x <= y) ? x : y;
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
// TODO: Evaluation for floating-point values.
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
DECLARE_INSTRUCTION(Min);
@@ -5071,7 +5084,7 @@ class HMin FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Min);
};
-class HMax FINAL : public HBinaryOperation {
+class HMax final : public HBinaryOperation {
public:
HMax(DataType::Type result_type,
HInstruction* left,
@@ -5079,26 +5092,26 @@ class HMax FINAL : public HBinaryOperation {
uint32_t dex_pc)
: HBinaryOperation(kMax, result_type, left, right, SideEffects::None(), dex_pc) {}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
// Evaluation for integral values.
template <typename T> static T ComputeIntegral(T x, T y) {
return (x >= y) ? x : y;
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
// TODO: Evaluation for floating-point values.
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
DECLARE_INSTRUCTION(Max);
@@ -5106,7 +5119,7 @@ class HMax FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Max);
};
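// [Editor's sketch; illustration only.] Min/Max folding returns nullptr for
// float/double above (the TODOs) because the integral form (x <= y) ? x : y
// does not match Java's Math.min/max semantics: NaN must propagate, and -0.0
// must order below +0.0. A faithful float fold would look roughly like this
// (assumes <cmath>):
static float MinFPSketch(float x, float y) {
  if (std::isnan(x) || std::isnan(y)) {
    return std::nanf("");            // NaN propagates
  }
  if (x == 0.0f && y == 0.0f) {
    return std::signbit(x) ? x : y;  // prefer -0.0f over +0.0f
  }
  return (x < y) ? x : y;
}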
-class HAbs FINAL : public HUnaryOperation {
+class HAbs final : public HUnaryOperation {
public:
HAbs(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HUnaryOperation(kAbs, result_type, input, dex_pc) {}
@@ -5126,17 +5139,17 @@ class HAbs FINAL : public HUnaryOperation {
return bit_cast<T, S>(bits & std::numeric_limits<S>::max());
}
- HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x) const override {
return GetBlock()->GetGraph()->GetIntConstant(ComputeIntegral(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x) const override {
return GetBlock()->GetGraph()->GetLongConstant(ComputeIntegral(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x) const override {
return GetBlock()->GetGraph()->GetFloatConstant(
ComputeFP<float, int32_t>(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HDoubleConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x) const override {
return GetBlock()->GetGraph()->GetDoubleConstant(
ComputeFP<double, int64_t>(x->GetValue()), GetDexPc());
}
@@ -5147,7 +5160,7 @@ class HAbs FINAL : public HUnaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Abs);
};
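// [Editor's sketch; illustration only.] ComputeFP() above takes the absolute
// value by clearing the sign bit: the value is reinterpreted as its same-width
// signed integer (bit_cast), ANDed with std::numeric_limits<S>::max(), which is
// every bit set except the sign, and reinterpreted back. This maps -0.0 to +0.0
// and never raises a floating-point exception. Assumes <cstdint> and <cstring>.
static float AbsViaSignMaskSketch(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));  // portable stand-in for bit_cast
  bits &= 0x7fffffffu;                   // INT32_MAX: all bits but the sign
  std::memcpy(&x, &bits, sizeof(x));
  return x;
}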
-class HDivZeroCheck FINAL : public HExpression<1> {
+class HDivZeroCheck final : public HExpression<1> {
public:
// `HDivZeroCheck` can trigger GC, as it may call the `ArithmeticException`
// constructor.
@@ -5156,15 +5169,15 @@ class HDivZeroCheck FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool NeedsEnvironment() const OVERRIDE { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
+ bool CanThrow() const override { return true; }
DECLARE_INSTRUCTION(DivZeroCheck);
@@ -5172,7 +5185,7 @@ class HDivZeroCheck FINAL : public HExpression<1> {
DEFAULT_COPY_CONSTRUCTOR(DivZeroCheck);
};
-class HShl FINAL : public HBinaryOperation {
+class HShl final : public HBinaryOperation {
public:
HShl(DataType::Type result_type,
HInstruction* value,
@@ -5188,26 +5201,26 @@ class HShl FINAL : public HBinaryOperation {
return value << (distance & max_shift_distance);
}
- HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
}
HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
- HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HLongConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
UNREACHABLE();
}
HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
- HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
- HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5218,7 +5231,7 @@ class HShl FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Shl);
};
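// [Editor's sketch; illustration only.] The shift nodes mask the distance the
// way the Java language does: only the low 5 bits count for 32-bit shifts
// (kMaxIntShiftDistance, 31) and the low 6 bits for 64-bit shifts
// (kMaxLongShiftDistance, 63). Folding (x << 33) on an int therefore equals
// folding (x << 1):
static int32_t ShlSketch(int32_t value, int32_t distance) {
  // Shift through uint32_t so the fold is well defined for negative values too.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << (distance & 31));
}
// ShlSketch(1, 33) == 2, matching Java's (1 << 33).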
-class HShr FINAL : public HBinaryOperation {
+class HShr final : public HBinaryOperation {
public:
HShr(DataType::Type result_type,
HInstruction* value,
@@ -5234,26 +5247,26 @@ class HShr FINAL : public HBinaryOperation {
return value >> (distance & max_shift_distance);
}
- HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
}
HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
- HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HLongConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
UNREACHABLE();
}
HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
- HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
- HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5264,7 +5277,7 @@ class HShr FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Shr);
};
-class HUShr FINAL : public HBinaryOperation {
+class HUShr final : public HBinaryOperation {
public:
HUShr(DataType::Type result_type,
HInstruction* value,
@@ -5282,26 +5295,26 @@ class HUShr FINAL : public HBinaryOperation {
return static_cast<T>(ux >> (distance & max_shift_distance));
}
- HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
}
HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
- HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HLongConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
UNREACHABLE();
}
HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
- HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
- HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5312,7 +5325,7 @@ class HUShr FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(UShr);
};
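// [Editor's sketch; illustration only.] HUShr::Compute() above first converts
// the value to its unsigned counterpart (the `ux` visible in the hunk): `>>` on
// a negative signed operand is an arithmetic shift that replicates the sign
// bit, while the Dex ushr instruction requires a logical shift filling with
// zeros.
static int32_t UShrSketch(int32_t value, int32_t distance) {
  uint32_t ux = static_cast<uint32_t>(value);
  return static_cast<int32_t>(ux >> (distance & 31));
}
// UShrSketch(-1, 28) == 15, whereas the arithmetic (-1 >> 28) stays -1.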
-class HAnd FINAL : public HBinaryOperation {
+class HAnd final : public HBinaryOperation {
public:
HAnd(DataType::Type result_type,
HInstruction* left,
@@ -5321,25 +5334,25 @@ class HAnd FINAL : public HBinaryOperation {
: HBinaryOperation(kAnd, result_type, left, right, SideEffects::None(), dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
template <typename T> static T Compute(T x, T y) { return x & y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5350,7 +5363,7 @@ class HAnd FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(And);
};
-class HOr FINAL : public HBinaryOperation {
+class HOr final : public HBinaryOperation {
public:
HOr(DataType::Type result_type,
HInstruction* left,
@@ -5359,25 +5372,25 @@ class HOr FINAL : public HBinaryOperation {
: HBinaryOperation(kOr, result_type, left, right, SideEffects::None(), dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
template <typename T> static T Compute(T x, T y) { return x | y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5388,7 +5401,7 @@ class HOr FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Or);
};
-class HXor FINAL : public HBinaryOperation {
+class HXor final : public HBinaryOperation {
public:
HXor(DataType::Type result_type,
HInstruction* left,
@@ -5397,25 +5410,25 @@ class HXor FINAL : public HBinaryOperation {
: HBinaryOperation(kXor, result_type, left, right, SideEffects::None(), dex_pc) {
}
- bool IsCommutative() const OVERRIDE { return true; }
+ bool IsCommutative() const override { return true; }
template <typename T> static T Compute(T x, T y) { return x ^ y; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5426,7 +5439,7 @@ class HXor FINAL : public HBinaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Xor);
};
-class HRor FINAL : public HBinaryOperation {
+class HRor final : public HBinaryOperation {
public:
HRor(DataType::Type result_type, HInstruction* value, HInstruction* distance)
: HBinaryOperation(kRor, result_type, value, distance) {
@@ -5447,26 +5460,26 @@ class HRor FINAL : public HBinaryOperation {
}
}
- HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
}
HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
- HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HLongConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
UNREACHABLE();
}
HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
- HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
- HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5479,7 +5492,7 @@ class HRor FINAL : public HBinaryOperation {
// The value of a parameter in this method. Its location depends on
// the calling convention.
-class HParameterValue FINAL : public HExpression<0> {
+class HParameterValue final : public HExpression<0> {
public:
HParameterValue(const DexFile& dex_file,
dex::TypeIndex type_index,
@@ -5499,7 +5512,7 @@ class HParameterValue FINAL : public HExpression<0> {
uint8_t GetIndex() const { return index_; }
bool IsThis() const { return GetPackedFlag<kFlagIsThis>(); }
- bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+ bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
DECLARE_INSTRUCTION(ParameterValue);
@@ -5522,30 +5535,30 @@ class HParameterValue FINAL : public HExpression<0> {
const uint8_t index_;
};
-class HNot FINAL : public HUnaryOperation {
+class HNot final : public HUnaryOperation {
public:
HNot(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HUnaryOperation(kNot, result_type, input, dex_pc) {
}
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
template <typename T> static T Compute(T x) { return ~x; }
- HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x) const override {
return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x) const override {
return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
- HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5556,14 +5569,14 @@ class HNot FINAL : public HUnaryOperation {
DEFAULT_COPY_CONSTRUCTOR(Not);
};
-class HBooleanNot FINAL : public HUnaryOperation {
+class HBooleanNot final : public HUnaryOperation {
public:
explicit HBooleanNot(HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HUnaryOperation(kBooleanNot, DataType::Type::kBool, input, dex_pc) {
}
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
@@ -5572,18 +5585,18 @@ class HBooleanNot FINAL : public HUnaryOperation {
return !x;
}
- HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x) const override {
return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for long values";
UNREACHABLE();
}
- HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
- HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
@@ -5594,7 +5607,7 @@ class HBooleanNot FINAL : public HUnaryOperation {
DEFAULT_COPY_CONSTRUCTOR(BooleanNot);
};
-class HTypeConversion FINAL : public HExpression<1> {
+class HTypeConversion final : public HExpression<1> {
public:
// Instantiate a type conversion of `input` to `result_type`.
HTypeConversion(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
@@ -5608,9 +5621,9 @@ class HTypeConversion FINAL : public HExpression<1> {
DataType::Type GetInputType() const { return GetInput()->GetType(); }
DataType::Type GetResultType() const { return GetType(); }
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
@@ -5626,7 +5639,7 @@ class HTypeConversion FINAL : public HExpression<1> {
static constexpr uint32_t kNoRegNumber = -1;
-class HNullCheck FINAL : public HExpression<1> {
+class HNullCheck final : public HExpression<1> {
public:
// `HNullCheck` can trigger GC, as it may call the `NullPointerException`
// constructor.
@@ -5635,17 +5648,17 @@ class HNullCheck FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
- bool CanBeNull() const OVERRIDE { return false; }
+ bool CanBeNull() const override { return false; }
DECLARE_INSTRUCTION(NullCheck);
@@ -5690,7 +5703,7 @@ class FieldInfo : public ValueObject {
const DexFile& dex_file_;
};
-class HInstanceFieldGet FINAL : public HExpression<1> {
+class HInstanceFieldGet final : public HExpression<1> {
public:
HInstanceFieldGet(HInstruction* value,
ArtField* field,
@@ -5715,19 +5728,19 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return !IsVolatile(); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
const HInstanceFieldGet* other_get = other->AsInstanceFieldGet();
return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
}
- size_t ComputeHashCode() const OVERRIDE {
+ size_t ComputeHashCode() const override {
return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
}
@@ -5752,7 +5765,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
const FieldInfo field_info_;
};
-class HInstanceFieldSet FINAL : public HExpression<2> {
+class HInstanceFieldSet final : public HExpression<2> {
public:
HInstanceFieldSet(HInstruction* object,
HInstruction* value,
@@ -5779,9 +5792,9 @@ class HInstanceFieldSet FINAL : public HExpression<2> {
SetRawInputAt(1, value);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
}
@@ -5807,7 +5820,7 @@ class HInstanceFieldSet FINAL : public HExpression<2> {
const FieldInfo field_info_;
};
-class HArrayGet FINAL : public HExpression<2> {
+class HArrayGet final : public HExpression<2> {
public:
HArrayGet(HInstruction* array,
HInstruction* index,
@@ -5833,12 +5846,12 @@ class HArrayGet FINAL : public HExpression<2> {
SetRawInputAt(1, index);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
// TODO: We can be smarter here.
// Currently, unless the array is the result of NewArray, the array access is always
// preceded by some form of a NullCheck necessary for the bounds check, usually
@@ -5898,7 +5911,7 @@ class HArrayGet FINAL : public HExpression<2> {
"Too many packed fields.");
};
-class HArraySet FINAL : public HExpression<3> {
+class HArraySet final : public HExpression<3> {
public:
HArraySet(HInstruction* array,
HInstruction* index,
@@ -5930,17 +5943,17 @@ class HArraySet FINAL : public HExpression<3> {
SetRawInputAt(2, value);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
// We call a runtime method to throw ArrayStoreException.
return NeedsTypeCheck();
}
// Can throw ArrayStoreException.
- bool CanThrow() const OVERRIDE { return NeedsTypeCheck(); }
+ bool CanThrow() const override { return NeedsTypeCheck(); }
- bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
// TODO: Same as for ArrayGet.
return false;
}
@@ -6017,7 +6030,7 @@ class HArraySet FINAL : public HExpression<3> {
BitField<DataType::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>;
};
-class HArrayLength FINAL : public HExpression<1> {
+class HArrayLength final : public HExpression<1> {
public:
HArrayLength(HInstruction* array, uint32_t dex_pc, bool is_string_length = false)
: HExpression(kArrayLength, DataType::Type::kInt32, SideEffects::None(), dex_pc) {
@@ -6027,12 +6040,12 @@ class HArrayLength FINAL : public HExpression<1> {
SetRawInputAt(0, array);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
return obj == InputAt(0);
}
@@ -6055,7 +6068,7 @@ class HArrayLength FINAL : public HExpression<1> {
"Too many packed fields.");
};
-class HBoundsCheck FINAL : public HExpression<2> {
+class HBoundsCheck final : public HExpression<2> {
public:
// `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
// constructor.
@@ -6070,15 +6083,15 @@ class HBoundsCheck FINAL : public HExpression<2> {
SetRawInputAt(1, length);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }
@@ -6093,16 +6106,16 @@ class HBoundsCheck FINAL : public HExpression<2> {
static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
};
-class HSuspendCheck FINAL : public HExpression<0> {
+class HSuspendCheck final : public HExpression<0> {
public:
explicit HSuspendCheck(uint32_t dex_pc = kNoDexPc)
: HExpression(kSuspendCheck, SideEffects::CanTriggerGC(), dex_pc),
slow_path_(nullptr) {
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
return true;
}
@@ -6128,7 +6141,7 @@ class HNativeDebugInfo : public HExpression<0> {
: HExpression<0>(kNativeDebugInfo, SideEffects::None(), dex_pc) {
}
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
return true;
}
@@ -6141,7 +6154,7 @@ class HNativeDebugInfo : public HExpression<0> {
/**
* Instruction to load a Class object.
*/
-class HLoadClass FINAL : public HInstruction {
+class HLoadClass final : public HInstruction {
public:
// Determines how to load the Class.
enum class LoadKind {
@@ -6204,7 +6217,7 @@ class HLoadClass FINAL : public HInstruction {
SetPackedFlag<kFlagValidLoadedClassRTI>(false);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
void SetLoadKind(LoadKind load_kind);
@@ -6218,15 +6231,15 @@ class HLoadClass FINAL : public HInstruction {
GetLoadKind() == LoadKind::kBssEntry;
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
bool InstructionDataEquals(const HInstruction* other) const;
- size_t ComputeHashCode() const OVERRIDE { return type_index_.index_; }
+ size_t ComputeHashCode() const override { return type_index_.index_; }
- bool CanBeNull() const OVERRIDE { return false; }
+ bool CanBeNull() const override { return false; }
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
return CanCallRuntime();
}
@@ -6244,7 +6257,7 @@ class HLoadClass FINAL : public HInstruction {
GetLoadKind() == LoadKind::kBssEntry;
}
- bool CanThrow() const OVERRIDE {
+ bool CanThrow() const override {
return NeedsAccessCheck() ||
MustGenerateClinitCheck() ||
// If the class is in the boot image, the lookup in the runtime call cannot throw.
@@ -6271,7 +6284,7 @@ class HLoadClass FINAL : public HInstruction {
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
- bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+ bool NeedsDexCacheOfDeclaringClass() const override {
return GetLoadKind() == LoadKind::kRuntimeCall;
}
@@ -6298,7 +6311,7 @@ class HLoadClass FINAL : public HInstruction {
void AddSpecialInput(HInstruction* special_input);
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
return ArrayRef<HUserRecord<HInstruction*>>(
&special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
}
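Note the slightly different rewrite above: `GetInputRecords() OVERRIDE FINAL` becomes plain `final`, dropping the redundant `override`. This is safe because `final` is only valid on a virtual member function, so the compiler still rejects a declaration that fails to override anything. A small sketch of that rule (names hypothetical):

    struct Base {
      virtual ~Base() = default;
      virtual int Get() { return 0; }
    };
    struct Derived : Base {
      int Get() final { return 1; }  // OK: implicitly virtual, overrides Base::Get.
      // int Get(int) final;         // Error: overrides nothing, so 'final' is rejected.
    };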
@@ -6379,7 +6392,7 @@ inline void HLoadClass::AddSpecialInput(HInstruction* special_input) {
special_input->AddUseAt(this, 0);
}
-class HLoadString FINAL : public HInstruction {
+class HLoadString final : public HInstruction {
public:
// Determines how to load the String.
enum class LoadKind {
@@ -6423,7 +6436,7 @@ class HLoadString FINAL : public HInstruction {
SetPackedField<LoadKindField>(LoadKind::kRuntimeCall);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
void SetLoadKind(LoadKind load_kind);
@@ -6453,15 +6466,15 @@ class HLoadString FINAL : public HInstruction {
string_ = str;
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
+ bool InstructionDataEquals(const HInstruction* other) const override;
- size_t ComputeHashCode() const OVERRIDE { return string_index_.index_; }
+ size_t ComputeHashCode() const override { return string_index_.index_; }
// Will call the runtime if we need to load the string through
// the dex cache and the string is not guaranteed to be there yet.
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
LoadKind load_kind = GetLoadKind();
if (load_kind == LoadKind::kBootImageLinkTimePcRelative ||
load_kind == LoadKind::kBootImageRelRo ||
@@ -6472,12 +6485,12 @@ class HLoadString FINAL : public HInstruction {
return true;
}
- bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+ bool NeedsDexCacheOfDeclaringClass() const override {
return GetLoadKind() == LoadKind::kRuntimeCall;
}
- bool CanBeNull() const OVERRIDE { return false; }
- bool CanThrow() const OVERRIDE { return NeedsEnvironment(); }
+ bool CanBeNull() const override { return false; }
+ bool CanThrow() const override { return NeedsEnvironment(); }
static SideEffects SideEffectsForArchRuntimeCalls() {
return SideEffects::CanTriggerGC();
@@ -6486,7 +6499,7 @@ class HLoadString FINAL : public HInstruction {
void AddSpecialInput(HInstruction* special_input);
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
return ArrayRef<HUserRecord<HInstruction*>>(
&special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
}
@@ -6548,7 +6561,7 @@ inline void HLoadString::AddSpecialInput(HInstruction* special_input) {
special_input->AddUseAt(this, 0);
}
-class HLoadMethodHandle FINAL : public HInstruction {
+class HLoadMethodHandle final : public HInstruction {
public:
HLoadMethodHandle(HCurrentMethod* current_method,
uint16_t method_handle_idx,
@@ -6564,12 +6577,12 @@ class HLoadMethodHandle FINAL : public HInstruction {
}
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
return ArrayRef<HUserRecord<HInstruction*>>(
&special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
uint16_t GetMethodHandleIndex() const { return method_handle_idx_; }
@@ -6592,7 +6605,7 @@ class HLoadMethodHandle FINAL : public HInstruction {
const DexFile& dex_file_;
};
-class HLoadMethodType FINAL : public HInstruction {
+class HLoadMethodType final : public HInstruction {
public:
HLoadMethodType(HCurrentMethod* current_method,
dex::ProtoIndex proto_index,
@@ -6608,12 +6621,12 @@ class HLoadMethodType FINAL : public HInstruction {
}
using HInstruction::GetInputRecords; // Keep the const version visible.
- ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+ ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
return ArrayRef<HUserRecord<HInstruction*>>(
&special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
dex::ProtoIndex GetProtoIndex() const { return proto_index_; }
@@ -6639,7 +6652,7 @@ class HLoadMethodType FINAL : public HInstruction {
/**
* Performs an initialization check on its Class object input.
*/
-class HClinitCheck FINAL : public HExpression<1> {
+class HClinitCheck final : public HExpression<1> {
public:
HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
: HExpression(
@@ -6650,17 +6663,17 @@ class HClinitCheck FINAL : public HExpression<1> {
SetRawInputAt(0, constant);
}
// TODO: Make ClinitCheck clonable.
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
// May call runtime to initialize the class.
return true;
}
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
HLoadClass* GetLoadClass() const {
DCHECK(InputAt(0)->IsLoadClass());
@@ -6674,7 +6687,7 @@ class HClinitCheck FINAL : public HExpression<1> {
DEFAULT_COPY_CONSTRUCTOR(ClinitCheck);
};
-class HStaticFieldGet FINAL : public HExpression<1> {
+class HStaticFieldGet final : public HExpression<1> {
public:
HStaticFieldGet(HInstruction* cls,
ArtField* field,
@@ -6700,15 +6713,15 @@ class HStaticFieldGet FINAL : public HExpression<1> {
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return !IsVolatile(); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
const HStaticFieldGet* other_get = other->AsStaticFieldGet();
return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
}
- size_t ComputeHashCode() const OVERRIDE {
+ size_t ComputeHashCode() const override {
return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
}
@@ -6733,7 +6746,7 @@ class HStaticFieldGet FINAL : public HExpression<1> {
const FieldInfo field_info_;
};
-class HStaticFieldSet FINAL : public HExpression<2> {
+class HStaticFieldSet final : public HExpression<2> {
public:
HStaticFieldSet(HInstruction* cls,
HInstruction* value,
@@ -6760,7 +6773,7 @@ class HStaticFieldSet FINAL : public HExpression<2> {
SetRawInputAt(1, value);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
const FieldInfo& GetFieldInfo() const { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
@@ -6784,7 +6797,7 @@ class HStaticFieldSet FINAL : public HExpression<2> {
const FieldInfo field_info_;
};
-class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
+class HUnresolvedInstanceFieldGet final : public HExpression<1> {
public:
HUnresolvedInstanceFieldGet(HInstruction* obj,
DataType::Type field_type,
@@ -6798,9 +6811,9 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
SetRawInputAt(0, obj);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool NeedsEnvironment() const OVERRIDE { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool NeedsEnvironment() const override { return true; }
+ bool CanThrow() const override { return true; }
DataType::Type GetFieldType() const { return GetType(); }
uint32_t GetFieldIndex() const { return field_index_; }
@@ -6814,7 +6827,7 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
const uint32_t field_index_;
};
-class HUnresolvedInstanceFieldSet FINAL : public HExpression<2> {
+class HUnresolvedInstanceFieldSet final : public HExpression<2> {
public:
HUnresolvedInstanceFieldSet(HInstruction* obj,
HInstruction* value,
@@ -6829,9 +6842,9 @@ class HUnresolvedInstanceFieldSet FINAL : public HExpression<2> {
SetRawInputAt(1, value);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool NeedsEnvironment() const OVERRIDE { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool NeedsEnvironment() const override { return true; }
+ bool CanThrow() const override { return true; }
DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
uint32_t GetFieldIndex() const { return field_index_; }
@@ -6854,7 +6867,7 @@ class HUnresolvedInstanceFieldSet FINAL : public HExpression<2> {
const uint32_t field_index_;
};
-class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
+class HUnresolvedStaticFieldGet final : public HExpression<0> {
public:
HUnresolvedStaticFieldGet(DataType::Type field_type,
uint32_t field_index,
@@ -6866,9 +6879,9 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
field_index_(field_index) {
}
- bool IsClonable() const OVERRIDE { return true; }
- bool NeedsEnvironment() const OVERRIDE { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool NeedsEnvironment() const override { return true; }
+ bool CanThrow() const override { return true; }
DataType::Type GetFieldType() const { return GetType(); }
uint32_t GetFieldIndex() const { return field_index_; }
@@ -6882,7 +6895,7 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
const uint32_t field_index_;
};
-class HUnresolvedStaticFieldSet FINAL : public HExpression<1> {
+class HUnresolvedStaticFieldSet final : public HExpression<1> {
public:
HUnresolvedStaticFieldSet(HInstruction* value,
DataType::Type field_type,
@@ -6895,9 +6908,9 @@ class HUnresolvedStaticFieldSet FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool NeedsEnvironment() const OVERRIDE { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool NeedsEnvironment() const override { return true; }
+ bool CanThrow() const override { return true; }
DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
uint32_t GetFieldIndex() const { return field_index_; }
@@ -6921,13 +6934,13 @@ class HUnresolvedStaticFieldSet FINAL : public HExpression<1> {
};
// Implement the move-exception DEX instruction.
-class HLoadException FINAL : public HExpression<0> {
+class HLoadException final : public HExpression<0> {
public:
explicit HLoadException(uint32_t dex_pc = kNoDexPc)
: HExpression(kLoadException, DataType::Type::kReference, SideEffects::None(), dex_pc) {
}
- bool CanBeNull() const OVERRIDE { return false; }
+ bool CanBeNull() const override { return false; }
DECLARE_INSTRUCTION(LoadException);
@@ -6937,7 +6950,7 @@ class HLoadException FINAL : public HExpression<0> {
// Implicit part of move-exception which clears thread-local exception storage.
// Must not be removed because the runtime expects the TLS to get cleared.
-class HClearException FINAL : public HExpression<0> {
+class HClearException final : public HExpression<0> {
public:
explicit HClearException(uint32_t dex_pc = kNoDexPc)
: HExpression(kClearException, SideEffects::AllWrites(), dex_pc) {
@@ -6949,20 +6962,20 @@ class HClearException FINAL : public HExpression<0> {
DEFAULT_COPY_CONSTRUCTOR(ClearException);
};
-class HThrow FINAL : public HExpression<1> {
+class HThrow final : public HExpression<1> {
public:
HThrow(HInstruction* exception, uint32_t dex_pc)
: HExpression(kThrow, SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, exception);
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
- bool AlwaysThrows() const OVERRIDE { return true; }
+ bool AlwaysThrows() const override { return true; }
DECLARE_INSTRUCTION(Throw);
@@ -7049,10 +7062,10 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction {
return static_cast<uint32_t>(mask->AsIntConstant()->GetValue());
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsInstanceOf() || other->IsCheckCast()) << other->DebugName();
return GetPackedFields() == down_cast<const HTypeCheckInstruction*>(other)->GetPackedFields();
}
@@ -7097,7 +7110,7 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction {
Handle<mirror::Class> klass_;
};
-class HInstanceOf FINAL : public HTypeCheckInstruction {
+class HInstanceOf final : public HTypeCheckInstruction {
public:
HInstanceOf(HInstruction* object,
HInstruction* target_class_or_null,
@@ -7119,9 +7132,9 @@ class HInstanceOf FINAL : public HTypeCheckInstruction {
bitstring_mask,
SideEffectsForArchRuntimeCalls(check_kind)) {}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
- bool NeedsEnvironment() const OVERRIDE {
+ bool NeedsEnvironment() const override {
return CanCallRuntime(GetTypeCheckKind());
}
@@ -7140,7 +7153,7 @@ class HInstanceOf FINAL : public HTypeCheckInstruction {
DEFAULT_COPY_CONSTRUCTOR(InstanceOf);
};
-class HBoundType FINAL : public HExpression<1> {
+class HBoundType final : public HExpression<1> {
public:
explicit HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HExpression(kBoundType, DataType::Type::kReference, SideEffects::None(), dex_pc),
@@ -7151,8 +7164,8 @@ class HBoundType FINAL : public HExpression<1> {
SetRawInputAt(0, input);
}
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
- bool IsClonable() const OVERRIDE { return true; }
+ bool InstructionDataEquals(const HInstruction* other) const override;
+ bool IsClonable() const override { return true; }
// {Get,Set}Upper* should only be used in reference type propagation.
const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; }
@@ -7164,7 +7177,7 @@ class HBoundType FINAL : public HExpression<1> {
SetPackedFlag<kFlagCanBeNull>(can_be_null);
}
- bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+ bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
DECLARE_INSTRUCTION(BoundType);
@@ -7188,7 +7201,7 @@ class HBoundType FINAL : public HExpression<1> {
ReferenceTypeInfo upper_bound_;
};
-class HCheckCast FINAL : public HTypeCheckInstruction {
+class HCheckCast final : public HTypeCheckInstruction {
public:
HCheckCast(HInstruction* object,
HInstruction* target_class_or_null,
@@ -7210,13 +7223,13 @@ class HCheckCast FINAL : public HTypeCheckInstruction {
bitstring_mask,
SideEffects::CanTriggerGC()) {}
- bool IsClonable() const OVERRIDE { return true; }
- bool NeedsEnvironment() const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool NeedsEnvironment() const override {
// Instruction may throw a CheckCastError.
return true;
}
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const override { return true; }
DECLARE_INSTRUCTION(CheckCast);
@@ -7250,7 +7263,7 @@ enum MemBarrierKind {
};
std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
-class HMemoryBarrier FINAL : public HExpression<0> {
+class HMemoryBarrier final : public HExpression<0> {
public:
explicit HMemoryBarrier(MemBarrierKind barrier_kind, uint32_t dex_pc = kNoDexPc)
: HExpression(kMemoryBarrier,
@@ -7259,7 +7272,7 @@ class HMemoryBarrier FINAL : public HExpression<0> {
SetPackedField<BarrierKindField>(barrier_kind);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
MemBarrierKind GetBarrierKind() { return GetPackedField<BarrierKindField>(); }
@@ -7335,7 +7348,7 @@ class HMemoryBarrier FINAL : public HExpression<0> {
// * CompilerDriver::RequiresConstructorBarrier
// * QuasiAtomic::ThreadFenceForConstructor
//
-class HConstructorFence FINAL : public HVariableInputSizeInstruction {
+class HConstructorFence final : public HVariableInputSizeInstruction {
// A fence has variable inputs because the inputs can be removed
// after prepare_for_register_allocation phase.
// (TODO: In the future a fence could freeze multiple objects
@@ -7432,7 +7445,7 @@ class HConstructorFence FINAL : public HVariableInputSizeInstruction {
DEFAULT_COPY_CONSTRUCTOR(ConstructorFence);
};
-class HMonitorOperation FINAL : public HExpression<1> {
+class HMonitorOperation final : public HExpression<1> {
public:
enum class OperationKind {
kEnter,
@@ -7449,9 +7462,9 @@ class HMonitorOperation FINAL : public HExpression<1> {
}
// Instruction may call into the runtime, so we need an environment.
- bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const override { return true; }
- bool CanThrow() const OVERRIDE {
+ bool CanThrow() const override {
// Verifier guarantees that monitor-exit cannot throw.
// This is important because it allows the HGraphBuilder to remove
// a dead throw-catch loop generated for `synchronized` blocks/methods.
@@ -7477,7 +7490,7 @@ class HMonitorOperation FINAL : public HExpression<1> {
using OperationKindField = BitField<OperationKind, kFieldOperationKind, kFieldOperationKindSize>;
};
-class HSelect FINAL : public HExpression<3> {
+class HSelect final : public HExpression<3> {
public:
HSelect(HInstruction* condition,
HInstruction* true_value,
@@ -7495,17 +7508,17 @@ class HSelect FINAL : public HExpression<3> {
SetRawInputAt(2, condition);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
HInstruction* GetFalseValue() const { return InputAt(0); }
HInstruction* GetTrueValue() const { return InputAt(1); }
HInstruction* GetCondition() const { return InputAt(2); }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool CanBeNull() const OVERRIDE {
+ bool CanBeNull() const override {
return GetTrueValue()->CanBeNull() || GetFalseValue()->CanBeNull();
}
@@ -7593,7 +7606,7 @@ std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs);
static constexpr size_t kDefaultNumberOfMoves = 4;
-class HParallelMove FINAL : public HExpression<0> {
+class HParallelMove final : public HExpression<0> {
public:
explicit HParallelMove(ArenaAllocator* allocator, uint32_t dex_pc = kNoDexPc)
: HExpression(kParallelMove, SideEffects::None(), dex_pc),
@@ -7655,7 +7668,7 @@ class HParallelMove FINAL : public HExpression<0> {
// never used across anything that can trigger GC.
// The result of this instruction is not a pointer in the sense of `DataType::Type::kReference`.
// So we represent it by the type `DataType::Type::kInt32`.
-class HIntermediateAddress FINAL : public HExpression<2> {
+class HIntermediateAddress final : public HExpression<2> {
public:
HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
: HExpression(kIntermediateAddress,
@@ -7669,12 +7682,12 @@ class HIntermediateAddress FINAL : public HExpression<2> {
SetRawInputAt(1, offset);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool IsActualObject() const OVERRIDE { return false; }
+ bool IsActualObject() const override { return false; }
HInstruction* GetBaseAddress() const { return InputAt(0); }
HInstruction* GetOffset() const { return InputAt(1); }
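The comment above describes why HIntermediateAddress exists: the invariant `base + data_offset` part of an array access is computed once and reused, and because the result is a raw address rather than a reference, it must never be live across a point that can trigger GC. A rough scalar model of the saving, purely illustrative:

    #include <cstddef>
    #include <cstdint>
    // Illustrative only: two accesses share the hoisted intermediate address.
    int32_t SumTwo(const int32_t* base, size_t data_offset, size_t i, size_t j) {
      const int32_t* addr = base + data_offset;  // the intermediate address (not a reference)
      return addr[i] + addr[j];                  // cheaper addressing on each access
    }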
@@ -7747,7 +7760,7 @@ class HGraphDelegateVisitor : public HGraphVisitor {
// Visit functions that delegate to the super class.
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- void Visit##name(H##name* instr) OVERRIDE { Visit##super(instr); }
+ void Visit##name(H##name* instr) override { Visit##super(instr); }
FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -7769,7 +7782,7 @@ class CloneAndReplaceInstructionVisitor : public HGraphDelegateVisitor {
explicit CloneAndReplaceInstructionVisitor(HGraph* graph)
: HGraphDelegateVisitor(graph), instr_replaced_by_clones_count_(0) {}
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
if (instruction->IsClonable()) {
ReplaceInstrOrPhiByClone(instruction);
instr_replaced_by_clones_count_++;
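The DECLARE_VISIT_INSTRUCTION forwarders above are what make HGraphDelegateVisitor useful: each generated Visit##name delegates to the handler for its super kind, so a pass can intercept a whole family of instructions by overriding one method. Hand-expanded for a single kind, with stand-in types (illustrative, not the real nodes.h declarations):

    struct HBinaryOperation { virtual ~HBinaryOperation() = default; };
    struct HAdd : HBinaryOperation {};
    struct DelegateVisitor {
      virtual ~DelegateVisitor() = default;
      virtual void VisitBinaryOperation(HBinaryOperation*) {}
      // Expansion of DECLARE_VISIT_INSTRUCTION(Add, BinaryOperation):
      void VisitAdd(HAdd* instr) { VisitBinaryOperation(instr); }
    };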
diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h
index 05b27a7810..4993f5737e 100644
--- a/compiler/optimizing/nodes_mips.h
+++ b/compiler/optimizing/nodes_mips.h
@@ -30,7 +30,7 @@ class HMipsComputeBaseMethodAddress : public HExpression<0> {
kNoDexPc) {
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(MipsComputeBaseMethodAddress);
@@ -39,7 +39,7 @@ class HMipsComputeBaseMethodAddress : public HExpression<0> {
};
// Mips version of HPackedSwitch that holds a pointer to the base method address.
-class HMipsPackedSwitch FINAL : public HExpression<2> {
+class HMipsPackedSwitch final : public HExpression<2> {
public:
HMipsPackedSwitch(int32_t start_value,
int32_t num_entries,
@@ -53,7 +53,7 @@ class HMipsPackedSwitch FINAL : public HExpression<2> {
SetRawInputAt(1, method_base);
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
int32_t GetStartValue() const { return start_value_; }
@@ -91,7 +91,7 @@ class HMipsPackedSwitch FINAL : public HExpression<2> {
//
// Note: as the instruction doesn't involve the base array address in its computations, it has
// no side effects.
-class HIntermediateArrayAddressIndex FINAL : public HExpression<2> {
+class HIntermediateArrayAddressIndex final : public HExpression<2> {
public:
HIntermediateArrayAddressIndex(HInstruction* index, HInstruction* shift, uint32_t dex_pc)
: HExpression(kIntermediateArrayAddressIndex,
@@ -102,11 +102,11 @@ class HIntermediateArrayAddressIndex FINAL : public HExpression<2> {
SetRawInputAt(1, shift);
}
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool IsActualObject() const OVERRIDE { return false; }
+ bool IsActualObject() const override { return false; }
HInstruction* GetIndex() const { return InputAt(0); }
HInstruction* GetShift() const { return InputAt(1); }
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index 29358e1141..7dcac1787e 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -24,7 +24,7 @@
namespace art {
-class HMultiplyAccumulate FINAL : public HExpression<3> {
+class HMultiplyAccumulate final : public HExpression<3> {
public:
HMultiplyAccumulate(DataType::Type type,
InstructionKind op,
@@ -39,14 +39,14 @@ class HMultiplyAccumulate FINAL : public HExpression<3> {
SetRawInputAt(kInputMulRightIndex, mul_right);
}
- bool IsClonable() const OVERRIDE { return true; }
+ bool IsClonable() const override { return true; }
static constexpr int kInputAccumulatorIndex = 0;
static constexpr int kInputMulLeftIndex = 1;
static constexpr int kInputMulRightIndex = 2;
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other) const override {
return op_kind_ == other->AsMultiplyAccumulate()->op_kind_;
}
@@ -62,7 +62,7 @@ class HMultiplyAccumulate FINAL : public HExpression<3> {
const InstructionKind op_kind_;
};
-class HBitwiseNegatedRight FINAL : public HBinaryOperation {
+class HBitwiseNegatedRight final : public HBinaryOperation {
public:
HBitwiseNegatedRight(DataType::Type result_type,
InstructionKind op,
@@ -97,21 +97,21 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation {
}
}
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
- HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for float values";
UNREACHABLE();
}
HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
- HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
LOG(FATAL) << DebugName() << " is not defined for double values";
UNREACHABLE();
}
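HBitwiseNegatedRight computes `x op ~y`, the form ARM exposes directly as BIC (and-not), ORN (or-not), and EON (xor-not). A scalar model of the and-not case with worked bits, purely illustrative:

    #include <cstdint>
    // One lane of HBitwiseNegatedRight with an AND kind: x & ~y (ARM: BIC).
    uint32_t AndNot(uint32_t x, uint32_t y) { return x & ~y; }
    // Low four bits shown: x = 0b1100, y = 0b1010, so ~y ends in 0101
    // and x & ~y = 0b0100.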
@@ -145,7 +145,7 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation {
//
// Note: as the instruction doesn't involve the base array address in its computations, it has
// no side effects (in contrast to HIntermediateAddress).
-class HIntermediateAddressIndex FINAL : public HExpression<3> {
+class HIntermediateAddressIndex final : public HExpression<3> {
public:
HIntermediateAddressIndex(
HInstruction* index, HInstruction* offset, HInstruction* shift, uint32_t dex_pc)
@@ -158,12 +158,12 @@ class HIntermediateAddressIndex FINAL : public HExpression<3> {
SetRawInputAt(2, shift);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
- bool IsActualObject() const OVERRIDE { return false; }
+ bool IsActualObject() const override { return false; }
HInstruction* GetIndex() const { return InputAt(0); }
HInstruction* GetOffset() const { return InputAt(1); }
@@ -175,7 +175,7 @@ class HIntermediateAddressIndex FINAL : public HExpression<3> {
DEFAULT_COPY_CONSTRUCTOR(IntermediateAddressIndex);
};
-class HDataProcWithShifterOp FINAL : public HExpression<2> {
+class HDataProcWithShifterOp final : public HExpression<2> {
public:
enum OpKind {
kLSL, // Logical shift left.
@@ -212,9 +212,9 @@ class HDataProcWithShifterOp FINAL : public HExpression<2> {
SetRawInputAt(1, right);
}
- bool IsClonable() const OVERRIDE { return true; }
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other_instr) const OVERRIDE {
+ bool IsClonable() const override { return true; }
+ bool CanBeMoved() const override { return true; }
+ bool InstructionDataEquals(const HInstruction* other_instr) const override {
const HDataProcWithShifterOp* other = other_instr->AsDataProcWithShifterOp();
return instr_kind_ == other->instr_kind_ &&
op_kind_ == other->op_kind_ &&
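HDataProcWithShifterOp folds a shift or extend into the instruction that consumes it, which is why InstructionDataEquals above must compare the instruction kind, the op kind, and the shift amount. What the fold buys, sketched in scalar code (illustrative):

    #include <cstdint>
    // HShl feeding HAdd in the IR; after merging this is roughly one
    // arm64 instruction: add w0, w1, w2, LSL #2.
    int32_t AddShifted(int32_t a, int32_t b) {
      return a + (b << 2);
    }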
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 95fb5ab76a..c7539f2846 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -117,12 +117,12 @@ class HVecOperation : public HVariableInputSizeInstruction {
// Note: For newly introduced vector instructions HScheduler${ARCH}::IsSchedulingBarrier must be
// altered to return true if the instruction might reside outside the SIMD loop body since SIMD
// registers are not kept alive across vector loop boundaries (yet).
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
// Tests if all data of a vector node (vector length and packed type) is equal.
// Each concrete implementation that adds more fields should test equality of
// those fields in its own method *and* call all super methods.
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecOperation());
const HVecOperation* o = other->AsVecOperation();
return GetVectorLength() == o->GetVectorLength() && GetPackedType() == o->GetPackedType();
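The contract stated above composes across the class hierarchy: each subclass compares only its own fields and chains to its super method. A hypothetical subclass sketch (HVecFancyOp, AsVecFancyOp, and fancy_field_ are made up for illustration):

    bool HVecFancyOp::InstructionDataEquals(const HInstruction* other) const {
      const HVecFancyOp* o = other->AsVecFancyOp();      // hypothetical accessor
      return HVecOperation::InstructionDataEquals(o) &&  // super: length and packed type
             fancy_field_ == o->fancy_field_;            // this class's own field
    }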
@@ -280,7 +280,7 @@ class HVecMemoryOperation : public HVecOperation {
HInstruction* GetArray() const { return InputAt(0); }
HInstruction* GetIndex() const { return InputAt(1); }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecMemoryOperation());
const HVecMemoryOperation* o = other->AsVecMemoryOperation();
return HVecOperation::InstructionDataEquals(o) && GetAlignment() == o->GetAlignment();
@@ -315,7 +315,7 @@ inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type
// Replicates the given scalar into a vector,
// viz. replicate(x) = [ x, .. , x ].
-class HVecReplicateScalar FINAL : public HVecUnaryOperation {
+class HVecReplicateScalar final : public HVecUnaryOperation {
public:
HVecReplicateScalar(ArenaAllocator* allocator,
HInstruction* scalar,
@@ -329,7 +329,7 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation {
// A replicate needs to stay in place, since SIMD registers are not
// kept alive across vector loop boundaries (yet).
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
DECLARE_INSTRUCTION(VecReplicateScalar);
@@ -341,7 +341,7 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation {
// viz. extract[ x1, .. , xn ] = x_i.
//
// TODO: for now only i == 1 case supported.
-class HVecExtractScalar FINAL : public HVecUnaryOperation {
+class HVecExtractScalar final : public HVecUnaryOperation {
public:
HVecExtractScalar(ArenaAllocator* allocator,
HInstruction* input,
@@ -361,7 +361,7 @@ class HVecExtractScalar FINAL : public HVecUnaryOperation {
// An extract needs to stay in place, since SIMD registers are not
// kept alive across vector loop boundaries (yet).
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
DECLARE_INSTRUCTION(VecExtractScalar);
@@ -372,7 +372,7 @@ class HVecExtractScalar FINAL : public HVecUnaryOperation {
// Reduces the given vector into the first element as sum/min/max,
// viz. sum-reduce[ x1, .. , xn ] = [ y, ---- ], where y = sum xi
// and the "-" denotes "don't care" (implementation dependent).
-class HVecReduce FINAL : public HVecUnaryOperation {
+class HVecReduce final : public HVecUnaryOperation {
public:
enum ReductionKind {
kSum = 1,
@@ -393,9 +393,9 @@ class HVecReduce FINAL : public HVecUnaryOperation {
ReductionKind GetKind() const { return kind_; }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecReduce());
const HVecReduce* o = other->AsVecReduce();
return HVecOperation::InstructionDataEquals(o) && GetKind() == o->GetKind();
@@ -412,7 +412,7 @@ class HVecReduce FINAL : public HVecUnaryOperation {
// Converts every component in the vector,
// viz. cnv[ x1, .. , xn ] = [ cnv(x1), .. , cnv(xn) ].
-class HVecCnv FINAL : public HVecUnaryOperation {
+class HVecCnv final : public HVecUnaryOperation {
public:
HVecCnv(ArenaAllocator* allocator,
HInstruction* input,
@@ -427,7 +427,7 @@ class HVecCnv FINAL : public HVecUnaryOperation {
DataType::Type GetInputType() const { return InputAt(0)->AsVecOperation()->GetPackedType(); }
DataType::Type GetResultType() const { return GetPackedType(); }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecCnv);
@@ -437,7 +437,7 @@ class HVecCnv FINAL : public HVecUnaryOperation {
// Negates every component in the vector,
// viz. neg[ x1, .. , xn ] = [ -x1, .. , -xn ].
-class HVecNeg FINAL : public HVecUnaryOperation {
+class HVecNeg final : public HVecUnaryOperation {
public:
HVecNeg(ArenaAllocator* allocator,
HInstruction* input,
@@ -448,7 +448,7 @@ class HVecNeg FINAL : public HVecUnaryOperation {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecNeg);
@@ -459,7 +459,7 @@ class HVecNeg FINAL : public HVecUnaryOperation {
// Takes absolute value of every component in the vector,
// viz. abs[ x1, .. , xn ] = [ |x1|, .. , |xn| ]
// for signed operand x.
-class HVecAbs FINAL : public HVecUnaryOperation {
+class HVecAbs final : public HVecUnaryOperation {
public:
HVecAbs(ArenaAllocator* allocator,
HInstruction* input,
@@ -470,7 +470,7 @@ class HVecAbs FINAL : public HVecUnaryOperation {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecAbs);
@@ -481,7 +481,7 @@ class HVecAbs FINAL : public HVecUnaryOperation {
// Bitwise- or boolean-nots every component in the vector,
// viz. not[ x1, .. , xn ] = [ ~x1, .. , ~xn ], or
// not[ x1, .. , xn ] = [ !x1, .. , !xn ] for boolean.
-class HVecNot FINAL : public HVecUnaryOperation {
+class HVecNot final : public HVecUnaryOperation {
public:
HVecNot(ArenaAllocator* allocator,
HInstruction* input,
@@ -492,7 +492,7 @@ class HVecNot FINAL : public HVecUnaryOperation {
DCHECK(input->IsVecOperation());
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecNot);
@@ -506,7 +506,7 @@ class HVecNot FINAL : public HVecUnaryOperation {
// Adds every component in the two vectors,
// viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 + y1, .. , xn + yn ].
-class HVecAdd FINAL : public HVecBinaryOperation {
+class HVecAdd final : public HVecBinaryOperation {
public:
HVecAdd(ArenaAllocator* allocator,
HInstruction* left,
@@ -519,7 +519,7 @@ class HVecAdd FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecAdd);
@@ -530,7 +530,7 @@ class HVecAdd FINAL : public HVecBinaryOperation {
// Adds every component in the two vectors using saturation arithmetic,
// viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 +_sat y1, .. , xn +_sat yn ]
// for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecSaturationAdd FINAL : public HVecBinaryOperation {
+class HVecSaturationAdd final : public HVecBinaryOperation {
public:
HVecSaturationAdd(ArenaAllocator* allocator,
HInstruction* left,
@@ -544,7 +544,7 @@ class HVecSaturationAdd FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecSaturationAdd);
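Saturating lanes clamp to the representable range instead of wrapping. A scalar model of one signed-byte lane, with a worked value (illustrative):

    #include <algorithm>
    #include <cstdint>
    // One int8 lane of a saturating add.
    int8_t SatAdd8(int8_t x, int8_t y) {
      int32_t wide = int32_t{x} + int32_t{y};  // widen so the sum cannot overflow
      return static_cast<int8_t>(std::clamp<int32_t>(wide, -128, 127));
    }
    // SatAdd8(120, 20) == 127; a plain wrapping add would yield -116.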
@@ -556,7 +556,7 @@ class HVecSaturationAdd FINAL : public HVecBinaryOperation {
// rounded [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ]
// truncated [ x1, .. , xn ] hadd [ y1, .. , yn ] = [ (x1 + y1) >> 1, .. , (xn + yn ) >> 1 ]
// for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecHalvingAdd FINAL : public HVecBinaryOperation {
+class HVecHalvingAdd final : public HVecBinaryOperation {
public:
HVecHalvingAdd(ArenaAllocator* allocator,
HInstruction* left,
@@ -574,9 +574,9 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecHalvingAdd());
const HVecHalvingAdd* o = other->AsVecHalvingAdd();
return HVecOperation::InstructionDataEquals(o) && IsRounded() == o->IsRounded();
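Worked numbers for the two flavors with x = 5, y = 6: truncated gives (5 + 6) >> 1 = 5, while rounded gives (5 + 6 + 1) >> 1 = 6. That one-bit difference is exactly why IsRounded() participates in InstructionDataEquals above.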
@@ -596,7 +596,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
// Subtracts every component in the two vectors,
// viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 - y1, .. , xn - yn ].
-class HVecSub FINAL : public HVecBinaryOperation {
+class HVecSub final : public HVecBinaryOperation {
public:
HVecSub(ArenaAllocator* allocator,
HInstruction* left,
@@ -609,7 +609,7 @@ class HVecSub FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecSub);
@@ -620,7 +620,7 @@ class HVecSub FINAL : public HVecBinaryOperation {
// Subtracts every component in the two vectors using saturation arithmetic,
// viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 -_sat y1, .. , xn -_sat yn ]
// for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecSaturationSub FINAL : public HVecBinaryOperation {
+class HVecSaturationSub final : public HVecBinaryOperation {
public:
HVecSaturationSub(ArenaAllocator* allocator,
HInstruction* left,
@@ -634,7 +634,7 @@ class HVecSaturationSub FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecSaturationSub);
@@ -644,7 +644,7 @@ class HVecSaturationSub FINAL : public HVecBinaryOperation {
// Multiplies every component in the two vectors,
// viz. [ x1, .. , xn ] * [ y1, .. , yn ] = [ x1 * y1, .. , xn * yn ].
-class HVecMul FINAL : public HVecBinaryOperation {
+class HVecMul final : public HVecBinaryOperation {
public:
HVecMul(ArenaAllocator* allocator,
HInstruction* left,
@@ -657,7 +657,7 @@ class HVecMul FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecMul);
@@ -667,7 +667,7 @@ class HVecMul FINAL : public HVecBinaryOperation {
// Divides every component in the two vectors,
// viz. [ x1, .. , xn ] / [ y1, .. , yn ] = [ x1 / y1, .. , xn / yn ].
-class HVecDiv FINAL : public HVecBinaryOperation {
+class HVecDiv final : public HVecBinaryOperation {
public:
HVecDiv(ArenaAllocator* allocator,
HInstruction* left,
@@ -680,7 +680,7 @@ class HVecDiv FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecDiv);
@@ -691,7 +691,7 @@ class HVecDiv FINAL : public HVecBinaryOperation {
// Takes minimum of every component in the two vectors,
// viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ]
// for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecMin FINAL : public HVecBinaryOperation {
+class HVecMin final : public HVecBinaryOperation {
public:
HVecMin(ArenaAllocator* allocator,
HInstruction* left,
@@ -704,7 +704,7 @@ class HVecMin FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecMin);
@@ -715,7 +715,7 @@ class HVecMin FINAL : public HVecBinaryOperation {
// Takes maximum of every component in the two vectors,
// viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ]
// for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecMax FINAL : public HVecBinaryOperation {
+class HVecMax final : public HVecBinaryOperation {
public:
HVecMax(ArenaAllocator* allocator,
HInstruction* left,
@@ -728,7 +728,7 @@ class HVecMax FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecMax);
@@ -738,7 +738,7 @@ class HVecMax FINAL : public HVecBinaryOperation {
// Bitwise-ands every component in the two vectors,
// viz. [ x1, .. , xn ] & [ y1, .. , yn ] = [ x1 & y1, .. , xn & yn ].
-class HVecAnd FINAL : public HVecBinaryOperation {
+class HVecAnd final : public HVecBinaryOperation {
public:
HVecAnd(ArenaAllocator* allocator,
HInstruction* left,
@@ -750,7 +750,7 @@ class HVecAnd FINAL : public HVecBinaryOperation {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecAnd);
@@ -760,7 +760,7 @@ class HVecAnd FINAL : public HVecBinaryOperation {
// Bitwise-and-nots every component in the two vectors,
// viz. [ x1, .. , xn ] and-not [ y1, .. , yn ] = [ ~x1 & y1, .. , ~xn & yn ].
-class HVecAndNot FINAL : public HVecBinaryOperation {
+class HVecAndNot final : public HVecBinaryOperation {
public:
HVecAndNot(ArenaAllocator* allocator,
HInstruction* left,
@@ -773,7 +773,7 @@ class HVecAndNot FINAL : public HVecBinaryOperation {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecAndNot);
@@ -783,7 +783,7 @@ class HVecAndNot FINAL : public HVecBinaryOperation {
// Bitwise-ors every component in the two vectors,
// viz. [ x1, .. , xn ] | [ y1, .. , yn ] = [ x1 | y1, .. , xn | yn ].
-class HVecOr FINAL : public HVecBinaryOperation {
+class HVecOr final : public HVecBinaryOperation {
public:
HVecOr(ArenaAllocator* allocator,
HInstruction* left,
@@ -795,7 +795,7 @@ class HVecOr FINAL : public HVecBinaryOperation {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecOr);
@@ -805,7 +805,7 @@ class HVecOr FINAL : public HVecBinaryOperation {
// Bitwise-xors every component in the two vectors,
// viz. [ x1, .. , xn ] ^ [ y1, .. , yn ] = [ x1 ^ y1, .. , xn ^ yn ].
-class HVecXor FINAL : public HVecBinaryOperation {
+class HVecXor final : public HVecBinaryOperation {
public:
HVecXor(ArenaAllocator* allocator,
HInstruction* left,
@@ -817,7 +817,7 @@ class HVecXor FINAL : public HVecBinaryOperation {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecXor);
@@ -827,7 +827,7 @@ class HVecXor FINAL : public HVecBinaryOperation {
// Logically shifts every component in the vector left by the given distance,
// viz. [ x1, .. , xn ] << d = [ x1 << d, .. , xn << d ].
-class HVecShl FINAL : public HVecBinaryOperation {
+class HVecShl final : public HVecBinaryOperation {
public:
HVecShl(ArenaAllocator* allocator,
HInstruction* left,
@@ -839,7 +839,7 @@ class HVecShl FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecShl);
@@ -849,7 +849,7 @@ class HVecShl FINAL : public HVecBinaryOperation {
// Arithmetically shifts every component in the vector right by the given distance,
// viz. [ x1, .. , xn ] >> d = [ x1 >> d, .. , xn >> d ].
-class HVecShr FINAL : public HVecBinaryOperation {
+class HVecShr final : public HVecBinaryOperation {
public:
HVecShr(ArenaAllocator* allocator,
HInstruction* left,
@@ -861,7 +861,7 @@ class HVecShr FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecShr);
@@ -871,7 +871,7 @@ class HVecShr FINAL : public HVecBinaryOperation {
// Logically shifts every component in the vector right by the given distance,
// viz. [ x1, .. , xn ] >>> d = [ x1 >>> d, .. , xn >>> d ].
-class HVecUShr FINAL : public HVecBinaryOperation {
+class HVecUShr final : public HVecBinaryOperation {
public:
HVecUShr(ArenaAllocator* allocator,
HInstruction* left,
@@ -883,7 +883,7 @@ class HVecUShr FINAL : public HVecBinaryOperation {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(VecUShr);
@@ -898,7 +898,7 @@ class HVecUShr FINAL : public HVecBinaryOperation {
// Assigns the given scalar elements to a vector,
// viz. set( array(x1, .. , xn) ) = [ x1, .. , xn ] if n == m,
// set( array(x1, .. , xm) ) = [ x1, .. , xm, 0, .. , 0 ] if m < n.
-class HVecSetScalars FINAL : public HVecOperation {
+class HVecSetScalars final : public HVecOperation {
public:
HVecSetScalars(ArenaAllocator* allocator,
HInstruction* scalars[],
@@ -921,7 +921,7 @@ class HVecSetScalars FINAL : public HVecOperation {
// Setting scalars needs to stay in place, since SIMD registers are not
// kept alive across vector loop boundaries (yet).
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
DECLARE_INSTRUCTION(VecSetScalars);
@@ -934,7 +934,7 @@ class HVecSetScalars FINAL : public HVecOperation {
// For floating point types, Java rounding behavior must be preserved; the products are rounded to
// the proper precision before being added. "Fused" multiply-add operations available on several
// architectures are not usable since they would violate Java language rules.
-class HVecMultiplyAccumulate FINAL : public HVecOperation {
+class HVecMultiplyAccumulate final : public HVecOperation {
public:
HVecMultiplyAccumulate(ArenaAllocator* allocator,
InstructionKind op,
@@ -964,9 +964,9 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation {
SetRawInputAt(2, mul_right);
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecMultiplyAccumulate());
const HVecMultiplyAccumulate* o = other->AsVecMultiplyAccumulate();
return HVecOperation::InstructionDataEquals(o) && GetOpKind() == o->GetOpKind();
@@ -989,7 +989,7 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation {
// viz. SAD([ a1, .. , am ], [ x1, .. , xn ], [ y1, .. , yn ]) =
// [ a1 + sum abs(xi-yi), .. , am + sum abs(xj-yj) ],
// for m <= n, non-overlapping sums, and signed operands x, y.
-class HVecSADAccumulate FINAL : public HVecOperation {
+class HVecSADAccumulate final : public HVecOperation {
public:
HVecSADAccumulate(ArenaAllocator* allocator,
HInstruction* accumulator,
@@ -1023,7 +1023,7 @@ class HVecSADAccumulate FINAL : public HVecOperation {
// Loads a vector from memory, viz. load(mem, 1)
// yields the vector [ mem(1), .. , mem(n) ].
-class HVecLoad FINAL : public HVecMemoryOperation {
+class HVecLoad final : public HVecMemoryOperation {
public:
HVecLoad(ArenaAllocator* allocator,
HInstruction* base,
@@ -1047,9 +1047,9 @@ class HVecLoad FINAL : public HVecMemoryOperation {
bool IsStringCharAt() const { return GetPackedFlag<kFieldIsStringCharAt>(); }
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+ bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecLoad());
const HVecLoad* o = other->AsVecLoad();
return HVecMemoryOperation::InstructionDataEquals(o) && IsStringCharAt() == o->IsStringCharAt();
@@ -1069,7 +1069,7 @@ class HVecLoad FINAL : public HVecMemoryOperation {
// Stores a vector to memory, viz. store(mem, 1, [ x1, .. , xn ])
// sets mem(1) = x1, .. , mem(n) = xn.
-class HVecStore FINAL : public HVecMemoryOperation {
+class HVecStore final : public HVecMemoryOperation {
public:
HVecStore(ArenaAllocator* allocator,
HInstruction* base,
@@ -1093,7 +1093,7 @@ class HVecStore FINAL : public HVecMemoryOperation {
}
// A store needs to stay in place.
- bool CanBeMoved() const OVERRIDE { return false; }
+ bool CanBeMoved() const override { return false; }
DECLARE_INSTRUCTION(VecStore);
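
The remark above about HVecMultiplyAccumulate is the subtle one: Java requires the multiply and the add to be rounded separately, whereas a fused multiply-add rounds only once, so emitting an FMA instruction could change observable results. A minimal standalone C++ sketch of the difference (the constants are chosen purely for illustration and have nothing to do with ART):

#include <cmath>
#include <cstdio>

int main() {
  float a = 1.0f + 0x1p-23f;  // 1 + ulp(1)
  float b = 1.0f - 0x1p-23f;  // 1 - ulp(1)
  float c = -1.0f;
  volatile float product = a * b;           // rounds 1 - 2^-46 up to exactly 1.0f
  float two_roundings = product + c;        // Java semantics: 0.0f
  float one_rounding = std::fmaf(a, b, c);  // fused: exact result, -2^-46
  std::printf("separate roundings: %g\n", two_roundings);
  std::printf("fused multiply-add: %g\n", one_rounding);
}

The volatile store keeps the compiler from contracting the two statements into an FMA on its own, which is exactly the contraction the comment above forbids for vectorized Java code.
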
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index d1e7f68edb..a55110426b 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -20,7 +20,7 @@
namespace art {
// Compute the address of the method for X86 constant area support.
-class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> {
+class HX86ComputeBaseMethodAddress final : public HExpression<0> {
public:
// Treat the value as an int32_t, but it is really a 32 bit native pointer.
HX86ComputeBaseMethodAddress()
@@ -30,7 +30,7 @@ class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> {
kNoDexPc) {
}
- bool CanBeMoved() const OVERRIDE { return true; }
+ bool CanBeMoved() const override { return true; }
DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress);
@@ -39,7 +39,7 @@ class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> {
};
// Load a constant value from the constant table.
-class HX86LoadFromConstantTable FINAL : public HExpression<2> {
+class HX86LoadFromConstantTable final : public HExpression<2> {
public:
HX86LoadFromConstantTable(HX86ComputeBaseMethodAddress* method_base,
HConstant* constant)
@@ -66,7 +66,7 @@ class HX86LoadFromConstantTable FINAL : public HExpression<2> {
};
// Version of HNeg with access to the constant table for FP types.
-class HX86FPNeg FINAL : public HExpression<2> {
+class HX86FPNeg final : public HExpression<2> {
public:
HX86FPNeg(DataType::Type result_type,
HInstruction* input,
@@ -89,7 +89,7 @@ class HX86FPNeg FINAL : public HExpression<2> {
};
// X86 version of HPackedSwitch that holds a pointer to the base method address.
-class HX86PackedSwitch FINAL : public HExpression<2> {
+class HX86PackedSwitch final : public HExpression<2> {
public:
HX86PackedSwitch(int32_t start_value,
int32_t num_entries,
@@ -103,7 +103,7 @@ class HX86PackedSwitch FINAL : public HExpression<2> {
SetRawInputAt(1, method_base);
}
- bool IsControlFlow() const OVERRIDE { return true; }
+ bool IsControlFlow() const override { return true; }
int32_t GetStartValue() const { return start_value_; }
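
For context on these nodes: 32-bit x86 has no PC-relative addressing mode, so HX86ComputeBaseMethodAddress materializes a runtime base address once per method, and HX86LoadFromConstantTable, HX86FPNeg and HX86PackedSwitch then address their data at fixed offsets from that base. A rough sketch of just the base-plus-offset idea, with a made-up constant table rather than ART's real layout:

#include <cstddef>
#include <cstdio>
#include <cstring>

int main() {
  // Stand-in for the constant area emitted alongside the method's code.
  static const double kConstantArea[] = {3.25, -0.5, 100.0};
  // Stand-in for the base address produced by HX86ComputeBaseMethodAddress.
  const unsigned char* base = reinterpret_cast<const unsigned char*>(kConstantArea);
  // Each constant load then lowers to a single [base + offset] memory operand.
  std::ptrdiff_t offset = 1 * sizeof(double);
  double value;
  std::memcpy(&value, base + offset, sizeof(value));
  std::printf("constant at offset %td: %g\n", offset, value);  // -0.5
}
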
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 04301f5366..be1f7ea5b4 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -133,7 +133,7 @@ class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
return memory_.data();
}
- ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
+ ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
private:
std::vector<uint8_t> memory_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index c40cbcf52a..0a747053cf 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -74,7 +74,7 @@ static constexpr const char* kPassNameSeparator = "$";
/**
* Used by the code generator to allocate the code in a vector.
*/
-class CodeVectorAllocator FINAL : public CodeAllocator {
+class CodeVectorAllocator final : public CodeAllocator {
public:
explicit CodeVectorAllocator(ArenaAllocator* allocator)
: memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
@@ -84,7 +84,7 @@ class CodeVectorAllocator FINAL : public CodeAllocator {
return &memory_[0];
}
- ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
+ ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
uint8_t* GetData() { return memory_.data(); }
private:
@@ -264,12 +264,12 @@ class PassScope : public ValueObject {
PassObserver* const pass_observer_;
};
-class OptimizingCompiler FINAL : public Compiler {
+class OptimizingCompiler final : public Compiler {
public:
explicit OptimizingCompiler(CompilerDriver* driver);
- ~OptimizingCompiler() OVERRIDE;
+ ~OptimizingCompiler() override;
- bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const OVERRIDE;
+ bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;
CompiledMethod* Compile(const DexFile::CodeItem* code_item,
uint32_t access_flags,
@@ -278,29 +278,29 @@ class OptimizingCompiler FINAL : public Compiler {
uint32_t method_idx,
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache) const OVERRIDE;
+ Handle<mirror::DexCache> dex_cache) const override;
CompiledMethod* JniCompile(uint32_t access_flags,
uint32_t method_idx,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache) const OVERRIDE;
+ Handle<mirror::DexCache> dex_cache) const override;
- uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
+ uintptr_t GetEntryPointOf(ArtMethod* method) const override
REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
InstructionSetPointerSize(GetCompilerDriver()->GetCompilerOptions().GetInstructionSet())));
}
- void Init() OVERRIDE;
+ void Init() override;
- void UnInit() const OVERRIDE;
+ void UnInit() const override;
bool JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
bool osr,
jit::JitLogger* jit_logger)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -570,7 +570,7 @@ static void AllocateRegisters(HGraph* graph,
{
PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
pass_observer);
- PrepareForRegisterAllocation(graph, stats).Run();
+ PrepareForRegisterAllocation(graph, codegen->GetCompilerOptions(), stats).Run();
}
// Use local allocator shared by SSA liveness analysis and register allocator.
// (Register allocator creates new objects in the liveness data.)
diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h
index e6e069f96e..5fadcab402 100644
--- a/compiler/optimizing/parallel_move_resolver.h
+++ b/compiler/optimizing/parallel_move_resolver.h
@@ -58,7 +58,7 @@ class ParallelMoveResolverWithSwap : public ParallelMoveResolver {
virtual ~ParallelMoveResolverWithSwap() {}
// Resolve a set of parallel moves, emitting assembler instructions.
- void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;
+ void EmitNativeCode(HParallelMove* parallel_move) override;
protected:
class ScratchRegisterScope : public ValueObject {
@@ -133,7 +133,7 @@ class ParallelMoveResolverNoSwap : public ParallelMoveResolver {
virtual ~ParallelMoveResolverNoSwap() {}
// Resolve a set of parallel moves, emitting assembler instructions.
- void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;
+ void EmitNativeCode(HParallelMove* parallel_move) override;
protected:
// Called at the beginning of EmitNativeCode(). A subclass may put some architecture dependent
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index be35201166..399a6d8cbd 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -56,7 +56,7 @@ class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap {
explicit TestParallelMoveResolverWithSwap(ArenaAllocator* allocator)
: ParallelMoveResolverWithSwap(allocator) {}
- void EmitMove(size_t index) OVERRIDE {
+ void EmitMove(size_t index) override {
MoveOperands* move = moves_[index];
if (!message_.str().empty()) {
message_ << " ";
@@ -68,7 +68,7 @@ class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap {
message_ << ")";
}
- void EmitSwap(size_t index) OVERRIDE {
+ void EmitSwap(size_t index) override {
MoveOperands* move = moves_[index];
if (!message_.str().empty()) {
message_ << " ";
@@ -80,8 +80,8 @@ class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap {
message_ << ")";
}
- void SpillScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
- void RestoreScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
+ void SpillScratch(int reg ATTRIBUTE_UNUSED) override {}
+ void RestoreScratch(int reg ATTRIBUTE_UNUSED) override {}
std::string GetMessage() const {
return message_.str();
@@ -99,13 +99,13 @@ class TestParallelMoveResolverNoSwap : public ParallelMoveResolverNoSwap {
explicit TestParallelMoveResolverNoSwap(ArenaAllocator* allocator)
: ParallelMoveResolverNoSwap(allocator), scratch_index_(kScratchRegisterStartIndexForTest) {}
- void PrepareForEmitNativeCode() OVERRIDE {
+ void PrepareForEmitNativeCode() override {
scratch_index_ = kScratchRegisterStartIndexForTest;
}
- void FinishEmitNativeCode() OVERRIDE {}
+ void FinishEmitNativeCode() override {}
- Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE {
+ Location AllocateScratchLocationFor(Location::Kind kind) override {
if (kind == Location::kStackSlot || kind == Location::kFpuRegister ||
kind == Location::kRegister) {
kind = Location::kRegister;
@@ -125,9 +125,9 @@ class TestParallelMoveResolverNoSwap : public ParallelMoveResolverNoSwap {
return scratch;
}
- void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) OVERRIDE {}
+ void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) override {}
- void EmitMove(size_t index) OVERRIDE {
+ void EmitMove(size_t index) override {
MoveOperands* move = moves_[index];
if (!message_.str().empty()) {
message_ << " ";
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index a7e97a1ce5..05208ff65c 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -58,7 +58,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
DCHECK(base_ != nullptr);
}
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
// If this is an invoke with PC-relative load kind,
// we need to add the base as the special input.
if (invoke->HasPcRelativeMethodLoadKind() &&
@@ -70,7 +70,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+ void VisitLoadClass(HLoadClass* load_class) override {
HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
switch (load_kind) {
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
@@ -86,7 +86,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- void VisitLoadString(HLoadString* load_string) OVERRIDE {
+ void VisitLoadString(HLoadString* load_string) override {
HLoadString::LoadKind load_kind = load_string->GetLoadKind();
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
@@ -102,7 +102,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+ void VisitPackedSwitch(HPackedSwitch* switch_insn) override {
if (switch_insn->GetNumEntries() <=
InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) {
return;
diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h
index 6dd1ee0db2..872370bcb7 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.h
+++ b/compiler/optimizing/pc_relative_fixups_mips.h
@@ -34,7 +34,7 @@ class PcRelativeFixups : public HOptimization {
static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips";
- bool Run() OVERRIDE;
+ bool Run() override;
private:
CodeGenerator* codegen_;
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 41f2f776fc..4b07d5b621 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -42,53 +42,53 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
private:
- void VisitAdd(HAdd* add) OVERRIDE {
+ void VisitAdd(HAdd* add) override {
BinaryFP(add);
}
- void VisitSub(HSub* sub) OVERRIDE {
+ void VisitSub(HSub* sub) override {
BinaryFP(sub);
}
- void VisitMul(HMul* mul) OVERRIDE {
+ void VisitMul(HMul* mul) override {
BinaryFP(mul);
}
- void VisitDiv(HDiv* div) OVERRIDE {
+ void VisitDiv(HDiv* div) override {
BinaryFP(div);
}
- void VisitCompare(HCompare* compare) OVERRIDE {
+ void VisitCompare(HCompare* compare) override {
BinaryFP(compare);
}
- void VisitReturn(HReturn* ret) OVERRIDE {
+ void VisitReturn(HReturn* ret) override {
HConstant* value = ret->InputAt(0)->AsConstant();
if ((value != nullptr && DataType::IsFloatingPointType(value->GetType()))) {
ReplaceInput(ret, value, 0, true);
}
}
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
HandleInvoke(invoke);
}
- void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+ void VisitInvokeInterface(HInvokeInterface* invoke) override {
HandleInvoke(invoke);
}
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+ void VisitLoadClass(HLoadClass* load_class) override {
if (load_class->HasPcRelativeLoadKind()) {
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_class);
load_class->AddSpecialInput(method_address);
}
}
- void VisitLoadString(HLoadString* load_string) OVERRIDE {
+ void VisitLoadString(HLoadString* load_string) override {
if (load_string->HasPcRelativeLoadKind()) {
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_string);
load_string->AddSpecialInput(method_address);
@@ -102,31 +102,31 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- void VisitEqual(HEqual* cond) OVERRIDE {
+ void VisitEqual(HEqual* cond) override {
BinaryFP(cond);
}
- void VisitNotEqual(HNotEqual* cond) OVERRIDE {
+ void VisitNotEqual(HNotEqual* cond) override {
BinaryFP(cond);
}
- void VisitLessThan(HLessThan* cond) OVERRIDE {
+ void VisitLessThan(HLessThan* cond) override {
BinaryFP(cond);
}
- void VisitLessThanOrEqual(HLessThanOrEqual* cond) OVERRIDE {
+ void VisitLessThanOrEqual(HLessThanOrEqual* cond) override {
BinaryFP(cond);
}
- void VisitGreaterThan(HGreaterThan* cond) OVERRIDE {
+ void VisitGreaterThan(HGreaterThan* cond) override {
BinaryFP(cond);
}
- void VisitGreaterThanOrEqual(HGreaterThanOrEqual* cond) OVERRIDE {
+ void VisitGreaterThanOrEqual(HGreaterThanOrEqual* cond) override {
BinaryFP(cond);
}
- void VisitNeg(HNeg* neg) OVERRIDE {
+ void VisitNeg(HNeg* neg) override {
if (DataType::IsFloatingPointType(neg->GetType())) {
// We need to replace the HNeg with a HX86FPNeg in order to address the constant area.
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(neg);
@@ -141,7 +141,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+ void VisitPackedSwitch(HPackedSwitch* switch_insn) override {
if (switch_insn->GetNumEntries() <=
InstructionCodeGeneratorX86::kPackedSwitchJumpTableThreshold) {
return;
diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
index db56b7f053..3b470a6502 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -34,7 +34,7 @@ class PcRelativeFixups : public HOptimization {
static constexpr const char* kPcRelativeFixupsX86PassName = "pc_relative_fixups_x86";
- bool Run() OVERRIDE;
+ bool Run() override;
private:
CodeGenerator* codegen_;
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 060613d349..fc81740013 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -17,6 +17,7 @@
#include "prepare_for_register_allocation.h"
#include "dex/dex_file_types.h"
+#include "driver/compiler_options.h"
#include "jni/jni_internal.h"
#include "optimizing_compiler_stats.h"
#include "well_known_classes.h"
@@ -27,7 +28,7 @@ void PrepareForRegisterAllocation::Run() {
// Order does not matter.
for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
// No need to visit the phis.
- for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
+ for (HInstructionIteratorHandleChanges inst_it(block->GetInstructions()); !inst_it.Done();
inst_it.Advance()) {
inst_it.Current()->Accept(this);
}
@@ -50,6 +51,19 @@ void PrepareForRegisterAllocation::VisitInstanceOf(HInstanceOf* instance_of) {
void PrepareForRegisterAllocation::VisitNullCheck(HNullCheck* check) {
check->ReplaceWith(check->InputAt(0));
+ if (compiler_options_.GetImplicitNullChecks()) {
+ HInstruction* next = check->GetNext();
+
+ // The `PrepareForRegisterAllocation` pass removes `HBoundType` from the graph,
+ // so remove them here already so that they do not prevent this optimization.
+ while (next->IsBoundType()) {
+ next = next->GetNext();
+ VisitBoundType(next->GetPrevious()->AsBoundType());
+ }
+ if (next->CanDoImplicitNullCheckOn(check->InputAt(0))) {
+ check->MarkEmittedAtUseSite();
+ }
+ }
}
void PrepareForRegisterAllocation::VisitDivZeroCheck(HDivZeroCheck* check) {
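
The new VisitNullCheck logic is the compiler side of implicit null checks: rather than emitting a compare-and-branch, the check is marked as emitted at its use site and the very next memory access doubles as the null test, with a fault handler turning the resulting access violation into a NullPointerException. A deliberately simplified standalone illustration of that runtime idea (jumping out of a SIGSEGV handler like this is not strictly portable, and it is not how ART's fault manager really unwinds, but it shows the mechanism):

#include <csetjmp>
#include <csignal>
#include <cstdio>

static sigjmp_buf g_fault_escape;

// Stand-in for ART's fault handler: reroute the access violation to a
// "throw NullPointerException" path instead of crashing.
static void HandleSegv(int) {
  siglongjmp(g_fault_escape, 1);
}

static int LoadField(const int* obj) {
  return *obj;  // no explicit null test: this load *is* the null check
}

int main() {
  std::signal(SIGSEGV, HandleSegv);
  if (sigsetjmp(g_fault_escape, 1) == 0) {
    std::printf("field = %d\n", LoadField(nullptr));
  } else {
    std::printf("implicit null check fired -> throw NullPointerException\n");
  }
}
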
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index f6e4d3ef99..a8ab256e27 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -21,6 +21,7 @@
namespace art {
+class CompilerOptions;
class OptimizingCompilerStats;
/**
@@ -30,9 +31,11 @@ class OptimizingCompilerStats;
*/
class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
public:
- explicit PrepareForRegisterAllocation(HGraph* graph,
- OptimizingCompilerStats* stats = nullptr)
- : HGraphDelegateVisitor(graph, stats) {}
+ PrepareForRegisterAllocation(HGraph* graph,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr)
+ : HGraphDelegateVisitor(graph, stats),
+ compiler_options_(compiler_options) {}
void Run();
@@ -40,22 +43,24 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
"prepare_for_register_allocation";
private:
- void VisitCheckCast(HCheckCast* check_cast) OVERRIDE;
- void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE;
- void VisitNullCheck(HNullCheck* check) OVERRIDE;
- void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
- void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
- void VisitBoundType(HBoundType* bound_type) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
- void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
- void VisitCondition(HCondition* condition) OVERRIDE;
- void VisitConstructorFence(HConstructorFence* constructor_fence) OVERRIDE;
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
- void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE;
+ void VisitCheckCast(HCheckCast* check_cast) override;
+ void VisitInstanceOf(HInstanceOf* instance_of) override;
+ void VisitNullCheck(HNullCheck* check) override;
+ void VisitDivZeroCheck(HDivZeroCheck* check) override;
+ void VisitBoundsCheck(HBoundsCheck* check) override;
+ void VisitBoundType(HBoundType* bound_type) override;
+ void VisitArraySet(HArraySet* instruction) override;
+ void VisitClinitCheck(HClinitCheck* check) override;
+ void VisitCondition(HCondition* condition) override;
+ void VisitConstructorFence(HConstructorFence* constructor_fence) override;
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
+ void VisitDeoptimize(HDeoptimize* deoptimize) override;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
+ const CompilerOptions& compiler_options_;
+
DISALLOW_COPY_AND_ASSIGN(PrepareForRegisterAllocation);
};
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index c6579dc5e0..8ef9ce4e8b 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -33,7 +33,7 @@ class HPrettyPrinter : public HGraphVisitor {
PrintString(": ");
}
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
PrintPreInstruction(instruction);
PrintString(instruction->DebugName());
PrintPostInstruction(instruction);
@@ -70,7 +70,7 @@ class HPrettyPrinter : public HGraphVisitor {
PrintNewLine();
}
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
PrintString("BasicBlock ");
PrintInt(block->GetBlockId());
const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
@@ -108,15 +108,15 @@ class StringPrettyPrinter : public HPrettyPrinter {
explicit StringPrettyPrinter(HGraph* graph)
: HPrettyPrinter(graph), str_(""), current_block_(nullptr) { }
- void PrintInt(int value) OVERRIDE {
+ void PrintInt(int value) override {
str_ += android::base::StringPrintf("%d", value);
}
- void PrintString(const char* value) OVERRIDE {
+ void PrintString(const char* value) override {
str_ += value;
}
- void PrintNewLine() OVERRIDE {
+ void PrintNewLine() override {
str_ += '\n';
}
@@ -124,12 +124,12 @@ class StringPrettyPrinter : public HPrettyPrinter {
std::string str() const { return str_; }
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ void VisitBasicBlock(HBasicBlock* block) override {
current_block_ = block;
HPrettyPrinter::VisitBasicBlock(block);
}
- void VisitGoto(HGoto* gota) OVERRIDE {
+ void VisitGoto(HGoto* gota) override {
PrintString(" ");
PrintInt(gota->GetId());
PrintString(": Goto ");
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 0d622484ee..a9d590232c 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -94,26 +94,26 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
worklist_.reserve(kDefaultWorklistSize);
}
- void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE;
- void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
- void VisitInstanceOf(HInstanceOf* load_class) OVERRIDE;
- void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE;
- void VisitLoadMethodHandle(HLoadMethodHandle* instr) OVERRIDE;
- void VisitLoadMethodType(HLoadMethodType* instr) OVERRIDE;
- void VisitLoadString(HLoadString* instr) OVERRIDE;
- void VisitLoadException(HLoadException* instr) OVERRIDE;
- void VisitNewArray(HNewArray* instr) OVERRIDE;
- void VisitParameterValue(HParameterValue* instr) OVERRIDE;
- void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
- void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
- void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
- void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) OVERRIDE;
- void VisitInvoke(HInvoke* instr) OVERRIDE;
- void VisitArrayGet(HArrayGet* instr) OVERRIDE;
- void VisitCheckCast(HCheckCast* instr) OVERRIDE;
- void VisitBoundType(HBoundType* instr) OVERRIDE;
- void VisitNullCheck(HNullCheck* instr) OVERRIDE;
+ void VisitDeoptimize(HDeoptimize* deopt) override;
+ void VisitNewInstance(HNewInstance* new_instance) override;
+ void VisitLoadClass(HLoadClass* load_class) override;
+ void VisitInstanceOf(HInstanceOf* load_class) override;
+ void VisitClinitCheck(HClinitCheck* clinit_check) override;
+ void VisitLoadMethodHandle(HLoadMethodHandle* instr) override;
+ void VisitLoadMethodType(HLoadMethodType* instr) override;
+ void VisitLoadString(HLoadString* instr) override;
+ void VisitLoadException(HLoadException* instr) override;
+ void VisitNewArray(HNewArray* instr) override;
+ void VisitParameterValue(HParameterValue* instr) override;
+ void VisitInstanceFieldGet(HInstanceFieldGet* instr) override;
+ void VisitStaticFieldGet(HStaticFieldGet* instr) override;
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) override;
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) override;
+ void VisitInvoke(HInvoke* instr) override;
+ void VisitArrayGet(HArrayGet* instr) override;
+ void VisitCheckCast(HCheckCast* instr) override;
+ void VisitBoundType(HBoundType* instr) override;
+ void VisitNullCheck(HNullCheck* instr) override;
void VisitPhi(HPhi* phi);
void VisitBasicBlock(HBasicBlock* block);
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index d36d592708..7c6a048444 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -40,7 +40,7 @@ class ReferenceTypePropagation : public HOptimization {
// Visit a single instruction.
void Visit(HInstruction* instruction);
- bool Run() OVERRIDE;
+ bool Run() override;
// Returns true if klass is admissible to the propagation: non-null and resolved.
// For an array type, we also check if the component type is admissible.
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
index 3072c92e0f..16131e1c71 100644
--- a/compiler/optimizing/register_allocator_graph_color.h
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -90,9 +90,9 @@ class RegisterAllocatorGraphColor : public RegisterAllocator {
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis,
bool iterative_move_coalescing = true);
- ~RegisterAllocatorGraphColor() OVERRIDE;
+ ~RegisterAllocatorGraphColor() override;
- void AllocateRegisters() OVERRIDE;
+ void AllocateRegisters() override;
bool Validate(bool log_fatal_on_failure);
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 216fb57a96..1e00003701 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -312,7 +312,7 @@ void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction)
for (size_t safepoint_index = safepoints_.size(); safepoint_index > 0; --safepoint_index) {
HInstruction* safepoint = safepoints_[safepoint_index - 1u];
- size_t safepoint_position = safepoint->GetLifetimePosition();
+ size_t safepoint_position = SafepointPosition::ComputePosition(safepoint);
// Test that safepoints are ordered in the optimal way.
DCHECK(safepoint_index == safepoints_.size() ||
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index 36788b7c3c..4d445c7ff7 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -42,11 +42,11 @@ class RegisterAllocatorLinearScan : public RegisterAllocator {
RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis);
- ~RegisterAllocatorLinearScan() OVERRIDE;
+ ~RegisterAllocatorLinearScan() override;
- void AllocateRegisters() OVERRIDE;
+ void AllocateRegisters() override;
- bool Validate(bool log_fatal_on_failure) OVERRIDE {
+ bool Validate(bool log_fatal_on_failure) override {
processing_core_registers_ = true;
if (!ValidateInternal(log_fatal_on_failure)) {
return false;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 7144775c2b..db6a760007 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -40,7 +40,7 @@ using Strategy = RegisterAllocator::Strategy;
class RegisterAllocatorTest : public OptimizingUnitTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
// This test is using the x86 ISA.
OverrideInstructionSetFeatures(InstructionSet::kX86, "default");
OptimizingUnitTest::SetUp();
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 1aa16f45bc..df897a4904 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -280,6 +280,23 @@ bool SchedulingGraph::HasSideEffectDependency(HInstruction* node,
return false;
}
+// Check if the specified instruction is a better candidate, i.e. one that other
+// instructions are more likely to depend on.
+static bool IsBetterCandidateWithMoreLikelyDependencies(HInstruction* new_candidate,
+ HInstruction* old_candidate) {
+ if (!new_candidate->GetSideEffects().Includes(old_candidate->GetSideEffects())) {
+ // Weaker side effects.
+ return false;
+ }
+ if (old_candidate->GetSideEffects().Includes(new_candidate->GetSideEffects())) {
+ // Same side effects, check if `new_candidate` has stronger `CanThrow()`.
+ return new_candidate->CanThrow() && !old_candidate->CanThrow();
+ } else {
+ // Stronger side effects, check if `new_candidate` has at least as strong `CanThrow()`.
+ return new_candidate->CanThrow() || !old_candidate->CanThrow();
+ }
+}
+
void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_scheduling_barrier) {
SchedulingNode* instruction_node = GetNode(instruction);
@@ -331,6 +348,7 @@ void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_schedul
// Side effect dependencies.
if (!instruction->GetSideEffects().DoesNothing() || instruction->CanThrow()) {
+ HInstruction* dep_chain_candidate = nullptr;
for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) {
SchedulingNode* other_node = GetNode(other);
if (other_node->IsSchedulingBarrier()) {
@@ -340,7 +358,18 @@ void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_schedul
break;
}
if (HasSideEffectDependency(other, instruction)) {
- AddOtherDependency(other_node, instruction_node);
+ if (dep_chain_candidate != nullptr &&
+ HasSideEffectDependency(other, dep_chain_candidate)) {
+ // Skip an explicit dependency to reduce memory usage; rely on the transitive dependency.
+ } else {
+ AddOtherDependency(other_node, instruction_node);
+ }
+ // Check if `other` is a better candidate, i.e. one that later instructions are
+ // more likely to depend on.
+ if (dep_chain_candidate == nullptr ||
+ IsBetterCandidateWithMoreLikelyDependencies(other, dep_chain_candidate)) {
+ dep_chain_candidate = other;
+ }
}
}
}
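
The scheduler change above avoids a quadratic number of "other" dependency edges: when a later instruction already depends on the tracked chain candidate, its direct edge to the current instruction is redundant and the transitive path suffices. A small self-contained model of the loop, with side effects reduced to a bitmask and DependsOn as a crude stand-in for HasSideEffectDependency:

#include <cstdio>
#include <vector>

struct Node {
  const char* name;
  unsigned effects;  // bitmask stand-in for SideEffects
  bool can_throw;
};

// Mirrors IsBetterCandidateWithMoreLikelyDependencies above.
static bool IsBetterCandidate(const Node& n, const Node& o) {
  if ((n.effects & o.effects) != o.effects) return false;  // weaker side effects
  if ((o.effects & n.effects) == n.effects) {              // same side effects
    return n.can_throw && !o.can_throw;
  }
  return n.can_throw || !o.can_throw;                      // stronger side effects
}

static bool DependsOn(const Node& a, const Node& b) {
  return (a.effects & b.effects) != 0;
}

int main() {
  Node current{"store_A", 0x1, false};
  std::vector<Node> later = {{"store_B", 0x1, false}, {"store_C", 0x1, false}};
  const Node* candidate = nullptr;
  for (const Node& other : later) {
    if (!DependsOn(other, current)) continue;
    if (candidate != nullptr && DependsOn(other, *candidate)) {
      std::printf("skip %s -> %s (transitive via %s)\n",
                  other.name, current.name, candidate->name);
    } else {
      std::printf("add  %s -> %s\n", other.name, current.name);
    }
    if (candidate == nullptr || IsBetterCandidate(other, *candidate)) {
      candidate = &other;
    }
  }
}

Running it prints one stored edge (store_B -> store_A) and one skipped edge (store_C -> store_A), which is precisely the pattern the updated scheduler_test.cc assertions below now expect.
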
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index fd48d844e6..48e80f5f8b 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -339,7 +339,7 @@ class SchedulingLatencyVisitor : public HGraphDelegateVisitor {
last_visited_latency_(0),
last_visited_internal_latency_(0) {}
- void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ void VisitInstruction(HInstruction* instruction) override {
LOG(FATAL) << "Error visiting " << instruction->DebugName() << ". "
"Architecture-specific scheduling latency visitors must handle all instructions"
" (potentially by overriding the generic `VisitInstruction()`.";
@@ -392,7 +392,7 @@ class RandomSchedulingNodeSelector : public SchedulingNodeSelector {
}
SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
- const SchedulingGraph& graph) OVERRIDE {
+ const SchedulingGraph& graph) override {
UNUSED(graph);
DCHECK(!nodes->empty());
size_t select = rand_r(&seed_) % nodes->size();
@@ -412,9 +412,9 @@ class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector {
public:
CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {}
- void Reset() OVERRIDE { prev_select_ = nullptr; }
+ void Reset() override { prev_select_ = nullptr; }
SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
- const SchedulingGraph& graph) OVERRIDE;
+ const SchedulingGraph& graph) override;
protected:
SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate,
@@ -492,7 +492,7 @@ class HInstructionScheduling : public HOptimization {
codegen_(cg),
instruction_set_(instruction_set) {}
- bool Run() OVERRIDE {
+ bool Run() override {
return Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false);
}
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index 2f369486b3..875593bbf0 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -100,7 +100,7 @@ class SchedulingLatencyVisitorARM : public SchedulingLatencyVisitor {
M(DataProcWithShifterOp, unused)
#define DECLARE_VISIT_INSTRUCTION(type, unused) \
- void Visit##type(H##type* instruction) OVERRIDE;
+ void Visit##type(H##type* instruction) override;
FOR_EACH_SCHEDULED_ARM_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -140,9 +140,9 @@ class HSchedulerARM : public HScheduler {
HSchedulerARM(SchedulingNodeSelector* selector,
SchedulingLatencyVisitorARM* arm_latency_visitor)
: HScheduler(arm_latency_visitor, selector) {}
- ~HSchedulerARM() OVERRIDE {}
+ ~HSchedulerARM() override {}
- bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
+ bool IsSchedulable(const HInstruction* instruction) const override {
#define CASE_INSTRUCTION_KIND(type, unused) case \
HInstruction::InstructionKind::k##type:
switch (instruction->GetKind()) {
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 0d2f8d9fa0..7f6549dcfe 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -118,7 +118,7 @@ class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
M(DataProcWithShifterOp, unused)
#define DECLARE_VISIT_INSTRUCTION(type, unused) \
- void Visit##type(H##type* instruction) OVERRIDE;
+ void Visit##type(H##type* instruction) override;
FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_SCHEDULED_ABSTRACT_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -136,9 +136,9 @@ class HSchedulerARM64 : public HScheduler {
public:
explicit HSchedulerARM64(SchedulingNodeSelector* selector)
: HScheduler(&arm64_latency_visitor_, selector) {}
- ~HSchedulerARM64() OVERRIDE {}
+ ~HSchedulerARM64() override {}
- bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
+ bool IsSchedulable(const HInstruction* instruction) const override {
#define CASE_INSTRUCTION_KIND(type, unused) case \
HInstruction::InstructionKind::k##type:
switch (instruction->GetKind()) {
@@ -160,7 +160,7 @@ class HSchedulerARM64 : public HScheduler {
// SIMD&FP registers are callee saved) so don't reorder such vector instructions.
//
// TODO: remove this when a proper support of SIMD registers is introduced to the compiler.
- bool IsSchedulingBarrier(const HInstruction* instr) const OVERRIDE {
+ bool IsSchedulingBarrier(const HInstruction* instr) const override {
return HScheduler::IsSchedulingBarrier(instr) ||
instr->IsVecReduce() ||
instr->IsVecExtractScalar() ||
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index fe23fb4cff..981fcc42a7 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -171,7 +171,9 @@ class SchedulerTest : public OptimizingUnitTest {
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set1, array_get1));
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set2, array_get2));
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_get2, array_set1));
- ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(array_set2, array_set1));
+ // The unnecessary dependency is not stored; we rely on transitive dependencies.
+ // The array_set2 -> array_get2 -> array_set1 dependencies are tested above.
+ ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(array_set2, array_set1));
// Env dependency.
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(div_check, mul));
@@ -308,7 +310,9 @@ class SchedulerTest : public OptimizingUnitTest {
loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i);
loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_j);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
- ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_i));
+ // The unnecessary dependency is not stored; we rely on transitive dependencies.
+ // The arr_set_j -> arr_set_sub0 -> arr_set_add0 -> arr_set_i dependencies are tested below.
+ ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_i));
// Test side effect dependency based on LSA analysis: array[i] and array[i+0]
loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i);
@@ -320,7 +324,10 @@ class SchedulerTest : public OptimizingUnitTest {
loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i);
loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_sub0);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
- ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub0, arr_set_i));
+ // The unnecessary dependency is not stored; we rely on transitive dependencies.
+ ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub0, arr_set_i));
+ // Instead, we rely on arr_set_sub0 -> arr_set_add0 -> arr_set_i; the latter is tested above.
+ ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub0, arr_set_add0));
// Test side effect dependency based on LSA analysis: array[i] and array[i+1]
loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i);
@@ -335,11 +342,12 @@ class SchedulerTest : public OptimizingUnitTest {
ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub1, arr_set_add1));
// Test side effect dependency based on LSA analysis: array[j] and all others array accesses
- ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_i));
- ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_add0));
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_sub0));
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_add1));
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_sub1));
+ // Unnecessary dependencies are not stored; we rely on transitive dependencies.
+ ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_i));
+ ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_add0));
// Test that ArraySet and FieldSet should not have side effect dependency
ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_i, set_field10));
diff --git a/compiler/optimizing/select_generator.h b/compiler/optimizing/select_generator.h
index d24d2264b2..2889166f60 100644
--- a/compiler/optimizing/select_generator.h
+++ b/compiler/optimizing/select_generator.h
@@ -68,7 +68,7 @@ class HSelectGenerator : public HOptimization {
OptimizingCompilerStats* stats,
const char* name = kSelectGeneratorPassName);
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kSelectGeneratorPassName = "select_generator";
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index cbac361891..dc55eea683 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -37,7 +37,7 @@ class HSharpening : public HOptimization {
: HOptimization(graph, name),
codegen_(codegen) { }
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kSharpeningPassName = "sharpening";
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 2f782f39fc..62a70d6b12 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -103,9 +103,9 @@ void SsaLivenessAnalysis::ComputeLiveness() {
ComputeLiveInAndLiveOutSets();
}
-static void RecursivelyProcessInputs(HInstruction* current,
- HInstruction* actual_user,
- BitVector* live_in) {
+void SsaLivenessAnalysis::RecursivelyProcessInputs(HInstruction* current,
+ HInstruction* actual_user,
+ BitVector* live_in) {
HInputsRef inputs = current->GetInputs();
for (size_t i = 0; i < inputs.size(); ++i) {
HInstruction* input = inputs[i];
@@ -131,11 +131,40 @@ static void RecursivelyProcessInputs(HInstruction* current,
// Check that the inlined input is not a phi. Recursing on loop phis could
// lead to an infinite loop.
DCHECK(!input->IsPhi());
+ DCHECK(!input->HasEnvironment());
RecursivelyProcessInputs(input, actual_user, live_in);
}
}
}
+void SsaLivenessAnalysis::ProcessEnvironment(HInstruction* current,
+ HInstruction* actual_user,
+ BitVector* live_in) {
+ for (HEnvironment* environment = current->GetEnvironment();
+ environment != nullptr;
+ environment = environment->GetParent()) {
+ // Handle environment uses. See statements (b) and (c) of the
+ // SsaLivenessAnalysis.
+ for (size_t i = 0, e = environment->Size(); i < e; ++i) {
+ HInstruction* instruction = environment->GetInstructionAt(i);
+ if (instruction == nullptr) {
+ continue;
+ }
+ bool should_be_live = ShouldBeLiveForEnvironment(current, instruction);
+ // If this environment use does not keep the instruction live, it does not
+ // affect the live range of that instruction.
+ if (should_be_live) {
+ CHECK(instruction->HasSsaIndex()) << instruction->DebugName();
+ live_in->SetBit(instruction->GetSsaIndex());
+ instruction->GetLiveInterval()->AddUse(current,
+ environment,
+ i,
+ actual_user);
+ }
+ }
+ }
+}
+
void SsaLivenessAnalysis::ComputeLiveRanges() {
// Do a post order visit, adding inputs of instructions live in the block where
// that instruction is defined, and killing instructions that are being visited.
@@ -186,32 +215,6 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
current->GetLiveInterval()->SetFrom(current->GetLifetimePosition());
}
- // Process the environment first, because we know their uses come after
- // or at the same liveness position of inputs.
- for (HEnvironment* environment = current->GetEnvironment();
- environment != nullptr;
- environment = environment->GetParent()) {
- // Handle environment uses. See statements (b) and (c) of the
- // SsaLivenessAnalysis.
- for (size_t i = 0, e = environment->Size(); i < e; ++i) {
- HInstruction* instruction = environment->GetInstructionAt(i);
- if (instruction == nullptr) {
- continue;
- }
- bool should_be_live = ShouldBeLiveForEnvironment(current, instruction);
- // If this environment use does not keep the instruction live, it does not
- // affect the live range of that instruction.
- if (should_be_live) {
- CHECK(instruction->HasSsaIndex()) << instruction->DebugName();
- live_in->SetBit(instruction->GetSsaIndex());
- instruction->GetLiveInterval()->AddUse(current,
- environment,
- i,
- /* actual_user */ nullptr);
- }
- }
- }
-
// Process inputs of instructions.
if (current->IsEmittedAtUseSite()) {
if (kIsDebugBuild) {
@@ -224,6 +227,16 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
DCHECK(!current->HasEnvironmentUses());
}
} else {
+ // Process the environment first, because we know its uses come at or after
+ // the liveness positions of the inputs.
+ ProcessEnvironment(current, current, live_in);
+
+ // Special case implicit null checks. We want their environment uses to be
+ // emitted at the instruction doing the actual null check.
+ HNullCheck* check = current->GetImplicitNullCheck();
+ if (check != nullptr) {
+ ProcessEnvironment(check, current, live_in);
+ }
RecursivelyProcessInputs(current, current, live_in);
}
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 83ca5bd5fa..92d0b08301 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -60,7 +60,7 @@ class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> {
* A live range contains the start and end of a range where an instruction or a temporary
* is live.
*/
-class LiveRange FINAL : public ArenaObject<kArenaAllocSsaLiveness> {
+class LiveRange final : public ArenaObject<kArenaAllocSsaLiveness> {
public:
LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
DCHECK_LT(start, end);
@@ -230,12 +230,25 @@ class SafepointPosition : public ArenaObject<kArenaAllocSsaLiveness> {
: instruction_(instruction),
next_(nullptr) {}
+ static size_t ComputePosition(HInstruction* instruction) {
+ // We special case instructions emitted at use site, as their
+ // safepoint position needs to be at their use.
+ if (instruction->IsEmittedAtUseSite()) {
+ // Currently only applies to implicit null checks, which are emitted
+ // at the next instruction.
+ DCHECK(instruction->IsNullCheck()) << instruction->DebugName();
+ return instruction->GetLifetimePosition() + 2;
+ } else {
+ return instruction->GetLifetimePosition();
+ }
+ }
+
void SetNext(SafepointPosition* next) {
next_ = next;
}
size_t GetPosition() const {
- return instruction_->GetLifetimePosition();
+ return ComputePosition(instruction_);
}
SafepointPosition* GetNext() const {
@@ -922,7 +935,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
if (first_safepoint_ == nullptr) {
first_safepoint_ = last_safepoint_ = safepoint;
} else {
- DCHECK_LT(last_safepoint_->GetPosition(), safepoint->GetPosition());
+ DCHECK_LE(last_safepoint_->GetPosition(), safepoint->GetPosition());
last_safepoint_->SetNext(safepoint);
last_safepoint_ = safepoint;
}
@@ -1252,6 +1265,13 @@ class SsaLivenessAnalysis : public ValueObject {
// Update the live_out set of the block and returns whether it has changed.
bool UpdateLiveOut(const HBasicBlock& block);
+ static void ProcessEnvironment(HInstruction* instruction,
+ HInstruction* actual_user,
+ BitVector* live_in);
+ static void RecursivelyProcessInputs(HInstruction* instruction,
+ HInstruction* actual_user,
+ BitVector* live_in);
+
// Returns whether `instruction` in an HEnvironment held by `env_holder`
// should be kept live by the HEnvironment.
static bool ShouldBeLiveForEnvironment(HInstruction* env_holder, HInstruction* instruction) {
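
The +2 in ComputePosition comes from ART's liveness numbering, in which each instruction occupies an even lifetime position and the next instruction sits two positions later. An implicit null check emits no code of its own, so its safepoint coincides with the faulting instruction that follows it; that is also why the safepoint ordering check was relaxed from DCHECK_LT to DCHECK_LE. A toy model of just that computation (the numbering convention is the only assumption):

#include <cstddef>
#include <cstdio>

struct Instr {
  const char* name;
  std::size_t lifetime_position;  // even positions, two apart, as in ART
  bool emitted_at_use_site;
};

// Sketch of SafepointPosition::ComputePosition.
static std::size_t ComputePosition(const Instr& i) {
  return i.emitted_at_use_site ? i.lifetime_position + 2
                               : i.lifetime_position;
}

int main() {
  Instr null_check{"NullCheck", 10, /*emitted_at_use_site=*/true};
  Instr array_get{"ArrayGet", 12, /*emitted_at_use_site=*/false};
  // Both safepoints land on position 12: the check is "emitted" by the get.
  std::printf("%s safepoint at %zu\n", null_check.name, ComputePosition(null_check));
  std::printf("%s safepoint at %zu\n", array_get.name, ComputePosition(array_get));
}
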
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index a683c698d9..4b525531da 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -29,7 +29,7 @@ namespace art {
class SsaLivenessAnalysisTest : public OptimizingUnitTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
OptimizingUnitTest::SetUp();
graph_ = CreateGraph();
codegen_ = CodeGenerator::Create(graph_, *compiler_options_);
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index ee859e834c..c5cc752ffc 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -31,7 +31,7 @@ class SsaDeadPhiElimination : public HOptimization {
explicit SsaDeadPhiElimination(HGraph* graph)
: HOptimization(graph, kSsaDeadPhiEliminationPassName) {}
- bool Run() OVERRIDE;
+ bool Run() override;
void MarkDeadPhis();
void EliminateDeadPhis();
@@ -53,7 +53,7 @@ class SsaRedundantPhiElimination : public HOptimization {
explicit SsaRedundantPhiElimination(HGraph* graph)
: HOptimization(graph, kSsaRedundantPhiEliminationPassName) {}
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kSsaRedundantPhiEliminationPassName = "redundant_phi_elimination";
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 85ed06eb9b..e679893af2 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -38,15 +38,15 @@ class SsaPrettyPrinter : public HPrettyPrinter {
public:
explicit SsaPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {}
- void PrintInt(int value) OVERRIDE {
+ void PrintInt(int value) override {
str_ += android::base::StringPrintf("%d", value);
}
- void PrintString(const char* value) OVERRIDE {
+ void PrintString(const char* value) override {
str_ += value;
}
- void PrintNewLine() OVERRIDE {
+ void PrintNewLine() override {
str_ += '\n';
}
@@ -54,7 +54,7 @@ class SsaPrettyPrinter : public HPrettyPrinter {
std::string str() const { return str_; }
- void VisitIntConstant(HIntConstant* constant) OVERRIDE {
+ void VisitIntConstant(HIntConstant* constant) override {
PrintPreInstruction(constant);
str_ += constant->DebugName();
str_ += " ";
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index f0069c0e09..b1abcf6747 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -31,7 +31,7 @@ class MemoryOperandVisitor : public HGraphVisitor {
do_implicit_null_checks_(do_implicit_null_checks) {}
private:
- void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE {
+ void VisitBoundsCheck(HBoundsCheck* check) override {
// Replace the length by the array itself, so that we can do compares to memory.
HArrayLength* array_len = check->InputAt(1)->AsArrayLength();
diff --git a/compiler/optimizing/x86_memory_gen.h b/compiler/optimizing/x86_memory_gen.h
index b254000f28..3f4178d58a 100644
--- a/compiler/optimizing/x86_memory_gen.h
+++ b/compiler/optimizing/x86_memory_gen.h
@@ -31,7 +31,7 @@ class X86MemoryOperandGeneration : public HOptimization {
CodeGenerator* codegen,
OptimizingCompilerStats* stats);
- bool Run() OVERRIDE;
+ bool Run() override;
static constexpr const char* kX86MemoryOperandGenerationPassName =
"x86_memory_operand_generation";
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index b0310f2fb6..98c0191679 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -39,7 +39,7 @@ namespace vixl32 = vixl::aarch32;
namespace art {
namespace arm {
-class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
+class ArmVIXLMacroAssembler final : public vixl32::MacroAssembler {
public:
// Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
// fewer system calls than a larger default capacity.
@@ -149,7 +149,7 @@ class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
using MacroAssembler::Vmov;
};
-class ArmVIXLAssembler FINAL : public Assembler {
+class ArmVIXLAssembler final : public Assembler {
private:
class ArmException;
public:
@@ -161,19 +161,19 @@ class ArmVIXLAssembler FINAL : public Assembler {
virtual ~ArmVIXLAssembler() {}
ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
// Size of generated code.
- size_t CodeSize() const OVERRIDE;
- const uint8_t* CodeBufferBaseAddress() const OVERRIDE;
+ size_t CodeSize() const override;
+ const uint8_t* CodeBufferBaseAddress() const override;
// Copy instructions out of assembly buffer into the given region of memory.
- void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;
+ void FinalizeInstructions(const MemoryRegion& region) override;
- void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Bind(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM";
}
- void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM";
}
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 4bc5d69f4d..674bf12f89 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -32,7 +32,7 @@
namespace art {
namespace arm {
-class ArmVIXLJNIMacroAssembler FINAL
+class ArmVIXLJNIMacroAssembler final
: public JNIMacroAssemblerFwd<ArmVIXLAssembler, PointerSize::k32> {
private:
class ArmException;
@@ -42,7 +42,7 @@ class ArmVIXLJNIMacroAssembler FINAL
exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {}
virtual ~ArmVIXLJNIMacroAssembler() {}
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
//
// Overridden common assembler high-level functionality
@@ -52,109 +52,109 @@ class ArmVIXLJNIMacroAssembler FINAL
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines.
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister src) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
- void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
void StoreSpanning(FrameOffset dest,
ManagedRegister src,
FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
// Load routines.
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
void LoadFromThread(ManagedRegister dest,
ThreadOffset32 src,
- size_t size) OVERRIDE;
+ size_t size) override;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister dest,
ManagedRegister base,
MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
+ bool unpoison_reference) override;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
- void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) override;
// Copying routines.
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset32 thr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
void CopyRawPtrToThread(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
void Copy(FrameOffset dest,
ManagedRegister src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base,
Offset dest_offset,
FrameOffset src,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
FrameOffset src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest,
Offset dest_offset,
ManagedRegister src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
Offset dest_offset,
FrameOffset src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
// Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister mtr) OVERRIDE;
+ void GetCurrentThread(ManagedRegister mtr) override;
void GetCurrentThread(FrameOffset dest_offset,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
@@ -163,43 +163,43 @@ class ArmVIXLJNIMacroAssembler FINAL
void CreateHandleScopeEntry(ManagedRegister out_reg,
FrameOffset handlescope_offset,
ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off,
FrameOffset handlescope_offset,
ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// src holds a handle scope entry (Object**); load it into dst.
void LoadReferenceFromHandleScope(ManagedRegister dst,
- ManagedRegister src) OVERRIDE;
+ ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override;
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label) OVERRIDE;
+ void Jump(JNIMacroLabel* label) override;
// Emit a conditional jump to the label by applying a unary condition test to the register.
- void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label) OVERRIDE;
+ void Bind(JNIMacroLabel* label) override;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+ void MemoryBarrier(ManagedRegister scratch) override;
void EmitExceptionPoll(ArmVIXLJNIMacroAssembler::ArmException *exception);
void Load(ArmManagedRegister dest, vixl32::Register base, int32_t offset, size_t size);
@@ -231,7 +231,7 @@ class ArmVIXLJNIMacroAssembler FINAL
friend class ArmVIXLAssemblerTest_VixlStoreToOffset_Test;
};
-class ArmVIXLJNIMacroLabel FINAL
+class ArmVIXLJNIMacroLabel final
: public JNIMacroLabelCommon<ArmVIXLJNIMacroLabel,
vixl32::Label,
InstructionSet::kArm> {
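The changes above (and throughout this patch) mechanically replace ART's OVERRIDE and FINAL macros with the C++11 keywords override and final. A minimal standalone sketch, not ART code, of what the keyword buys: a virtual-signature mismatch that a bare redeclaration would silently accept becomes a hard compile error.

#include <cstddef>

struct Assembler {
  virtual ~Assembler() {}
  virtual size_t CodeSize() const { return 0; }
};

struct MyAssembler final : public Assembler {  // `final`: no further derivation
  // Matches the base signature exactly, so `override` is accepted.
  size_t CodeSize() const override { return 42; }

  // A non-const variant would NOT override the base method; with `override`
  // the compiler rejects it instead of quietly adding a new virtual:
  //   size_t CodeSize() override { return 42; }  // error: does not override
};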
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 8983af2677..74537dd5a3 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -61,7 +61,7 @@ enum StoreOperandType {
kStoreDWord
};
-class Arm64Assembler FINAL : public Assembler {
+class Arm64Assembler final : public Assembler {
public:
explicit Arm64Assembler(ArenaAllocator* allocator) : Assembler(allocator) {}
@@ -70,11 +70,11 @@ class Arm64Assembler FINAL : public Assembler {
vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
// Finalize the code.
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
// Size of generated code.
- size_t CodeSize() const OVERRIDE;
- const uint8_t* CodeBufferBaseAddress() const OVERRIDE;
+ size_t CodeSize() const override;
+ const uint8_t* CodeBufferBaseAddress() const override;
// Copy instructions out of assembly buffer into the given region of memory.
void FinalizeInstructions(const MemoryRegion& region);
@@ -109,10 +109,10 @@ class Arm64Assembler FINAL : public Assembler {
// MaybeGenerateMarkingRegisterCheck and is passed to the BRK instruction.
void GenerateMarkingRegisterCheck(vixl::aarch64::Register temp, int code = 0);
- void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Bind(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM64";
}
- void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM64";
}
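Note that the Bind/Jump overrides above keep the ATTRIBUTE_UNUSED macro on their parameters even as OVERRIDE gives way to the keyword. A hedged sketch, not ART code: the C++17 standard spelling of the same intent is [[maybe_unused]], paired here with an abort as a stand-in for the UNIMPLEMENTED(FATAL) pattern.

#include <cstdlib>

struct Label {};

struct Base {
  virtual ~Base() {}
  virtual void Bind(Label* label) = 0;
};

struct NoBind final : public Base {
  void Bind([[maybe_unused]] Label* label) override {
    // This port never binds plain Labels; reaching here is a bug, so fail
    // loudly (stand-in for UNIMPLEMENTED(FATAL)).
    std::abort();
  }
};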
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index f531b2aa51..45316ed88e 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -40,7 +40,7 @@
namespace art {
namespace arm64 {
-class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
+class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
public:
explicit Arm64JNIMacroAssembler(ArenaAllocator* allocator)
: JNIMacroAssemblerFwd(allocator),
@@ -49,94 +49,94 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler,
~Arm64JNIMacroAssembler();
// Finalize the code.
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
// Emit code that will create an activation on the stack.
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines.
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister src) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+ ManagedRegister scratch) override;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
void StoreSpanning(FrameOffset dest,
ManagedRegister src,
FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
// Load routines.
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
+ void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) override;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister dest,
ManagedRegister base,
MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+ bool unpoison_reference) override;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
// Copying routines.
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset64 thr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ override;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
void Copy(FrameOffset dest,
ManagedRegister src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base,
Offset dest_offset,
FrameOffset src,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
FrameOffset src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest,
Offset dest_offset,
ManagedRegister src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
Offset dest_offset,
FrameOffset src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+ size_t size) override;
+ void MemoryBarrier(ManagedRegister scratch) override;
// Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+ void GetCurrentThread(ManagedRegister tr) override;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
@@ -145,40 +145,40 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler,
void CreateHandleScopeEntry(ManagedRegister out_reg,
FrameOffset handlescope_offset,
ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off,
FrameOffset handlescope_offset,
ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// src holds a handle scope entry (Object**); load it into dst.
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override;
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label) OVERRIDE;
+ void Jump(JNIMacroLabel* label) override;
// Emit a conditional jump to the label by applying a unary condition test to the register.
- void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label) OVERRIDE;
+ void Bind(JNIMacroLabel* label) override;
private:
class Arm64Exception {
@@ -234,7 +234,7 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler,
ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
};
-class Arm64JNIMacroLabel FINAL
+class Arm64JNIMacroLabel final
: public JNIMacroLabelCommon<Arm64JNIMacroLabel,
vixl::aarch64::Label,
InstructionSet::kArm64> {
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 379a6396eb..096410de3e 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -283,7 +283,7 @@ class AssemblerBuffer {
// The purpose of this class is to ensure that we do not have to explicitly
// call the AdvancePC method (which is good for convenience and correctness).
-class DebugFrameOpCodeWriterForAssembler FINAL
+class DebugFrameOpCodeWriterForAssembler final
: public dwarf::DebugFrameOpCodeWriter<> {
public:
struct DelayedAdvancePC {
@@ -292,7 +292,7 @@ class DebugFrameOpCodeWriterForAssembler FINAL
};
// This method is called by the opcode writers.
- virtual void ImplicitlyAdvancePC() FINAL;
+ void ImplicitlyAdvancePC() final;
explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
: dwarf::DebugFrameOpCodeWriter<>(false /* enabled */),
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 7c800b355f..9e23d11116 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -737,7 +737,7 @@ class AssemblerTest : public testing::Test {
protected:
AssemblerTest() {}
- void SetUp() OVERRIDE {
+ void SetUp() override {
allocator_.reset(new ArenaAllocator(&pool_));
assembler_.reset(CreateAssembler(allocator_.get()));
test_helper_.reset(
@@ -753,7 +753,7 @@ class AssemblerTest : public testing::Test {
SetUpHelpers();
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
test_helper_.reset(); // Clean up the helper.
assembler_.reset();
allocator_.reset();
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index f5df926749..e6130cfc4c 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -259,19 +259,19 @@ inline JNIMacroLabel::~JNIMacroLabel() {
template <typename T, PointerSize kPointerSize>
class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> {
public:
- void FinalizeCode() OVERRIDE {
+ void FinalizeCode() override {
asm_.FinalizeCode();
}
- size_t CodeSize() const OVERRIDE {
+ size_t CodeSize() const override {
return asm_.CodeSize();
}
- void FinalizeInstructions(const MemoryRegion& region) OVERRIDE {
+ void FinalizeInstructions(const MemoryRegion& region) override {
asm_.FinalizeInstructions(region);
}
- DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE {
+ DebugFrameOpCodeWriterForAssembler& cfi() override {
return asm_.cfi();
}
@@ -299,7 +299,7 @@ class JNIMacroLabelCommon : public JNIMacroLabel {
JNIMacroLabelCommon() : JNIMacroLabel(kIsa) {
}
- virtual ~JNIMacroLabelCommon() OVERRIDE {}
+ ~JNIMacroLabelCommon() override {}
private:
PlatformLabel label_;
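JNIMacroAssemblerFwd, touched above, is the forwarding layer: it implements the virtual JNIMacroAssembler interface by delegating each call to a concrete assembler member, which is why every method body is a one-line asm_ call. A simplified sketch with assumed interfaces (IAssembler and AssemblerFwd are stand-ins, not the real ART classes):

#include <cstddef>

class IAssembler {
 public:
  virtual ~IAssembler() {}
  virtual void FinalizeCode() = 0;
  virtual size_t CodeSize() const = 0;
};

template <typename T>
class AssemblerFwd : public IAssembler {
 public:
  // Each override forwards to the concrete assembler; the wrapper adds no
  // behavior of its own.
  void FinalizeCode() override { asm_.FinalizeCode(); }
  size_t CodeSize() const override { return asm_.CodeSize(); }

 protected:
  T asm_;
};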
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index b70c18b3e2..067a5953b8 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -58,7 +58,7 @@ class JNIMacroAssemblerTest : public testing::Test {
protected:
JNIMacroAssemblerTest() {}
- void SetUp() OVERRIDE {
+ void SetUp() override {
allocator_.reset(new ArenaAllocator(&pool_));
assembler_.reset(CreateAssembler(allocator_.get()));
test_helper_.reset(
@@ -74,7 +74,7 @@ class JNIMacroAssemblerTest : public testing::Test {
SetUpHelpers();
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
test_helper_.reset(); // Clean up the helper.
assembler_.reset();
allocator_.reset();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index af3d7a06ba..8a1e1df777 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -263,7 +263,7 @@ class MipsExceptionSlowPath {
DISALLOW_COPY_AND_ASSIGN(MipsExceptionSlowPath);
};
-class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
+class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
public:
using JNIBase = JNIMacroAssembler<PointerSize::k32>;
@@ -285,8 +285,8 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
cfi().DelayEmittingAdvancePCs();
}
- size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
- size_t CodePosition() OVERRIDE;
+ size_t CodeSize() const override { return Assembler::CodeSize(); }
+ size_t CodePosition() override;
DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
virtual ~MipsAssembler() {
@@ -1143,10 +1143,10 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
}
}
- void Bind(Label* label) OVERRIDE {
+ void Bind(Label* label) override {
Bind(down_cast<MipsLabel*>(label));
}
- void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS";
}
@@ -1155,25 +1155,25 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
using JNIBase::Jump;
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override {
LOG(FATAL) << "Not implemented on MIPS32";
UNREACHABLE();
}
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS32";
UNREACHABLE();
}
// Emit a conditional jump to the label by applying a unary condition test to the register.
void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
- ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+ ManagedRegister test ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS32";
UNREACHABLE();
}
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS32";
UNREACHABLE();
}
@@ -1232,108 +1232,108 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines.
- void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister msrc, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister msrc) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) override;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) override;
void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
- void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
void StoreSpanning(FrameOffset dest,
ManagedRegister msrc,
FrameOffset in_off,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
// Load routines.
- void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
+ void Load(ManagedRegister mdest, FrameOffset src, size_t size) override;
- void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) override;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister mdest,
ManagedRegister base,
MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
+ bool unpoison_reference) override;
- void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) override;
- void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) override;
// Copying routines.
- void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
+ void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset32 thr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
void CopyRawPtrToThread(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) override;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) override;
void Copy(FrameOffset dest,
ManagedRegister src_base,
Offset src_offset,
ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base,
Offset dest_offset,
FrameOffset src,
ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
FrameOffset src_base,
Offset src_offset,
ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest,
Offset dest_offset,
ManagedRegister src,
Offset src_offset,
ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
Offset dest_offset,
FrameOffset src,
Offset src_offset,
ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
- void MemoryBarrier(ManagedRegister) OVERRIDE;
+ void MemoryBarrier(ManagedRegister) override;
// Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
+ void GetCurrentThread(ManagedRegister tr) override;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) override;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
@@ -1342,34 +1342,34 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void CreateHandleScopeEntry(ManagedRegister out_reg,
FrameOffset handlescope_offset,
ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off,
FrameOffset handlescope_offset,
ManagedRegister mscratch,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// src holds a handle scope entry (Object**); load it into dst.
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) override;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
+ void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) override;
// Emit slow paths queued during assembly and promote short branches to long if needed.
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
// Emit branches and finalize all instructions.
void FinalizeInstructions(const MemoryRegion& region);
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
index 0f858926df..f9919f52b5 100644
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ b/compiler/utils/mips/assembler_mips32r5_test.cc
@@ -61,15 +61,15 @@ class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler,
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "mips";
}
- std::string GetAssemblerParameters() OVERRIDE {
+ std::string GetAssemblerParameters() override {
return " --no-warn -32 -march=mips32r5 -mmsa";
}
- void Pad(std::vector<uint8_t>& data) OVERRIDE {
+ void Pad(std::vector<uint8_t>& data) override {
// The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
// of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
// pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -78,15 +78,15 @@ class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler,
data.insert(data.end(), pad_size, 0);
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mmips:isa32r5";
}
- mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) override {
return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (registers_.size() == 0) {
registers_.push_back(new mips::Register(mips::ZERO));
registers_.push_back(new mips::Register(mips::AT));
@@ -222,7 +222,7 @@ class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
@@ -234,23 +234,23 @@ class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler,
UNREACHABLE();
}
- std::vector<mips::Register*> GetRegisters() OVERRIDE {
+ std::vector<mips::Register*> GetRegisters() override {
return registers_;
}
- std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<mips::FRegister*> GetFPRegisters() override {
return fp_registers_;
}
- std::vector<mips::VectorRegister*> GetVectorRegisters() OVERRIDE {
+ std::vector<mips::VectorRegister*> GetVectorRegisters() override {
return vec_registers_;
}
- uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ uint32_t CreateImmediate(int64_t imm_value) override {
return imm_value;
}
- std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+ std::string GetSecondaryRegisterName(const mips::Register& reg) override {
CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
return secondary_register_names_[reg];
}
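The Pad() override above appends zero bytes to match the GNU linker's NOP padding; that works because on MIPS a nop encodes as 0x00000000 (sll $zero, $zero, 0). A sketch of the logic (the pad_size computation is assumed; only the zero-fill appears in the hunk):

#include <cstddef>
#include <cstdint>
#include <vector>

void PadTo16(std::vector<uint8_t>& data) {
  // Distance from the current size up to the next 16-byte boundary.
  size_t pad_size = (16 - (data.size() % 16)) % 16;
  data.insert(data.end(), pad_size, 0);  // zero bytes == MIPS NOPs
}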
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 3d876ca613..1ec7a6a3e0 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -61,16 +61,16 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "mips";
}
- std::string GetAssemblerCmdName() OVERRIDE {
+ std::string GetAssemblerCmdName() override {
// We assemble and link for MIPS32R6. See GetAssemblerParameters() for details.
return "gcc";
}
- std::string GetAssemblerParameters() OVERRIDE {
+ std::string GetAssemblerParameters() override {
// We assemble and link for MIPS32R6. The reason is that object files produced for MIPS32R6
// (and MIPS64R6) with the GNU assembler don't have correct final offsets in PC-relative
// branches in the .text section and so they require a relocation pass (there's a relocation
@@ -82,7 +82,7 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
" -Wl,-Ttext=0x1000000 -Wl,-e0x1000000 -nostdlib";
}
- void Pad(std::vector<uint8_t>& data) OVERRIDE {
+ void Pad(std::vector<uint8_t>& data) override {
// The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
// of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
// pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -91,15 +91,15 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
data.insert(data.end(), pad_size, 0);
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mmips:isa32r6";
}
- mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) override {
return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (registers_.size() == 0) {
registers_.push_back(new mips::Register(mips::ZERO));
registers_.push_back(new mips::Register(mips::AT));
@@ -235,7 +235,7 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
@@ -247,23 +247,23 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
UNREACHABLE();
}
- std::vector<mips::Register*> GetRegisters() OVERRIDE {
+ std::vector<mips::Register*> GetRegisters() override {
return registers_;
}
- std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<mips::FRegister*> GetFPRegisters() override {
return fp_registers_;
}
- std::vector<mips::VectorRegister*> GetVectorRegisters() OVERRIDE {
+ std::vector<mips::VectorRegister*> GetVectorRegisters() override {
return vec_registers_;
}
- uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ uint32_t CreateImmediate(int64_t imm_value) override {
return imm_value;
}
- std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+ std::string GetSecondaryRegisterName(const mips::Register& reg) override {
CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
return secondary_register_names_[reg];
}
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index f94d074299..9527fa6ddd 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -55,19 +55,19 @@ class AssemblerMIPSTest : public AssemblerTest<mips::MipsAssembler,
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "mips";
}
- std::string GetAssemblerParameters() OVERRIDE {
+ std::string GetAssemblerParameters() override {
return " --no-warn -32 -march=mips32r2";
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mmips:isa32r2";
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (registers_.size() == 0) {
registers_.push_back(new mips::Register(mips::ZERO));
registers_.push_back(new mips::Register(mips::AT));
@@ -170,7 +170,7 @@ class AssemblerMIPSTest : public AssemblerTest<mips::MipsAssembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
@@ -181,19 +181,19 @@ class AssemblerMIPSTest : public AssemblerTest<mips::MipsAssembler,
UNREACHABLE();
}
- std::vector<mips::Register*> GetRegisters() OVERRIDE {
+ std::vector<mips::Register*> GetRegisters() override {
return registers_;
}
- std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<mips::FRegister*> GetFPRegisters() override {
return fp_registers_;
}
- uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ uint32_t CreateImmediate(int64_t imm_value) override {
return imm_value;
}
- std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+ std::string GetSecondaryRegisterName(const mips::Register& reg) override {
CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
return secondary_register_names_[reg];
}
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 19f23b7e95..ce447db4fb 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -414,7 +414,7 @@ class Mips64ExceptionSlowPath {
DISALLOW_COPY_AND_ASSIGN(Mips64ExceptionSlowPath);
};
-class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
+class Mips64Assembler final : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
public:
using JNIBase = JNIMacroAssembler<PointerSize::k64>;
@@ -439,7 +439,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
}
}
- size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
+ size_t CodeSize() const override { return Assembler::CodeSize(); }
DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
// Emit Machine Instructions.
@@ -920,10 +920,10 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
}
}
- void Bind(Label* label) OVERRIDE {
+ void Bind(Label* label) override {
Bind(down_cast<Mips64Label*>(label));
}
- void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(Label* label ATTRIBUTE_UNUSED) override {
UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS64";
}
@@ -934,25 +934,25 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
using JNIBase::Jump;
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override {
LOG(FATAL) << "Not implemented on MIPS64";
UNREACHABLE();
}
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS64";
UNREACHABLE();
}
// Emit a conditional jump to the label by applying a unary condition test to the register.
void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
- ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+ ManagedRegister test ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS64";
UNREACHABLE();
}
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Not implemented on MIPS64";
UNREACHABLE();
}
@@ -1322,119 +1322,119 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines.
- void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister msrc, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister msrc) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) override;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) override;
void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
- void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
// Load routines.
- void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
+ void Load(ManagedRegister mdest, FrameOffset src, size_t size) override;
- void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) override;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
+ bool unpoison_reference) override;
- void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) override;
- void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) override;
// Copying routines.
- void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
+ void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset64 thr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
void CopyRawPtrToThread(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ ManagedRegister mscratch) override;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) override;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) override;
void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister mscratch, size_t size) OVERRIDE;
+ ManagedRegister mscratch, size_t size) override;
void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister mscratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister mscratch, size_t size) OVERRIDE;
+ ManagedRegister mscratch, size_t size) override;
void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister mscratch, size_t size) OVERRIDE;
+ ManagedRegister mscratch, size_t size) override;
- void MemoryBarrier(ManagedRegister) OVERRIDE;
+ void MemoryBarrier(ManagedRegister) override;
// Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
+ void GetCurrentThread(ManagedRegister tr) override;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) override;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
// null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+ ManagedRegister in_reg, bool null_allowed) override;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
- mscratch, bool null_allowed) OVERRIDE;
+ mscratch, bool null_allowed) override;
// src holds a handle scope entry (Object**); load it into dst.
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) override;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
+ void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) override;
// Emit slow paths queued during assembly and promote short branches to long if needed.
- void FinalizeCode() OVERRIDE;
+ void FinalizeCode() override;
// Emit branches and finalize all instructions.
void FinalizeInstructions(const MemoryRegion& region);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index a53ff7cc2b..4ceb356910 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -63,16 +63,16 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "mips64";
}
- std::string GetAssemblerCmdName() OVERRIDE {
+ std::string GetAssemblerCmdName() override {
// We assemble and link for MIPS64R6. See GetAssemblerParameters() for details.
return "gcc";
}
- std::string GetAssemblerParameters() OVERRIDE {
+ std::string GetAssemblerParameters() override {
// We assemble and link for MIPS64R6. The reason is that object files produced for MIPS64R6
// (and MIPS32R6) with the GNU assembler don't have correct final offsets in PC-relative
// branches in the .text section and so they require a relocation pass (there's a relocation
@@ -80,7 +80,7 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
return " -march=mips64r6 -mmsa -Wa,--no-warn -Wl,-Ttext=0 -Wl,-e0 -nostdlib";
}
- void Pad(std::vector<uint8_t>& data) OVERRIDE {
+ void Pad(std::vector<uint8_t>& data) override {
// The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
// of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
// pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -89,15 +89,15 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
data.insert(data.end(), pad_size, 0);
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mmips:isa64r6";
}
- mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) override {
return new (allocator) mips64::Mips64Assembler(allocator, instruction_set_features_.get());
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (registers_.size() == 0) {
registers_.push_back(new mips64::GpuRegister(mips64::ZERO));
registers_.push_back(new mips64::GpuRegister(mips64::AT));
@@ -233,7 +233,7 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
@@ -245,23 +245,23 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
UNREACHABLE();
}
- std::vector<mips64::GpuRegister*> GetRegisters() OVERRIDE {
+ std::vector<mips64::GpuRegister*> GetRegisters() override {
return registers_;
}
- std::vector<mips64::FpuRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<mips64::FpuRegister*> GetFPRegisters() override {
return fp_registers_;
}
- std::vector<mips64::VectorRegister*> GetVectorRegisters() OVERRIDE {
+ std::vector<mips64::VectorRegister*> GetVectorRegisters() override {
return vec_registers_;
}
- uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ uint32_t CreateImmediate(int64_t imm_value) override {
return imm_value;
}
- std::string GetSecondaryRegisterName(const mips64::GpuRegister& reg) OVERRIDE {
+ std::string GetSecondaryRegisterName(const mips64::GpuRegister& reg) override {
CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
return secondary_register_names_[reg];
}
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index 1f9ad4242d..dee83d1c71 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -141,6 +141,7 @@ void* SwapSpace::Alloc(size_t size) {
it->size -= size;
} else {
// Changing in place would break the std::set<> ordering; we need to remove and insert.
+ // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
free_by_size_.erase(it);
free_by_size_.insert(new_value);
}
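The new TODO in SwapSpace::Alloc points at C++17 node extraction: std::set<>::extract() (and the std::map<> counterpart the comment names) detaches the node so its key can be edited and reinserted without deallocating and reallocating. A hedged sketch with stand-in types (Chunk, BySize, and ShrinkChunk are assumptions for illustration, not the real SwapSpace free-chunk types):

#include <cstddef>
#include <set>
#include <utility>

struct Chunk { size_t size; void* ptr; };
struct BySize {
  bool operator()(const Chunk& a, const Chunk& b) const { return a.size < b.size; }
};
using FreeBySize = std::set<Chunk, BySize>;

void ShrinkChunk(FreeBySize& free_by_size, FreeBySize::iterator it, size_t size) {
  auto node = free_by_size.extract(it);   // detach the node; no deallocation
  node.value().size -= size;              // safe to mutate while detached
  free_by_size.insert(std::move(node));   // re-links at the new position
}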
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index e42c4c986a..5ac9236d6b 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -306,7 +306,7 @@ class ConstantArea {
ArenaVector<int32_t> buffer_;
};
-class X86Assembler FINAL : public Assembler {
+class X86Assembler final : public Assembler {
public:
explicit X86Assembler(ArenaAllocator* allocator)
: Assembler(allocator), constant_area_(allocator) {}
@@ -758,8 +758,8 @@ class X86Assembler FINAL : public Assembler {
//
int PreferredLoopAlignment() { return 16; }
void Align(int alignment, int offset);
- void Bind(Label* label) OVERRIDE;
- void Jump(Label* label) OVERRIDE {
+ void Bind(Label* label) override;
+ void Jump(Label* label) override {
jmp(label);
}
void Bind(NearLabel* label);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index cd007b32d4..b03c40aa3e 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -51,19 +51,19 @@ class AssemblerX86Test : public AssemblerTest<x86::X86Assembler,
x86::Immediate> Base;
protected:
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "x86";
}
- std::string GetAssemblerParameters() OVERRIDE {
+ std::string GetAssemblerParameters() override {
return " --32";
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mi386 --no-show-raw-insn";
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (addresses_singleton_.size() == 0) {
// One addressing mode to test the repeat drivers.
addresses_singleton_.push_back(x86::Address(x86::EAX, x86::EBX, x86::TIMES_1, 2));
@@ -118,25 +118,25 @@ class AssemblerX86Test : public AssemblerTest<x86::X86Assembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
}
- std::vector<x86::Address> GetAddresses() OVERRIDE {
+ std::vector<x86::Address> GetAddresses() override {
return addresses_;
}
- std::vector<x86::Register*> GetRegisters() OVERRIDE {
+ std::vector<x86::Register*> GetRegisters() override {
return registers_;
}
- std::vector<x86::XmmRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<x86::XmmRegister*> GetFPRegisters() override {
return fp_registers_;
}
- x86::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+ x86::Immediate CreateImmediate(int64_t imm_value) override {
return x86::Immediate(imm_value);
}
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index dd99f03aa7..540d72b28d 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -25,10 +25,10 @@ namespace art {
namespace x86 {
// Slowpath entered when Thread::Current()->_exception is non-null
-class X86ExceptionSlowPath FINAL : public SlowPath {
+class X86ExceptionSlowPath final : public SlowPath {
public:
explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
+ void Emit(Assembler *sp_asm) override;
private:
const size_t stack_adjust_;
};
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 99219d8f88..a701080b4f 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -32,7 +32,7 @@ namespace x86 {
class X86JNIMacroLabel;
-class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
+class X86JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
public:
explicit X86JNIMacroAssembler(ArenaAllocator* allocator) : JNIMacroAssemblerFwd(allocator) {}
virtual ~X86JNIMacroAssembler() {}
@@ -45,130 +45,130 @@ class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, Poi
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister src) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
- void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
// Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
- void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) override;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
+ bool unpoison_reference) override;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
- void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) override;
// Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset32 thr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
+ override;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
+ ManagedRegister scratch, size_t size) override;
void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
+ ManagedRegister scratch, size_t size) override;
- void MemoryBarrier(ManagedRegister) OVERRIDE;
+ void MemoryBarrier(ManagedRegister) override;
// Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+ void GetCurrentThread(ManagedRegister tr) override;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
// Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
// null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+ ManagedRegister in_reg, bool null_allowed) override;
// Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
+ ManagedRegister scratch, bool null_allowed) override;
// src holds a handle scope entry (Object**) load this into dst
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override;
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label) OVERRIDE;
+ void Jump(JNIMacroLabel* label) override;
// Emit a conditional jump to the label by applying a unary condition test to the register.
- void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label) OVERRIDE;
+ void Bind(JNIMacroLabel* label) override;
private:
DISALLOW_COPY_AND_ASSIGN(X86JNIMacroAssembler);
};
-class X86JNIMacroLabel FINAL
+class X86JNIMacroLabel final
: public JNIMacroLabelCommon<X86JNIMacroLabel,
art::Label,
InstructionSet::kX86> {
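
Sealing leaf classes such as X86JNIMacroAssembler and X86JNIMacroLabel with `final` (formerly the FINAL macro) is not just spelling: when the static type of an object is a final class, the compiler is free to devirtualize calls on it, since no more-derived override can exist. An illustrative sketch:

struct LabelBase {
  virtual ~LabelBase() = default;
  virtual int Id() const { return 0; }
};

struct X86Label final : LabelBase {
  int Id() const override { return 86; }
};

int GetId(const X86Label& label) {
  return label.Id();  // may compile to a direct, non-virtual call
}
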
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index e4d72a7ba2..e696635e62 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -351,7 +351,7 @@ class NearLabel : private Label {
};
-class X86_64Assembler FINAL : public Assembler {
+class X86_64Assembler final : public Assembler {
public:
explicit X86_64Assembler(ArenaAllocator* allocator)
: Assembler(allocator), constant_area_(allocator) {}
@@ -844,8 +844,8 @@ class X86_64Assembler FINAL : public Assembler {
//
int PreferredLoopAlignment() { return 16; }
void Align(int alignment, int offset);
- void Bind(Label* label) OVERRIDE;
- void Jump(Label* label) OVERRIDE {
+ void Bind(Label* label) override;
+ void Jump(Label* label) override {
jmp(label);
}
void Bind(NearLabel* label);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 0589df55d2..e1de1f172f 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -145,15 +145,15 @@ class AssemblerX86_64Test : public AssemblerTest<x86_64::X86_64Assembler,
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "x86_64";
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
}
- void SetUpHelpers() OVERRIDE {
+ void SetUpHelpers() override {
if (addresses_singleton_.size() == 0) {
// One addressing mode to test the repeat drivers.
addresses_singleton_.push_back(
@@ -291,7 +291,7 @@ class AssemblerX86_64Test : public AssemblerTest<x86_64::X86_64Assembler,
}
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
@@ -301,29 +301,29 @@ class AssemblerX86_64Test : public AssemblerTest<x86_64::X86_64Assembler,
return addresses_;
}
- std::vector<x86_64::CpuRegister*> GetRegisters() OVERRIDE {
+ std::vector<x86_64::CpuRegister*> GetRegisters() override {
return registers_;
}
- std::vector<x86_64::XmmRegister*> GetFPRegisters() OVERRIDE {
+ std::vector<x86_64::XmmRegister*> GetFPRegisters() override {
return fp_registers_;
}
- x86_64::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+ x86_64::Immediate CreateImmediate(int64_t imm_value) override {
return x86_64::Immediate(imm_value);
}
- std::string GetSecondaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+ std::string GetSecondaryRegisterName(const x86_64::CpuRegister& reg) override {
CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
return secondary_register_names_[reg];
}
- std::string GetTertiaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+ std::string GetTertiaryRegisterName(const x86_64::CpuRegister& reg) override {
CHECK(tertiary_register_names_.find(reg) != tertiary_register_names_.end());
return tertiary_register_names_[reg];
}
- std::string GetQuaternaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+ std::string GetQuaternaryRegisterName(const x86_64::CpuRegister& reg) override {
CHECK(quaternary_register_names_.find(reg) != quaternary_register_names_.end());
return quaternary_register_names_[reg];
}
@@ -2002,11 +2002,11 @@ class JNIMacroAssemblerX86_64Test : public JNIMacroAssemblerTest<x86_64::X86_64J
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
- std::string GetArchitectureString() OVERRIDE {
+ std::string GetArchitectureString() override {
return "x86_64";
}
- std::string GetDisassembleParameters() OVERRIDE {
+ std::string GetDisassembleParameters() override {
return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
}
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index f6b2f9df34..5924a8bd08 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -574,10 +574,10 @@ void X86_64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegist
}
// Slowpath entered when Thread::Current()->_exception is non-null
-class X86_64ExceptionSlowPath FINAL : public SlowPath {
+class X86_64ExceptionSlowPath final : public SlowPath {
public:
explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
+ void Emit(Assembler *sp_asm) override;
private:
const size_t stack_adjust_;
};
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index d766ad4716..465ebbe6c3 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -31,7 +31,7 @@
namespace art {
namespace x86_64 {
-class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler,
+class X86_64JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86_64Assembler,
PointerSize::k64> {
public:
explicit X86_64JNIMacroAssembler(ArenaAllocator* allocator)
@@ -46,107 +46,107 @@ class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assemble
void BuildFrame(size_t frame_size,
ManagedRegister method_reg,
ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+ const ManagedRegisterEntrySpills& entry_spills) override;
// Emit code that will remove an activation from the stack
void RemoveFrame(size_t frame_size,
ArrayRef<const ManagedRegister> callee_save_regs,
- bool may_suspend) OVERRIDE;
+ bool may_suspend) override;
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
+ void IncreaseFrameSize(size_t adjust) override;
+ void DecreaseFrameSize(size_t adjust) override;
// Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+ void StoreRef(FrameOffset dest, ManagedRegister src) override;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
- void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
void StoreSpanning(FrameOffset dest,
ManagedRegister src,
FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
// Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
- void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) override;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) override;
void LoadRef(ManagedRegister dest,
ManagedRegister base,
MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
+ bool unpoison_reference) override;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
- void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
// Copying routines
void Move(ManagedRegister dest, ManagedRegister src, size_t size);
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset64 thr_offs,
- ManagedRegister scratch) OVERRIDE;
+ ManagedRegister scratch) override;
void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
+ override;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
void Copy(FrameOffset dest,
ManagedRegister src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest_base,
Offset dest_offset,
FrameOffset src,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
FrameOffset src_base,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(ManagedRegister dest,
Offset dest_offset,
ManagedRegister src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
void Copy(FrameOffset dest,
Offset dest_offset,
FrameOffset src,
Offset src_offset,
ManagedRegister scratch,
- size_t size) OVERRIDE;
+ size_t size) override;
- void MemoryBarrier(ManagedRegister) OVERRIDE;
+ void MemoryBarrier(ManagedRegister) override;
// Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void SignExtend(ManagedRegister mreg, size_t size) override;
// Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+ void ZeroExtend(ManagedRegister mreg, size_t size) override;
// Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+ void GetCurrentThread(ManagedRegister tr) override;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
// Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
@@ -155,46 +155,46 @@ class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assemble
void CreateHandleScopeEntry(ManagedRegister out_reg,
FrameOffset handlescope_offset,
ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off,
FrameOffset handlescope_offset,
ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ bool null_allowed) override;
// src holds a handle scope entry (Object**) load this into dst
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+ void VerifyObject(ManagedRegister src, bool could_be_null) override;
+ void VerifyObject(FrameOffset src, bool could_be_null) override;
// Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) override;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
// Create a new label that can be used with Jump/Bind calls.
- std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ std::unique_ptr<JNIMacroLabel> CreateLabel() override;
// Emit an unconditional jump to the label.
- void Jump(JNIMacroLabel* label) OVERRIDE;
+ void Jump(JNIMacroLabel* label) override;
// Emit a conditional jump to the label by applying a unary condition test to the register.
- void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
// Code at this offset will serve as the target for the Jump call.
- void Bind(JNIMacroLabel* label) OVERRIDE;
+ void Bind(JNIMacroLabel* label) override;
private:
DISALLOW_COPY_AND_ASSIGN(X86_64JNIMacroAssembler);
};
-class X86_64JNIMacroLabel FINAL
+class X86_64JNIMacroLabel final
: public JNIMacroLabelCommon<X86_64JNIMacroLabel,
art::Label,
InstructionSet::kX86_64> {
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index c223549710..136066d074 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -47,11 +47,11 @@ class VerifierDepsCompilerCallbacks : public CompilerCallbacks {
: CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp),
deps_(nullptr) {}
- void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {}
- void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
- bool IsRelocationPossible() OVERRIDE { return false; }
+ void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {}
+ void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
+ bool IsRelocationPossible() override { return false; }
- verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return deps_; }
+ verifier::VerifierDeps* GetVerifierDeps() const override { return deps_; }
void SetVerifierDeps(verifier::VerifierDeps* deps) { deps_ = deps; }
private:
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0b68620e6e..5655b3c91d 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -613,7 +613,7 @@ class WatchDog {
bool shutting_down_;
};
-class Dex2Oat FINAL {
+class Dex2Oat final {
public:
explicit Dex2Oat(TimingLogger* timings) :
compiler_kind_(Compiler::kOptimizing),
@@ -669,9 +669,7 @@ class Dex2Oat FINAL {
for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files_) {
dex_file.release();
}
- for (std::unique_ptr<MemMap>& map : opened_dex_files_maps_) {
- map.release();
- }
+ new std::vector<MemMap>(std::move(opened_dex_files_maps_)); // Leak MemMaps.
for (std::unique_ptr<File>& vdex_file : vdex_files_) {
vdex_file.release();
}
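
The destructor hunk above trades per-element release() calls, which no longer exist once the maps are plain values, for a deliberate leak: the vector is moved into a heap allocation that is never freed, so the MemMap destructors (and their munmap calls) never run. dex2oat is about to exit anyway, and unmapping large regions one by one would only add shutdown latency. A stand-alone sketch of the idiom, assuming only a move-only value type:

#include <utility>
#include <vector>

struct MemMap {                      // stand-in for art::MemMap
  MemMap() = default;
  MemMap(MemMap&&) = default;
  MemMap(const MemMap&) = delete;
  ~MemMap() { /* munmap() would run here */ }
};

void LeakMemMaps(std::vector<MemMap>&& maps) {
  new std::vector<MemMap>(std::move(maps));  // intentionally never deleted
}
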
@@ -1449,14 +1447,14 @@ class Dex2Oat FINAL {
LOG(INFO) << "No " << VdexFile::kVdexNameInDmFile << " file in DexMetadata archive. "
<< "Not doing fast verification.";
} else {
- std::unique_ptr<MemMap> input_file(zip_entry->MapDirectlyOrExtract(
+ MemMap input_file = zip_entry->MapDirectlyOrExtract(
VdexFile::kVdexNameInDmFile,
kDexMetadata,
- &error_msg));
- if (input_file == nullptr) {
+ &error_msg);
+ if (!input_file.IsValid()) {
LOG(WARNING) << "Could not open vdex file in DexMetadata archive: " << error_msg;
} else {
- input_vdex_file_ = std::make_unique<VdexFile>(input_file.release());
+ input_vdex_file_ = std::make_unique<VdexFile>(std::move(input_file));
}
}
}
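
These hunks follow a broader migration of MemMap from std::unique_ptr<MemMap> to a movable value type: null checks become IsValid() checks, `->` becomes `.`, and ownership transfer becomes std::move. Roughly the shape being assumed (illustrative, not the exact art::MemMap interface):

#include <cstddef>
#include <cstdint>

class MemMap {
 public:
  MemMap() = default;                        // default-constructed == invalid
  MemMap(MemMap&& other) noexcept : begin_(other.begin_), size_(other.size_) {
    other.begin_ = nullptr;                  // the moved-from map turns invalid
    other.size_ = 0;
  }
  bool IsValid() const { return begin_ != nullptr; }
  uint8_t* Begin() const { return begin_; }
  size_t Size() const { return size_; }

 private:
  uint8_t* begin_ = nullptr;
  size_t size_ = 0;
};
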
@@ -1631,7 +1629,7 @@ class Dex2Oat FINAL {
for (size_t i = 0, size = oat_writers_.size(); i != size; ++i) {
rodata_.push_back(elf_writers_[i]->StartRoData());
// Unzip or copy dex files straight to the oat file.
- std::vector<std::unique_ptr<MemMap>> opened_dex_files_map;
+ std::vector<MemMap> opened_dex_files_map;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
// No need to verify the dex file when we have a vdex file, which means it was already
// verified.
@@ -1651,7 +1649,7 @@ class Dex2Oat FINAL {
if (opened_dex_files_map.empty()) {
DCHECK(opened_dex_files.empty());
} else {
- for (std::unique_ptr<MemMap>& map : opened_dex_files_map) {
+ for (MemMap& map : opened_dex_files_map) {
opened_dex_files_maps_.push_back(std::move(map));
}
for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
@@ -1732,8 +1730,8 @@ class Dex2Oat FINAL {
}
// Ensure opened dex files are writable for dex-to-dex transformations.
- for (const std::unique_ptr<MemMap>& map : opened_dex_files_maps_) {
- if (!map->Protect(PROT_READ | PROT_WRITE)) {
+ for (MemMap& map : opened_dex_files_maps_) {
+ if (!map.Protect(PROT_READ | PROT_WRITE)) {
PLOG(ERROR) << "Failed to make .dex files writeable.";
return dex2oat::ReturnCode::kOther;
}
@@ -2002,9 +2000,9 @@ class Dex2Oat FINAL {
TimingLogger::ScopedTiming t("dex2oat Oat", timings_);
// Sync the data to the file, in case we did dex2dex transformations.
- for (const std::unique_ptr<MemMap>& map : opened_dex_files_maps_) {
- if (!map->Sync()) {
- PLOG(ERROR) << "Failed to Sync() dex2dex output. Map: " << map->GetName();
+ for (MemMap& map : opened_dex_files_maps_) {
+ if (!map.Sync()) {
+ PLOG(ERROR) << "Failed to Sync() dex2dex output. Map: " << map.GetName();
return false;
}
}
@@ -2737,16 +2735,13 @@ class Dex2Oat FINAL {
zip_filename, error_msg->c_str());
return nullptr;
}
- std::unique_ptr<MemMap> input_file(zip_entry->ExtractToMemMap(zip_filename,
- input_filename,
- error_msg));
- if (input_file.get() == nullptr) {
+ MemMap input_file = zip_entry->ExtractToMemMap(zip_filename, input_filename, error_msg);
+ if (!input_file.IsValid()) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", input_filename,
zip_filename, error_msg->c_str());
return nullptr;
}
- const std::string input_string(reinterpret_cast<char*>(input_file->Begin()),
- input_file->Size());
+ const std::string input_string(reinterpret_cast<char*>(input_file.Begin()), input_file.Size());
std::istringstream input_stream(input_string);
return ReadCommentedInputStream<T>(input_stream, process);
}
@@ -2873,7 +2868,7 @@ class Dex2Oat FINAL {
std::unique_ptr<linker::ImageWriter> image_writer_;
std::unique_ptr<CompilerDriver> driver_;
- std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps_;
+ std::vector<MemMap> opened_dex_files_maps_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
bool avoid_storing_invocation_;
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 4247e176aa..e047b4f925 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -52,7 +52,7 @@ std::ostream& operator<<(std::ostream& os, const ImageSizes& sizes) {
class Dex2oatImageTest : public CommonRuntimeTest {
public:
- virtual void TearDown() OVERRIDE {}
+ void TearDown() override {}
protected:
// Visitors take method and type references
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 2b96684fdd..fb19a277e9 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -55,7 +55,7 @@ using android::base::StringPrintf;
class Dex2oatTest : public Dex2oatEnvironmentTest {
public:
- virtual void TearDown() OVERRIDE {
+ void TearDown() override {
Dex2oatEnvironmentTest::TearDown();
output_ = "";
@@ -139,11 +139,11 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
odex_location.c_str(),
odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
@@ -159,11 +159,11 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
odex_location.c_str(),
odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() == nullptr);
}
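
Beyond the signature change (one pointer argument is gone and a reservation out-parameter is new), the rewritten OatFile::Open calls annotate every literal argument with a /* param_name */ comment, so the booleans can be told apart at the call site without consulting the declaration. A generic illustration of the convention, using a hypothetical function:

bool OpenFile(const char* path, void* requested_base, bool executable, bool low_4gb) {
  (void)path; (void)requested_base; (void)executable; (void)low_4gb;
  return true;
}

bool OpenForTest(const char* path) {
  return OpenFile(path,
                  /* requested_base */ nullptr,
                  /* executable */ false,
                  /* low_4gb */ false);
}
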
@@ -349,7 +349,7 @@ TEST_F(Dex2oatSwapTest, DoUseSwapSingleSmall) {
class Dex2oatSwapUseTest : public Dex2oatSwapTest {
protected:
- void CheckHostResult(bool expect_use) OVERRIDE {
+ void CheckHostResult(bool expect_use) override {
if (!kIsTargetBuild) {
if (expect_use) {
EXPECT_NE(output_.find("Large app, accepted running with swap."), std::string::npos)
@@ -361,7 +361,7 @@ class Dex2oatSwapUseTest : public Dex2oatSwapTest {
}
}
- std::string GetTestDexFileName() OVERRIDE {
+ std::string GetTestDexFileName() override {
// Use Statics as it has a handful of functions.
return CommonRuntimeTest::GetTestDexFileName("Statics");
}
@@ -474,7 +474,7 @@ TEST_F(Dex2oatSwapUseTest, CheckSwapUsage) {
class Dex2oatVeryLargeTest : public Dex2oatTest {
protected:
void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
- CompilerFilter::Filter result ATTRIBUTE_UNUSED) OVERRIDE {
+ CompilerFilter::Filter result ATTRIBUTE_UNUSED) override {
// Ignore, we'll do our own checks.
}
@@ -516,11 +516,11 @@ class Dex2oatVeryLargeTest : public Dex2oatTest {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
odex_location.c_str(),
odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
EXPECT_GT(app_image_file.length(), 0u);
@@ -627,7 +627,7 @@ TEST_F(Dex2oatVeryLargeTest, SpeedProfileNoProfile) {
class Dex2oatLayoutTest : public Dex2oatTest {
protected:
void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
- CompilerFilter::Filter result ATTRIBUTE_UNUSED) OVERRIDE {
+ CompilerFilter::Filter result ATTRIBUTE_UNUSED) override {
// Ignore, we'll do our own checks.
}
@@ -787,11 +787,11 @@ class Dex2oatLayoutTest : public Dex2oatTest {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
odex_location.c_str(),
odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
@@ -949,11 +949,11 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
odex_location.c_str(),
odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
ASSERT_GE(odex_file->GetOatDexFiles().size(), 1u);
@@ -1329,11 +1329,11 @@ TEST_F(Dex2oatTest, LayoutSections) {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
oat_filename.c_str(),
oat_filename.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex->GetLocation().c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1439,11 +1439,11 @@ TEST_F(Dex2oatTest, GenerateCompactDex) {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
oat_filename.c_str(),
oat_filename.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1684,11 +1684,11 @@ TEST_F(Dex2oatTest, CompactDexGenerationFailure) {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
oat_filename.c_str(),
oat_filename.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
temp_dex.GetFilename().c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1762,11 +1762,11 @@ TEST_F(Dex2oatTest, VerifyCompilationReason) {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
odex_location.c_str(),
odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ASSERT_STREQ("install", odex_file->GetCompilationReason());
@@ -1788,11 +1788,11 @@ TEST_F(Dex2oatTest, VerifyNoCompilationReason) {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
odex_location.c_str(),
odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ASSERT_EQ(nullptr, odex_file->GetCompilationReason());
@@ -1826,11 +1826,11 @@ TEST_F(Dex2oatTest, DontExtract) {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
odex_location.c_str(),
odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/ false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr) << dex_location;
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -2065,11 +2065,11 @@ TEST_F(Dex2oatTest, AppImageNoProfile) {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
odex_location.c_str(),
odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
odex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ImageHeader header = {};
diff --git a/dex2oat/linker/arm/relative_patcher_arm_base.h b/dex2oat/linker/arm/relative_patcher_arm_base.h
index f5a1395bdd..0eb4417771 100644
--- a/dex2oat/linker/arm/relative_patcher_arm_base.h
+++ b/dex2oat/linker/arm/relative_patcher_arm_base.h
@@ -31,10 +31,10 @@ class ArmBaseRelativePatcher : public RelativePatcher {
public:
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method,
- MethodReference method_ref) OVERRIDE;
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
- std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+ MethodReference method_ref) override;
+ uint32_t ReserveSpaceEnd(uint32_t offset) override;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
+ std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
protected:
ArmBaseRelativePatcher(RelativePatcherThunkProvider* thunk_provider,
diff --git a/dex2oat/linker/arm/relative_patcher_thumb2.h b/dex2oat/linker/arm/relative_patcher_thumb2.h
index 3a42928466..dbf64a13da 100644
--- a/dex2oat/linker/arm/relative_patcher_thumb2.h
+++ b/dex2oat/linker/arm/relative_patcher_thumb2.h
@@ -29,7 +29,7 @@ class ArmVIXLAssembler;
namespace linker {
-class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher {
+class Thumb2RelativePatcher final : public ArmBaseRelativePatcher {
public:
explicit Thumb2RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
RelativePatcherTargetProvider* target_provider);
@@ -37,18 +37,18 @@ class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher {
void PatchCall(std::vector<uint8_t>* code,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
+ uint32_t patch_offset) override;
protected:
- uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE;
- uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE;
+ uint32_t MaxPositiveDisplacement(const ThunkKey& key) override;
+ uint32_t MaxNegativeDisplacement(const ThunkKey& key) override;
private:
void SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64.h b/dex2oat/linker/arm64/relative_patcher_arm64.h
index f7f673c1ba..e95d0fee6f 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64.h
+++ b/dex2oat/linker/arm64/relative_patcher_arm64.h
@@ -28,7 +28,7 @@ class Arm64Assembler;
namespace linker {
-class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher {
+class Arm64RelativePatcher final : public ArmBaseRelativePatcher {
public:
Arm64RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
RelativePatcherTargetProvider* target_provider,
@@ -36,24 +36,24 @@ class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher {
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method,
- MethodReference method_ref) OVERRIDE;
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+ MethodReference method_ref) override;
+ uint32_t ReserveSpaceEnd(uint32_t offset) override;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
void PatchCall(std::vector<uint8_t>* code,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
+ uint32_t patch_offset) override;
protected:
- uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE;
- uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE;
+ uint32_t MaxPositiveDisplacement(const ThunkKey& key) override;
+ uint32_t MaxNegativeDisplacement(const ThunkKey& key) override;
private:
static uint32_t PatchAdrp(uint32_t adrp, uint32_t disp);
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index 4e7d636dbf..852293bdb8 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -94,35 +94,35 @@ class DebugInfoTask : public Task {
};
template <typename ElfTypes>
-class ElfWriterQuick FINAL : public ElfWriter {
+class ElfWriterQuick final : public ElfWriter {
public:
ElfWriterQuick(const CompilerOptions& compiler_options,
File* elf_file);
~ElfWriterQuick();
- void Start() OVERRIDE;
+ void Start() override;
void PrepareDynamicSection(size_t rodata_size,
size_t text_size,
size_t data_bimg_rel_ro_size,
size_t bss_size,
size_t bss_methods_offset,
size_t bss_roots_offset,
- size_t dex_section_size) OVERRIDE;
- void PrepareDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
- OutputStream* StartRoData() OVERRIDE;
- void EndRoData(OutputStream* rodata) OVERRIDE;
- OutputStream* StartText() OVERRIDE;
- void EndText(OutputStream* text) OVERRIDE;
- OutputStream* StartDataBimgRelRo() OVERRIDE;
- void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) OVERRIDE;
- void WriteDynamicSection() OVERRIDE;
- void WriteDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
- bool StripDebugInfo() OVERRIDE;
- bool End() OVERRIDE;
-
- virtual OutputStream* GetStream() OVERRIDE;
-
- size_t GetLoadedSize() OVERRIDE;
+ size_t dex_section_size) override;
+ void PrepareDebugInfo(const debug::DebugInfo& debug_info) override;
+ OutputStream* StartRoData() override;
+ void EndRoData(OutputStream* rodata) override;
+ OutputStream* StartText() override;
+ void EndText(OutputStream* text) override;
+ OutputStream* StartDataBimgRelRo() override;
+ void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) override;
+ void WriteDynamicSection() override;
+ void WriteDebugInfo(const debug::DebugInfo& debug_info) override;
+ bool StripDebugInfo() override;
+ bool End() override;
+
+ OutputStream* GetStream() override;
+
+ size_t GetLoadedSize() override;
static void EncodeOatPatches(const std::vector<uintptr_t>& locations,
std::vector<uint8_t>* buffer);
diff --git a/dex2oat/linker/elf_writer_test.cc b/dex2oat/linker/elf_writer_test.cc
index b2be003b5d..40495f33ee 100644
--- a/dex2oat/linker/elf_writer_test.cc
+++ b/dex2oat/linker/elf_writer_test.cc
@@ -14,9 +14,12 @@
* limitations under the License.
*/
+#include <sys/mman.h> // For the PROT_NONE constant.
+
#include "elf_file.h"
#include "base/file_utils.h"
+#include "base/mem_map.h"
#include "base/unix_file/fd_file.h"
#include "base/utils.h"
#include "common_compiler_test.h"
@@ -65,8 +68,8 @@ TEST_F(ElfWriterTest, dlsym) {
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- false,
- false,
+ /* writable */ false,
+ /* program_header_only */ false,
/*low_4gb*/false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
@@ -77,9 +80,9 @@ TEST_F(ElfWriterTest, dlsym) {
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- false,
- false,
- /*low_4gb*/false,
+ /* writable */ false,
+ /* program_header_only */ false,
+ /* low_4gb */ false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", true);
@@ -87,16 +90,28 @@ TEST_F(ElfWriterTest, dlsym) {
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatlastword, "oatlastword", true);
}
{
- uint8_t* base = reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS);
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- false,
- true,
- /*low_4gb*/false,
- &error_msg,
- base));
+ /* writable */ false,
+ /* program_header_only */ true,
+ /* low_4gb */ false,
+ &error_msg));
CHECK(ef.get() != nullptr) << error_msg;
- CHECK(ef->Load(file.get(), false, /*low_4gb*/false, &error_msg)) << error_msg;
+ size_t size;
+ bool success = ef->GetLoadedSize(&size, &error_msg);
+ CHECK(success) << error_msg;
+ MemMap reservation = MemMap::MapAnonymous("ElfWriterTest#dlsym reservation",
+ /* addr */ nullptr,
+ RoundUp(size, kPageSize),
+ PROT_NONE,
+ /* low_4gb */ true,
+ &error_msg);
+ CHECK(reservation.IsValid()) << error_msg;
+ uint8_t* base = reservation.Begin();
+ success =
+ ef->Load(file.get(), /* executable */ false, /* low_4gb */ false, &reservation, &error_msg);
+ CHECK(success) << error_msg;
+ CHECK(!reservation.IsValid());
EXPECT_EQ(reinterpret_cast<uintptr_t>(dl_oatdata) + reinterpret_cast<uintptr_t>(base),
reinterpret_cast<uintptr_t>(ef->FindDynamicSymbolAddress("oatdata")));
EXPECT_EQ(reinterpret_cast<uintptr_t>(dl_oatexec) + reinterpret_cast<uintptr_t>(base),
@@ -116,9 +131,9 @@ TEST_F(ElfWriterTest, CheckBuildIdPresent) {
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
- false,
- false,
- /*low_4gb*/false,
+ /* writable */ false,
+ /* program_header_only */ false,
+ /* low_4gb */ false,
&error_msg));
CHECK(ef.get() != nullptr) << error_msg;
EXPECT_TRUE(ef->HasSection(".note.gnu.build-id"));
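
In the dlsym test earlier in this file's diff, loading at the fixed ART_BASE_ADDRESS gives way to a reserve-then-load pattern: query GetLoadedSize, reserve a PROT_NONE region of that size, pass the reservation into Load, and verify it was consumed (its IsValid() turns false once the loader takes ownership). A minimal POSIX sketch of just the reservation step, assuming plain mmap:

#include <sys/mman.h>
#include <cstddef>

// Claim an address range without making it accessible; a loader can later
// map file contents over it with MAP_FIXED and assume ownership.
void* ReserveAddressSpace(size_t size) {
  void* addr = mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return addr == MAP_FAILED ? nullptr : addr;
}
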
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index fa8c7784f5..d575420f9b 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -83,7 +83,7 @@ class ImageTest : public CommonCompilerTest {
const std::string& extra_dex = "",
const std::initializer_list<std::string>& image_classes = {});
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonCompilerTest::SetUpRuntimeOptions(options);
QuickCompilerCallbacks* new_callbacks =
new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileBootImage);
@@ -92,7 +92,7 @@ class ImageTest : public CommonCompilerTest {
options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
}
- std::unique_ptr<HashSet<std::string>> GetImageClasses() OVERRIDE {
+ std::unique_ptr<HashSet<std::string>> GetImageClasses() override {
return std::make_unique<HashSet<std::string>>(image_classes_);
}
@@ -252,7 +252,7 @@ inline void ImageTest::DoCompile(ImageHeader::StorageMode storage_mode,
}
std::vector<OutputStream*> rodata;
- std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps;
+ std::vector<MemMap> opened_dex_files_maps;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
// Now that we have finalized key_value_store_, start writing the oat file.
for (size_t i = 0, size = oat_writers.size(); i != size; ++i) {
@@ -265,7 +265,7 @@ inline void ImageTest::DoCompile(ImageHeader::StorageMode storage_mode,
dex_file->GetLocation().c_str(),
dex_file->GetLocationChecksum());
- std::vector<std::unique_ptr<MemMap>> cur_opened_dex_files_maps;
+ std::vector<MemMap> cur_opened_dex_files_maps;
std::vector<std::unique_ptr<const DexFile>> cur_opened_dex_files;
bool dex_files_ok = oat_writers[i]->WriteAndOpenDexFiles(
out_helper.vdex_files[i].GetFile(),
@@ -279,7 +279,7 @@ inline void ImageTest::DoCompile(ImageHeader::StorageMode storage_mode,
ASSERT_TRUE(dex_files_ok);
if (!cur_opened_dex_files_maps.empty()) {
- for (std::unique_ptr<MemMap>& cur_map : cur_opened_dex_files_maps) {
+ for (MemMap& cur_map : cur_opened_dex_files_maps) {
opened_dex_files_maps.push_back(std::move(cur_map));
}
for (std::unique_ptr<const DexFile>& cur_dex_file : cur_opened_dex_files) {
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index e10f9b3feb..6a134548a9 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -303,8 +303,8 @@ bool ImageWriter::Write(int image_fd,
}
// Image data size excludes the bitmap and the header.
- ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
- ArrayRef<const uint8_t> raw_image_data(image_info.image_->Begin() + sizeof(ImageHeader),
+ ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info.image_.Begin());
+ ArrayRef<const uint8_t> raw_image_data(image_info.image_.Begin() + sizeof(ImageHeader),
image_header->GetImageSize() - sizeof(ImageHeader));
CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
@@ -362,7 +362,7 @@ bool ImageWriter::Write(int image_fd,
// We do not want to have a corrupted image with a valid header.
// The header is uncompressed since it contains whether the image is compressed or not.
image_header->data_size_ = image_data.size();
- if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_->Begin()),
+ if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_.Begin()),
sizeof(ImageHeader),
0)) {
PLOG(ERROR) << "Failed to write image file header " << image_filename;
@@ -730,14 +730,13 @@ bool ImageWriter::AllocMemory() {
image_info.CreateImageSections(unused_sections), kPageSize);
std::string error_msg;
- image_info.image_.reset(MemMap::MapAnonymous("image writer image",
- nullptr,
- length,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- if (UNLIKELY(image_info.image_.get() == nullptr)) {
+ image_info.image_ = MemMap::MapAnonymous("image writer image",
+ /* addr */ nullptr,
+ length,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ if (UNLIKELY(!image_info.image_.IsValid())) {
LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
return false;
}
@@ -745,7 +744,7 @@ bool ImageWriter::AllocMemory() {
// Create the image bitmap, only needs to cover mirror object section which is up to image_end_.
CHECK_LE(image_info.image_end_, length);
image_info.image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create(
- "image bitmap", image_info.image_->Begin(), RoundUp(image_info.image_end_, kPageSize)));
+ "image bitmap", image_info.image_.Begin(), RoundUp(image_info.image_end_, kPageSize)));
if (image_info.image_bitmap_.get() == nullptr) {
LOG(ERROR) << "Failed to allocate memory for image bitmap";
return false;
@@ -756,7 +755,7 @@ bool ImageWriter::AllocMemory() {
class ImageWriter::ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
public:
- bool operator()(ObjPtr<Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(Thread::Current());
mirror::Class::ComputeName(hs.NewHandle(c));
return true;
@@ -988,7 +987,7 @@ class ImageWriter::PruneClassesVisitor : public ClassVisitor {
classes_to_prune_(),
defined_class_count_(0u) { }
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (!image_writer_->KeepClass(klass.Ptr())) {
classes_to_prune_.insert(klass.Ptr());
if (klass->GetClassLoader() == class_loader_) {
@@ -1023,7 +1022,7 @@ class ImageWriter::PruneClassLoaderClassesVisitor : public ClassLoaderVisitor {
explicit PruneClassLoaderClassesVisitor(ImageWriter* image_writer)
: image_writer_(image_writer), removed_class_count_(0) {}
- virtual void Visit(ObjPtr<mirror::ClassLoader> class_loader) OVERRIDE
+ void Visit(ObjPtr<mirror::ClassLoader> class_loader) override
REQUIRES_SHARED(Locks::mutator_lock_) {
PruneClassesVisitor classes_visitor(image_writer_, class_loader);
ClassTable* class_table =
@@ -1678,7 +1677,7 @@ class ImageWriter::GetRootsVisitor : public RootVisitor {
void VisitRoots(mirror::Object*** roots,
size_t count,
- const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+ const RootInfo& info ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots_->push_back(*roots[i]);
@@ -1687,7 +1686,7 @@ class ImageWriter::GetRootsVisitor : public RootVisitor {
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
- const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+ const RootInfo& info ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots_->push_back(roots[i]->AsMirrorPtr());
@@ -2025,7 +2024,7 @@ void ImageWriter::CreateHeader(size_t oat_index) {
// Create the header, leave 0 for data size since we will fill this in as we are writing the
// image.
- ImageHeader* header = new (image_info.image_->Begin()) ImageHeader(
+ ImageHeader* header = new (image_info.image_.Begin()) ImageHeader(
PointerToLowMemUInt32(image_info.image_begin_),
image_end,
sections,
@@ -2105,14 +2104,14 @@ class ImageWriter::FixupRootVisitor : public RootVisitor {
void VisitRoots(mirror::Object*** roots ATTRIBUTE_UNUSED,
size_t count ATTRIBUTE_UNUSED,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(FATAL) << "Unsupported";
}
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
// Copy the reference. Since we do not have the address for recording the relocation,
// it needs to be recorded explicitly by the user of FixupRootVisitor.
@@ -2163,8 +2162,8 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
if (relocation.oat_index != oat_index) {
continue;
}
- auto* dest = image_info.image_->Begin() + relocation.offset;
- DCHECK_GE(dest, image_info.image_->Begin() + image_info.image_end_);
+ auto* dest = image_info.image_.Begin() + relocation.offset;
+ DCHECK_GE(dest, image_info.image_.Begin() + image_info.image_end_);
DCHECK(!IsInBootImage(pair.first));
switch (relocation.type) {
case NativeObjectRelocationType::kArtField: {
@@ -2219,7 +2218,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
}
}
// Fixup the image method roots.
- auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
+ auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_.Begin());
for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
ArtMethod* method = image_methods_[i];
CHECK(method != nullptr);
@@ -2235,7 +2234,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
const ImageSection& intern_table_section = image_header->GetInternedStringsSection();
InternTable* const intern_table = image_info.intern_table_.get();
uint8_t* const intern_table_memory_ptr =
- image_info.image_->Begin() + intern_table_section.Offset();
+ image_info.image_.Begin() + intern_table_section.Offset();
const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_);
// Fixup the pointers in the newly written intern table to contain image addresses.
@@ -2260,7 +2259,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
if (image_info.class_table_bytes_ > 0u) {
const ImageSection& class_table_section = image_header->GetClassTableSection();
uint8_t* const class_table_memory_ptr =
- image_info.image_->Begin() + class_table_section.Offset();
+ image_info.image_.Begin() + class_table_section.Offset();
Thread* self = Thread::Current();
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -2342,14 +2341,14 @@ void ImageWriter::CopyAndFixupObject(Object* obj) {
size_t offset = GetImageOffset(obj);
size_t oat_index = GetOatIndex(obj);
ImageInfo& image_info = GetImageInfo(oat_index);
- auto* dst = reinterpret_cast<Object*>(image_info.image_->Begin() + offset);
+ auto* dst = reinterpret_cast<Object*>(image_info.image_.Begin() + offset);
DCHECK_LT(offset, image_info.image_end_);
const auto* src = reinterpret_cast<const uint8_t*>(obj);
image_info.image_bitmap_->Set(dst); // Mark the obj as live.
const size_t n = obj->SizeOf();
- DCHECK_LE(offset + n, image_info.image_->Size());
+ DCHECK_LE(offset + n, image_info.image_.Size());
memcpy(dst, src, n);
// Write in a hash code of objects which have inflated monitors or a hash code in their monitor
@@ -2402,7 +2401,7 @@ class ImageWriter::FixupVisitor {
size_t oat_index_;
};
-class ImageWriter::FixupClassVisitor FINAL : public FixupVisitor {
+class ImageWriter::FixupClassVisitor final : public FixupVisitor {
public:
FixupClassVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
: FixupVisitor(image_writer, copy, oat_index) {}
@@ -2456,7 +2455,7 @@ template <typename T>
T* ImageWriter::NativeCopyLocation(T* obj) {
const NativeObjectRelocation relocation = GetNativeRelocation(obj);
const ImageInfo& image_info = GetImageInfo(relocation.oat_index);
- return reinterpret_cast<T*>(image_info.image_->Begin() + relocation.offset);
+ return reinterpret_cast<T*>(image_info.image_.Begin() + relocation.offset);
}
class ImageWriter::NativeLocationVisitor {
@@ -3011,12 +3010,12 @@ void ImageWriter::RecordImageRelocation(const void* dest,
}
// Calculate the offset within the image.
ImageInfo* image_info = &image_infos_[oat_index];
- DCHECK(image_info->image_->HasAddress(dest))
- << "MemMap range " << static_cast<const void*>(image_info->image_->Begin())
- << "-" << static_cast<const void*>(image_info->image_->End())
+ DCHECK(image_info->image_.HasAddress(dest))
+ << "MemMap range " << static_cast<const void*>(image_info->image_.Begin())
+ << "-" << static_cast<const void*>(image_info->image_.End())
<< " does not contain " << dest;
- size_t offset = reinterpret_cast<const uint8_t*>(dest) - image_info->image_->Begin();
- ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info->image_->Begin());
+ size_t offset = reinterpret_cast<const uint8_t*>(dest) - image_info->image_.Begin();
+ ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info->image_.Begin());
size_t image_end = image_header->GetClassTableSection().End();
DCHECK_LT(offset, image_end);
// Calculate the location index.
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 9ab9c3eb6f..e45023e6dc 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -73,7 +73,7 @@ static constexpr int kInvalidFd = -1;
namespace linker {
// Write a Space built during compilation for use during execution.
-class ImageWriter FINAL {
+class ImageWriter final {
public:
ImageWriter(const CompilerOptions& compiler_options,
uintptr_t image_begin,
@@ -307,7 +307,7 @@ class ImageWriter FINAL {
// Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
size_t GetBinSizeSum(Bin up_to) const;
- std::unique_ptr<MemMap> image_; // Memory mapped for generating the image.
+ MemMap image_; // Memory mapped for generating the image.
// Target begin of this image. Notes: It is not valid to write here, this is the address
// of the target image, not necessarily where image_ is mapped. The address is only valid
@@ -408,7 +408,7 @@ class ImageWriter FINAL {
size_t offset = GetImageOffset(object);
size_t oat_index = GetOatIndex(object);
const ImageInfo& image_info = GetImageInfo(oat_index);
- uint8_t* dst = image_info.image_->Begin() + offset;
+ uint8_t* dst = image_info.image_.Begin() + offset;
return reinterpret_cast<mirror::Object*>(dst);
}
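The image_writer changes above all follow one pattern: ImageWriter::image_ becomes a MemMap held by value instead of a std::unique_ptr<MemMap>, so `->` accessors become `.` and the old null check becomes MemMap::IsValid(). A minimal sketch of the pattern, assuming only the MemMap API used above (IsValid(), Begin()); names are illustrative, not code from the patch:

    // Sketch only: the general shape of the unique_ptr<MemMap> -> MemMap move.
    struct ImageInfoSketch {
      MemMap image_;  // Owned by value: moved into place, never new'd/deleted.
    };

    uint8_t* ImageBase(ImageInfoSketch& info) {
      if (!info.image_.IsValid()) {  // Replaces the old 'image_ == nullptr' check.
        return nullptr;
      }
      return info.image_.Begin();    // 'image_->Begin()' becomes 'image_.Begin()'.
    }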
diff --git a/dex2oat/linker/mips/relative_patcher_mips.h b/dex2oat/linker/mips/relative_patcher_mips.h
index d3a4c5a14f..4c385a3fec 100644
--- a/dex2oat/linker/mips/relative_patcher_mips.h
+++ b/dex2oat/linker/mips/relative_patcher_mips.h
@@ -23,28 +23,28 @@
namespace art {
namespace linker {
-class MipsRelativePatcher FINAL : public RelativePatcher {
+class MipsRelativePatcher final : public RelativePatcher {
public:
explicit MipsRelativePatcher(const MipsInstructionSetFeatures* features)
: is_r6(features->IsR6()) {}
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method,
- MethodReference method_ref) OVERRIDE;
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+ MethodReference method_ref) override;
+ uint32_t ReserveSpaceEnd(uint32_t offset) override;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
void PatchCall(std::vector<uint8_t>* code,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
- std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+ uint32_t patch_offset) override;
+ std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
private:
bool is_r6;
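This header, like most of those touched below, is a mechanical migration from ART's pre-C++11 FINAL and OVERRIDE macros to the `final` and `override` keywords. A minimal sketch of the before/after shape, with illustrative class names:

    class RelocPatcherBase {
     public:
      virtual ~RelocPatcherBase() {}
      virtual uint32_t ReserveSpaceEnd(uint32_t offset) = 0;
    };

    // Before: class Patcher FINAL : public RelocPatcherBase {
    //           uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
    //         };
    class Patcher final : public RelocPatcherBase {
     public:
      // 'override' makes the compiler check the signature against a base
      // virtual; 'final' forbids further derivation.
      uint32_t ReserveSpaceEnd(uint32_t offset) override { return offset; }
    };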
diff --git a/dex2oat/linker/mips64/relative_patcher_mips64.h b/dex2oat/linker/mips64/relative_patcher_mips64.h
index 9f5a125408..7b7c2ccd9f 100644
--- a/dex2oat/linker/mips64/relative_patcher_mips64.h
+++ b/dex2oat/linker/mips64/relative_patcher_mips64.h
@@ -22,27 +22,27 @@
namespace art {
namespace linker {
-class Mips64RelativePatcher FINAL : public RelativePatcher {
+class Mips64RelativePatcher final : public RelativePatcher {
public:
Mips64RelativePatcher() {}
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method,
- MethodReference method_ref) OVERRIDE;
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+ MethodReference method_ref) override;
+ uint32_t ReserveSpaceEnd(uint32_t offset) override;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
void PatchCall(std::vector<uint8_t>* code,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
- std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+ uint32_t patch_offset) override;
+ std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
private:
DISALLOW_COPY_AND_ASSIGN(Mips64RelativePatcher);
diff --git a/dex2oat/linker/multi_oat_relative_patcher.h b/dex2oat/linker/multi_oat_relative_patcher.h
index 60fcfe8b58..9b47a0d5b0 100644
--- a/dex2oat/linker/multi_oat_relative_patcher.h
+++ b/dex2oat/linker/multi_oat_relative_patcher.h
@@ -35,7 +35,7 @@ namespace linker {
// any number of oat files. It provides storage for method code offsets
// and wraps RelativePatcher calls, adjusting relative offsets according
// to the value set by SetAdjustment().
-class MultiOatRelativePatcher FINAL {
+class MultiOatRelativePatcher final {
public:
using const_iterator = SafeMap<MethodReference, uint32_t>::const_iterator;
@@ -139,7 +139,7 @@ class MultiOatRelativePatcher FINAL {
void GetThunkCode(const LinkerPatch& patch,
/*out*/ ArrayRef<const uint8_t>* code,
- /*out*/ std::string* debug_name) OVERRIDE;
+ /*out*/ std::string* debug_name) override;
private:
CompiledMethodStorage* storage_;
@@ -149,7 +149,7 @@ class MultiOatRelativePatcher FINAL {
// Wrap the map in a class implementing RelativePatcherTargetProvider.
class MethodOffsetMap : public RelativePatcherTargetProvider {
public:
- std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) OVERRIDE;
+ std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) override;
SafeMap<MethodReference, uint32_t> map;
};
diff --git a/dex2oat/linker/multi_oat_relative_patcher_test.cc b/dex2oat/linker/multi_oat_relative_patcher_test.cc
index 05fe36a590..a5831b64b3 100644
--- a/dex2oat/linker/multi_oat_relative_patcher_test.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher_test.cc
@@ -35,7 +35,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
- MethodReference method_ref) OVERRIDE {
+ MethodReference method_ref) override {
last_reserve_offset_ = offset;
last_reserve_method_ = method_ref;
offset += next_reserve_adjustment_;
@@ -43,7 +43,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
return offset;
}
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE {
+ uint32_t ReserveSpaceEnd(uint32_t offset) override {
last_reserve_offset_ = offset;
last_reserve_method_ = kNullMethodRef;
offset += next_reserve_adjustment_;
@@ -51,7 +51,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
return offset;
}
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override {
last_write_offset_ = offset;
if (next_write_alignment_ != 0u) {
offset += next_write_alignment_;
@@ -79,7 +79,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
void PatchCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE {
+ uint32_t target_offset) override {
last_literal_offset_ = literal_offset;
last_patch_offset_ = patch_offset;
last_target_offset_ = target_offset;
@@ -88,7 +88,7 @@ class MultiOatRelativePatcherTest : public testing::Test {
void PatchPcRelativeReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE {
+ uint32_t target_offset) override {
last_literal_offset_ = patch.LiteralOffset();
last_patch_offset_ = patch_offset;
last_target_offset_ = target_offset;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 8bac7206c6..e8f57f5025 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -103,16 +103,16 @@ class ChecksumUpdatingOutputStream : public OutputStream {
ChecksumUpdatingOutputStream(OutputStream* out, OatHeader* oat_header)
: OutputStream(out->GetLocation()), out_(out), oat_header_(oat_header) { }
- bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+ bool WriteFully(const void* buffer, size_t byte_count) override {
oat_header_->UpdateChecksum(buffer, byte_count);
return out_->WriteFully(buffer, byte_count);
}
- off_t Seek(off_t offset, Whence whence) OVERRIDE {
+ off_t Seek(off_t offset, Whence whence) override {
return out_->Seek(offset, whence);
}
- bool Flush() OVERRIDE {
+ bool Flush() override {
return out_->Flush();
}
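ChecksumUpdatingOutputStream above is a small decorator: every buffer written through it is folded into the OatHeader checksum before being forwarded to the wrapped stream. A hypothetical call site, assuming only the constructor and WriteFully shown above:

    // Not from the patch; 'out' and 'header' are assumed valid.
    bool WriteWithChecksum(OutputStream* out, OatHeader* header,
                           const void* buffer, size_t size) {
      ChecksumUpdatingOutputStream cs_out(out, header);
      return cs_out.WriteFully(buffer, size);  // UpdateChecksum(), then forward.
    }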
@@ -654,7 +654,7 @@ bool OatWriter::WriteAndOpenDexFiles(
bool verify,
bool update_input_vdex,
CopyOption copy_dex_files,
- /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+ /*out*/ std::vector<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
CHECK(write_state_ == WriteState::kAddingDexFileSources);
@@ -663,7 +663,7 @@ bool OatWriter::WriteAndOpenDexFiles(
return false;
}
- std::vector<std::unique_ptr<MemMap>> dex_files_map;
+ std::vector<MemMap> dex_files_map;
std::vector<std::unique_ptr<const DexFile>> dex_files;
// Initialize VDEX and OAT headers.
@@ -826,7 +826,7 @@ class OatWriter::OatDexMethodVisitor : public DexMethodVisitor {
oat_class_index_(0u),
method_offsets_index_(0u) {}
- bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE {
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) override {
DexMethodVisitor::StartClass(dex_file, class_def_index);
if (kIsDebugBuild && writer_->MayHaveCompiledMethods()) {
// There are no oat classes if there aren't any compiled methods.
@@ -836,7 +836,7 @@ class OatWriter::OatDexMethodVisitor : public DexMethodVisitor {
return true;
}
- bool EndClass() OVERRIDE {
+ bool EndClass() override {
++oat_class_index_;
return DexMethodVisitor::EndClass();
}
@@ -862,7 +862,7 @@ class OatWriter::InitBssLayoutMethodVisitor : public DexMethodVisitor {
: DexMethodVisitor(writer, /* offset */ 0u) {}
bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
- const ClassAccessor::Method& method) OVERRIDE {
+ const ClassAccessor::Method& method) override {
// Look for patches with .bss references and prepare maps with placeholders for their offsets.
CompiledMethod* compiled_method = writer_->compiler_driver_->GetCompiledMethod(
MethodReference(dex_file_, method.GetIndex()));
@@ -936,7 +936,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
DCHECK(num_classes == 0u || IsAligned<4u>(offset));
}
- bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE {
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) override {
DexMethodVisitor::StartClass(dex_file, class_def_index);
compiled_methods_.clear();
compiled_methods_with_code_ = 0u;
@@ -944,7 +944,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
}
bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
- const ClassAccessor::Method& method) OVERRIDE {
+ const ClassAccessor::Method& method) override {
// Fill in the compiled_methods_ array for methods that have a
// CompiledMethod. We track the number of non-null entries in
// compiled_methods_with_code_ since we only want to allocate
@@ -959,7 +959,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
return true;
}
- bool EndClass() OVERRIDE {
+ bool EndClass() override {
ClassReference class_ref(dex_file_, class_def_index_);
ClassStatus status;
bool found = writer_->compiler_driver_->GetCompiledClass(class_ref, &status);
@@ -1145,14 +1145,14 @@ class OatWriter::LayoutCodeMethodVisitor : public OatDexMethodVisitor {
: OatDexMethodVisitor(writer, offset) {
}
- bool EndClass() OVERRIDE {
+ bool EndClass() override {
OatDexMethodVisitor::EndClass();
return true;
}
bool VisitMethod(size_t class_def_method_index,
const ClassAccessor::Method& method)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -1248,7 +1248,7 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
std::move(ordered_methods)) {
}
- virtual bool VisitComplete() OVERRIDE {
+ bool VisitComplete() override {
offset_ = writer_->relative_patcher_->ReserveSpaceEnd(offset_);
if (generate_debug_info_) {
std::vector<debug::MethodDebugInfo> thunk_infos =
@@ -1260,8 +1260,7 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
return true;
}
- virtual bool VisitMethod(const OrderedMethodData& method_data)
- OVERRIDE
+ bool VisitMethod(const OrderedMethodData& method_data) override
REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = method_data.oat_class;
CompiledMethod* compiled_method = method_data.compiled_method;
@@ -1445,7 +1444,7 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
bool VisitMethod(size_t class_def_method_index,
const ClassAccessor::Method& method ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1495,7 +1494,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
// in the same oat file. If the origin and the copied methods are
// in different oat files don't touch the copied method.
// References to other oat files are not supported yet.
- bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) override
REQUIRES_SHARED(Locks::mutator_lock_) {
OatDexMethodVisitor::StartClass(dex_file, class_def_index);
// Skip classes that are not in the image.
@@ -1533,7 +1532,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
return true;
}
- bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) OVERRIDE
+ bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) override
REQUIRES_SHARED(Locks::mutator_lock_) {
// Skip methods that are not in the image.
if (!IsImageClass()) {
@@ -1652,7 +1651,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
}
}
- virtual bool VisitStart() OVERRIDE {
+ bool VisitStart() override {
return true;
}
@@ -1681,7 +1680,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
return true;
}
- virtual bool VisitMethod(const OrderedMethodData& method_data) OVERRIDE
+ bool VisitMethod(const OrderedMethodData& method_data) override
REQUIRES_SHARED(Locks::mutator_lock_) {
const MethodReference& method_ref = method_data.method_reference;
UpdateDexFileAndDexCache(method_ref.dex_file);
@@ -3424,12 +3423,12 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
const ArtDexFileLoader dex_file_loader;
if (oat_dex_file->source_.IsZipEntry()) {
ZipEntry* zip_entry = oat_dex_file->source_.GetZipEntry();
- std::unique_ptr<MemMap> mem_map;
+ MemMap mem_map;
{
TimingLogger::ScopedTiming extract("Unzip", timings_);
- mem_map.reset(zip_entry->ExtractToMemMap(location.c_str(), "classes.dex", &error_msg));
+ mem_map = zip_entry->ExtractToMemMap(location.c_str(), "classes.dex", &error_msg);
}
- if (mem_map == nullptr) {
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to extract dex file to mem map for layout: " << error_msg;
return false;
}
@@ -3684,7 +3683,7 @@ bool OatWriter::WriteDexFile(OutputStream* out,
bool OatWriter::OpenDexFiles(
File* file,
bool verify,
- /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+ /*out*/ std::vector<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
TimingLogger::ScopedTiming split("OpenDexFiles", timings_);
@@ -3695,16 +3694,16 @@ bool OatWriter::OpenDexFiles(
if (!extract_dex_files_into_vdex_) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
- std::vector<std::unique_ptr<MemMap>> maps;
+ std::vector<MemMap> maps;
for (OatDexFile& oat_dex_file : oat_dex_files_) {
std::string error_msg;
- MemMap* map = oat_dex_file.source_.GetZipEntry()->MapDirectlyOrExtract(
- oat_dex_file.dex_file_location_data_, "zipped dex", &error_msg);
- if (map == nullptr) {
+ maps.emplace_back(oat_dex_file.source_.GetZipEntry()->MapDirectlyOrExtract(
+ oat_dex_file.dex_file_location_data_, "zipped dex", &error_msg));
+ MemMap* map = &maps.back();
+ if (!map->IsValid()) {
LOG(ERROR) << error_msg;
return false;
}
- maps.emplace_back(map);
// Now, open the dex file.
const ArtDexFileLoader dex_file_loader;
dex_files.emplace_back(dex_file_loader.Open(map->Begin(),
@@ -3735,7 +3734,7 @@ bool OatWriter::OpenDexFiles(
size_t length = vdex_size_ - map_offset;
std::string error_msg;
- std::unique_ptr<MemMap> dex_files_map(MemMap::MapFile(
+ MemMap dex_files_map = MemMap::MapFile(
length,
PROT_READ | PROT_WRITE,
MAP_SHARED,
@@ -3743,8 +3742,8 @@ bool OatWriter::OpenDexFiles(
map_offset,
/* low_4gb */ false,
file->GetPath().c_str(),
- &error_msg));
- if (dex_files_map == nullptr) {
+ &error_msg);
+ if (!dex_files_map.IsValid()) {
LOG(ERROR) << "Failed to mmap() dex files from oat file. File: " << file->GetPath()
<< " error: " << error_msg;
return false;
@@ -3753,7 +3752,7 @@ bool OatWriter::OpenDexFiles(
std::vector<std::unique_ptr<const DexFile>> dex_files;
for (OatDexFile& oat_dex_file : oat_dex_files_) {
const uint8_t* raw_dex_file =
- dex_files_map->Begin() + oat_dex_file.dex_file_offset_ - map_offset;
+ dex_files_map.Begin() + oat_dex_file.dex_file_offset_ - map_offset;
if (kIsDebugBuild) {
// Sanity check our input files.
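OpenDexFiles now deals in MemMap values throughout: MapDirectlyOrExtract and MemMap::MapFile return a MemMap by value, and failure is detected with IsValid() rather than a null pointer. A hedged sketch of the new MapFile idiom, mirroring the argument order used above (the function wrapper and location name are placeholders):

    // Returns an invalid MemMap on failure; MemMap is move-only, so returning
    // by value transfers ownership.
    MemMap MapVdexRegion(int fd, size_t length, off_t offset,
                         std::string* error_msg) {
      MemMap map = MemMap::MapFile(length,
                                   PROT_READ | PROT_WRITE,
                                   MAP_SHARED,
                                   fd,
                                   offset,
                                   /* low_4gb */ false,
                                   "vdex-region",  // placeholder location name
                                   error_msg);
      if (!map.IsValid()) {
        LOG(ERROR) << "Failed to mmap: " << *error_msg;
      }
      return map;
    }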
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 9470f8c874..5202d39960 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -177,7 +177,7 @@ class OatWriter {
bool verify,
bool update_input_vdex,
CopyOption copy_dex_files,
- /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+ /*out*/ std::vector<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
// Initialize the writer with the given parameters.
void Initialize(const CompilerDriver* compiler_driver,
@@ -315,7 +315,7 @@ class OatWriter {
bool update_input_vdex);
bool OpenDexFiles(File* file,
bool verify,
- /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+ /*out*/ std::vector<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
size_t InitOatHeader(uint32_t num_dex_files, SafeMap<std::string, std::string>* key_value_store);
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index d73f10a6ed..bd09f239ba 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -169,7 +169,7 @@ class OatTest : public CommonCompilerTest {
oat_file);
elf_writer->Start();
OutputStream* oat_rodata = elf_writer->StartRoData();
- std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps;
+ std::vector<MemMap> opened_dex_files_maps;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
if (!oat_writer.WriteAndOpenDexFiles(
vdex_file,
@@ -246,7 +246,7 @@ class OatTest : public CommonCompilerTest {
return false;
}
- for (std::unique_ptr<MemMap>& map : opened_dex_files_maps) {
+ for (MemMap& map : opened_dex_files_maps) {
opened_dex_files_maps_.emplace_back(std::move(map));
}
for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
@@ -261,7 +261,7 @@ class OatTest : public CommonCompilerTest {
std::unique_ptr<QuickCompilerCallbacks> callbacks_;
- std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps_;
+ std::vector<MemMap> opened_dex_files_maps_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
};
@@ -406,11 +406,11 @@ TEST_F(OatTest, WriteRead) {
std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/true,
- nullptr,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ true,
+ /* abs_dex_location */ nullptr,
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
const OatHeader& oat_header = oat_file->GetOatHeader();
@@ -529,11 +529,11 @@ TEST_F(OatTest, EmptyTextSection) {
std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- nullptr,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
+ /* abs_dex_location */ nullptr,
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(oat_file != nullptr);
EXPECT_LT(static_cast<size_t>(oat_file->Size()),
@@ -607,11 +607,11 @@ void OatTest::TestDexFileInput(bool verify, bool low_4gb, bool use_profile) {
std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- nullptr,
- nullptr,
- false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
low_4gb,
- nullptr,
+ /* abs_dex_location */ nullptr,
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
if (low_4gb) {
@@ -738,11 +738,11 @@ void OatTest::TestZipFileInput(bool verify) {
std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- nullptr,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
+ /* abs_dex_location */ nullptr,
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
@@ -788,11 +788,11 @@ void OatTest::TestZipFileInput(bool verify) {
std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- nullptr,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
+ /* abs_dex_location */ nullptr,
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
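The oat_writer_test.cc hunks above replace bare nullptr/bool arguments to OatFile::Open with /* parameter_name */ annotations. Besides readability, the named list makes signature changes visible at each call site: the five bare arguments in the old rows do not line up one-for-one with the five named ones (a `reservation` parameter is now spelled out), which the comments make obvious at a glance. The idiom, restated as an illustrative call of the same shape:

    std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
                                                    filename,
                                                    filename,
                                                    /* requested_base */ nullptr,
                                                    /* executable */ false,
                                                    /* low_4gb */ false,
                                                    /* abs_dex_location */ nullptr,
                                                    /* reservation */ nullptr,
                                                    &error_msg));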
diff --git a/dex2oat/linker/relative_patcher.cc b/dex2oat/linker/relative_patcher.cc
index b6135c9b5f..564cf30f19 100644
--- a/dex2oat/linker/relative_patcher.cc
+++ b/dex2oat/linker/relative_patcher.cc
@@ -45,35 +45,35 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create(
const InstructionSetFeatures* features,
RelativePatcherThunkProvider* thunk_provider,
RelativePatcherTargetProvider* target_provider) {
- class RelativePatcherNone FINAL : public RelativePatcher {
+ class RelativePatcherNone final : public RelativePatcher {
public:
RelativePatcherNone() { }
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
- MethodReference method_ref ATTRIBUTE_UNUSED) OVERRIDE {
+ MethodReference method_ref ATTRIBUTE_UNUSED) override {
return offset; // No space reserved; no patches expected.
}
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE {
+ uint32_t ReserveSpaceEnd(uint32_t offset) override {
return offset; // No space reserved; no patches expected.
}
- uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) override {
return offset; // No thunks added; no patches expected.
}
void PatchCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
uint32_t literal_offset ATTRIBUTE_UNUSED,
uint32_t patch_offset ATTRIBUTE_UNUSED,
- uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
+ uint32_t target_offset ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Unexpected relative call patch.";
}
void PatchPcRelativeReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
const LinkerPatch& patch ATTRIBUTE_UNUSED,
uint32_t patch_offset ATTRIBUTE_UNUSED,
- uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
+ uint32_t target_offset ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Unexpected relative dex cache array patch.";
}
@@ -84,7 +84,7 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create(
}
std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(
- uint32_t executable_offset ATTRIBUTE_UNUSED) OVERRIDE {
+ uint32_t executable_offset ATTRIBUTE_UNUSED) override {
return std::vector<debug::MethodDebugInfo>(); // No thunks added.
}
diff --git a/dex2oat/linker/relative_patcher_test.h b/dex2oat/linker/relative_patcher_test.h
index 9556c5f557..9725570570 100644
--- a/dex2oat/linker/relative_patcher_test.h
+++ b/dex2oat/linker/relative_patcher_test.h
@@ -57,7 +57,7 @@ class RelativePatcherTest : public CommonCompilerTest {
patched_code_.reserve(16 * KB);
}
- void SetUp() OVERRIDE {
+ void SetUp() override {
OverrideInstructionSetFeatures(instruction_set_, variant_);
CommonCompilerTest::SetUp();
@@ -67,7 +67,7 @@ class RelativePatcherTest : public CommonCompilerTest {
&method_offset_map_);
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
compiled_methods_.clear();
patcher_.reset();
CommonCompilerTest::TearDown();
@@ -260,7 +260,7 @@ class RelativePatcherTest : public CommonCompilerTest {
void GetThunkCode(const LinkerPatch& patch,
/*out*/ ArrayRef<const uint8_t>* code,
- /*out*/ std::string* debug_name) OVERRIDE {
+ /*out*/ std::string* debug_name) override {
auto it = thunk_map_.find(ThunkKey(patch));
CHECK(it != thunk_map_.end());
const ThunkValue& value = it->second;
@@ -316,9 +316,9 @@ class RelativePatcherTest : public CommonCompilerTest {
// Map method reference to assigned offset.

// Wrap the map in a class implementing RelativePatcherTargetProvider.
- class MethodOffsetMap FINAL : public RelativePatcherTargetProvider {
+ class MethodOffsetMap final : public RelativePatcherTargetProvider {
public:
- std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) OVERRIDE {
+ std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) override {
auto it = map.find(ref);
if (it == map.end()) {
return std::pair<bool, uint32_t>(false, 0u);
diff --git a/dex2oat/linker/x86/relative_patcher_x86.h b/dex2oat/linker/x86/relative_patcher_x86.h
index e723580dae..3da62fb23b 100644
--- a/dex2oat/linker/x86/relative_patcher_x86.h
+++ b/dex2oat/linker/x86/relative_patcher_x86.h
@@ -22,17 +22,17 @@
namespace art {
namespace linker {
-class X86RelativePatcher FINAL : public X86BaseRelativePatcher {
+class X86RelativePatcher final : public X86BaseRelativePatcher {
public:
X86RelativePatcher() { }
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
+ uint32_t patch_offset) override;
};
} // namespace linker
diff --git a/dex2oat/linker/x86/relative_patcher_x86_base.h b/dex2oat/linker/x86/relative_patcher_x86_base.h
index 4cc7b07d2d..a1925e0995 100644
--- a/dex2oat/linker/x86/relative_patcher_x86_base.h
+++ b/dex2oat/linker/x86/relative_patcher_x86_base.h
@@ -26,14 +26,14 @@ class X86BaseRelativePatcher : public RelativePatcher {
public:
uint32_t ReserveSpace(uint32_t offset,
const CompiledMethod* compiled_method,
- MethodReference method_ref) OVERRIDE;
- uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+ MethodReference method_ref) override;
+ uint32_t ReserveSpaceEnd(uint32_t offset) override;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
void PatchCall(std::vector<uint8_t>* code,
uint32_t literal_offset,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
- std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+ uint32_t target_offset) override;
+ std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
protected:
X86BaseRelativePatcher() { }
diff --git a/dex2oat/linker/x86_64/relative_patcher_x86_64.h b/dex2oat/linker/x86_64/relative_patcher_x86_64.h
index a31e1ebfbb..a82fef3b56 100644
--- a/dex2oat/linker/x86_64/relative_patcher_x86_64.h
+++ b/dex2oat/linker/x86_64/relative_patcher_x86_64.h
@@ -22,17 +22,17 @@
namespace art {
namespace linker {
-class X86_64RelativePatcher FINAL : public X86BaseRelativePatcher {
+class X86_64RelativePatcher final : public X86BaseRelativePatcher {
public:
X86_64RelativePatcher() { }
void PatchPcRelativeReference(std::vector<uint8_t>* code,
const LinkerPatch& patch,
uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE;
+ uint32_t target_offset) override;
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
const LinkerPatch& patch,
- uint32_t patch_offset) OVERRIDE;
+ uint32_t patch_offset) override;
};
} // namespace linker
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index f8274e2f9a..e9b64028de 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -123,8 +123,7 @@ static const char* primitiveTypeLabel(char typeChar) {
/*
* Converts a type descriptor to human-readable "dotted" form. For
* example, "Ljava/lang/String;" becomes "java.lang.String", and
- * "[I" becomes "int[]". Also converts '$' to '.', which means this
- * form can't be converted back to a descriptor.
+ * "[I" becomes "int[]".
*/
static std::unique_ptr<char[]> descriptorToDot(const char* str) {
int targetLen = strlen(str);
@@ -157,7 +156,7 @@ static std::unique_ptr<char[]> descriptorToDot(const char* str) {
int i = 0;
for (; i < targetLen; i++) {
const char ch = str[offset + i];
- newStr[i] = (ch == '/' || ch == '$') ? '.' : ch;
+ newStr[i] = (ch == '/') ? '.' : ch;
} // for
// Add the appropriate number of brackets for arrays.
@@ -171,10 +170,9 @@ static std::unique_ptr<char[]> descriptorToDot(const char* str) {
}
/*
- * Converts the class name portion of a type descriptor to human-readable
- * "dotted" form. For example, "Ljava/lang/String;" becomes "String".
+ * Retrieves the class name portion of a type descriptor.
*/
-static std::unique_ptr<char[]> descriptorClassToDot(const char* str) {
+static std::unique_ptr<char[]> descriptorClassToName(const char* str) {
// Reduce to just the class name prefix.
const char* lastSlash = strrchr(str, '/');
if (lastSlash == nullptr) {
@@ -187,8 +185,7 @@ static std::unique_ptr<char[]> descriptorClassToDot(const char* str) {
const int targetLen = strlen(lastSlash);
std::unique_ptr<char[]> newStr(new char[targetLen]);
for (int i = 0; i < targetLen - 1; i++) {
- const char ch = lastSlash[i];
- newStr[i] = ch == '$' ? '.' : ch;
+ newStr[i] = lastSlash[i];
} // for
newStr[targetLen - 1] = '\0';
return newStr;
@@ -1250,7 +1247,7 @@ static void dumpMethod(const ClassAccessor::Method& method, int i) {
// Method name and prototype.
if (constructor) {
- std::unique_ptr<char[]> dot(descriptorClassToDot(backDescriptor));
+ std::unique_ptr<char[]> dot(descriptorClassToName(backDescriptor));
fprintf(gOutFile, "<constructor name=\"%s\"\n", dot.get());
dot = descriptorToDot(backDescriptor);
fprintf(gOutFile, " type=\"%s\"\n", dot.get());
@@ -1469,7 +1466,7 @@ static void dumpClass(const DexFile* pDexFile, int idx, char** pLastPackage) {
}
fprintf(gOutFile, " Interfaces -\n");
} else {
- std::unique_ptr<char[]> dot(descriptorClassToDot(classDescriptor));
+ std::unique_ptr<char[]> dot(descriptorClassToName(classDescriptor));
fprintf(gOutFile, "<class name=\"%s\"\n", dot.get());
if (superclassDescriptor != nullptr) {
dot = descriptorToDot(superclassDescriptor);
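The dexdump.cc changes above adjust the descriptor helpers: descriptorToDot no longer rewrites '$' to '.', so the dotted form of a nested-class descriptor remains convertible back to a descriptor, and descriptorClassToDot is renamed descriptorClassToName since it now only strips the package prefix. Hand-written expectations under that reading (not test output):

    // descriptorToDot("Ljava/lang/String;")   -> "java.lang.String"
    // descriptorToDot("Lfoo/Bar$Baz;")        -> "foo.Bar$Baz"   ('$' preserved)
    // descriptorToDot("[I")                   -> "int[]"
    // descriptorClassToName("Lfoo/Bar$Baz;")  -> "Bar$Baz"       (package stripped)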
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
index e7d5ed953d..c81d0c722d 100644
--- a/dexlayout/compact_dex_writer.h
+++ b/dexlayout/compact_dex_writer.h
@@ -112,15 +112,15 @@ class CompactDexWriter : public DexWriter {
public:
class Container : public DexContainer {
public:
- Section* GetMainSection() OVERRIDE {
+ Section* GetMainSection() override {
return &main_section_;
}
- Section* GetDataSection() OVERRIDE {
+ Section* GetDataSection() override {
return &data_section_;
}
- bool IsCompactDexContainer() const OVERRIDE {
+ bool IsCompactDexContainer() const override {
return true;
}
@@ -139,21 +139,21 @@ class CompactDexWriter : public DexWriter {
// Return true if we can generate compact dex for the IR.
bool CanGenerateCompactDex(std::string* error_msg);
- bool Write(DexContainer* output, std::string* error_msg) OVERRIDE;
+ bool Write(DexContainer* output, std::string* error_msg) override;
- std::unique_ptr<DexContainer> CreateDexContainer() const OVERRIDE;
+ std::unique_ptr<DexContainer> CreateDexContainer() const override;
- void WriteHeader(Stream* stream) OVERRIDE;
+ void WriteHeader(Stream* stream) override;
- size_t GetHeaderSize() const OVERRIDE;
+ size_t GetHeaderSize() const override;
uint32_t WriteDebugInfoOffsetTable(Stream* stream);
- void WriteCodeItem(Stream* stream, dex_ir::CodeItem* code_item, bool reserve_only) OVERRIDE;
+ void WriteCodeItem(Stream* stream, dex_ir::CodeItem* code_item, bool reserve_only) override;
- void WriteStringData(Stream* stream, dex_ir::StringData* string_data) OVERRIDE;
+ void WriteStringData(Stream* stream, dex_ir::StringData* string_data) override;
- void WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) OVERRIDE;
+ void WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) override;
void SortDebugInfosByMethodIndex();
diff --git a/dexlayout/dex_container.h b/dexlayout/dex_container.h
index 2b9a5f9959..2d742b0dbe 100644
--- a/dexlayout/dex_container.h
+++ b/dexlayout/dex_container.h
@@ -57,19 +57,19 @@ class DexContainer {
public:
virtual ~VectorSection() {}
- uint8_t* Begin() OVERRIDE {
+ uint8_t* Begin() override {
return &data_[0];
}
- size_t Size() const OVERRIDE {
+ size_t Size() const override {
return data_.size();
}
- void Resize(size_t size) OVERRIDE {
+ void Resize(size_t size) override {
data_.resize(size, 0u);
}
- void Clear() OVERRIDE {
+ void Clear() override {
data_.clear();
}
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 8f853eaeb5..178a4d4df1 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -233,7 +233,7 @@ template<class T> class CollectionVector : public CollectionBase {
// Preallocate so that assignment does not invalidate pointers into the vector.
collection_.reserve(size);
}
- virtual ~CollectionVector() OVERRIDE { }
+ ~CollectionVector() override { }
template<class... Args>
T* CreateAndAddItem(Args&&... args) {
@@ -242,7 +242,7 @@ template<class T> class CollectionVector : public CollectionBase {
return object;
}
- virtual uint32_t Size() const OVERRIDE { return collection_.size(); }
+ uint32_t Size() const override { return collection_.size(); }
Iterator<ElementType> begin() const { return Iterator<ElementType>(collection_, 0U, Size()); }
Iterator<ElementType> end() const { return Iterator<ElementType>(collection_, Size(), Size()); }
@@ -406,7 +406,7 @@ class Header : public Item {
data_size,
data_offset);
}
- ~Header() OVERRIDE { }
+ ~Header() override { }
static size_t ItemSize() { return kHeaderItemSize; }
@@ -590,7 +590,7 @@ class StringId : public IndexedItem {
explicit StringId(StringData* string_data) : string_data_(string_data) {
size_ = kStringIdItemSize;
}
- ~StringId() OVERRIDE { }
+ ~StringId() override { }
static size_t ItemSize() { return kStringIdItemSize; }
@@ -608,7 +608,7 @@ class StringId : public IndexedItem {
class TypeId : public IndexedItem {
public:
explicit TypeId(StringId* string_id) : string_id_(string_id) { size_ = kTypeIdItemSize; }
- ~TypeId() OVERRIDE { }
+ ~TypeId() override { }
static size_t ItemSize() { return kTypeIdItemSize; }
@@ -629,7 +629,7 @@ class TypeList : public Item {
explicit TypeList(TypeIdVector* type_list) : type_list_(type_list) {
size_ = sizeof(uint32_t) + (type_list->size() * sizeof(uint16_t));
}
- ~TypeList() OVERRIDE { }
+ ~TypeList() override { }
const TypeIdVector* GetTypeList() const { return type_list_.get(); }
@@ -644,7 +644,7 @@ class ProtoId : public IndexedItem {
ProtoId(const StringId* shorty, const TypeId* return_type, TypeList* parameters)
: shorty_(shorty), return_type_(return_type), parameters_(parameters)
{ size_ = kProtoIdItemSize; }
- ~ProtoId() OVERRIDE { }
+ ~ProtoId() override { }
static size_t ItemSize() { return kProtoIdItemSize; }
@@ -666,7 +666,7 @@ class FieldId : public IndexedItem {
public:
FieldId(const TypeId* klass, const TypeId* type, const StringId* name)
: class_(klass), type_(type), name_(name) { size_ = kFieldIdItemSize; }
- ~FieldId() OVERRIDE { }
+ ~FieldId() override { }
static size_t ItemSize() { return kFieldIdItemSize; }
@@ -688,7 +688,7 @@ class MethodId : public IndexedItem {
public:
MethodId(const TypeId* klass, const ProtoId* proto, const StringId* name)
: class_(klass), proto_(proto), name_(name) { size_ = kMethodIdItemSize; }
- ~MethodId() OVERRIDE { }
+ ~MethodId() override { }
static size_t ItemSize() { return kMethodIdItemSize; }
@@ -710,7 +710,7 @@ class FieldItem : public Item {
public:
FieldItem(uint32_t access_flags, const FieldId* field_id)
: access_flags_(access_flags), field_id_(field_id) { }
- ~FieldItem() OVERRIDE { }
+ ~FieldItem() override { }
FieldItem(FieldItem&&) = default;
@@ -732,7 +732,7 @@ class MethodItem : public Item {
public:
MethodItem(uint32_t access_flags, const MethodId* method_id, CodeItem* code)
: access_flags_(access_flags), method_id_(method_id), code_(code) { }
- ~MethodItem() OVERRIDE { }
+ ~MethodItem() override { }
MethodItem(MethodItem&&) = default;
@@ -876,7 +876,7 @@ class ClassData : public Item {
direct_methods_(direct_methods),
virtual_methods_(virtual_methods) { }
- ~ClassData() OVERRIDE = default;
+ ~ClassData() override = default;
FieldItemVector* StaticFields() { return static_fields_.get(); }
FieldItemVector* InstanceFields() { return instance_fields_.get(); }
MethodItemVector* DirectMethods() { return direct_methods_.get(); }
@@ -912,7 +912,7 @@ class ClassDef : public IndexedItem {
class_data_(class_data),
static_values_(static_values) { size_ = kClassDefItemSize; }
- ~ClassDef() OVERRIDE { }
+ ~ClassDef() override { }
static size_t ItemSize() { return kClassDefItemSize; }
@@ -980,7 +980,7 @@ class TryItem : public Item {
public:
TryItem(uint32_t start_addr, uint16_t insn_count, const CatchHandler* handlers)
: start_addr_(start_addr), insn_count_(insn_count), handlers_(handlers) { }
- ~TryItem() OVERRIDE { }
+ ~TryItem() override { }
uint32_t StartAddr() const { return start_addr_; }
uint16_t InsnCount() const { return insn_count_; }
@@ -1042,7 +1042,7 @@ class CodeItem : public Item {
tries_(tries),
handlers_(handlers) { }
- ~CodeItem() OVERRIDE { }
+ ~CodeItem() override { }
uint16_t RegistersSize() const { return registers_size_; }
uint16_t InsSize() const { return ins_size_; }
@@ -1115,7 +1115,7 @@ class AnnotationSetItem : public Item {
explicit AnnotationSetItem(std::vector<AnnotationItem*>* items) : items_(items) {
size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t);
}
- ~AnnotationSetItem() OVERRIDE { }
+ ~AnnotationSetItem() override { }
std::vector<AnnotationItem*>* GetItems() { return items_.get(); }
@@ -1132,7 +1132,7 @@ class AnnotationSetRefList : public Item {
explicit AnnotationSetRefList(std::vector<AnnotationSetItem*>* items) : items_(items) {
size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t);
}
- ~AnnotationSetRefList() OVERRIDE { }
+ ~AnnotationSetRefList() override { }
std::vector<AnnotationSetItem*>* GetItems() { return items_.get(); }
@@ -1227,7 +1227,7 @@ class CallSiteId : public IndexedItem {
explicit CallSiteId(EncodedArrayItem* call_site_item) : call_site_item_(call_site_item) {
size_ = kCallSiteIdItemSize;
}
- ~CallSiteId() OVERRIDE { }
+ ~CallSiteId() override { }
static size_t ItemSize() { return kCallSiteIdItemSize; }
@@ -1248,7 +1248,7 @@ class MethodHandleItem : public IndexedItem {
field_or_method_id_(field_or_method_id) {
size_ = kMethodHandleItemSize;
}
- ~MethodHandleItem() OVERRIDE { }
+ ~MethodHandleItem() override { }
static size_t ItemSize() { return kMethodHandleItemSize; }
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index a83a46b7e2..ca6ff9e514 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -92,7 +92,7 @@ static uint32_t GetDebugInfoStreamSize(const uint8_t* debug_info_stream) {
template<class T> class CollectionMap : public CollectionBase {
public:
CollectionMap() = default;
- virtual ~CollectionMap() OVERRIDE { }
+ ~CollectionMap() override { }
template <class... Args>
T* CreateAndAddItem(CollectionVector<T>& vector,
diff --git a/dexlayout/dex_writer.h b/dexlayout/dex_writer.h
index db1898bf26..dd2ebad26f 100644
--- a/dexlayout/dex_writer.h
+++ b/dexlayout/dex_writer.h
@@ -192,15 +192,15 @@ class DexWriter {
class Container : public DexContainer {
public:
- Section* GetMainSection() OVERRIDE {
+ Section* GetMainSection() override {
return &main_section_;
}
- Section* GetDataSection() OVERRIDE {
+ Section* GetDataSection() override {
return &data_section_;
}
- bool IsCompactDexContainer() const OVERRIDE {
+ bool IsCompactDexContainer() const override {
return false;
}
diff --git a/dexlayout/dexdiag_test.cc b/dexlayout/dexdiag_test.cc
index 53145c22fa..60dd7e42a4 100644
--- a/dexlayout/dexdiag_test.cc
+++ b/dexlayout/dexdiag_test.cc
@@ -71,11 +71,11 @@ class DexDiagTest : public CommonRuntimeTest {
std::unique_ptr<OatFile> oat(OatFile::Open(/* zip_fd */ -1,
oat_location.c_str(),
oat_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- nullptr,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
+ /* abs_dex_location */ nullptr,
+ /* reservation */ nullptr,
&error_msg));
EXPECT_TRUE(oat != nullptr) << error_msg;
return oat;
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index d6dd9d1829..52d355b570 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -71,26 +71,10 @@ static inline uint16_t Get2LE(unsigned char const* src) {
}
/*
- * Converts a type descriptor to human-readable "dotted" form. For
- * example, "Ljava/lang/String;" becomes "java.lang.String", and
- * "[I" becomes "int[]". Also converts '$' to '.', which means this
- * form can't be converted back to a descriptor.
- */
-static std::string DescriptorToDotWrapper(const char* descriptor) {
- std::string result = DescriptorToDot(descriptor);
- size_t found = result.find('$');
- while (found != std::string::npos) {
- result[found] = '.';
- found = result.find('$', found);
- }
- return result;
-}
-
-/*
* Converts the class name portion of a type descriptor to human-readable
* "dotted" form. For example, "Ljava/lang/String;" becomes "String".
*/
-static std::string DescriptorClassToDot(const char* str) {
+static std::string DescriptorClassToName(const char* str) {
std::string descriptor(str);
// Reduce to just the class name prefix.
size_t last_slash = descriptor.rfind('/');
@@ -104,13 +88,6 @@ static std::string DescriptorClassToDot(const char* str) {
size_t size = descriptor.size() - 1 - last_slash;
std::string result(descriptor.substr(last_slash, size));
- // Replace '$' with '.'.
- size_t dollar_sign = result.find('$');
- while (dollar_sign != std::string::npos) {
- result[dollar_sign] = '.';
- dollar_sign = result.find('$', dollar_sign);
- }
-
return result;
}
@@ -786,7 +763,7 @@ void DexLayout::DumpInterface(const dex_ir::TypeId* type_item, int i) {
if (options_.output_format_ == kOutputPlain) {
fprintf(out_file_, " #%d : '%s'\n", i, interface_name);
} else {
- std::string dot(DescriptorToDotWrapper(interface_name));
+ std::string dot(DescriptorToDot(interface_name));
fprintf(out_file_, "<implements name=\"%s\">\n</implements>\n", dot.c_str());
}
}
@@ -1044,7 +1021,7 @@ void DexLayout::DumpBytecodes(uint32_t idx, const dex_ir::CodeItem* code, uint32
const char* back_descriptor = method_id->Class()->GetStringId()->Data();
// Generate header.
- std::string dot(DescriptorToDotWrapper(back_descriptor));
+ std::string dot(DescriptorToDot(back_descriptor));
fprintf(out_file_, "%06x: |[%06x] %s.%s:%s\n",
code_offset, code_offset, dot.c_str(), name, type_descriptor.c_str());
@@ -1212,9 +1189,9 @@ void DexLayout::DumpMethod(uint32_t idx, uint32_t flags, const dex_ir::CodeItem*
// Method name and prototype.
if (constructor) {
- std::string dot(DescriptorClassToDot(back_descriptor));
+ std::string dot(DescriptorClassToName(back_descriptor));
fprintf(out_file_, "<constructor name=\"%s\"\n", dot.c_str());
- dot = DescriptorToDotWrapper(back_descriptor);
+ dot = DescriptorToDot(back_descriptor);
fprintf(out_file_, " type=\"%s\"\n", dot.c_str());
} else {
fprintf(out_file_, "<method name=\"%s\"\n", name);
@@ -1223,7 +1200,7 @@ void DexLayout::DumpMethod(uint32_t idx, uint32_t flags, const dex_ir::CodeItem*
LOG(ERROR) << "bad method type descriptor '" << type_descriptor << "'";
goto bail;
}
- std::string dot(DescriptorToDotWrapper(return_type + 1));
+ std::string dot(DescriptorToDot(return_type + 1));
fprintf(out_file_, " return=\"%s\"\n", dot.c_str());
fprintf(out_file_, " abstract=%s\n", QuotedBool((flags & kAccAbstract) != 0));
fprintf(out_file_, " native=%s\n", QuotedBool((flags & kAccNative) != 0));
@@ -1265,7 +1242,7 @@ void DexLayout::DumpMethod(uint32_t idx, uint32_t flags, const dex_ir::CodeItem*
}
// Null terminate and display.
*cp++ = '\0';
- std::string dot(DescriptorToDotWrapper(tmp_buf));
+ std::string dot(DescriptorToDot(tmp_buf));
fprintf(out_file_, "<parameter name=\"arg%d\" type=\"%s\">\n"
"</parameter>\n", arg_num++, dot.c_str());
} // while
@@ -1309,7 +1286,7 @@ void DexLayout::DumpSField(uint32_t idx, uint32_t flags, int i, dex_ir::EncodedV
}
} else if (options_.output_format_ == kOutputXml) {
fprintf(out_file_, "<field name=\"%s\"\n", name);
- std::string dot(DescriptorToDotWrapper(type_descriptor));
+ std::string dot(DescriptorToDot(type_descriptor));
fprintf(out_file_, " type=\"%s\"\n", dot.c_str());
fprintf(out_file_, " transient=%s\n", QuotedBool((flags & kAccTransient) != 0));
fprintf(out_file_, " volatile=%s\n", QuotedBool((flags & kAccVolatile) != 0));
@@ -1415,10 +1392,10 @@ void DexLayout::DumpClass(int idx, char** last_package) {
}
fprintf(out_file_, " Interfaces -\n");
} else {
- std::string dot(DescriptorClassToDot(class_descriptor));
+ std::string dot(DescriptorClassToName(class_descriptor));
fprintf(out_file_, "<class name=\"%s\"\n", dot.c_str());
if (superclass_descriptor != nullptr) {
- dot = DescriptorToDotWrapper(superclass_descriptor);
+ dot = DescriptorToDot(superclass_descriptor);
fprintf(out_file_, " extends=\"%s\"\n", dot.c_str());
}
fprintf(out_file_, " interface=%s\n",
@@ -1817,14 +1794,20 @@ bool DexLayout::OutputDexFile(const DexFile* input_dex_file,
// If options_.output_dex_directory_ is non null, we are outputting to a file.
if (options_.output_dex_directory_ != nullptr) {
std::string output_location(options_.output_dex_directory_);
- size_t last_slash = dex_file_location.rfind('/');
+ const size_t last_slash = dex_file_location.rfind('/');
std::string dex_file_directory = dex_file_location.substr(0, last_slash + 1);
if (output_location == dex_file_directory) {
output_location = dex_file_location + ".new";
- } else if (last_slash != std::string::npos) {
- output_location += dex_file_location.substr(last_slash);
} else {
- output_location += "/" + dex_file_location + ".new";
+ if (!output_location.empty() && output_location.back() != '/') {
+ output_location += "/";
+ }
+ const size_t separator = dex_file_location.rfind('!');
+ if (separator != std::string::npos) {
+ output_location += dex_file_location.substr(separator + 1);
+ } else {
+ output_location += "classes.dex";
+ }
}
new_file.reset(OS::CreateEmptyFile(output_location.c_str()));
if (new_file == nullptr) {
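The OutputDexFile hunk above also changes how the output file is named when writing to a separate directory: instead of reusing the tail of the input path, it takes the component after the multidex separator '!' when present and otherwise defaults to classes.dex. A standalone restatement of that rule (illustrative helper, not part of the patch):

    #include <string>

    std::string ComputeOutputLocation(std::string output_location,
                                      const std::string& dex_file_location) {
      const size_t last_slash = dex_file_location.rfind('/');
      const std::string dex_file_directory =
          dex_file_location.substr(0, last_slash + 1);
      if (output_location == dex_file_directory) {
        return dex_file_location + ".new";  // Same directory: write alongside.
      }
      if (!output_location.empty() && output_location.back() != '/') {
        output_location += "/";
      }
      const size_t separator = dex_file_location.rfind('!');  // multidex marker
      return output_location + (separator != std::string::npos
                                    ? dex_file_location.substr(separator + 1)
                                    : "classes.dex");
    }
    // e.g. "/data/app/base.apk!classes2.dex" -> "<out_dir>/classes2.dex"
    //      "/data/app/base.apk"              -> "<out_dir>/classes.dex"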
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 71e56d19ea..9f73347354 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -60,6 +60,7 @@ static void Usage(void) {
LOG(ERROR) << " -p : profile file name (defaults to no profile)";
LOG(ERROR) << " -s : visualize reference pattern";
LOG(ERROR) << " -t : display file section sizes";
+ LOG(ERROR) << " -u : update dex checksums";
LOG(ERROR) << " -v : verify output file is canonical to input (IR level comparison)";
LOG(ERROR) << " -w : output dex directory";
LOG(ERROR) << " -x : compact dex generation level, either 'none' or 'fast'";
@@ -85,7 +86,7 @@ int DexlayoutDriver(int argc, char** argv) {
// Parse all arguments.
while (1) {
- const int ic = getopt(argc, argv, "abcdefghil:o:p:stvw:x:");
+ const int ic = getopt(argc, argv, "abcdefghil:o:p:stuvw:x:");
if (ic < 0) {
break; // done
}
@@ -138,6 +139,9 @@ int DexlayoutDriver(int argc, char** argv) {
options.show_section_statistics_ = true;
options.verbose_ = false;
break;
+ case 'u': // update checksum
+ options.update_checksum_ = true;
+ break;
case 'v': // verify output
options.verify_output_ = true;
break;
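The new -u flag only wires up options.update_checksum_ in this patch; the recomputation itself is elsewhere. A DEX checksum is defined as the adler32 of the file contents after the 8-byte magic and 4-byte checksum fields (bytes 12 onward), so the rewrite step presumably computes something like the following sketch, which is an assumption, not code from the patch:

    #include <zlib.h>
    #include <cstddef>
    #include <cstdint>

    // Assumed helper: adler32 over everything past the magic + checksum fields.
    uint32_t ComputeDexChecksum(const uint8_t* data, size_t size) {
      uLong adler = adler32(0L, Z_NULL, 0);
      return static_cast<uint32_t>(
          adler32(adler, data + 12, static_cast<uInt>(size - 12)));
    }

Based only on the options listed in Usage(), an invocation would look something like `dexlayout -u -w <out_dir> <input.dex>` (hedged; not taken from the patch).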
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index a20930b28b..187c68790a 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -267,7 +267,7 @@ class DexLayoutTest : public CommonRuntimeTest {
ScratchFile dexlayout_output;
const std::string& dexlayout_filename = dexlayout_output.GetFilename();
- for (const std::string &dex_file : GetLibCoreDexFileNames()) {
+ for (const std::string& dex_file : GetLibCoreDexFileNames()) {
std::vector<std::string> dexdump_exec_argv =
{ dexdump, "-d", "-f", "-h", "-l", "plain", "-o", dexdump_filename, dex_file };
std::vector<std::string> dexlayout_args =
@@ -293,31 +293,33 @@ class DexLayoutTest : public CommonRuntimeTest {
const std::string& tmp_name = tmp_file.GetFilename();
size_t tmp_last_slash = tmp_name.rfind('/');
std::string tmp_dir = tmp_name.substr(0, tmp_last_slash + 1);
+ std::string unzip_dir = tmp_dir + "unzip/";
- for (const std::string &dex_file : GetLibCoreDexFileNames()) {
+ for (const std::string& dex_file : GetLibCoreDexFileNames()) {
std::vector<std::string> dexlayout_args =
{ "-w", tmp_dir, "-o", tmp_name, dex_file };
if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
return false;
}
- size_t dex_file_last_slash = dex_file.rfind('/');
- std::string dex_file_name = dex_file.substr(dex_file_last_slash + 1);
+ std::string dex_file_name = "classes.dex";
std::vector<std::string> unzip_exec_argv =
- { "/usr/bin/unzip", dex_file, "classes.dex", "-d", tmp_dir};
+ { "/usr/bin/unzip", dex_file, "classes.dex", "-d", unzip_dir};
if (!::art::Exec(unzip_exec_argv, error_msg)) {
return false;
}
std::vector<std::string> diff_exec_argv =
- { "/usr/bin/diff", tmp_dir + "classes.dex" , tmp_dir + dex_file_name };
+ { "/usr/bin/diff", tmp_dir + "classes.dex" , unzip_dir + dex_file_name };
if (!::art::Exec(diff_exec_argv, error_msg)) {
return false;
}
- if (!UnlinkFile(tmp_dir + "classes.dex")) {
+ if (!UnlinkFile(unzip_dir + "classes.dex")) {
return false;
}
if (!UnlinkFile(tmp_dir + dex_file_name)) {
return false;
}
+ // Remove the unzip temp directory so that unlinking android_data doesn't fail.
+ EXPECT_EQ(rmdir(unzip_dir.c_str()), 0);
}
return true;
}
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index 871cd081e7..00b8ef2254 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -132,7 +132,7 @@ NO_RETURN static void Usage(const char *fmt, ...) {
exit(kErrorInvalidArguments);
}
-class DexoptAnalyzer FINAL {
+class DexoptAnalyzer final {
public:
DexoptAnalyzer() :
assume_profile_changed_(false),
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 49f92499e3..c1a6f59341 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -39,15 +39,15 @@ using vixl::aarch32::pc;
static const vixl::aarch32::Register tr(TR);
-class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
- class CustomDisassemblerStream FINAL : public DisassemblerStream {
+class DisassemblerArm::CustomDisassembler final : public PrintDisassembler {
+ class CustomDisassemblerStream final : public DisassemblerStream {
public:
CustomDisassemblerStream(std::ostream& os,
const CustomDisassembler* disasm,
const DisassemblerOptions* options)
: DisassemblerStream(os), disasm_(disasm), options_(options) {}
- DisassemblerStream& operator<<(const PrintLabel& label) OVERRIDE {
+ DisassemblerStream& operator<<(const PrintLabel& label) override {
const LocationType type = label.GetLocationType();
switch (type) {
@@ -73,7 +73,7 @@ class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
}
}
- DisassemblerStream& operator<<(vixl::aarch32::Register reg) OVERRIDE {
+ DisassemblerStream& operator<<(vixl::aarch32::Register reg) override {
if (reg.Is(tr)) {
os() << "tr";
return *this;
@@ -82,7 +82,7 @@ class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
}
}
- DisassemblerStream& operator<<(const MemOperand& operand) OVERRIDE {
+ DisassemblerStream& operator<<(const MemOperand& operand) override {
// VIXL must use a PrintLabel object whenever the base register is PC;
// the following check verifies this invariant, and guards against bugs.
DCHECK(!operand.GetBaseRegister().Is(pc));
@@ -96,7 +96,7 @@ class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
return *this;
}
- DisassemblerStream& operator<<(const vixl::aarch32::AlignedMemOperand& operand) OVERRIDE {
+ DisassemblerStream& operator<<(const vixl::aarch32::AlignedMemOperand& operand) override {
// VIXL must use a PrintLabel object whenever the base register is PC;
// the following check verifies this invariant, and guards against bugs.
DCHECK(!operand.GetBaseRegister().Is(pc));
@@ -116,7 +116,7 @@ class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
disassembler_stream_(os, this, options),
is_t32_(true) {}
- void PrintCodeAddress(uint32_t prog_ctr) OVERRIDE {
+ void PrintCodeAddress(uint32_t prog_ctr) override {
os() << "0x" << std::hex << std::setw(8) << std::setfill('0') << prog_ctr << ": ";
}
diff --git a/disassembler/disassembler_arm.h b/disassembler/disassembler_arm.h
index 237b577bc2..dd6621d344 100644
--- a/disassembler/disassembler_arm.h
+++ b/disassembler/disassembler_arm.h
@@ -26,14 +26,14 @@
namespace art {
namespace arm {
-class DisassemblerArm FINAL : public Disassembler {
+class DisassemblerArm final : public Disassembler {
class CustomDisassembler;
public:
explicit DisassemblerArm(DisassemblerOptions* options);
- size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
- void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+ size_t Dump(std::ostream& os, const uint8_t* begin) override;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
private:
uintptr_t GetPc(uintptr_t instr_ptr) const {
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index 19e4dfb486..89beaa927b 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -29,7 +29,7 @@
namespace art {
namespace arm64 {
-class CustomDisassembler FINAL : public vixl::aarch64::Disassembler {
+class CustomDisassembler final : public vixl::aarch64::Disassembler {
public:
explicit CustomDisassembler(DisassemblerOptions* options)
: vixl::aarch64::Disassembler(),
@@ -45,13 +45,13 @@ class CustomDisassembler FINAL : public vixl::aarch64::Disassembler {
// Use register aliases in the disassembly.
void AppendRegisterNameToOutput(const vixl::aarch64::Instruction* instr,
- const vixl::aarch64::CPURegister& reg) OVERRIDE;
+ const vixl::aarch64::CPURegister& reg) override;
// Improve the disassembly of literal load instructions.
- void VisitLoadLiteral(const vixl::aarch64::Instruction* instr) OVERRIDE;
+ void VisitLoadLiteral(const vixl::aarch64::Instruction* instr) override;
// Improve the disassembly of thread offset.
- void VisitLoadStoreUnsignedOffset(const vixl::aarch64::Instruction* instr) OVERRIDE;
+ void VisitLoadStoreUnsignedOffset(const vixl::aarch64::Instruction* instr) override;
private:
// Indicate if the disassembler should read data loaded from literal pools.
@@ -69,15 +69,15 @@ class CustomDisassembler FINAL : public vixl::aarch64::Disassembler {
DisassemblerOptions* options_;
};
-class DisassemblerArm64 FINAL : public Disassembler {
+class DisassemblerArm64 final : public Disassembler {
public:
explicit DisassemblerArm64(DisassemblerOptions* options) :
Disassembler(options), disasm(options) {
decoder.AppendVisitor(&disasm);
}
- size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
- void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+ size_t Dump(std::ostream& os, const uint8_t* begin) override;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
private:
vixl::aarch64::Decoder decoder;
diff --git a/disassembler/disassembler_mips.h b/disassembler/disassembler_mips.h
index afa6af366f..bc74b43ac9 100644
--- a/disassembler/disassembler_mips.h
+++ b/disassembler/disassembler_mips.h
@@ -24,7 +24,7 @@
namespace art {
namespace mips {
-class DisassemblerMips FINAL : public Disassembler {
+class DisassemblerMips final : public Disassembler {
public:
explicit DisassemblerMips(DisassemblerOptions* options, bool is_o32_abi)
: Disassembler(options),
@@ -33,8 +33,8 @@ class DisassemblerMips FINAL : public Disassembler {
is_o32_abi_(is_o32_abi) {}
const char* RegName(uint32_t reg);
- size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
- void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+ size_t Dump(std::ostream& os, const uint8_t* begin) override;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
private:
// Address and encoding of the last disassembled instruction.
diff --git a/disassembler/disassembler_x86.h b/disassembler/disassembler_x86.h
index 31b62bccf2..a329280b70 100644
--- a/disassembler/disassembler_x86.h
+++ b/disassembler/disassembler_x86.h
@@ -24,13 +24,13 @@ namespace x86 {
enum RegFile { GPR, MMX, SSE };
-class DisassemblerX86 FINAL : public Disassembler {
+class DisassemblerX86 final : public Disassembler {
public:
DisassemblerX86(DisassemblerOptions* options, bool supports_rex)
: Disassembler(options), supports_rex_(supports_rex) {}
- size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
- void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+ size_t Dump(std::ostream& os, const uint8_t* begin) override;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
private:
size_t DumpNops(std::ostream& os, const uint8_t* instr);
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index f54c55153a..ebc18fc5ff 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -347,9 +347,9 @@ class ImgObjectVisitor : public ObjectVisitor {
begin_image_ptr_(begin_image_ptr),
dirty_pages_(dirty_pages) { }
- virtual ~ImgObjectVisitor() OVERRIDE { }
+ ~ImgObjectVisitor() override { }
- virtual void Visit(mirror::Object* object) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Visit(mirror::Object* object) override REQUIRES_SHARED(Locks::mutator_lock_) {
// Sanity check that we are reading a real mirror::Object
CHECK(object->GetClass() != nullptr) << "Image object at address "
<< object
@@ -658,8 +658,8 @@ class ImgArtMethodVisitor : public ArtMethodVisitor {
dirty_func_(std::move(dirty_func)),
begin_image_ptr_(begin_image_ptr),
dirty_pages_(dirty_pages) { }
- virtual ~ImgArtMethodVisitor() OVERRIDE { }
- virtual void Visit(ArtMethod* method) OVERRIDE {
+ ~ImgArtMethodVisitor() override { }
+ void Visit(ArtMethod* method) override {
dirty_func_(method, begin_image_ptr_, dirty_pages_);
}
@@ -1671,8 +1671,7 @@ struct ImgDiagArgs : public CmdlineArgs {
protected:
using Base = CmdlineArgs;
- virtual ParseStatus ParseCustom(const StringPiece& option,
- std::string* error_msg) OVERRIDE {
+ ParseStatus ParseCustom(const StringPiece& option, std::string* error_msg) override {
{
ParseStatus base_parse = Base::ParseCustom(option, error_msg);
if (base_parse != kParseUnknownArgument) {
@@ -1703,7 +1702,7 @@ struct ImgDiagArgs : public CmdlineArgs {
return kParseOk;
}
- virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+ ParseStatus ParseChecks(std::string* error_msg) override {
// Perform the parent checks.
ParseStatus parent_checks = Base::ParseChecks(error_msg);
if (parent_checks != kParseOk) {
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 52096f0d7b..cb40c7d89d 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -57,7 +57,7 @@ class ImgDiagTest : public CommonRuntimeTest {
boot_image_location_ = image_spaces[0]->GetImageLocation();
}
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
// Needs to live until CommonRuntimeTest::SetUp finishes, since we pass it a cstring.
runtime_args_image_ = android::base::StringPrintf("-Ximage:%s", GetCoreArtLocation().c_str());
options->push_back(std::make_pair(runtime_args_image_, nullptr));
diff --git a/libartbase/base/allocator.cc b/libartbase/base/allocator.cc
index c4ac180a15..1bcfe8720a 100644
--- a/libartbase/base/allocator.cc
+++ b/libartbase/base/allocator.cc
@@ -25,7 +25,7 @@
namespace art {
-class MallocAllocator FINAL : public Allocator {
+class MallocAllocator final : public Allocator {
public:
MallocAllocator() {}
~MallocAllocator() {}
@@ -44,7 +44,7 @@ class MallocAllocator FINAL : public Allocator {
MallocAllocator g_malloc_allocator;
-class NoopAllocator FINAL : public Allocator {
+class NoopAllocator final : public Allocator {
public:
NoopAllocator() {}
~NoopAllocator() {}
diff --git a/libartbase/base/arena_bit_vector.cc b/libartbase/base/arena_bit_vector.cc
index 01f9013737..c6d899313d 100644
--- a/libartbase/base/arena_bit_vector.cc
+++ b/libartbase/base/arena_bit_vector.cc
@@ -50,7 +50,7 @@ using ArenaBitVectorAllocatorKind =
ArenaBitVectorAllocatorKindImpl<kArenaAllocatorCountAllocations>;
template <typename ArenaAlloc>
-class ArenaBitVectorAllocator FINAL : public Allocator, private ArenaBitVectorAllocatorKind {
+class ArenaBitVectorAllocator final : public Allocator, private ArenaBitVectorAllocatorKind {
public:
static ArenaBitVectorAllocator* Create(ArenaAlloc* allocator, ArenaAllocKind kind) {
void* storage = allocator->template Alloc<ArenaBitVectorAllocator>(kind);
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index 5668b6cd79..76f57dac74 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -26,7 +26,7 @@ namespace art {
// Bit memory region is a bit offset subregion of a normal memory region. This is useful for
// abstracting away the bit start offset to avoid needing to pass it as an argument everywhere.
-class BitMemoryRegion FINAL : public ValueObject {
+class BitMemoryRegion final : public ValueObject {
public:
struct Less {
bool operator()(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) const {
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index e24b073142..6dd2381c7e 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -318,7 +318,8 @@ static std::string GetDexFileName(const std::string& jar_prefix, bool host) {
std::vector<std::string> CommonArtTestImpl::GetLibCoreDexFileNames() {
return std::vector<std::string>({GetDexFileName("core-oj", IsHost()),
- GetDexFileName("core-libart", IsHost())});
+ GetDexFileName("core-libart", IsHost()),
+ GetDexFileName("core-simple", IsHost())});
}
std::string CommonArtTestImpl::GetTestAndroidRoot() {
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index 62834c7d35..d645fa11a4 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -209,11 +209,11 @@ class CommonArtTestBase : public TestType, public CommonArtTestImpl {
virtual ~CommonArtTestBase() {}
protected:
- virtual void SetUp() OVERRIDE {
+ void SetUp() override {
CommonArtTestImpl::SetUp();
}
- virtual void TearDown() OVERRIDE {
+ void TearDown() override {
CommonArtTestImpl::TearDown();
}
};
diff --git a/libartbase/base/dumpable.h b/libartbase/base/dumpable.h
index 0c00505461..bd8622f0e2 100644
--- a/libartbase/base/dumpable.h
+++ b/libartbase/base/dumpable.h
@@ -29,7 +29,7 @@ namespace art {
// os << Dumpable<MyType>(my_type_instance);
//
template<typename T>
-class Dumpable FINAL {
+class Dumpable final {
public:
explicit Dumpable(const T& value) : value_(value) {
}
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index cd0bf8fafc..bc79ff2880 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -122,6 +122,14 @@ static constexpr bool kMovingCollector = true;
static constexpr bool kMarkCompactSupport = false && kMovingCollector;
// True if we allow moving classes.
static constexpr bool kMovingClasses = !kMarkCompactSupport;
+// If true, enable generational collection when using the Concurrent Copying
+// collector, i.e. use sticky-bit CC for minor collections and (full) CC for
+// major collections.
+#ifdef ART_USE_GENERATIONAL_CC
+static constexpr bool kEnableGenerationalConcurrentCopyingCollection = true;
+#else
+static constexpr bool kEnableGenerationalConcurrentCopyingCollection = false;
+#endif
// If true, enable the tlab allocator by default.
#ifdef ART_USE_TLAB
diff --git a/libartbase/base/hash_set.h b/libartbase/base/hash_set.h
index 2b1a5eb947..42aa46feb9 100644
--- a/libartbase/base/hash_set.h
+++ b/libartbase/base/hash_set.h
@@ -359,6 +359,9 @@ class HashSet {
// and set the empty slot to be the location we just moved from.
// Relies on maintaining the invariant that there's no empty slots from the 'ideal' index of an
// element to its actual location/index.
+ // Note that since erase shuffles back elements, it may result in the same element being visited
+ // twice during HashSet iteration. This happens when an element already visited during iteration
+ // gets shuffled to the end of the bucket array.
iterator erase(iterator it) {
// empty_index is the index that will become empty.
size_t empty_index = it.index_;
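Aside: the double-visit caveat added to erase() above can be reproduced with a toy open-addressing table. This is a hypothetical sketch, not ART's HashSet: backward-shift deletion can move an already-visited element past the iterator, so it is seen twice.
#include <cstddef>
#include <iostream>
#include <vector>
// Toy linear-probing table; 0 marks an empty slot. It keeps the invariant
// described above: no empty slot between an element's ideal index and its
// actual index. Assumes the table is never completely full.
struct ToyTable {
  std::vector<int> slots;
  explicit ToyTable(size_t n) : slots(n, 0) {}
  size_t Ideal(int v) const { return static_cast<size_t>(v) % slots.size(); }
  void Insert(int v) {
    size_t i = Ideal(v);
    while (slots[i] != 0) i = (i + 1) % slots.size();
    slots[i] = v;
  }
  // True if k lies cyclically in (i, j].
  bool Between(size_t i, size_t k, size_t j) const {
    return (i < j) ? (i < k && k <= j) : (k > i || k <= j);
  }
  void EraseAt(size_t index) {
    size_t empty = index;
    slots[empty] = 0;
    for (size_t j = (empty + 1) % slots.size(); slots[j] != 0;
         j = (j + 1) % slots.size()) {
      // Shuffle the element back unless that would strand it before its
      // ideal slot.
      if (!Between(empty, Ideal(slots[j]), j)) {
        slots[empty] = slots[j];
        slots[j] = 0;
        empty = j;
      }
    }
  }
};
int main() {
  ToyTable t(4);          // Ideal(3) == Ideal(7) == 3.
  t.Insert(3);            // Lands in slot 3.
  t.Insert(7);            // Slot 3 is taken; wraps around to slot 0.
  for (size_t i = 0; i < t.slots.size(); ++i) {
    if (t.slots[i] == 0) continue;
    std::cout << "visit " << t.slots[i] << " at slot " << i << "\n";
    if (t.slots[i] == 3) {
      t.EraseAt(i);       // Shuffles 7 from slot 0 back into slot 3.
      --i;                // Re-examine the freed slot, like erase() does.
    }
  }
  // Output: 7 at slot 0, 3 at slot 3, then 7 again at slot 3.
  return 0;
}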
diff --git a/libartbase/base/indenter.h b/libartbase/base/indenter.h
index a479b7d650..81d55fc2f3 100644
--- a/libartbase/base/indenter.h
+++ b/libartbase/base/indenter.h
@@ -37,7 +37,7 @@ class Indenter : public std::streambuf {
count_(count) {}
private:
- std::streamsize xsputn(const char* s, std::streamsize n) OVERRIDE {
+ std::streamsize xsputn(const char* s, std::streamsize n) override {
std::streamsize result = n; // Aborts on failure.
const char* eol = static_cast<const char*>(memchr(s, '\n', n));
while (eol != nullptr) {
@@ -54,7 +54,7 @@ class Indenter : public std::streambuf {
return result;
}
- int_type overflow(int_type c) OVERRIDE {
+ int_type overflow(int_type c) override {
if (UNLIKELY(c == std::char_traits<char>::eof())) {
out_sbuf_->pubsync();
return c;
diff --git a/libartbase/base/leb128.h b/libartbase/base/leb128.h
index d5847fd6c6..b866d37552 100644
--- a/libartbase/base/leb128.h
+++ b/libartbase/base/leb128.h
@@ -357,7 +357,7 @@ class Leb128Encoder {
// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
template <typename Vector = std::vector<uint8_t>>
-class Leb128EncodingVector FINAL : private Vector,
+class Leb128EncodingVector final : private Vector,
public Leb128Encoder<Vector> {
static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index f26cf0708b..33866bba08 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -23,9 +23,6 @@
#include "android-base/macros.h"
#include "android-base/thread_annotations.h"
-#define OVERRIDE override
-#define FINAL final
-
// Declare a friend relationship in a class with a test. Used rather than FRIEND_TEST to avoid
// globally importing gtest/gtest.h into the main ART header files.
#define ART_FRIEND_TEST(test_set_name, individual_test)\
diff --git a/libartbase/base/malloc_arena_pool.cc b/libartbase/base/malloc_arena_pool.cc
index 15a5d71a6b..02e29f1d21 100644
--- a/libartbase/base/malloc_arena_pool.cc
+++ b/libartbase/base/malloc_arena_pool.cc
@@ -28,7 +28,7 @@
namespace art {
-class MallocArena FINAL : public Arena {
+class MallocArena final : public Arena {
public:
explicit MallocArena(size_t size = arena_allocator::kArenaDefaultSize);
virtual ~MallocArena();
diff --git a/libartbase/base/malloc_arena_pool.h b/libartbase/base/malloc_arena_pool.h
index c48be59eb5..9216c033c3 100644
--- a/libartbase/base/malloc_arena_pool.h
+++ b/libartbase/base/malloc_arena_pool.h
@@ -23,17 +23,17 @@
namespace art {
-class MallocArenaPool FINAL : public ArenaPool {
+class MallocArenaPool final : public ArenaPool {
public:
MallocArenaPool();
~MallocArenaPool();
- Arena* AllocArena(size_t size) OVERRIDE;
- void FreeArenaChain(Arena* first) OVERRIDE;
- size_t GetBytesAllocated() const OVERRIDE;
- void ReclaimMemory() OVERRIDE;
- void LockReclaimMemory() OVERRIDE;
+ Arena* AllocArena(size_t size) override;
+ void FreeArenaChain(Arena* first) override;
+ size_t GetBytesAllocated() const override;
+ void ReclaimMemory() override;
+ void LockReclaimMemory() override;
// Is a nop for malloc pools.
- void TrimMaps() OVERRIDE;
+ void TrimMaps() override;
private:
Arena* free_arenas_;
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 5cea869519..1bf553d293 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -23,6 +23,10 @@
#include <sys/resource.h>
#endif
+#if defined(__linux__)
+#include <sys/prctl.h>
+#endif
+
#include <map>
#include <memory>
#include <sstream>
@@ -30,12 +34,6 @@
#include "android-base/stringprintf.h"
#include "android-base/unique_fd.h"
-#if !defined(__Fuchsia__)
-#include "cutils/ashmem.h"
-#else
-#include "fuchsia_compat.h"
-#endif
-
#include "allocator.h"
#include "bit_utils.h"
#include "globals.h"
@@ -61,6 +59,24 @@ using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;
// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (e.g. ElfMap::Load()).
static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
+// A map containing unique strings used for identifying anonymous mappings.
+static std::map<std::string, int> debugStrMap GUARDED_BY(MemMap::GetMemMapsLock());
+
+// Retrieve iterator to a `gMaps` entry that is known to exist.
+Maps::iterator GetGMapsEntry(const MemMap& map) REQUIRES(MemMap::GetMemMapsLock()) {
+ DCHECK(map.IsValid());
+ DCHECK(gMaps != nullptr);
+ for (auto it = gMaps->lower_bound(map.BaseBegin()), end = gMaps->end();
+ it != end && it->first == map.BaseBegin();
+ ++it) {
+ if (it->second == &map) {
+ return it;
+ }
+ }
+ LOG(FATAL) << "MemMap not found";
+ UNREACHABLE();
+}
+
std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
os << "MemMap:" << std::endl;
for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
@@ -211,6 +227,33 @@ bool MemMap::CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byt
return false;
}
+bool MemMap::CheckReservation(uint8_t* expected_ptr,
+ size_t byte_count,
+ const char* name,
+ const MemMap& reservation,
+ /*out*/std::string* error_msg) {
+ if (!reservation.IsValid()) {
+ *error_msg = StringPrintf("Invalid reservation for %s", name);
+ return false;
+ }
+ DCHECK_ALIGNED(reservation.Begin(), kPageSize);
+ if (reservation.Begin() != expected_ptr) {
+ *error_msg = StringPrintf("Bad image reservation start for %s: %p instead of %p",
+ name,
+ reservation.Begin(),
+ expected_ptr);
+ return false;
+ }
+ if (byte_count > reservation.Size()) {
+ *error_msg = StringPrintf("Insufficient reservation, required %zu, available %zu",
+ byte_count,
+ reservation.Size());
+ return false;
+ }
+ return true;
+}
+
#if USE_ART_LOW_4G_ALLOCATOR
void* MemMap::TryMemMapLow4GB(void* ptr,
size_t page_aligned_byte_count,
@@ -231,20 +274,48 @@ void* MemMap::TryMemMapLow4GB(void* ptr,
}
#endif
-MemMap* MemMap::MapAnonymous(const char* name,
- uint8_t* expected_ptr,
- size_t byte_count,
- int prot,
- bool low_4gb,
- bool reuse,
- std::string* error_msg,
- bool use_ashmem) {
+void MemMap::SetDebugName(void* map_ptr, const char* name, size_t size) {
+ // Debug naming is only used for Android target builds. For Linux targets,
+ // we'll still call prctl, but it won't do anything until the prctl extension is upstreamed.
+ if (kIsTargetFuchsia || !kIsTargetBuild) {
+ return;
+ }
+
+ // Lock as std::map is not thread-safe.
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+
+ std::string debug_friendly_name("dalvik-");
+ debug_friendly_name += name;
+ auto it = debugStrMap.find(debug_friendly_name);
+
+ if (it == debugStrMap.end()) {
+ it = debugStrMap.insert(std::make_pair(std::move(debug_friendly_name), 1)).first;
+ }
+
+ DCHECK(it != debugStrMap.end());
+#if defined(PR_SET_VMA) && defined(__linux__)
+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, size, it->first.c_str());
+#else
+ // Prevent variable unused compiler errors.
+ UNUSED(map_ptr, size);
+#endif
+}
+
+MemMap MemMap::MapAnonymous(const char* name,
+ uint8_t* addr,
+ size_t byte_count,
+ int prot,
+ bool low_4gb,
+ bool reuse,
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg,
+ bool use_debug_name) {
#ifndef __LP64__
UNUSED(low_4gb);
#endif
- use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
if (byte_count == 0) {
- return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
+ *error_msg = "Empty MemMap requested.";
+ return Invalid();
}
size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
@@ -252,51 +323,25 @@ MemMap* MemMap::MapAnonymous(const char* name,
if (reuse) {
// reuse means it is okay that it overlaps an existing page mapping.
// Only use this if you actually made the page reservation yourself.
- CHECK(expected_ptr != nullptr);
+ CHECK(addr != nullptr);
+ DCHECK(reservation == nullptr);
- DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
+ DCHECK(ContainedWithinExistingMap(addr, byte_count, error_msg)) << *error_msg;
flags |= MAP_FIXED;
- }
-
- if (use_ashmem) {
- if (!kIsTargetBuild) {
- // When not on Android (either host or assuming a linux target) ashmem is faked using
- // files in /tmp. Ensure that such files won't fail due to ulimit restrictions. If they
- // will then use a regular mmap.
- struct rlimit rlimit_fsize;
- CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
- use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
- (page_aligned_byte_count < rlimit_fsize.rlim_cur);
+ } else if (reservation != nullptr) {
+ CHECK(addr != nullptr);
+ if (!CheckReservation(addr, byte_count, name, *reservation, error_msg)) {
+ return MemMap::Invalid();
}
+ flags |= MAP_FIXED;
}
unique_fd fd;
-
- if (use_ashmem) {
- // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
- // prefixed "dalvik-".
- std::string debug_friendly_name("dalvik-");
- debug_friendly_name += name;
- fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
-
- if (fd.get() == -1) {
- // We failed to create the ashmem region. Print a warning, but continue
- // anyway by creating a true anonymous mmap with an fd of -1. It is
- // better to use an unlabelled anonymous map than to fail to create a
- // map at all.
- PLOG(WARNING) << "ashmem_create_region failed for '" << name << "'";
- } else {
- // We succeeded in creating the ashmem region. Use the created ashmem
- // region as backing for the mmap.
- flags &= ~MAP_ANONYMOUS;
- }
- }
-
// We need to store and potentially set an error number for pretty printing of errors
int saved_errno = 0;
- void* actual = MapInternal(expected_ptr,
+ void* actual = MapInternal(addr,
page_aligned_byte_count,
prot,
flags,
@@ -313,28 +358,43 @@ MemMap* MemMap::MapAnonymous(const char* name,
*error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
"See process maps in the log.",
- expected_ptr,
+ addr,
page_aligned_byte_count,
prot,
flags,
fd.get(),
strerror(saved_errno));
}
- return nullptr;
+ return Invalid();
}
- if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
- return nullptr;
+ if (!CheckMapRequest(addr, actual, page_aligned_byte_count, error_msg)) {
+ return Invalid();
+ }
+
+ if (use_debug_name) {
+ SetDebugName(actual, name, page_aligned_byte_count);
}
- return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
- page_aligned_byte_count, prot, reuse);
+
+ if (reservation != nullptr) {
+ // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
+ DCHECK_EQ(actual, reservation->Begin());
+ reservation->ReleaseReservedMemory(byte_count);
+ }
+ return MemMap(name,
+ reinterpret_cast<uint8_t*>(actual),
+ byte_count,
+ actual,
+ page_aligned_byte_count,
+ prot,
+ reuse);
}
-MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
+MemMap MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
if (byte_count == 0) {
- return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
+ return Invalid();
}
const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
- return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
+ return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}
template<typename A, typename B>
@@ -342,19 +402,18 @@ static ptrdiff_t PointerDiff(A* a, B* b) {
return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
}
-bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
+bool MemMap::ReplaceWith(MemMap* source, /*out*/std::string* error) {
#if !HAVE_MREMAP_SYSCALL
- UNUSED(source_ptr);
+ UNUSED(source);
*error = "Cannot perform atomic replace because we are missing the required mremap syscall";
return false;
#else // !HAVE_MREMAP_SYSCALL
- CHECK(source_ptr != nullptr);
- CHECK(*source_ptr != nullptr);
+ CHECK(source != nullptr);
+ CHECK(source->IsValid());
if (!MemMap::kCanReplaceMapping) {
*error = "Unable to perform atomic replace due to runtime environment!";
return false;
}
- MemMap* source = *source_ptr;
// neither can be reuse.
if (source->reuse_ || reuse_) {
*error = "One or both mappings is not a real mmap!";
@@ -406,12 +465,9 @@ bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
// them later.
size_t new_base_size = std::max(source->base_size_, base_size_);
- // Delete the old source, don't unmap it though (set reuse) since it is already gone.
- *source_ptr = nullptr;
+ // Invalidate *source, don't unmap it though since it is already gone.
size_t source_size = source->size_;
- source->already_unmapped_ = true;
- delete source;
- source = nullptr;
+ source->Invalidate();
size_ = source_size;
base_size_ = new_base_size;
@@ -422,29 +478,37 @@ bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
#endif // !HAVE_MREMAP_SYSCALL
}
-MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
- size_t byte_count,
- int prot,
- int flags,
- int fd,
- off_t start,
- bool low_4gb,
- bool reuse,
- const char* filename,
- std::string* error_msg) {
+MemMap MemMap::MapFileAtAddress(uint8_t* expected_ptr,
+ size_t byte_count,
+ int prot,
+ int flags,
+ int fd,
+ off_t start,
+ bool low_4gb,
+ const char* filename,
+ bool reuse,
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
CHECK_NE(0, prot);
CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
- // Note that we do not allow MAP_FIXED unless reuse == true, i.e we
- // expect his mapping to be contained within an existing map.
+ // Note that we do not allow MAP_FIXED unless reuse == true or we have an existing
+ // reservation, i.e. we expect this mapping to be contained within an existing map.
if (reuse) {
// reuse means it is okay that it overlaps an existing page mapping.
// Only use this if you actually made the page reservation yourself.
CHECK(expected_ptr != nullptr);
+ DCHECK(reservation == nullptr);
DCHECK(error_msg != nullptr);
DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
<< ((error_msg != nullptr) ? *error_msg : std::string());
flags |= MAP_FIXED;
+ } else if (reservation != nullptr) {
+ DCHECK(error_msg != nullptr);
+ if (!CheckReservation(expected_ptr, byte_count, filename, *reservation, error_msg)) {
+ return Invalid();
+ }
+ flags |= MAP_FIXED;
} else {
CHECK_EQ(0, flags & MAP_FIXED);
// Don't bother checking for an overlapping region here. We'll
@@ -452,7 +516,7 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
}
if (byte_count == 0) {
- return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
+ return Invalid();
}
// Adjust 'offset' to be page-aligned as required by mmap.
int page_offset = start % kPageSize;
@@ -491,10 +555,10 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
static_cast<int64_t>(page_aligned_offset), filename,
strerror(saved_errno));
}
- return nullptr;
+ return Invalid();
}
if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
- return nullptr;
+ return Invalid();
}
if (redzone_size != 0) {
const uint8_t *real_start = actual + page_offset;
@@ -506,14 +570,32 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
page_aligned_byte_count -= redzone_size;
}
- return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
- prot, reuse, redzone_size);
+ if (reservation != nullptr) {
+ // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
+ DCHECK_EQ(actual, reservation->Begin());
+ reservation->ReleaseReservedMemory(byte_count);
+ }
+ return MemMap(filename,
+ actual + page_offset,
+ byte_count,
+ actual,
+ page_aligned_byte_count,
+ prot,
+ reuse,
+ redzone_size);
+}
+
+MemMap::MemMap(MemMap&& other)
+ : MemMap() {
+ swap(other);
}
MemMap::~MemMap() {
- if (base_begin_ == nullptr && base_size_ == 0) {
- return;
- }
+ Reset();
+}
+
+void MemMap::DoReset() {
+ DCHECK(IsValid());
// Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
// before it is returned to the system.
@@ -533,19 +615,56 @@ MemMap::~MemMap() {
}
}
+ Invalidate();
+}
+
+void MemMap::Invalidate() {
+ DCHECK(IsValid());
+
// Remove it from gMaps.
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
- bool found = false;
- DCHECK(gMaps != nullptr);
- for (auto it = gMaps->lower_bound(base_begin_), end = gMaps->end();
- it != end && it->first == base_begin_; ++it) {
- if (it->second == this) {
- found = true;
- gMaps->erase(it);
- break;
+ auto it = GetGMapsEntry(*this);
+ gMaps->erase(it);
+
+ // Mark it as invalid.
+ base_size_ = 0u;
+ DCHECK(!IsValid());
+}
+
+void MemMap::swap(MemMap& other) {
+ if (IsValid() || other.IsValid()) {
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ DCHECK(gMaps != nullptr);
+ auto this_it = IsValid() ? GetGMapsEntry(*this) : gMaps->end();
+ auto other_it = other.IsValid() ? GetGMapsEntry(other) : gMaps->end();
+ if (IsValid()) {
+ DCHECK(this_it != gMaps->end());
+ DCHECK_EQ(this_it->second, this);
+ this_it->second = &other;
+ }
+ if (other.IsValid()) {
+ DCHECK(other_it != gMaps->end());
+ DCHECK_EQ(other_it->second, &other);
+ other_it->second = this;
}
+ // Swap members with the `mem_maps_lock_` held so that `base_begin_` matches
+ // with the `gMaps` key when other threads try to use `gMaps`.
+ SwapMembers(other);
+ } else {
+ SwapMembers(other);
}
- CHECK(found) << "MemMap not found";
+}
+
+void MemMap::SwapMembers(MemMap& other) {
+ name_.swap(other.name_);
+ std::swap(begin_, other.begin_);
+ std::swap(size_, other.size_);
+ std::swap(base_begin_, other.base_begin_);
+ std::swap(base_size_, other.base_size_);
+ std::swap(prot_, other.prot_);
+ std::swap(reuse_, other.reuse_);
+ std::swap(already_unmapped_, other.already_unmapped_);
+ std::swap(redzone_size_, other.redzone_size_);
}
MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
@@ -568,9 +687,11 @@ MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_
}
}
-MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
- std::string* error_msg, bool use_ashmem) {
- use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
+MemMap MemMap::RemapAtEnd(uint8_t* new_end,
+ const char* tail_name,
+ int tail_prot,
+ std::string* error_msg,
+ bool use_debug_name) {
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
@@ -583,11 +704,11 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
uint8_t* new_base_end = new_end;
DCHECK_LE(new_base_end, old_base_end);
if (new_base_end == old_base_end) {
- return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
+ return Invalid();
}
- size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
- base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
- DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
+ size_t new_size = new_end - reinterpret_cast<uint8_t*>(begin_);
+ size_t new_base_size = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
+ DCHECK_LE(begin_ + new_size, reinterpret_cast<uint8_t*>(base_begin_) + new_base_size);
size_t tail_size = old_end - new_end;
uint8_t* tail_base_begin = new_base_end;
size_t tail_base_size = old_base_end - new_base_end;
@@ -595,34 +716,12 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
DCHECK_ALIGNED(tail_base_size, kPageSize);
unique_fd fd;
- int flags = MAP_PRIVATE | MAP_ANONYMOUS;
- if (use_ashmem) {
- // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
- // prefixed "dalvik-".
- std::string debug_friendly_name("dalvik-");
- debug_friendly_name += tail_name;
- fd.reset(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
- flags = MAP_PRIVATE | MAP_FIXED;
- if (fd.get() == -1) {
- *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
- tail_name, strerror(errno));
- return nullptr;
- }
- }
+ int flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
- // Unmap/map the tail region.
- int result = TargetMUnmap(tail_base_begin, tail_base_size);
- if (result == -1) {
- PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
- *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
- tail_base_begin, tail_base_size, name_.c_str());
- return nullptr;
- }
- // Don't cause memory allocation between the munmap and the mmap
- // calls. Otherwise, libc (or something else) might take this memory
- // region. Note this isn't perfect as there's no way to prevent
- // other threads to try to take this memory region here.
+ // Note: Do not explicitly unmap the tail region; mmap() with MAP_FIXED automatically
+ // removes old mappings for the overlapping region. This makes the operation atomic
+ // and prevents other threads from racing to allocate memory in the requested region.
uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
tail_base_size,
tail_prot,
@@ -634,9 +733,62 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
*error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
"maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
fd.get());
- return nullptr;
+ return Invalid();
+ }
+ // Update *this.
+ if (new_base_size == 0u) {
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ auto it = GetGMapsEntry(*this);
+ gMaps->erase(it);
+ }
+
+ if (use_debug_name) {
+ SetDebugName(actual, tail_name, tail_base_size);
+ }
+
+ size_ = new_size;
+ base_size_ = new_base_size;
+ // Return the new mapping.
+ return MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
+}
+
+MemMap MemMap::TakeReservedMemory(size_t byte_count) {
+ uint8_t* begin = Begin();
+ ReleaseReservedMemory(byte_count); // Performs necessary DCHECK()s on this reservation.
+ size_t base_size = RoundUp(byte_count, kPageSize);
+ return MemMap(name_, begin, byte_count, begin, base_size, prot_, /* reuse */ false);
+}
+
+void MemMap::ReleaseReservedMemory(size_t byte_count) {
+ // Check the reservation mapping.
+ DCHECK(IsValid());
+ DCHECK(!reuse_);
+ DCHECK(!already_unmapped_);
+ DCHECK_EQ(redzone_size_, 0u);
+ DCHECK_EQ(begin_, base_begin_);
+ DCHECK_EQ(size_, base_size_);
+ DCHECK_ALIGNED(begin_, kPageSize);
+ DCHECK_ALIGNED(size_, kPageSize);
+
+ // Check and round up the `byte_count`.
+ DCHECK_NE(byte_count, 0u);
+ DCHECK_LE(byte_count, size_);
+ byte_count = RoundUp(byte_count, kPageSize);
+
+ if (byte_count == size_) {
+ Invalidate();
+ } else {
+ // Shrink the reservation MemMap and update its `gMaps` entry.
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ auto it = GetGMapsEntry(*this);
+ // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
+ gMaps->erase(it);
+ begin_ += byte_count;
+ size_ -= byte_count;
+ base_begin_ = begin_;
+ base_size_ = size_;
+ gMaps->emplace(base_begin_, this);
}
- return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}
void MemMap::MadviseDontNeedAndZero() {
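Aside: the MAP_FIXED comment above is the crux of the RemapAtEnd rewrite. A minimal standalone illustration of the property it relies on (a sketch, not ART code):
#include <cstddef>
#include <cstdio>
#include <sys/mman.h>
int main() {
  const size_t kPage = 4096;  // Assumed page size for the sketch.
  char* base = static_cast<char*>(mmap(nullptr, 2 * kPage, PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (base == MAP_FAILED) { perror("mmap"); return 1; }
  // Repurpose the second page in one syscall. MAP_FIXED atomically replaces
  // any overlapping mapping, so there is no munmap()/mmap() window in which
  // another thread could claim the address range.
  void* tail = mmap(base + kPage, kPage, PROT_READ,
                    MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
  if (tail == MAP_FAILED) { perror("mmap MAP_FIXED"); return 1; }
  munmap(base, 2 * kPage);
  return 0;
}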
@@ -675,15 +827,15 @@ bool MemMap::Protect(int prot) {
return false;
}
-bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
+bool MemMap::CheckNoGaps(MemMap& begin_map, MemMap& end_map) {
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
- CHECK(begin_map != nullptr);
- CHECK(end_map != nullptr);
+ CHECK(begin_map.IsValid());
+ CHECK(end_map.IsValid());
CHECK(HasMemMap(begin_map));
CHECK(HasMemMap(end_map));
- CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
- MemMap* map = begin_map;
- while (map->BaseBegin() != end_map->BaseBegin()) {
+ CHECK_LE(begin_map.BaseBegin(), end_map.BaseBegin());
+ MemMap* map = &begin_map;
+ while (map->BaseBegin() != end_map.BaseBegin()) {
MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
if (next_map == nullptr) {
// Found a gap.
@@ -758,11 +910,11 @@ void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
}
}
-bool MemMap::HasMemMap(MemMap* map) {
- void* base_begin = map->BaseBegin();
+bool MemMap::HasMemMap(MemMap& map) {
+ void* base_begin = map.BaseBegin();
for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
it != end && it->first == base_begin; ++it) {
- if (it->second == map) {
+ if (it->second == &map) {
return true;
}
}
@@ -1049,6 +1201,7 @@ void MemMap::AlignBy(size_t size) {
CHECK_EQ(size_, base_size_) << "Unsupported";
CHECK_GT(size, static_cast<size_t>(kPageSize));
CHECK_ALIGNED(size, kPageSize);
+ CHECK(!reuse_);
if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), size) &&
IsAlignedParam(base_size_, size)) {
// Already aligned.
@@ -1079,17 +1232,17 @@ void MemMap::AlignBy(size_t size) {
<< " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
}
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ if (base_begin < aligned_base_begin) {
+ auto it = GetGMapsEntry(*this);
+ // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
+ gMaps->erase(it);
+ gMaps->insert(std::make_pair(aligned_base_begin, this));
+ }
base_begin_ = aligned_base_begin;
base_size_ = aligned_base_size;
begin_ = aligned_base_begin;
size_ = aligned_base_size;
DCHECK(gMaps != nullptr);
- if (base_begin < aligned_base_begin) {
- auto it = gMaps->find(base_begin);
- CHECK(it != gMaps->end()) << "MemMap not found";
- gMaps->erase(it);
- gMaps->insert(std::make_pair(base_begin_, this));
- }
}
} // namespace art
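Aside: SetDebugName above replaces the old ashmem-based naming with prctl. A minimal standalone sketch of the same mechanism, assuming a kernel with anon-VMA naming support (e.g. Android kernels), where the region then shows up in /proc/self/maps as "[anon:dalvik-demo]":
#include <cstddef>
#include <cstdio>
#include <sys/mman.h>
#if defined(__linux__)
#include <sys/prctl.h>
#endif
int main() {
  const size_t kSize = 4096;
  void* mem = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) { perror("mmap"); return 1; }
#if defined(PR_SET_VMA) && defined(__linux__)
  // On supporting kernels this names the region; elsewhere the call
  // fails harmlessly, mirroring the guard in SetDebugName above.
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, mem, kSize, "dalvik-demo");
#endif
  munmap(mem, kSize);
  return 0;
}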
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 1979357714..20eda324e1 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -60,6 +60,37 @@ class MemMap {
public:
static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
+ // Creates an invalid mapping.
+ MemMap() {}
+
+ // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
+ static MemMap Invalid() {
+ return MemMap();
+ }
+
+ MemMap(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_);
+ MemMap& operator=(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_) {
+ Reset();
+ swap(other);
+ return *this;
+ }
+
+ // Releases the memory mapping.
+ ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
+
+ // Swap two MemMaps.
+ void swap(MemMap& other);
+
+ void Reset() {
+ if (IsValid()) {
+ DoReset();
+ }
+ }
+
+ bool IsValid() const {
+ return base_size_ != 0u;
+ }
+
// Replace the data in this memmap with the data in the memmap pointed to by source. The caller
// relinquishes ownership of the source mmap.
//
@@ -74,85 +105,109 @@ class MemMap {
// * mremap must succeed when called on the mappings.
//
// If this call succeeds it will return true and:
- // * Deallocate *source
- // * Sets *source to nullptr
+ // * Invalidate *source
// * The protection of this will remain the same.
// * The size of this will be the size of the source
// * The data in this will be the data from source.
//
// If this call fails it will return false and make no changes to *source or this. The ownership
// of the source mmap is returned to the caller.
- bool ReplaceWith(/*in-out*/MemMap** source, /*out*/std::string* error);
+ bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);
+
+ // Set a debug friendly name for a map. It will be prefixed with "dalvik-".
+ static void SetDebugName(void* map_ptr, const char* name, size_t size);
// Request an anonymous region of length 'byte_count' and a requested base address.
// Use null as the requested base address if you don't care.
- // "reuse" allows re-mapping an address range from an existing mapping.
+ //
+ // `reuse` allows re-mapping an address range from an existing mapping which retains the
+ // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
+ // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
//
// The word "anonymous" in this context means "not backed by a file". The supplied
// 'name' will be used -- on systems that support it -- to give the mapping
// a name.
//
- // On success, returns returns a MemMap instance. On failure, returns null.
- static MemMap* MapAnonymous(const char* name,
- uint8_t* addr,
- size_t byte_count,
- int prot,
- bool low_4gb,
- bool reuse,
- std::string* error_msg,
- bool use_ashmem = true);
+ // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
+ static MemMap MapAnonymous(const char* name,
+ uint8_t* addr,
+ size_t byte_count,
+ int prot,
+ bool low_4gb,
+ bool reuse,
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg,
+ bool use_debug_name = true);
+ static MemMap MapAnonymous(const char* name,
+ uint8_t* addr,
+ size_t byte_count,
+ int prot,
+ bool low_4gb,
+ /*out*/std::string* error_msg) {
+ return MapAnonymous(name,
+ addr,
+ byte_count,
+ prot,
+ low_4gb,
+ /* reuse */ false,
+ /* reservation */ nullptr,
+ error_msg);
+ }
// Create placeholder for a region allocated by direct call to mmap.
// This is useful when we do not have control over the code calling mmap,
// but when we still want to keep track of it in the list.
// The region is not considered to be owned and will not be unmmaped.
- static MemMap* MapDummy(const char* name, uint8_t* addr, size_t byte_count);
+ static MemMap MapDummy(const char* name, uint8_t* addr, size_t byte_count);
// Map part of a file, taking care of non-page aligned offsets. The
// "start" offset is absolute, not relative.
//
- // On success, returns returns a MemMap instance. On failure, returns null.
- static MemMap* MapFile(size_t byte_count,
- int prot,
- int flags,
- int fd,
- off_t start,
- bool low_4gb,
- const char* filename,
- std::string* error_msg) {
+ // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
+ static MemMap MapFile(size_t byte_count,
+ int prot,
+ int flags,
+ int fd,
+ off_t start,
+ bool low_4gb,
+ const char* filename,
+ std::string* error_msg) {
return MapFileAtAddress(nullptr,
byte_count,
prot,
flags,
fd,
start,
- /*low_4gb*/low_4gb,
- /*reuse*/false,
+ /* low_4gb */ low_4gb,
filename,
+ /* reuse */ false,
+ /* reservation */ nullptr,
error_msg);
}
// Map part of a file, taking care of non-page aligned offsets. The "start" offset is absolute,
// not relative. This version allows requesting a specific address for the base of the mapping.
- // "reuse" allows us to create a view into an existing mapping where we do not take ownership of
- // the memory. If error_msg is null then we do not print /proc/maps to the log if
- // MapFileAtAddress fails. This helps improve performance of the fail case since reading and
- // printing /proc/maps takes several milliseconds in the worst case.
//
- // On success, returns returns a MemMap instance. On failure, returns null.
- static MemMap* MapFileAtAddress(uint8_t* addr,
- size_t byte_count,
- int prot,
- int flags,
- int fd,
- off_t start,
- bool low_4gb,
- bool reuse,
- const char* filename,
- std::string* error_msg);
-
- // Releases the memory mapping.
- ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
+ // `reuse` allows re-mapping an address range from an existing mapping which retains the
+ // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
+ // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
+ //
+ // If error_msg is null then we do not print /proc/maps to the log if MapFileAtAddress fails.
+ // This helps improve performance of the fail case since reading and printing /proc/maps takes
+ // several milliseconds in the worst case.
+ //
+ // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
+ static MemMap MapFileAtAddress(uint8_t* addr,
+ size_t byte_count,
+ int prot,
+ int flags,
+ int fd,
+ off_t start,
+ bool low_4gb,
+ const char* filename,
+ bool reuse,
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg);
const std::string& GetName() const {
return name_;
@@ -200,13 +255,21 @@ class MemMap {
}
// Unmap the pages at end and remap them to create another memory map.
- MemMap* RemapAtEnd(uint8_t* new_end,
- const char* tail_name,
- int tail_prot,
- std::string* error_msg,
- bool use_ashmem = true);
+ MemMap RemapAtEnd(uint8_t* new_end,
+ const char* tail_name,
+ int tail_prot,
+ std::string* error_msg,
+ bool use_debug_name = true);
+
+ // Take ownership of pages at the beginning of the mapping. The mapping must be an
+ // anonymous reservation mapping, owning entire pages. The `byte_count` must not
+ // exceed the size of this reservation.
+ //
+ // Returns a mapping owning `byte_count` bytes rounded up to entire pages
+ // with size set to the passed `byte_count`.
+ MemMap TakeReservedMemory(size_t byte_count);
- static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
+ static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
REQUIRES(!MemMap::mem_maps_lock_);
static void DumpMaps(std::ostream& os, bool terse = false)
REQUIRES(!MemMap::mem_maps_lock_);
@@ -240,9 +303,13 @@ class MemMap {
bool reuse,
size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);
+ void DoReset();
+ void Invalidate();
+ void SwapMembers(MemMap& other);
+
static void DumpMapsLocked(std::ostream& os, bool terse)
REQUIRES(MemMap::mem_maps_lock_);
- static bool HasMemMap(MemMap* map)
+ static bool HasMemMap(MemMap& map)
REQUIRES(MemMap::mem_maps_lock_);
static MemMap* GetLargestMemMapAt(void* address)
REQUIRES(MemMap::mem_maps_lock_);
@@ -265,29 +332,38 @@ class MemMap {
off_t offset)
REQUIRES(!MemMap::mem_maps_lock_);
+ // Release memory owned by a reservation mapping.
+ void ReleaseReservedMemory(size_t byte_count);
+
// member function to access real_munmap
static bool CheckMapRequest(uint8_t* expected_ptr,
void* actual_ptr,
size_t byte_count,
std::string* error_msg);
- const std::string name_;
- uint8_t* begin_; // Start of data. May be changed by AlignBy.
- size_t size_; // Length of data.
+ static bool CheckReservation(uint8_t* expected_ptr,
+ size_t byte_count,
+ const char* name,
+ const MemMap& reservation,
+ /*out*/std::string* error_msg);
+
+ std::string name_;
+ uint8_t* begin_ = nullptr; // Start of data. May be changed by AlignBy.
+ size_t size_ = 0u; // Length of data.
- void* base_begin_; // Page-aligned base address. May be changed by AlignBy.
- size_t base_size_; // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
- int prot_; // Protection of the map.
+ void* base_begin_ = nullptr; // Page-aligned base address. May be changed by AlignBy.
+ size_t base_size_ = 0u; // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
+ int prot_ = 0; // Protection of the map.
// When reuse_ is true, this is just a view of an existing mapping
// and we do not take ownership and are not responsible for
// unmapping.
- const bool reuse_;
+ bool reuse_ = false;
// When already_unmapped_ is true the destructor will not call munmap.
- bool already_unmapped_;
+ bool already_unmapped_ = false;
- const size_t redzone_size_;
+ size_t redzone_size_ = 0u;
#if USE_ART_LOW_4G_ALLOCATOR
static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent.
@@ -309,6 +385,10 @@ class MemMap {
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
};
+inline void swap(MemMap& lhs, MemMap& rhs) {
+ lhs.swap(rhs);
+}
+
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
// Zero and release pages if possible, no requirements on alignments.
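Aside: MemMap::swap in this change has to repoint gMaps entries while swapping. The pattern distills to something like this self-registering handle (a generic sketch, not ART's code):
#include <map>
#include <mutex>
#include <utility>
// A movable handle registered in a global registry keyed by its resource
// address. swap() must fix up the registry's back-pointers under the same
// lock that readers take, so no thread ever sees a stale Handle*.
class Handle {
 public:
  Handle() = default;
  explicit Handle(void* res) : res_(res) {
    std::lock_guard<std::mutex> mu(lock_);
    registry_.emplace(res_, this);
  }
  Handle(Handle&& other) { swap(other); }
  Handle& operator=(Handle&& other) {
    Reset();
    swap(other);
    return *this;
  }
  ~Handle() { Reset(); }
  void Reset() {
    if (res_ != nullptr) {
      std::lock_guard<std::mutex> mu(lock_);
      registry_.erase(res_);
      res_ = nullptr;
    }
  }
  void swap(Handle& other) {
    if (res_ != nullptr || other.res_ != nullptr) {
      std::lock_guard<std::mutex> mu(lock_);
      if (res_ != nullptr) registry_[res_] = &other;        // Will belong to other.
      if (other.res_ != nullptr) registry_[other.res_] = this;
      // Swap members with the lock held so the registry key always matches.
      std::swap(res_, other.res_);
    } else {
      std::swap(res_, other.res_);
    }
  }
 private:
  void* res_ = nullptr;
  static std::mutex lock_;
  static std::map<void*, Handle*> registry_;
};
std::mutex Handle::lock_;
std::map<void*, Handle*> Handle::registry_;
int main() {
  int a = 0, b = 0;
  Handle h1(&a), h2(&b);
  h1.swap(h2);               // Registry entries now point at the new owners.
  Handle h3(std::move(h1));  // h1 becomes empty; registry points at h3.
  return 0;
}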
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index c575c7a31f..ab3d18ff04 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -30,14 +30,6 @@ namespace art {
class MemMapTest : public CommonArtTest {
public:
- static uint8_t* BaseBegin(MemMap* mem_map) {
- return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
- }
-
- static size_t BaseSize(MemMap* mem_map) {
- return mem_map->base_size_;
- }
-
static bool IsAddressMapped(void* addr) {
bool res = msync(addr, 1, MS_SYNC) == 0;
if (!res && errno != ENOMEM) {
@@ -60,15 +52,14 @@ class MemMapTest : public CommonArtTest {
static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
// Find a valid map address and unmap it before returning.
std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
- nullptr,
- size,
- PROT_READ,
- low_4gb,
- false,
- &error_msg));
- CHECK(map != nullptr);
- return map->Begin();
+ MemMap map = MemMap::MapAnonymous("temp",
+ /* addr */ nullptr,
+ size,
+ PROT_READ,
+ low_4gb,
+ &error_msg);
+ CHECK(map.IsValid());
+ return map.Begin();
}
static void RemapAtEndTest(bool low_4gb) {
@@ -76,37 +67,37 @@ class MemMapTest : public CommonArtTest {
// Cast the page size to size_t.
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a two-page memory region.
- MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
- nullptr,
- 2 * page_size,
- PROT_READ | PROT_WRITE,
- low_4gb,
- false,
- &error_msg);
+ MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
+ /* addr */ nullptr,
+ 2 * page_size,
+ PROT_READ | PROT_WRITE,
+ low_4gb,
+ &error_msg);
// Check its state and write to it.
- uint8_t* base0 = m0->Begin();
+ ASSERT_TRUE(m0.IsValid());
+ uint8_t* base0 = m0.Begin();
ASSERT_TRUE(base0 != nullptr) << error_msg;
- size_t size0 = m0->Size();
- EXPECT_EQ(m0->Size(), 2 * page_size);
- EXPECT_EQ(BaseBegin(m0), base0);
- EXPECT_EQ(BaseSize(m0), size0);
+ size_t size0 = m0.Size();
+ EXPECT_EQ(m0.Size(), 2 * page_size);
+ EXPECT_EQ(m0.BaseBegin(), base0);
+ EXPECT_EQ(m0.BaseSize(), size0);
memset(base0, 42, 2 * page_size);
// Remap the latter half into a second MemMap.
- MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
- "MemMapTest_RemapAtEndTest_map1",
- PROT_READ | PROT_WRITE,
- &error_msg);
+ MemMap m1 = m0.RemapAtEnd(base0 + page_size,
+ "MemMapTest_RemapAtEndTest_map1",
+ PROT_READ | PROT_WRITE,
+ &error_msg);
// Check the states of the two maps.
- EXPECT_EQ(m0->Begin(), base0) << error_msg;
- EXPECT_EQ(m0->Size(), page_size);
- EXPECT_EQ(BaseBegin(m0), base0);
- EXPECT_EQ(BaseSize(m0), page_size);
- uint8_t* base1 = m1->Begin();
- size_t size1 = m1->Size();
+ EXPECT_EQ(m0.Begin(), base0) << error_msg;
+ EXPECT_EQ(m0.Size(), page_size);
+ EXPECT_EQ(m0.BaseBegin(), base0);
+ EXPECT_EQ(m0.BaseSize(), page_size);
+ uint8_t* base1 = m1.Begin();
+ size_t size1 = m1.Size();
EXPECT_EQ(base1, base0 + page_size);
EXPECT_EQ(size1, page_size);
- EXPECT_EQ(BaseBegin(m1), base1);
- EXPECT_EQ(BaseSize(m1), size1);
+ EXPECT_EQ(m1.BaseBegin(), base1);
+ EXPECT_EQ(m1.BaseSize(), size1);
// Write to the second region.
memset(base1, 43, page_size);
// Check the contents of the two regions.
@@ -117,13 +108,18 @@ class MemMapTest : public CommonArtTest {
EXPECT_EQ(base1[i], 43);
}
// Unmap the first region.
- delete m0;
+ m0.Reset();
// Make sure the second region is still accessible after the first
// region is unmapped.
for (size_t i = 0; i < page_size; ++i) {
EXPECT_EQ(base1[i], 43);
}
- delete m1;
+ MemMap m2 = m1.RemapAtEnd(m1.Begin(),
+ "MemMapTest_RemapAtEndTest_map1",
+ PROT_READ | PROT_WRITE,
+ &error_msg);
+ ASSERT_TRUE(m2.IsValid()) << error_msg;
+ ASSERT_FALSE(m1.IsValid());
}
void CommonInit() {
@@ -168,232 +164,228 @@ TEST_F(MemMapTest, Start) {
#if HAVE_MREMAP_SYSCALL
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
std::string error_msg;
- std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- nullptr,
- kPageSize,
- PROT_READ,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(dest != nullptr);
- MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- nullptr,
- kPageSize,
- PROT_WRITE | PROT_READ,
- false,
- false,
- &error_msg);
- ASSERT_TRUE(source != nullptr);
- void* source_addr = source->Begin();
- void* dest_addr = dest->Begin();
+ MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_READ,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(dest.IsValid());
+ MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(source.IsValid());
+ void* source_addr = source.Begin();
+ void* dest_addr = dest.Begin();
ASSERT_TRUE(IsAddressMapped(source_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr));
std::vector<uint8_t> data = RandomData(kPageSize);
- memcpy(source->Begin(), data.data(), data.size());
+ memcpy(source.Begin(), data.data(), data.size());
- ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+ ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
ASSERT_FALSE(IsAddressMapped(source_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr));
- ASSERT_TRUE(source == nullptr);
+ ASSERT_FALSE(source.IsValid());
- ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
- ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+ ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
std::string error_msg;
- std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- nullptr,
- 5 * kPageSize, // Need to make it larger
- // initially so we know
- // there won't be mappings
- // in the way we we move
- // source.
- PROT_READ,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(dest != nullptr);
- MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- nullptr,
- 3 * kPageSize,
- PROT_WRITE | PROT_READ,
- false,
- false,
- &error_msg);
- ASSERT_TRUE(source != nullptr);
- uint8_t* source_addr = source->Begin();
- uint8_t* dest_addr = dest->Begin();
+ MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ /* addr */ nullptr,
+ 5 * kPageSize, // Need to make it larger
+ // initially so we know
+ // there won't be mappings
+ // in the way we move
+ // source.
+ PROT_READ,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(dest.IsValid());
+ MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ /* addr */ nullptr,
+ 3 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(source.IsValid());
+ uint8_t* source_addr = source.Begin();
+ uint8_t* dest_addr = dest.Begin();
ASSERT_TRUE(IsAddressMapped(source_addr));
// Fill the source with random data.
std::vector<uint8_t> data = RandomData(3 * kPageSize);
- memcpy(source->Begin(), data.data(), data.size());
+ memcpy(source.Begin(), data.data(), data.size());
// Make the dest smaller so that we know we'll have space.
- dest->SetSize(kPageSize);
+ dest.SetSize(kPageSize);
ASSERT_TRUE(IsAddressMapped(dest_addr));
ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
- ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
- ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+ ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
ASSERT_FALSE(IsAddressMapped(source_addr));
- ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
ASSERT_TRUE(IsAddressMapped(dest_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
- ASSERT_TRUE(source == nullptr);
+ ASSERT_FALSE(source.IsValid());
- ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+ ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
std::string error_msg;
- std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- nullptr,
- 3 * kPageSize,
- PROT_READ,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(dest != nullptr);
- MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- nullptr,
- kPageSize,
- PROT_WRITE | PROT_READ,
- false,
- false,
- &error_msg);
- ASSERT_TRUE(source != nullptr);
- uint8_t* source_addr = source->Begin();
- uint8_t* dest_addr = dest->Begin();
+ MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ /* addr */ nullptr,
+ 3 * kPageSize,
+ PROT_READ,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(dest.IsValid());
+ MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(source.IsValid());
+ uint8_t* source_addr = source.Begin();
+ uint8_t* dest_addr = dest.Begin();
ASSERT_TRUE(IsAddressMapped(source_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
- ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
std::vector<uint8_t> data = RandomData(kPageSize);
- memcpy(source->Begin(), data.data(), kPageSize);
+ memcpy(source.Begin(), data.data(), kPageSize);
- ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+ ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
ASSERT_FALSE(IsAddressMapped(source_addr));
- ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
ASSERT_TRUE(IsAddressMapped(dest_addr));
ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
- ASSERT_TRUE(source == nullptr);
+ ASSERT_FALSE(source.IsValid());
- ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+ ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
std::string error_msg;
- std::unique_ptr<MemMap> dest(
+ MemMap dest =
MemMap::MapAnonymous(
"MapAnonymousEmpty-atomic-replace-dest",
- nullptr,
+ /* addr */ nullptr,
3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
// the way we move source.
PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(dest != nullptr);
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(dest.IsValid());
// Resize down to 1 page so we can remap the rest.
- dest->SetSize(kPageSize);
+ dest.SetSize(kPageSize);
// Create source from the last 2 pages
- MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- dest->Begin() + kPageSize,
- 2 * kPageSize,
- PROT_WRITE | PROT_READ,
- false,
- false,
- &error_msg);
- ASSERT_TRUE(source != nullptr);
- MemMap* orig_source = source;
- ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
- uint8_t* source_addr = source->Begin();
- uint8_t* dest_addr = dest->Begin();
+ MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ dest.Begin() + kPageSize,
+ 2 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(source.IsValid());
+ ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
+ uint8_t* source_addr = source.Begin();
+ uint8_t* dest_addr = dest.Begin();
ASSERT_TRUE(IsAddressMapped(source_addr));
// Fill the source and dest with random data.
std::vector<uint8_t> data = RandomData(2 * kPageSize);
- memcpy(source->Begin(), data.data(), data.size());
+ memcpy(source.Begin(), data.data(), data.size());
std::vector<uint8_t> dest_data = RandomData(kPageSize);
- memcpy(dest->Begin(), dest_data.data(), dest_data.size());
+ memcpy(dest.Begin(), dest_data.data(), dest_data.size());
ASSERT_TRUE(IsAddressMapped(dest_addr));
- ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
- ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+ ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
- ASSERT_TRUE(source == orig_source);
ASSERT_TRUE(IsAddressMapped(source_addr));
ASSERT_TRUE(IsAddressMapped(dest_addr));
- ASSERT_EQ(source->Size(), data.size());
- ASSERT_EQ(dest->Size(), dest_data.size());
+ ASSERT_EQ(source.Size(), data.size());
+ ASSERT_EQ(dest.Size(), dest_data.size());
- ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
- ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);
-
- delete source;
+ ASSERT_EQ(memcmp(source.Begin(), data.data(), data.size()), 0);
+ ASSERT_EQ(memcmp(dest.Begin(), dest_data.data(), dest_data.size()), 0);
}
#endif // HAVE_MREMAP_SYSCALL
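The ReplaceWith() tests above pin down the ownership contract of the value-type API: on success the source mapping is moved over the destination and invalidated; on failure both mappings are left intact. A minimal sketch of a caller relying on that contract (assumes HAVE_MREMAP_SYSCALL and the MemMap API exactly as exercised above; MoveMapping is a hypothetical helper, not ART code):

    #include <string>

    #include "base/mem_map.h"  // ART-internal header.

    namespace art {

    // Hypothetical helper: move `source` over `dest` in place.
    bool MoveMapping(MemMap* dest, MemMap* source, std::string* error_msg) {
      if (!dest->ReplaceWith(source, error_msg)) {
        // Failure leaves both mappings valid and unchanged.
        return false;
      }
      // Success: *source is now invalid and its pages live at dest->Begin().
      return true;
    }

    }  // namespace art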
TEST_F(MemMapTest, MapAnonymousEmpty) {
CommonInit();
std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
- nullptr,
- 0,
- PROT_READ,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
- ASSERT_TRUE(error_msg.empty());
- map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
- nullptr,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
+ /* addr */ nullptr,
+ 0,
+ PROT_READ,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_FALSE(map.IsValid()) << error_msg;
+ ASSERT_FALSE(error_msg.empty());
+
+ error_msg.clear();
+ map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
}
TEST_F(MemMapTest, MapAnonymousFailNullError) {
CommonInit();
// Test that we don't crash with a null error_str when mapping at an invalid location.
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
- reinterpret_cast<uint8_t*>(kPageSize),
- 0x20000,
- PROT_READ | PROT_WRITE,
- false,
- false,
- nullptr));
- ASSERT_EQ(nullptr, map.get());
+ MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
+ reinterpret_cast<uint8_t*>(kPageSize),
+ 0x20000,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ nullptr);
+ ASSERT_FALSE(map.IsValid());
}
#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
CommonInit();
std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
- nullptr,
- kPageSize,
- PROT_READ | PROT_WRITE,
- true,
- false,
- &error_msg));
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
+ /* addr */ nullptr,
+ 0,
+ PROT_READ,
+ /* low_4gb */ true,
+ &error_msg);
+ ASSERT_FALSE(map.IsValid()) << error_msg;
+ ASSERT_FALSE(error_msg.empty());
+
+ error_msg.clear();
+ map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ &error_msg);
+ ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
+ ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
TEST_F(MemMapTest, MapFile32Bit) {
CommonInit();
@@ -402,18 +394,18 @@ TEST_F(MemMapTest, MapFile32Bit) {
constexpr size_t kMapSize = kPageSize;
std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
- std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
- PROT_READ,
- MAP_PRIVATE,
- scratch_file.GetFd(),
- /*start*/0,
- /*low_4gb*/true,
- scratch_file.GetFilename().c_str(),
- &error_msg));
- ASSERT_TRUE(map != nullptr) << error_msg;
+ MemMap map = MemMap::MapFile(/*byte_count*/kMapSize,
+ PROT_READ,
+ MAP_PRIVATE,
+ scratch_file.GetFd(),
+ /*start*/0,
+ /*low_4gb*/true,
+ scratch_file.GetFilename().c_str(),
+ &error_msg);
+ ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- ASSERT_EQ(map->Size(), kMapSize);
- ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
+ ASSERT_EQ(map.Size(), kMapSize);
+ ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
#endif
@@ -423,36 +415,33 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
// Find a valid address.
uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
// Map at an address that should work, which should succeed.
- std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
- valid_address,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+ MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
+ valid_address,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- ASSERT_TRUE(map0->BaseBegin() == valid_address);
+ ASSERT_TRUE(map0.BaseBegin() == valid_address);
// Map at an unspecified address, which should succeed.
- std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
- nullptr,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+ MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
+ /* addr */ nullptr,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- ASSERT_TRUE(map1->BaseBegin() != nullptr);
+ ASSERT_TRUE(map1.BaseBegin() != nullptr);
// Attempt to map at the same address, which should fail.
- std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
- reinterpret_cast<uint8_t*>(map1->BaseBegin()),
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map2.get() == nullptr) << error_msg;
+ MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
+ reinterpret_cast<uint8_t*>(map1.BaseBegin()),
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_FALSE(map2.IsValid()) << error_msg;
ASSERT_TRUE(!error_msg.empty());
}
@@ -480,23 +469,22 @@ TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
// Try all addresses starting from 2GB to 4GB.
size_t start_addr = 2 * GB;
std::string error_msg;
- std::unique_ptr<MemMap> map;
+ MemMap map;
for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
- map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
- reinterpret_cast<uint8_t*>(start_addr),
- size,
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- false,
- &error_msg));
- if (map != nullptr) {
+ map = MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
+ reinterpret_cast<uint8_t*>(start_addr),
+ size,
+ PROT_READ | PROT_WRITE,
+                               /* low_4gb */ true,
+ &error_msg);
+ if (map.IsValid()) {
break;
}
}
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
- ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
+ ASSERT_TRUE(map.IsValid()) << error_msg;
+ ASSERT_GE(reinterpret_cast<uintptr_t>(map.End()), 2u * GB);
ASSERT_TRUE(error_msg.empty());
- ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
+ ASSERT_EQ(map.BaseBegin(), reinterpret_cast<void*>(start_addr));
}
TEST_F(MemMapTest, MapAnonymousOverflow) {
@@ -504,14 +492,13 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
std::string error_msg;
uintptr_t ptr = 0;
ptr -= kPageSize; // Now it's close to the top.
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
- reinterpret_cast<uint8_t*>(ptr),
- 2 * kPageSize, // brings it over the top.
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_EQ(nullptr, map.get());
+ MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
+ reinterpret_cast<uint8_t*>(ptr),
+ 2 * kPageSize, // brings it over the top.
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
}
@@ -519,29 +506,27 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
CommonInit();
std::string error_msg;
- std::unique_ptr<MemMap> map(
+ MemMap map =
MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
kPageSize,
PROT_READ | PROT_WRITE,
- true,
- false,
- &error_msg));
- ASSERT_EQ(nullptr, map.get());
+ /* low_4gb */ true,
+ &error_msg);
+ ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
}
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
CommonInit();
std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
- reinterpret_cast<uint8_t*>(0xF0000000),
- 0x20000000,
- PROT_READ | PROT_WRITE,
- true,
- false,
- &error_msg));
- ASSERT_EQ(nullptr, map.get());
+ MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
+ reinterpret_cast<uint8_t*>(0xF0000000),
+ 0x20000000,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ &error_msg);
+ ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
}
#endif
@@ -549,23 +534,25 @@ TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
TEST_F(MemMapTest, MapAnonymousReuse) {
CommonInit();
std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
- nullptr,
- 0x20000,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_NE(nullptr, map.get());
+ MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
+ nullptr,
+ 0x20000,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ /* reservation */ nullptr,
+ &error_msg);
+ ASSERT_TRUE(map.IsValid());
ASSERT_TRUE(error_msg.empty());
- std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
- reinterpret_cast<uint8_t*>(map->BaseBegin()),
- 0x10000,
- PROT_READ | PROT_WRITE,
- false,
- true,
- &error_msg));
- ASSERT_NE(nullptr, map2.get());
+ MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
+ reinterpret_cast<uint8_t*>(map.BaseBegin()),
+ 0x10000,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ true,
+ /* reservation */ nullptr,
+ &error_msg);
+ ASSERT_TRUE(map2.IsValid());
ASSERT_TRUE(error_msg.empty());
}
@@ -574,65 +561,61 @@ TEST_F(MemMapTest, CheckNoGaps) {
std::string error_msg;
constexpr size_t kNumPages = 3;
// Map a 3-page mem map.
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
- nullptr,
- kPageSize * kNumPages,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ MemMap map = MemMap::MapAnonymous("MapAnonymous0",
+ /* addr */ nullptr,
+ kPageSize * kNumPages,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
// Record the base address.
- uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
+ uint8_t* map_base = reinterpret_cast<uint8_t*>(map.BaseBegin());
// Unmap it.
- map.reset();
+ map.Reset();
// Map at the same address, but in page-sized separate mem maps,
// assuming the space at the address is still available.
- std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
- map_base,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+ MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
+ map_base,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
- map_base + kPageSize,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+ MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
+ map_base + kPageSize,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
- std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
- map_base + kPageSize * 2,
- kPageSize,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_TRUE(map2.get() != nullptr) << error_msg;
+ MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
+ map_base + kPageSize * 2,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(map2.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
// One-map cases.
- ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
- ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
- ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map1, map1));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map2, map2));
// Two or three-map cases.
- ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
- ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
- ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map0, map1));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map1, map2));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map0, map2));
// Unmap the middle one.
- map1.reset();
+ map1.Reset();
// Should return false now that there's a gap in the middle.
- ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+ ASSERT_FALSE(MemMap::CheckNoGaps(map0, map2));
}
TEST_F(MemMapTest, AlignBy) {
@@ -641,52 +624,52 @@ TEST_F(MemMapTest, AlignBy) {
// Cast the page size to size_t.
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a region.
- std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
- nullptr,
- 14 * page_size,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- uint8_t* base0 = m0->Begin();
+ MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
+ /* addr */ nullptr,
+ 14 * page_size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(m0.IsValid());
+ uint8_t* base0 = m0.Begin();
ASSERT_TRUE(base0 != nullptr) << error_msg;
- ASSERT_EQ(m0->Size(), 14 * page_size);
- ASSERT_EQ(BaseBegin(m0.get()), base0);
- ASSERT_EQ(BaseSize(m0.get()), m0->Size());
+ ASSERT_EQ(m0.Size(), 14 * page_size);
+ ASSERT_EQ(m0.BaseBegin(), base0);
+ ASSERT_EQ(m0.BaseSize(), m0.Size());
// Break it into several regions by using RemapAtEnd.
- std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
- "MemMapTest_AlignByTest_map1",
- PROT_READ | PROT_WRITE,
- &error_msg));
- uint8_t* base1 = m1->Begin();
+ MemMap m1 = m0.RemapAtEnd(base0 + 3 * page_size,
+ "MemMapTest_AlignByTest_map1",
+ PROT_READ | PROT_WRITE,
+ &error_msg);
+ uint8_t* base1 = m1.Begin();
ASSERT_TRUE(base1 != nullptr) << error_msg;
ASSERT_EQ(base1, base0 + 3 * page_size);
- ASSERT_EQ(m0->Size(), 3 * page_size);
+ ASSERT_EQ(m0.Size(), 3 * page_size);
- std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
- "MemMapTest_AlignByTest_map2",
- PROT_READ | PROT_WRITE,
- &error_msg));
- uint8_t* base2 = m2->Begin();
+ MemMap m2 = m1.RemapAtEnd(base1 + 4 * page_size,
+ "MemMapTest_AlignByTest_map2",
+ PROT_READ | PROT_WRITE,
+ &error_msg);
+ uint8_t* base2 = m2.Begin();
ASSERT_TRUE(base2 != nullptr) << error_msg;
ASSERT_EQ(base2, base1 + 4 * page_size);
- ASSERT_EQ(m1->Size(), 4 * page_size);
+ ASSERT_EQ(m1.Size(), 4 * page_size);
- std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
- "MemMapTest_AlignByTest_map1",
- PROT_READ | PROT_WRITE,
- &error_msg));
- uint8_t* base3 = m3->Begin();
+ MemMap m3 = m2.RemapAtEnd(base2 + 3 * page_size,
+ "MemMapTest_AlignByTest_map1",
+ PROT_READ | PROT_WRITE,
+ &error_msg);
+ uint8_t* base3 = m3.Begin();
ASSERT_TRUE(base3 != nullptr) << error_msg;
ASSERT_EQ(base3, base2 + 3 * page_size);
- ASSERT_EQ(m2->Size(), 3 * page_size);
- ASSERT_EQ(m3->Size(), 4 * page_size);
+ ASSERT_EQ(m2.Size(), 3 * page_size);
+ ASSERT_EQ(m3.Size(), 4 * page_size);
- uint8_t* end0 = base0 + m0->Size();
- uint8_t* end1 = base1 + m1->Size();
- uint8_t* end2 = base2 + m2->Size();
- uint8_t* end3 = base3 + m3->Size();
+ uint8_t* end0 = base0 + m0.Size();
+ uint8_t* end1 = base1 + m1.Size();
+ uint8_t* end2 = base2 + m2.Size();
+ uint8_t* end3 = base3 + m3.Size();
ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);
@@ -703,40 +686,144 @@ TEST_F(MemMapTest, AlignBy) {
}
// Align by 2 * page_size;
- m0->AlignBy(2 * page_size);
- m1->AlignBy(2 * page_size);
- m2->AlignBy(2 * page_size);
- m3->AlignBy(2 * page_size);
+ m0.AlignBy(2 * page_size);
+ m1.AlignBy(2 * page_size);
+ m2.AlignBy(2 * page_size);
+ m3.AlignBy(2 * page_size);
- EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m0.Begin(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m1.Begin(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m2.Begin(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m3.Begin(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
- EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m0.Begin() + m0.Size(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m1.Begin() + m1.Size(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m2.Begin() + m2.Size(), 2 * page_size));
+ EXPECT_TRUE(IsAlignedParam(m3.Begin() + m3.Size(), 2 * page_size));
if (IsAlignedParam(base0, 2 * page_size)) {
- EXPECT_EQ(m0->Begin(), base0);
- EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
- EXPECT_EQ(m1->Begin(), base1 + page_size);
- EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
- EXPECT_EQ(m2->Begin(), base2 + page_size);
- EXPECT_EQ(m2->Begin() + m2->Size(), end2);
- EXPECT_EQ(m3->Begin(), base3);
- EXPECT_EQ(m3->Begin() + m3->Size(), end3);
+ EXPECT_EQ(m0.Begin(), base0);
+ EXPECT_EQ(m0.Begin() + m0.Size(), end0 - page_size);
+ EXPECT_EQ(m1.Begin(), base1 + page_size);
+ EXPECT_EQ(m1.Begin() + m1.Size(), end1 - page_size);
+ EXPECT_EQ(m2.Begin(), base2 + page_size);
+ EXPECT_EQ(m2.Begin() + m2.Size(), end2);
+ EXPECT_EQ(m3.Begin(), base3);
+ EXPECT_EQ(m3.Begin() + m3.Size(), end3);
} else {
- EXPECT_EQ(m0->Begin(), base0 + page_size);
- EXPECT_EQ(m0->Begin() + m0->Size(), end0);
- EXPECT_EQ(m1->Begin(), base1);
- EXPECT_EQ(m1->Begin() + m1->Size(), end1);
- EXPECT_EQ(m2->Begin(), base2);
- EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
- EXPECT_EQ(m3->Begin(), base3 + page_size);
- EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
+ EXPECT_EQ(m0.Begin(), base0 + page_size);
+ EXPECT_EQ(m0.Begin() + m0.Size(), end0);
+ EXPECT_EQ(m1.Begin(), base1);
+ EXPECT_EQ(m1.Begin() + m1.Size(), end1);
+ EXPECT_EQ(m2.Begin(), base2);
+ EXPECT_EQ(m2.Begin() + m2.Size(), end2 - page_size);
+ EXPECT_EQ(m3.Begin(), base3 + page_size);
+ EXPECT_EQ(m3.Begin() + m3.Size(), end3 - page_size);
}
}
+TEST_F(MemMapTest, Reservation) {
+ CommonInit();
+ std::string error_msg;
+ ScratchFile scratch_file;
+ constexpr size_t kMapSize = 5 * kPageSize;
+ std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
+ ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
+
+ MemMap reservation = MemMap::MapAnonymous("Test reservation",
+ /* addr */ nullptr,
+ kMapSize,
+ PROT_NONE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(reservation.IsValid());
+ ASSERT_TRUE(error_msg.empty());
+
+ // Map first part of the reservation.
+ constexpr size_t kChunk1Size = kPageSize - 1u;
+ static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
+ uint8_t* addr1 = reservation.Begin();
+ MemMap map1 = MemMap::MapFileAtAddress(addr1,
+ /* byte_count */ kChunk1Size,
+ PROT_READ,
+ MAP_PRIVATE,
+ scratch_file.GetFd(),
+ /* start */ 0,
+ /* low_4gb */ false,
+ scratch_file.GetFilename().c_str(),
+ /* reuse */ false,
+ &reservation,
+ &error_msg);
+ ASSERT_TRUE(map1.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map1.Size(), kChunk1Size);
+ ASSERT_EQ(addr1, map1.Begin());
+ ASSERT_TRUE(reservation.IsValid());
+ // Entire pages are taken from the `reservation`.
+ ASSERT_LT(map1.End(), map1.BaseEnd());
+ ASSERT_EQ(map1.BaseEnd(), reservation.Begin());
+
+ // Map second part as an anonymous mapping.
+ constexpr size_t kChunk2Size = 2 * kPageSize;
+ DCHECK_LT(kChunk2Size, reservation.Size()); // We want to split the reservation.
+ uint8_t* addr2 = reservation.Begin();
+ MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
+ addr2,
+ /* byte_count */ kChunk2Size,
+ PROT_READ,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &reservation,
+ &error_msg);
+ ASSERT_TRUE(map2.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map2.Size(), kChunk2Size);
+ ASSERT_EQ(addr2, map2.Begin());
+ ASSERT_EQ(map2.End(), map2.BaseEnd()); // kChunk2Size is page aligned.
+ ASSERT_EQ(map2.BaseEnd(), reservation.Begin());
+
+ // Map the rest of the reservation except the last byte.
+ const size_t kChunk3Size = reservation.Size() - 1u;
+ uint8_t* addr3 = reservation.Begin();
+ MemMap map3 = MemMap::MapFileAtAddress(addr3,
+ /* byte_count */ kChunk3Size,
+ PROT_READ,
+ MAP_PRIVATE,
+ scratch_file.GetFd(),
+ /* start */ dchecked_integral_cast<size_t>(addr3 - addr1),
+ /* low_4gb */ false,
+ scratch_file.GetFilename().c_str(),
+ /* reuse */ false,
+ &reservation,
+ &error_msg);
+ ASSERT_TRUE(map3.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map3.Size(), kChunk3Size);
+ ASSERT_EQ(addr3, map3.Begin());
+ // Entire pages are taken from the `reservation`, so it's now exhausted.
+ ASSERT_FALSE(reservation.IsValid());
+
+ // Now split the MiddleReservation.
+ constexpr size_t kChunk2ASize = kPageSize - 1u;
+ DCHECK_LT(kChunk2ASize, map2.Size()); // We want to split the reservation.
+ MemMap map2a = map2.TakeReservedMemory(kChunk2ASize);
+ ASSERT_TRUE(map2a.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map2a.Size(), kChunk2ASize);
+ ASSERT_EQ(addr2, map2a.Begin());
+ ASSERT_TRUE(map2.IsValid());
+ ASSERT_LT(map2a.End(), map2a.BaseEnd());
+ ASSERT_EQ(map2a.BaseEnd(), map2.Begin());
+
+ // And take the rest of the middle reservation.
+ const size_t kChunk2BSize = map2.Size() - 1u;
+ uint8_t* addr2b = map2.Begin();
+ MemMap map2b = map2.TakeReservedMemory(kChunk2BSize);
+ ASSERT_TRUE(map2b.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map2b.Size(), kChunk2BSize);
+ ASSERT_EQ(addr2b, map2b.Begin());
+ ASSERT_FALSE(map2.IsValid());
+}
+
} // namespace art
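The new Reservation test above doubles as documentation for the reservation-splitting workflow: reserve address space once with PROT_NONE, then either carve pieces off with TakeReservedMemory() or pass the reservation into MapAnonymous()/MapFileAtAddress(), which consume whole pages from its front. A condensed sketch under those assumptions (kPageSize is ART's page-size constant from base/globals.h):

    #include <sys/mman.h>

    #include <string>

    #include "base/mem_map.h"  // ART-internal header.

    namespace art {

    // Sketch: reserve address space up front, then hand out an aligned piece.
    bool CarveReservation(std::string* error_msg) {
      MemMap reservation = MemMap::MapAnonymous("example-reservation",
                                                /* addr */ nullptr,
                                                4 * kPageSize,
                                                PROT_NONE,  // Inaccessible until carved.
                                                /* low_4gb */ false,
                                                error_msg);
      if (!reservation.IsValid()) {
        return false;
      }
      // Whole pages are taken from the front; `reservation` shrinks in place
      // and becomes invalid once its last page has been handed out.
      MemMap chunk = reservation.TakeReservedMemory(kPageSize);
      return chunk.IsValid();
    }

    }  // namespace art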
diff --git a/libartbase/base/memory_region.h b/libartbase/base/memory_region.h
index 206032923f..9c9ff92071 100644
--- a/libartbase/base/memory_region.h
+++ b/libartbase/base/memory_region.h
@@ -34,7 +34,7 @@ namespace art {
// Memory regions are useful for accessing memory with bounds check in
// debug mode. They can be safely passed by value and do not assume ownership
// of the region.
-class MemoryRegion FINAL : public ValueObject {
+class MemoryRegion final : public ValueObject {
public:
struct ContentEquals {
constexpr bool operator()(const MemoryRegion& lhs, const MemoryRegion& rhs) const {
diff --git a/libartbase/base/unix_file/fd_file.h b/libartbase/base/unix_file/fd_file.h
index d61dab6ce3..19be3ef6f7 100644
--- a/libartbase/base/unix_file/fd_file.h
+++ b/libartbase/base/unix_file/fd_file.h
@@ -89,13 +89,13 @@ class FdFile : public RandomAccessFile {
virtual ~FdFile();
// RandomAccessFile API.
- int Close() OVERRIDE WARN_UNUSED;
- int64_t Read(char* buf, int64_t byte_count, int64_t offset) const OVERRIDE WARN_UNUSED;
- int SetLength(int64_t new_length) OVERRIDE WARN_UNUSED;
- int64_t GetLength() const OVERRIDE;
- int64_t Write(const char* buf, int64_t byte_count, int64_t offset) OVERRIDE WARN_UNUSED;
+ int Close() override WARN_UNUSED;
+ int64_t Read(char* buf, int64_t byte_count, int64_t offset) const override WARN_UNUSED;
+ int SetLength(int64_t new_length) override WARN_UNUSED;
+ int64_t GetLength() const override;
+ int64_t Write(const char* buf, int64_t byte_count, int64_t offset) override WARN_UNUSED;
- int Flush() OVERRIDE WARN_UNUSED;
+ int Flush() override WARN_UNUSED;
// Short for SetLength(0); Flush(); Close();
// If the file was opened with a path name and unlink = true, also calls Unlink() on the path.
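The OVERRIDE and FINAL macros predate ART's move to C++11 toolchains; with the keywords guaranteed, the macros are expanded in place throughout this patch. A standalone illustration of what the specifiers buy (generic code, not ART's real RandomAccessFile):

    #include <cstdint>

    class RandomAccessFileLike {  // Illustrative base class.
     public:
      virtual ~RandomAccessFileLike() {}
      virtual int Close() = 0;
      virtual int64_t GetLength() const = 0;
    };

    // `final` forbids further derivation; `override` makes the compiler
    // verify that each method really overrides a base-class virtual.
    class ExampleFile final : public RandomAccessFileLike {
     public:
      int Close() override { return 0; }
      int64_t GetLength() const override { return length_; }
      // Dropping the `const` above would now be a hard compile error
      // instead of silently declaring a brand-new method.
     private:
      int64_t length_ = 0;
    };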
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index b5f946e5a2..174d22792a 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -68,31 +68,33 @@ bool ZipEntry::ExtractToFile(File& file, std::string* error_msg) {
return true;
}
-MemMap* ZipEntry::ExtractToMemMap(const char* zip_filename, const char* entry_filename,
- std::string* error_msg) {
+MemMap ZipEntry::ExtractToMemMap(const char* zip_filename,
+ const char* entry_filename,
+ std::string* error_msg) {
std::string name(entry_filename);
name += " extracted in memory from ";
name += zip_filename;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
- nullptr, GetUncompressedLength(),
- PROT_READ | PROT_WRITE, false, false,
- error_msg));
- if (map.get() == nullptr) {
+ MemMap map = MemMap::MapAnonymous(name.c_str(),
+ /* addr */ nullptr,
+ GetUncompressedLength(),
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ error_msg);
+ if (!map.IsValid()) {
DCHECK(!error_msg->empty());
- return nullptr;
+ return MemMap::Invalid();
}
- const int32_t error = ExtractToMemory(handle_, zip_entry_,
- map->Begin(), map->Size());
+ const int32_t error = ExtractToMemory(handle_, zip_entry_, map.Begin(), map.Size());
if (error) {
*error_msg = std::string(ErrorCodeString(error));
- return nullptr;
+ return MemMap::Invalid();
}
- return map.release();
+ return map;
}
-MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* error_msg) {
+MemMap ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* error_msg) {
const int zip_fd = GetFileDescriptor(handle_);
const char* entry_filename = entry_name_.c_str();
@@ -109,7 +111,7 @@ MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* err
*error_msg = StringPrintf("Cannot map '%s' (in zip '%s') directly because it is compressed.",
entry_filename,
zip_filename);
- return nullptr;
+ return MemMap::Invalid();
} else if (zip_entry_->uncompressed_length != zip_entry_->compressed_length) {
*error_msg = StringPrintf("Cannot map '%s' (in zip '%s') directly because "
"entry has bad size (%u != %u).",
@@ -117,7 +119,7 @@ MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* err
zip_filename,
zip_entry_->uncompressed_length,
zip_entry_->compressed_length);
- return nullptr;
+ return MemMap::Invalid();
}
std::string name(entry_filename);
@@ -130,19 +132,17 @@ MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* err
LOG(INFO) << "zip_archive: " << "make mmap of " << name << " @ offset = " << offset;
}
- std::unique_ptr<MemMap> map(
- MemMap::MapFileAtAddress(nullptr, // Expected pointer address
- GetUncompressedLength(), // Byte count
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- zip_fd,
- offset,
- false, // Don't restrict allocation to lower4GB
- false, // Doesn't overlap existing map (reuse=false)
- name.c_str(),
- /*out*/error_msg));
-
- if (map == nullptr) {
+ MemMap map =
+ MemMap::MapFile(GetUncompressedLength(), // Byte count
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ zip_fd,
+ offset,
+ /* low_4gb */ false,
+ name.c_str(),
+ error_msg);
+
+ if (!map.IsValid()) {
DCHECK(!error_msg->empty());
}
@@ -169,12 +169,12 @@ MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* err
LOG(INFO) << "---------------------------";
// Dump map contents.
- if (map != nullptr) {
+ if (map.IsValid()) {
tmp = "";
count = kMaxDumpChars;
- uint8_t* begin = map->Begin();
+ uint8_t* begin = map.Begin();
for (i = 0; i < count; ++i) {
tmp += StringPrintf("%3d ", (unsigned int)begin[i]);
}
@@ -185,19 +185,20 @@ MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* err
}
}
- return map.release();
+ return map;
}
-MemMap* ZipEntry::MapDirectlyOrExtract(const char* zip_filename,
- const char* entry_filename,
- std::string* error_msg) {
+MemMap ZipEntry::MapDirectlyOrExtract(const char* zip_filename,
+ const char* entry_filename,
+ std::string* error_msg) {
if (IsUncompressed() && GetFileDescriptor(handle_) >= 0) {
- MemMap* ret = MapDirectlyFromFile(zip_filename, error_msg);
- if (ret != nullptr) {
+ std::string local_error_msg;
+ MemMap ret = MapDirectlyFromFile(zip_filename, &local_error_msg);
+ if (ret.IsValid()) {
return ret;
}
+ // Fall back to extraction for the failure case.
}
- // Fall back to extraction for the failure case.
return ExtractToMemMap(zip_filename, entry_filename, error_msg);
}
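With MemMap returned by value, the nullptr failure convention is replaced by an explicit MemMap::Invalid() sentinel tested via IsValid(). A condensed sketch of the resulting map-or-extract shape (TryMapDirectly and ExtractToAnonymousMemory are hypothetical stand-ins for ZipEntry's private logic):

    #include <string>

    #include "base/mem_map.h"  // ART-internal header.

    // Hypothetical helpers standing in for ZipEntry's real methods.
    art::MemMap TryMapDirectly(std::string* error_msg);
    art::MemMap ExtractToAnonymousMemory(std::string* error_msg);

    art::MemMap MapOrExtract(bool can_map_directly, std::string* error_msg) {
      if (can_map_directly) {
        std::string local_error_msg;
        art::MemMap map = TryMapDirectly(&local_error_msg);
        if (map.IsValid()) {
          return map;  // Moved out; MemMap is movable but not copyable.
        }
        // Fall back to extraction; the direct-mapping error is intentionally
        // dropped, matching MapDirectlyOrExtract above.
      }
      return ExtractToAnonymousMemory(error_msg);
    }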
diff --git a/libartbase/base/zip_archive.h b/libartbase/base/zip_archive.h
index 73495da96a..8fc8b54d2c 100644
--- a/libartbase/base/zip_archive.h
+++ b/libartbase/base/zip_archive.h
@@ -43,21 +43,22 @@ class ZipEntry {
bool ExtractToFile(File& file, std::string* error_msg);
// Extract this entry to anonymous memory (R/W).
-  // Returns null on failure and sets error_msg.
+  // Returns invalid MemMap on failure and sets error_msg.
- MemMap* ExtractToMemMap(const char* zip_filename, const char* entry_filename,
- std::string* error_msg);
+ MemMap ExtractToMemMap(const char* zip_filename,
+ const char* entry_filename,
+ std::string* error_msg);
// Create a file-backed private (clean, R/W) memory mapping to this entry.
// 'zip_filename' is used for diagnostics only,
  // the original file that the ZipArchive was opened with is used
// for the mapping.
//
// Will only succeed if the entry is stored uncompressed.
- // Returns null on failure and sets error_msg.
- MemMap* MapDirectlyFromFile(const char* zip_filename, /*out*/std::string* error_msg);
+ // Returns invalid MemMap on failure and sets error_msg.
+ MemMap MapDirectlyFromFile(const char* zip_filename, /*out*/std::string* error_msg);
virtual ~ZipEntry();
- MemMap* MapDirectlyOrExtract(const char* zip_filename,
- const char* entry_filename,
- std::string* error_msg);
+ MemMap MapDirectlyOrExtract(const char* zip_filename,
+ const char* entry_filename,
+ std::string* error_msg);
uint32_t GetUncompressedLength();
uint32_t GetCrc32();
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index cc7d7aae34..eb7d3d3308 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -23,6 +23,7 @@
#include "base/file_magic.h"
#include "base/file_utils.h"
+#include "base/mem_map.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
@@ -38,41 +39,41 @@ namespace {
class MemMapContainer : public DexFileContainer {
public:
- explicit MemMapContainer(std::unique_ptr<MemMap>&& mem_map) : mem_map_(std::move(mem_map)) { }
- virtual ~MemMapContainer() OVERRIDE { }
+ explicit MemMapContainer(MemMap&& mem_map) : mem_map_(std::move(mem_map)) { }
+ ~MemMapContainer() override { }
- int GetPermissions() OVERRIDE {
- if (mem_map_.get() == nullptr) {
+ int GetPermissions() override {
+ if (!mem_map_.IsValid()) {
return 0;
} else {
- return mem_map_->GetProtect();
+ return mem_map_.GetProtect();
}
}
- bool IsReadOnly() OVERRIDE {
+ bool IsReadOnly() override {
return GetPermissions() == PROT_READ;
}
- bool EnableWrite() OVERRIDE {
+ bool EnableWrite() override {
CHECK(IsReadOnly());
- if (mem_map_.get() == nullptr) {
+ if (!mem_map_.IsValid()) {
return false;
} else {
- return mem_map_->Protect(PROT_READ | PROT_WRITE);
+ return mem_map_.Protect(PROT_READ | PROT_WRITE);
}
}
- bool DisableWrite() OVERRIDE {
+ bool DisableWrite() override {
CHECK(!IsReadOnly());
- if (mem_map_.get() == nullptr) {
+ if (!mem_map_.IsValid()) {
return false;
} else {
- return mem_map_->Protect(PROT_READ);
+ return mem_map_.Protect(PROT_READ);
}
}
private:
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
DISALLOW_COPY_AND_ASSIGN(MemMapContainer);
};
@@ -180,22 +181,24 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const uint8_t* base,
std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& location,
uint32_t location_checksum,
- std::unique_ptr<MemMap> map,
+ MemMap&& map,
bool verify,
bool verify_checksum,
std::string* error_msg) const {
ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
- CHECK(map.get() != nullptr);
+ CHECK(map.IsValid());
- if (map->Size() < sizeof(DexFile::Header)) {
+ size_t size = map.Size();
+ if (size < sizeof(DexFile::Header)) {
*error_msg = StringPrintf(
"DexFile: failed to open dex file '%s' that is too short to have a header",
location.c_str());
return nullptr;
}
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
+ uint8_t* begin = map.Begin();
+ std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
+ size,
/*data_base*/ nullptr,
/*data_size*/ 0u,
location,
@@ -285,7 +288,7 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd,
std::string* error_msg) const {
ScopedTrace trace(std::string("Open dex file ") + std::string(location));
CHECK(!location.empty());
- std::unique_ptr<MemMap> map;
+ MemMap map;
{
File delayed_close(fd, /* check_usage */ false);
struct stat sbuf;
@@ -300,31 +303,33 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd,
return nullptr;
}
size_t length = sbuf.st_size;
- map.reset(MemMap::MapFile(length,
- PROT_READ,
- mmap_shared ? MAP_SHARED : MAP_PRIVATE,
- fd,
- 0,
- /*low_4gb*/false,
- location.c_str(),
- error_msg));
- if (map == nullptr) {
+ map = MemMap::MapFile(length,
+ PROT_READ,
+ mmap_shared ? MAP_SHARED : MAP_PRIVATE,
+ fd,
+ 0,
+                          /* low_4gb */ false,
+ location.c_str(),
+ error_msg);
+ if (!map.IsValid()) {
DCHECK(!error_msg->empty());
return nullptr;
}
}
- if (map->Size() < sizeof(DexFile::Header)) {
+ const uint8_t* begin = map.Begin();
+ size_t size = map.Size();
+ if (size < sizeof(DexFile::Header)) {
*error_msg = StringPrintf(
"DexFile: failed to open dex file '%s' that is too short to have a header",
location.c_str());
return nullptr;
}
- const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(map->Begin());
+ const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(begin);
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
+ std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
+ size,
/*data_base*/ nullptr,
/*data_size*/ 0u,
location,
@@ -366,7 +371,7 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
return nullptr;
}
- std::unique_ptr<MemMap> map;
+ MemMap map;
if (zip_entry->IsUncompressed()) {
if (!zip_entry->IsAlignedTo(alignof(DexFile::Header))) {
// Do not mmap unaligned ZIP entries because
@@ -376,8 +381,8 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
<< "Falling back to extracting file.";
} else {
// Map uncompressed files within zip as file-backed to avoid a dirty copy.
- map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg));
- if (map == nullptr) {
+ map = zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg);
+ if (!map.IsValid()) {
LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
<< "is your ZIP file corrupted? Falling back to extraction.";
// Try again with Extraction which still has a chance of recovery.
@@ -385,21 +390,23 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
}
}
- if (map == nullptr) {
+ if (!map.IsValid()) {
// Default path for compressed ZIP entries,
// and fallback for stored ZIP entries.
- map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
+ map = zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg);
}
- if (map == nullptr) {
+ if (!map.IsValid()) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
error_msg->c_str());
*error_code = DexFileLoaderErrorCode::kExtractToMemoryError;
return nullptr;
}
VerifyResult verify_result;
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
+ uint8_t* begin = map.Begin();
+ size_t size = map.Size();
+ std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
+ size,
/*data_base*/ nullptr,
/*data_size*/ 0u,
location,
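Callers of the memory-backed Open() overload now transfer ownership with std::move instead of handing over a unique_ptr. A sketch assuming the signature introduced by this patch (OpenFromMap is a hypothetical wrapper, not ART code):

    #include <cstdint>
    #include <memory>
    #include <string>
    #include <utility>

    #include "base/mem_map.h"
    #include "dex/art_dex_file_loader.h"

    // Sketch: open a dex file from memory the caller has already mapped.
    std::unique_ptr<const art::DexFile> OpenFromMap(art::MemMap&& map,
                                                    const std::string& location,
                                                    uint32_t checksum,
                                                    std::string* error_msg) {
      const art::ArtDexFileLoader loader;
      // The rvalue overload consumes the mapping: after the call the caller's
      // MemMap is invalid whether or not the open succeeded.
      return loader.Open(location,
                         checksum,
                         std::move(map),
                         /* verify */ true,
                         /* verify_checksum */ true,
                         error_msg);
    }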
diff --git a/libdexfile/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h
index da2620f587..40d4673625 100644
--- a/libdexfile/dex/art_dex_file_loader.h
+++ b/libdexfile/dex/art_dex_file_loader.h
@@ -51,7 +51,7 @@ class ArtDexFileLoader : public DexFileLoader {
std::vector<uint32_t>* checksums,
std::string* error_msg,
int zip_fd = -1,
- bool* only_contains_uncompressed_dex = nullptr) const OVERRIDE;
+ bool* only_contains_uncompressed_dex = nullptr) const override;
// Opens .dex file, backed by existing memory
std::unique_ptr<const DexFile> Open(const uint8_t* base,
@@ -61,12 +61,12 @@ class ArtDexFileLoader : public DexFileLoader {
const OatDexFile* oat_dex_file,
bool verify,
bool verify_checksum,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
// Opens .dex file that has been memory-mapped by the caller.
std::unique_ptr<const DexFile> Open(const std::string& location,
                                        uint32_t location_checksum,
- std::unique_ptr<MemMap> mem_map,
+ MemMap&& mem_map,
bool verify,
bool verify_checksum,
std::string* error_msg) const;
diff --git a/libdexfile/dex/art_dex_file_loader_test.cc b/libdexfile/dex/art_dex_file_loader_test.cc
index 3f311b7451..a7d03637b1 100644
--- a/libdexfile/dex/art_dex_file_loader_test.cc
+++ b/libdexfile/dex/art_dex_file_loader_test.cc
@@ -44,7 +44,7 @@ static void Copy(const std::string& src, const std::string& dst) {
}
class ArtDexFileLoaderTest : public CommonArtTest {
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonArtTest::SetUp();
// Open a jar file from the boot classpath for use in basic tests of dex accessors.
std::vector<std::string> lib_core_dex_file_names = GetLibCoreDexFileNames();
diff --git a/libdexfile/dex/compact_dex_file.h b/libdexfile/dex/compact_dex_file.h
index affc9a20b0..8eade6dba8 100644
--- a/libdexfile/dex/compact_dex_file.h
+++ b/libdexfile/dex/compact_dex_file.h
@@ -253,15 +253,15 @@ class CompactDexFile : public DexFile {
// Returns true if the byte string points to the magic value.
static bool IsMagicValid(const uint8_t* magic);
- virtual bool IsMagicValid() const OVERRIDE;
+ bool IsMagicValid() const override;
// Returns true if the byte string after the magic is the correct value.
static bool IsVersionValid(const uint8_t* magic);
- virtual bool IsVersionValid() const OVERRIDE;
+ bool IsVersionValid() const override;
// TODO This is completely a guess. We really need to do better. b/72402467
// We ask for 64 megabytes which should be big enough for any realistic dex file.
- virtual size_t GetDequickenedSize() const OVERRIDE {
+ size_t GetDequickenedSize() const override {
return 64 * MB;
}
@@ -269,9 +269,9 @@ class CompactDexFile : public DexFile {
return down_cast<const Header&>(DexFile::GetHeader());
}
- virtual bool SupportsDefaultMethods() const OVERRIDE;
+ bool SupportsDefaultMethods() const override;
- uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+ uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
uint32_t GetDebugInfoOffset(uint32_t dex_method_index) const {
return debug_info_offsets_.GetOffset(dex_method_index);
@@ -281,7 +281,7 @@ class CompactDexFile : public DexFile {
size_t base_size,
const uint8_t* data_begin,
size_t data_size);
- virtual uint32_t CalculateChecksum() const OVERRIDE;
+ uint32_t CalculateChecksum() const override;
private:
CompactDexFile(const uint8_t* base,
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 6d9ca4aafa..400c32b519 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -36,21 +36,21 @@ namespace {
class VectorContainer : public DexFileContainer {
public:
explicit VectorContainer(std::vector<uint8_t>&& vector) : vector_(std::move(vector)) { }
- virtual ~VectorContainer() OVERRIDE { }
+ ~VectorContainer() override { }
- int GetPermissions() OVERRIDE {
+ int GetPermissions() override {
return 0;
}
- bool IsReadOnly() OVERRIDE {
+ bool IsReadOnly() override {
return true;
}
- bool EnableWrite() OVERRIDE {
+ bool EnableWrite() override {
return false;
}
- bool DisableWrite() OVERRIDE {
+ bool DisableWrite() override {
return false;
}
diff --git a/libdexfile/dex/dex_instruction.h b/libdexfile/dex/dex_instruction.h
index 6807025e13..ad8a1842fc 100644
--- a/libdexfile/dex/dex_instruction.h
+++ b/libdexfile/dex/dex_instruction.h
@@ -708,12 +708,12 @@ class InstructionOperands {
// Class for accessing operands for instructions with a range format
// (e.g. 3rc and 4rcc).
-class RangeInstructionOperands FINAL : public InstructionOperands {
+class RangeInstructionOperands final : public InstructionOperands {
public:
RangeInstructionOperands(uint32_t first_operand, size_t num_operands)
: InstructionOperands(num_operands), first_operand_(first_operand) {}
~RangeInstructionOperands() {}
- uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+ uint32_t GetOperand(size_t operand_index) const override;
private:
const uint32_t first_operand_;
@@ -723,13 +723,13 @@ class RangeInstructionOperands FINAL : public InstructionOperands {
// Class for accessing operands for instructions with a variable
// number of arguments format (e.g. 35c and 45cc).
-class VarArgsInstructionOperands FINAL : public InstructionOperands {
+class VarArgsInstructionOperands final : public InstructionOperands {
public:
VarArgsInstructionOperands(const uint32_t (&operands)[Instruction::kMaxVarArgRegs],
size_t num_operands)
: InstructionOperands(num_operands), operands_(operands) {}
~VarArgsInstructionOperands() {}
- uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+ uint32_t GetOperand(size_t operand_index) const override;
private:
const uint32_t (&operands_)[Instruction::kMaxVarArgRegs];
@@ -739,12 +739,12 @@ class VarArgsInstructionOperands FINAL : public InstructionOperands {
// Class for accessing operands without the receiver by wrapping an
// existing InstructionOperands instance.
-class NoReceiverInstructionOperands FINAL : public InstructionOperands {
+class NoReceiverInstructionOperands final : public InstructionOperands {
public:
explicit NoReceiverInstructionOperands(const InstructionOperands* const inner)
: InstructionOperands(inner->GetNumberOfOperands() - 1), inner_(inner) {}
~NoReceiverInstructionOperands() {}
- uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+ uint32_t GetOperand(size_t operand_index) const override;
private:
const InstructionOperands* const inner_;
diff --git a/libdexfile/dex/standard_dex_file.h b/libdexfile/dex/standard_dex_file.h
index 999e5b99e9..fd7e78f19f 100644
--- a/libdexfile/dex/standard_dex_file.h
+++ b/libdexfile/dex/standard_dex_file.h
@@ -73,17 +73,17 @@ class StandardDexFile : public DexFile {
// Returns true if the byte string points to the magic value.
static bool IsMagicValid(const uint8_t* magic);
- virtual bool IsMagicValid() const OVERRIDE;
+ bool IsMagicValid() const override;
// Returns true if the byte string after the magic is the correct value.
static bool IsVersionValid(const uint8_t* magic);
- virtual bool IsVersionValid() const OVERRIDE;
+ bool IsVersionValid() const override;
- virtual bool SupportsDefaultMethods() const OVERRIDE;
+ bool SupportsDefaultMethods() const override;
- uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+ uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
- virtual size_t GetDequickenedSize() const OVERRIDE {
+ size_t GetDequickenedSize() const override {
return Size();
}
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 6f49adf718..c7653457aa 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -1183,7 +1183,7 @@ ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::OpenSource(
// (e.g. dex metadata files)
LOG(WARNING) << "Could not find entry " << kDexMetadataProfileEntry
<< " in the zip archive. Creating an empty profile.";
- source->reset(ProfileSource::Create(nullptr));
+ source->reset(ProfileSource::Create(MemMap::Invalid()));
return kProfileLoadSuccess;
}
if (zip_entry->GetUncompressedLength() == 0) {
@@ -1192,11 +1192,9 @@ ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::OpenSource(
}
// TODO(calin) pass along file names to assist with debugging.
- std::unique_ptr<MemMap> map(zip_entry->MapDirectlyOrExtract(kDexMetadataProfileEntry,
- "profile file",
- error));
+ MemMap map = zip_entry->MapDirectlyOrExtract(kDexMetadataProfileEntry, "profile file", error);
- if (map != nullptr) {
+ if (map.IsValid()) {
source->reset(ProfileSource::Create(std::move(map)));
return kProfileLoadSuccess;
} else {
@@ -1211,11 +1209,11 @@ ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ProfileSource:
const std::string& debug_stage,
std::string* error) {
if (IsMemMap()) {
- if (mem_map_cur_ + byte_count > mem_map_->Size()) {
+ if (mem_map_cur_ + byte_count > mem_map_.Size()) {
return kProfileLoadBadData;
}
for (size_t i = 0; i < byte_count; i++) {
- buffer[i] = *(mem_map_->Begin() + mem_map_cur_);
+ buffer[i] = *(mem_map_.Begin() + mem_map_cur_);
mem_map_cur_++;
}
} else {
@@ -1237,13 +1235,13 @@ ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ProfileSource:
bool ProfileCompilationInfo::ProfileSource::HasConsumedAllData() const {
return IsMemMap()
- ? (mem_map_ == nullptr || mem_map_cur_ == mem_map_->Size())
+ ? (!mem_map_.IsValid() || mem_map_cur_ == mem_map_.Size())
: (testEOF(fd_) == 0);
}
bool ProfileCompilationInfo::ProfileSource::HasEmptyContent() const {
if (IsMemMap()) {
- return mem_map_ == nullptr || mem_map_->Size() == 0;
+ return !mem_map_.IsValid() || mem_map_.Size() == 0;
} else {
struct stat stat_buffer;
if (fstat(fd_, &stat_buffer) != 0) {
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index 3596f3e5a6..0dbf490cde 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -637,14 +637,14 @@ class ProfileCompilationInfo {
*/
static ProfileSource* Create(int32_t fd) {
DCHECK_GT(fd, -1);
- return new ProfileSource(fd, /*map*/ nullptr);
+ return new ProfileSource(fd, MemMap::Invalid());
}
/**
   * Create a profile source backed by a memory map. The map can be invalid in
   * which case it will be treated as an empty source.
*/
- static ProfileSource* Create(std::unique_ptr<MemMap>&& mem_map) {
+ static ProfileSource* Create(MemMap&& mem_map) {
return new ProfileSource(/*fd*/ -1, std::move(mem_map));
}
@@ -664,13 +664,13 @@ class ProfileCompilationInfo {
bool HasConsumedAllData() const;
private:
- ProfileSource(int32_t fd, std::unique_ptr<MemMap>&& mem_map)
+ ProfileSource(int32_t fd, MemMap&& mem_map)
: fd_(fd), mem_map_(std::move(mem_map)), mem_map_cur_(0) {}
bool IsMemMap() const { return fd_ == -1; }
int32_t fd_; // The fd is not owned by this class.
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
size_t mem_map_cur_; // Current position in the map to read from.
};
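ProfileSource shows the general shape of the conversion for owning classes: a std::unique_ptr<MemMap> member becomes a plain MemMap, with MemMap::Invalid() as the empty state and std::move at every transfer point. A generic sketch of that ownership model (IllustrativeSource is made up for illustration):

    #include <cstddef>
    #include <utility>

    #include "base/mem_map.h"  // ART-internal header.

    class IllustrativeSource {
     public:
      // Backed by a file descriptor; the map member stays invalid.
      explicit IllustrativeSource(int fd)
          : fd_(fd), mem_map_(art::MemMap::Invalid()) {}

      // Backed by a memory map; moving in avoids any copy (MemMap is
      // movable but not copyable).
      explicit IllustrativeSource(art::MemMap&& map)
          : fd_(-1), mem_map_(std::move(map)) {}

      bool IsMemMap() const { return fd_ == -1; }

      size_t Size() const {
        // An invalid map reports no usable contents.
        return mem_map_.IsValid() ? mem_map_.Size() : 0u;
      }

     private:
      int fd_;  // Not owned.
      art::MemMap mem_map_;
    };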
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index 42c3320ea5..417abaa435 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -35,7 +35,7 @@ static constexpr size_t kMaxMethodIds = 65535;
class ProfileCompilationInfoTest : public CommonArtTest {
public:
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonArtTest::SetUp();
allocator_.reset(new ArenaAllocator(&pool_));
}
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index c04c50e027..91283d6977 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -129,7 +129,7 @@ const DexFile* OpenDexFile(const OatDexFile* oat_dex_file, std::string* error_ms
}
template <typename ElfTypes>
-class OatSymbolizer FINAL {
+class OatSymbolizer final {
public:
OatSymbolizer(const OatFile* oat_file, const std::string& output_name, bool no_bits) :
oat_file_(oat_file),
@@ -708,7 +708,7 @@ class OatDumper {
return nullptr;
}
- std::unique_ptr<MemMap> mmap(MemMap::MapFile(
+ MemMap mmap = MemMap::MapFile(
file->GetLength(),
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
@@ -716,13 +716,13 @@ class OatDumper {
/* start offset */ 0,
/* low_4gb */ false,
vdex_filename.c_str(),
- error_msg));
- if (mmap == nullptr) {
+ error_msg);
+ if (!mmap.IsValid()) {
*error_msg = "Failed to mmap file " + vdex_filename + ": " + *error_msg;
return nullptr;
}
- std::unique_ptr<VdexFile> vdex_file(new VdexFile(mmap.release()));
+ std::unique_ptr<VdexFile> vdex_file(new VdexFile(std::move(mmap)));
if (!vdex_file->IsValid()) {
*error_msg = "Vdex file is not valid";
return nullptr;
@@ -1825,11 +1825,11 @@ class ImageDumper {
oat_file = OatFile::Open(/* zip_fd */ -1,
oat_location,
oat_location,
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- nullptr,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
+ /* abs_dex_location */ nullptr,
+ /* reservation */ nullptr,
&error_msg);
}
if (oat_file == nullptr) {
@@ -1980,7 +1980,7 @@ class ImageDumper {
public:
explicit DumpArtMethodVisitor(ImageDumper* image_dumper) : image_dumper_(image_dumper) {}
- virtual void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& indent_os = image_dumper_->vios_.Stream();
indent_os << method << " " << " ArtMethod: " << ArtMethod::PrettyMethod(method) << "\n";
image_dumper_->DumpMethod(method, indent_os);
@@ -2723,11 +2723,11 @@ static int DumpImages(Runtime* runtime, OatDumperOptions* options, std::ostream*
std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
options->app_oat_,
options->app_oat_,
- nullptr,
- nullptr,
- false,
- /*low_4gb*/true,
- nullptr,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ true,
+ /* abs_dex_location */ nullptr,
+ /* reservation */ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file " << options->app_oat_ << " with error " << error_msg;
@@ -2847,11 +2847,11 @@ static int DumpOat(Runtime* runtime,
std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
oat_filename,
oat_filename,
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_filename,
+ /* reservation */ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -2873,11 +2873,11 @@ static int SymbolizeOat(const char* oat_filename,
std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
oat_filename,
oat_filename,
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_filename,
+ /* reservation */ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -2921,11 +2921,11 @@ class IMTDumper {
std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
oat_filename,
oat_filename,
- nullptr,
- nullptr,
- false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
/*low_4gb*/false,
dex_filename,
+ /* reservation */ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -3350,8 +3350,7 @@ struct OatdumpArgs : public CmdlineArgs {
protected:
using Base = CmdlineArgs;
- virtual ParseStatus ParseCustom(const StringPiece& option,
- std::string* error_msg) OVERRIDE {
+ ParseStatus ParseCustom(const StringPiece& option, std::string* error_msg) override {
{
ParseStatus base_parse = Base::ParseCustom(option, error_msg);
if (base_parse != kParseUnknownArgument) {
@@ -3408,7 +3407,7 @@ struct OatdumpArgs : public CmdlineArgs {
return kParseOk;
}
- virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+ ParseStatus ParseChecks(std::string* error_msg) override {
// Infer boot image location from the image location if possible.
if (boot_image_location_ == nullptr) {
boot_image_location_ = image_location_;
@@ -3536,7 +3535,7 @@ struct OatdumpArgs : public CmdlineArgs {
};
struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
- virtual bool NeedsRuntime() OVERRIDE {
+ bool NeedsRuntime() override {
CHECK(args_ != nullptr);
// If we are only doing the oat file, disable absolute_addresses. Keep them for image dumping.
@@ -3563,7 +3562,7 @@ struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
!args_->symbolize_;
}
- virtual bool ExecuteWithoutRuntime() OVERRIDE {
+ bool ExecuteWithoutRuntime() override {
CHECK(args_ != nullptr);
CHECK(args_->oat_filename_ != nullptr);
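Besides the override cleanup, the oatdump hunks annotate each positional literal passed to OatFile::Open() with its parameter name, which is what makes the added `/* reservation */ nullptr` argument reviewable at a glance. A tiny generic example of the idiom (Configure is made up for illustration):

    #include <string>

    // Several adjacent bool/pointer parameters are unreadable positionally...
    void Configure(const std::string& name,
                   bool executable,
                   bool low_4gb,
                   const char* abs_dex_location) {
      (void)name; (void)executable; (void)low_4gb; (void)abs_dex_location;
    }

    void Example() {
      // ...so each literal is tagged with the parameter it binds to. The
      // compiler ignores these comments, though clang-tidy's
      // bugprone-argument-comment check can verify them.
      Configure("oat",
                /* executable */ false,
                /* low_4gb */ true,
                /* abs_dex_location */ nullptr);
    }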
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index 6e991dee3d..d9f34a50db 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -58,13 +58,13 @@ struct JvmtiMethodInspectionCallback : public art::MethodInspectionCallback {
explicit JvmtiMethodInspectionCallback(DeoptManager* manager) : manager_(manager) {}
bool IsMethodBeingInspected(art::ArtMethod* method)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+ override REQUIRES_SHARED(art::Locks::mutator_lock_);
bool IsMethodSafeToJit(art::ArtMethod* method)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+ override REQUIRES_SHARED(art::Locks::mutator_lock_);
bool MethodNeedsDebugVersion(art::ArtMethod* method)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+ override REQUIRES_SHARED(art::Locks::mutator_lock_);
private:
DeoptManager* manager_;
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index 6a8ba48109..e98517fdff 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -50,7 +50,7 @@ namespace impl {
// pending exceptions since they can cause new ones to be thrown. In accordance with the JVMTI
// specification we allow exceptions originating from events to overwrite the current exception,
// including exceptions originating from earlier events.
-class ScopedEventDispatchEnvironment FINAL : public art::ValueObject {
+class ScopedEventDispatchEnvironment final : public art::ValueObject {
public:
ScopedEventDispatchEnvironment() : env_(nullptr), throw_(nullptr, nullptr) {
DCHECK_EQ(art::Thread::Current()->GetState(), art::ThreadState::kNative);
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index f71a5dc72d..43d0b10914 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -265,7 +265,7 @@ class JvmtiDdmChunkListener : public art::DdmCallback {
explicit JvmtiDdmChunkListener(EventHandler* handler) : handler_(handler) {}
void DdmPublishChunk(uint32_t type, const art::ArrayRef<const uint8_t>& data)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kDdmPublishChunk)) {
art::Thread* self = art::Thread::Current();
handler_->DispatchEvent<ArtJvmtiEvent::kDdmPublishChunk>(
@@ -288,7 +288,7 @@ class JvmtiAllocationListener : public art::gc::AllocationListener {
explicit JvmtiAllocationListener(EventHandler* handler) : handler_(handler) {}
void ObjectAllocated(art::Thread* self, art::ObjPtr<art::mirror::Object>* obj, size_t byte_count)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
DCHECK_EQ(self, art::Thread::Current());
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kVmObjectAlloc)) {
@@ -337,7 +337,7 @@ class JvmtiMonitorListener : public art::MonitorCallback {
explicit JvmtiMonitorListener(EventHandler* handler) : handler_(handler) {}
void MonitorContendedLocking(art::Monitor* m)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorContendedEnter)) {
art::Thread* self = art::Thread::Current();
art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -351,7 +351,7 @@ class JvmtiMonitorListener : public art::MonitorCallback {
}
void MonitorContendedLocked(art::Monitor* m)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorContendedEntered)) {
art::Thread* self = art::Thread::Current();
art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -365,7 +365,7 @@ class JvmtiMonitorListener : public art::MonitorCallback {
}
void ObjectWaitStart(art::Handle<art::mirror::Object> obj, int64_t timeout)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWait)) {
art::Thread* self = art::Thread::Current();
art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -392,7 +392,7 @@ class JvmtiMonitorListener : public art::MonitorCallback {
//
// See b/65558434 for more discussion.
void MonitorWaitFinished(art::Monitor* m, bool timeout)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWaited)) {
art::Thread* self = art::Thread::Current();
art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -429,11 +429,11 @@ class JvmtiGcPauseListener : public art::gc::GcPauseListener {
start_enabled_(false),
finish_enabled_(false) {}
- void StartPause() OVERRIDE {
+ void StartPause() override {
handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionStart>(art::Thread::Current());
}
- void EndPause() OVERRIDE {
+ void EndPause() override {
handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionFinish>(art::Thread::Current());
}
@@ -475,7 +475,7 @@ static void SetupGcPauseTracking(JvmtiGcPauseListener* listener, ArtJvmtiEvent e
}
}
-class JvmtiMethodTraceListener FINAL : public art::instrumentation::InstrumentationListener {
+class JvmtiMethodTraceListener final : public art::instrumentation::InstrumentationListener {
public:
explicit JvmtiMethodTraceListener(EventHandler* handler) : event_handler_(handler) {}
@@ -484,7 +484,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (!method->IsRuntimeMethod() &&
event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodEntry)) {
art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -501,7 +501,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> return_value)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (!method->IsRuntimeMethod() &&
event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
DCHECK_EQ(
@@ -528,7 +528,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const art::JValue& return_value)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (!method->IsRuntimeMethod() &&
event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
DCHECK_NE(
@@ -556,7 +556,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (!method->IsRuntimeMethod() &&
event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
jvalue val;
@@ -586,7 +586,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method,
uint32_t new_dex_pc)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
DCHECK(!method->IsRuntimeMethod());
// Default methods might be copied to multiple classes. We need to get the canonical version of
// this method so that we can check for breakpoints correctly.
@@ -613,7 +613,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::ArtMethod* method,
uint32_t dex_pc,
art::ArtField* field)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldAccess)) {
art::JNIEnvExt* jnienv = self->GetJniEnv();
// DCHECK(!self->IsExceptionPending());
@@ -638,7 +638,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
uint32_t dex_pc,
art::ArtField* field,
art::Handle<art::mirror::Object> new_val)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
art::JNIEnvExt* jnienv = self->GetJniEnv();
// DCHECK(!self->IsExceptionPending());
@@ -670,7 +670,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
uint32_t dex_pc,
art::ArtField* field,
const art::JValue& field_value)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
art::JNIEnvExt* jnienv = self->GetJniEnv();
DCHECK(!self->IsExceptionPending());
@@ -700,7 +700,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
}
void WatchedFramePop(art::Thread* self, const art::ShadowFrame& frame)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
art::JNIEnvExt* jnienv = self->GetJniEnv();
jboolean is_exception_pending = self->IsExceptionPending();
RunEventCallback<ArtJvmtiEvent::kFramePop>(
@@ -720,7 +720,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
// Finds the location where this exception will most likely be caught. We ignore intervening
// native frames (which could catch the exception) and return the closest java frame with a
// compatible catch statement.
- class CatchLocationFinder FINAL : public art::StackVisitor {
+ class CatchLocationFinder final : public art::StackVisitor {
public:
CatchLocationFinder(art::Thread* target,
art::Handle<art::mirror::Class> exception_class,
@@ -733,7 +733,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
catch_method_ptr_(out_catch_method),
catch_dex_pc_ptr_(out_catch_pc) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::ArtMethod* method = GetMethod();
DCHECK(method != nullptr);
if (method->IsRuntimeMethod()) {
@@ -782,7 +782,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
// Call-back when an exception is thrown.
void ExceptionThrown(art::Thread* self, art::Handle<art::mirror::Throwable> exception_object)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
DCHECK(self->IsExceptionThrownByCurrentMethod(exception_object.Get()));
// The instrumentation events get rid of this for us.
DCHECK(!self->IsExceptionPending());
@@ -812,7 +812,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
// Call-back when an exception is handled.
void ExceptionHandled(art::Thread* self, art::Handle<art::mirror::Throwable> exception_object)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
// Since the exception has already been handled there shouldn't be one pending.
DCHECK(!self->IsExceptionPending());
if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kExceptionCatch)) {
@@ -839,7 +839,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
return;
}
@@ -849,7 +849,7 @@ class JvmtiMethodTraceListener FINAL : public art::instrumentation::Instrumentat
art::ArtMethod* caller ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::ArtMethod* callee ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(art::Locks::mutator_lock_) override {
return;
}
@@ -959,7 +959,7 @@ void EventHandler::HandleLocalAccessCapabilityAdded() {
: runtime_(runtime) {}
bool operator()(art::ObjPtr<art::mirror::Class> klass)
- OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ override REQUIRES(art::Locks::mutator_lock_) {
if (!klass->IsLoaded()) {
// Skip classes that aren't loaded since they might not have fully allocated and initialized
// their methods. Furthermore, since the jvmti-plugin must have been loaded by this point
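
The bulk of the hunks above (and throughout this patch) mechanically replace ART's OVERRIDE and FINAL portability macros with the C++11 `override` and `final` keywords. A minimal standalone sketch (toy types, not ART code) of what the keyword buys over the macro: the compiler now rejects a would-be override whose signature does not match the base class.

```cpp
struct Listener {
  virtual void OnEvent(int id) { (void)id; }
  virtual ~Listener() = default;
};

struct TracingListener final : Listener {
  // Matches the base signature exactly, so this compiles.
  void OnEvent(int id) override { last_id_ = id; }

  // A mismatch such as `void OnEvent(long id) override` would now be a
  // compile error instead of silently declaring a new, unrelated virtual.
  int last_id_ = 0;
};
```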
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index a660fb56c4..2ca87fd482 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -79,11 +79,8 @@ static void DoDexUnquicken(const art::DexFile& new_dex_file,
const art::VdexFile* vdex = GetVdex(original_dex_file);
if (vdex != nullptr) {
vdex->UnquickenDexFile(new_dex_file, original_dex_file, /* decompile_return_instruction */true);
- } else {
- // The dex file isn't quickened since it is being used directly. We might still have hiddenapis
- // so we need to get rid of those.
- UnhideApis(new_dex_file);
}
+ UnhideApis(new_dex_file);
}
static void DCheckVerifyDexFile(const art::DexFile& dex) {
diff --git a/openjdkjvmti/object_tagging.h b/openjdkjvmti/object_tagging.h
index 1b8366a501..4181302f3a 100644
--- a/openjdkjvmti/object_tagging.h
+++ b/openjdkjvmti/object_tagging.h
@@ -45,15 +45,15 @@ namespace openjdkjvmti {
struct ArtJvmTiEnv;
class EventHandler;
-class ObjectTagTable FINAL : public JvmtiWeakTable<jlong> {
+class ObjectTagTable final : public JvmtiWeakTable<jlong> {
public:
ObjectTagTable(EventHandler* event_handler, ArtJvmTiEnv* env)
: event_handler_(event_handler), jvmti_env_(env) {}
- bool Set(art::mirror::Object* obj, jlong tag) OVERRIDE
+ bool Set(art::mirror::Object* obj, jlong tag) override
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_);
- bool SetLocked(art::mirror::Object* obj, jlong tag) OVERRIDE
+ bool SetLocked(art::mirror::Object* obj, jlong tag) override
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
@@ -73,8 +73,8 @@ class ObjectTagTable FINAL : public JvmtiWeakTable<jlong> {
}
protected:
- bool DoesHandleNullOnSweep() OVERRIDE;
- void HandleNullSweep(jlong tag) OVERRIDE;
+ bool DoesHandleNullOnSweep() override;
+ void HandleNullSweep(jlong tag) override;
private:
EventHandler* event_handler_;
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 9bea18a763..f1d6fb0b5d 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -91,10 +91,8 @@ static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self,
// Make the mmap
std::string error_msg;
art::ArrayRef<const unsigned char> final_data(final_dex_data, final_len);
- std::unique_ptr<art::MemMap> map(Redefiner::MoveDataToMemMap(orig_location,
- final_data,
- &error_msg));
- if (map.get() == nullptr) {
+ art::MemMap map = Redefiner::MoveDataToMemMap(orig_location, final_data, &error_msg);
+ if (!map.IsValid()) {
LOG(WARNING) << "Unable to allocate mmap for redefined dex file! Error was: " << error_msg;
self->ThrowOutOfMemoryError(StringPrintf(
"Unable to allocate dex file for transformation of %s", descriptor).c_str());
@@ -102,15 +100,15 @@ static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self,
}
// Make a dex-file
- if (map->Size() < sizeof(art::DexFile::Header)) {
+ if (map.Size() < sizeof(art::DexFile::Header)) {
LOG(WARNING) << "Could not read dex file header because dex_data was too short";
art::ThrowClassFormatError(nullptr,
"Unable to read transformed dex file of %s",
descriptor);
return nullptr;
}
- uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
- std::string map_name = map->GetName();
+ uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map.Begin())->checksum_;
+ std::string map_name = map.GetName();
const art::ArtDexFileLoader dex_file_loader;
std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map_name,
checksum,
@@ -165,7 +163,7 @@ struct ClassCallback : public art::ClassLoadCallback {
const art::DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
/*out*/art::DexFile const** final_dex_file,
/*out*/art::DexFile::ClassDef const** final_class_def)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
bool is_enabled =
event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookRetransformable) ||
event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookNonRetransformable);
@@ -383,7 +381,7 @@ struct ClassCallback : public art::ClassLoadCallback {
void VisitRoots(art::mirror::Object*** roots,
size_t count,
const art::RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE {
+ override {
for (size_t i = 0; i != count; ++i) {
if (*roots[i] == input_) {
*roots[i] = output_;
@@ -394,7 +392,7 @@ struct ClassCallback : public art::ClassLoadCallback {
void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots,
size_t count,
const art::RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
for (size_t i = 0; i != count; ++i) {
if (roots[i]->AsMirrorPtr() == input_) {
roots[i]->Assign(output_);
@@ -420,7 +418,7 @@ struct ClassCallback : public art::ClassLoadCallback {
WeakGlobalUpdate(art::mirror::Class* root_input, art::mirror::Class* root_output)
: input_(root_input), output_(root_output) {}
- art::mirror::Object* IsMarked(art::mirror::Object* obj) OVERRIDE {
+ art::mirror::Object* IsMarked(art::mirror::Object* obj) override {
if (obj == input_) {
return output_;
}
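
From here on the patch also migrates openjdkjvmti and patchoat from `std::unique_ptr<art::MemMap>` to `MemMap` held by value: failure becomes `!map.IsValid()` instead of a null pointer, member access switches from `->` to `.`, and ownership moves with the object. A minimal self-contained sketch (not the real art::MemMap, which additionally owns and unmaps the underlying pages) of the move-only value-type idiom being adopted:

```cpp
#include <cstddef>
#include <utility>

class Mapping {
 public:
  Mapping() = default;                         // Default state is invalid/empty.
  Mapping(void* begin, std::size_t size) : begin_(begin), size_(size) {}
  Mapping(Mapping&& other) noexcept { swap(other); }
  Mapping& operator=(Mapping&& other) noexcept { swap(other); return *this; }
  Mapping(const Mapping&) = delete;            // Move-only, like a unique_ptr.
  Mapping& operator=(const Mapping&) = delete;

  bool IsValid() const { return begin_ != nullptr; }
  void Reset() { begin_ = nullptr; size_ = 0; }
  void swap(Mapping& other) {
    std::swap(begin_, other.begin_);
    std::swap(size_, other.size_);
  }
  void* Begin() const { return begin_; }
  std::size_t Size() const { return size_; }

 private:
  void* begin_ = nullptr;
  std::size_t size_ = 0;
};
```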
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index dce2733e7e..895e73450e 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -49,26 +49,27 @@ namespace openjdkjvmti {
void ArtClassDefinition::InitializeMemory() const {
DCHECK(art::MemMap::kCanReplaceMapping);
VLOG(signals) << "Initializing de-quickened memory for dex file of " << name_;
- CHECK(dex_data_mmap_ != nullptr);
- CHECK(temp_mmap_ != nullptr);
- CHECK_EQ(dex_data_mmap_->GetProtect(), PROT_NONE);
- CHECK_EQ(temp_mmap_->GetProtect(), PROT_READ | PROT_WRITE);
+ CHECK(dex_data_mmap_.IsValid());
+ CHECK(temp_mmap_.IsValid());
+ CHECK_EQ(dex_data_mmap_.GetProtect(), PROT_NONE);
+ CHECK_EQ(temp_mmap_.GetProtect(), PROT_READ | PROT_WRITE);
std::string desc = std::string("L") + name_ + ";";
std::unique_ptr<FixedUpDexFile>
fixed_dex_file(FixedUpDexFile::Create(*initial_dex_file_unquickened_, desc.c_str()));
CHECK(fixed_dex_file.get() != nullptr);
- CHECK_LE(fixed_dex_file->Size(), temp_mmap_->Size());
- CHECK_EQ(temp_mmap_->Size(), dex_data_mmap_->Size());
+ CHECK_LE(fixed_dex_file->Size(), temp_mmap_.Size());
+ CHECK_EQ(temp_mmap_.Size(), dex_data_mmap_.Size());
// Copy the data to the temp mmap.
- memcpy(temp_mmap_->Begin(), fixed_dex_file->Begin(), fixed_dex_file->Size());
+ memcpy(temp_mmap_.Begin(), fixed_dex_file->Begin(), fixed_dex_file->Size());
// Move the mmap atomically.
- art::MemMap* source = temp_mmap_.release();
+ art::MemMap source;
+ source.swap(temp_mmap_);
std::string error;
- CHECK(dex_data_mmap_->ReplaceWith(&source, &error)) << "Failed to replace mmap for "
- << name_ << " because " << error;
- CHECK(dex_data_mmap_->Protect(PROT_READ));
+ CHECK(dex_data_mmap_.ReplaceWith(&source, &error)) << "Failed to replace mmap for "
+ << name_ << " because " << error;
+ CHECK(dex_data_mmap_.Protect(PROT_READ));
}
bool ArtClassDefinition::IsModified() const {
@@ -85,13 +86,13 @@ bool ArtClassDefinition::IsModified() const {
}
// The dex_data_ was never touched by the agents.
- if (dex_data_mmap_ != nullptr && dex_data_mmap_->GetProtect() == PROT_NONE) {
- if (current_dex_file_.data() == dex_data_mmap_->Begin()) {
+ if (dex_data_mmap_.IsValid() && dex_data_mmap_.GetProtect() == PROT_NONE) {
+ if (current_dex_file_.data() == dex_data_mmap_.Begin()) {
// the dex_data_ looks like it changed (not equal to current_dex_file_) but we never
// initialized the dex_data_mmap_. This means the new_dex_data was filled in without looking
// at the initial dex_data_.
return true;
- } else if (dex_data_.data() == dex_data_mmap_->Begin()) {
+ } else if (dex_data_.data() == dex_data_mmap_.Begin()) {
// The dex file used to have modifications but they were not added again.
return true;
} else {
@@ -244,26 +245,24 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
std::string mmap_name("anon-mmap-for-redefine: ");
mmap_name += name_;
std::string error;
- dex_data_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
- nullptr,
- dequick_size,
- PROT_NONE,
- /*low_4gb*/ false,
- /*reuse*/ false,
- &error));
- mmap_name += "-TEMP";
- temp_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
- nullptr,
+ dex_data_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
+ /* addr */ nullptr,
dequick_size,
- PROT_READ | PROT_WRITE,
+ PROT_NONE,
/*low_4gb*/ false,
- /*reuse*/ false,
- &error));
- if (UNLIKELY(dex_data_mmap_ != nullptr && temp_mmap_ != nullptr)) {
+ &error);
+ mmap_name += "-TEMP";
+ temp_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
+ /* addr */ nullptr,
+ dequick_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ false,
+ &error);
+ if (UNLIKELY(dex_data_mmap_.IsValid() && temp_mmap_.IsValid())) {
// Need to save the initial dexfile so we don't need to search for it in the fault-handler.
initial_dex_file_unquickened_ = quick_dex;
- dex_data_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
- dex_data_mmap_->Size());
+ dex_data_ = art::ArrayRef<const unsigned char>(dex_data_mmap_.Begin(),
+ dex_data_mmap_.Size());
if (from_class_ext_) {
// We got initial from class_ext so the current one must have undergone redefinition so no
// cdex or quickening stuff.
@@ -275,14 +274,14 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
// This class hasn't been redefined before. The dequickened current data is the same as the
// dex_data_mmap_ when it's filled in. We don't need to copy anything because the mmap will
// not be cleared until after everything is done.
- current_dex_file_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
+ current_dex_file_ = art::ArrayRef<const unsigned char>(dex_data_mmap_.Begin(),
dequick_size);
}
return;
}
}
- dex_data_mmap_.reset(nullptr);
- temp_mmap_.reset(nullptr);
+ dex_data_mmap_.Reset();
+ temp_mmap_.Reset();
// Failed to mmap a large enough area (or on-demand dequickening was disabled). This is
// unfortunate, but since the size is currently just a guess anyway we might as well try to
// do it manually.
diff --git a/openjdkjvmti/ti_class_definition.h b/openjdkjvmti/ti_class_definition.h
index f888a7474f..224e664459 100644
--- a/openjdkjvmti/ti_class_definition.h
+++ b/openjdkjvmti/ti_class_definition.h
@@ -56,8 +56,8 @@ class ArtClassDefinition {
loader_(nullptr),
name_(),
protection_domain_(nullptr),
- dex_data_mmap_(nullptr),
- temp_mmap_(nullptr),
+ dex_data_mmap_(),
+ temp_mmap_(),
dex_data_memory_(),
initial_dex_file_unquickened_(nullptr),
dex_data_(),
@@ -100,9 +100,9 @@ class ArtClassDefinition {
}
bool ContainsAddress(uintptr_t ptr) const {
- return dex_data_mmap_ != nullptr &&
- reinterpret_cast<uintptr_t>(dex_data_mmap_->Begin()) <= ptr &&
- reinterpret_cast<uintptr_t>(dex_data_mmap_->End()) > ptr;
+ return dex_data_mmap_.IsValid() &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_.Begin()) <= ptr &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_.End()) > ptr;
}
bool IsModified() const REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -128,9 +128,9 @@ class ArtClassDefinition {
bool IsLazyDefinition() const {
DCHECK(IsInitialized());
- return dex_data_mmap_ != nullptr &&
- dex_data_.data() == dex_data_mmap_->Begin() &&
- dex_data_mmap_->GetProtect() == PROT_NONE;
+ return dex_data_mmap_.IsValid() &&
+ dex_data_.data() == dex_data_mmap_.Begin() &&
+ dex_data_mmap_.GetProtect() == PROT_NONE;
}
jobject GetProtectionDomain() const {
@@ -159,9 +159,9 @@ class ArtClassDefinition {
// Mmap that will be filled with the original-dex-file lazily if it needs to be de-quickened or
// de-compact-dex'd
- mutable std::unique_ptr<art::MemMap> dex_data_mmap_;
+ mutable art::MemMap dex_data_mmap_;
// This is a temporary mmap we will use to be able to fill the dex file data atomically.
- mutable std::unique_ptr<art::MemMap> temp_mmap_;
+ mutable art::MemMap temp_mmap_;
// Backing storage for the current dex_data_ if it needs to be cleaned up.
std::vector<unsigned char> dex_data_memory_;
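
Reusing the hypothetical Mapping type from the sketch after the ti_class.cc hunks, the header change above boils down to: value members default-construct into the invalid state (no more `(nullptr)` initializers), predicates test `IsValid()`, and teardown is `Reset()` instead of `reset(nullptr)`. A short sketch of a holder in that style:

```cpp
#include <cstdint>
// Assumes the Mapping class sketched earlier in this section.

class DefinitionSketch {
 public:
  bool HasData() const { return data_map_.IsValid(); }

  // Mirrors ContainsAddress() above: valid map and ptr within [Begin, End).
  bool Contains(uintptr_t ptr) const {
    uintptr_t begin = reinterpret_cast<uintptr_t>(data_map_.Begin());
    return data_map_.IsValid() && begin <= ptr && ptr < begin + data_map_.Size();
  }

  void Drop() { data_map_.Reset(); }  // Replaces data_map_.reset(nullptr).

 private:
  Mapping data_map_;                  // Default-constructed == invalid.
};
```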
diff --git a/openjdkjvmti/ti_dump.cc b/openjdkjvmti/ti_dump.cc
index 253580e0e1..c9abb71e4c 100644
--- a/openjdkjvmti/ti_dump.cc
+++ b/openjdkjvmti/ti_dump.cc
@@ -44,7 +44,7 @@
namespace openjdkjvmti {
struct DumpCallback : public art::RuntimeSigQuitCallback {
- void SigQuit() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void SigQuit() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::Thread* thread = art::Thread::Current();
art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
event_handler->DispatchEvent<ArtJvmtiEvent::kDataDumpRequest>(art::Thread::Current());
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index d1583e5a5a..6c79a602c3 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -824,7 +824,7 @@ jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env,
user_data);
}
-class FollowReferencesHelper FINAL {
+class FollowReferencesHelper final {
public:
FollowReferencesHelper(HeapUtil* h,
jvmtiEnv* jvmti_env,
@@ -892,7 +892,7 @@ class FollowReferencesHelper FINAL {
}
private:
- class CollectAndReportRootsVisitor FINAL : public art::RootVisitor {
+ class CollectAndReportRootsVisitor final : public art::RootVisitor {
public:
CollectAndReportRootsVisitor(FollowReferencesHelper* helper,
ObjectTagTable* tag_table,
@@ -905,7 +905,7 @@ class FollowReferencesHelper FINAL {
stop_reports_(false) {}
void VisitRoots(art::mirror::Object*** roots, size_t count, const art::RootInfo& info)
- OVERRIDE
+ override
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) {
for (size_t i = 0; i != count; ++i) {
@@ -916,7 +916,7 @@ class FollowReferencesHelper FINAL {
void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots,
size_t count,
const art::RootInfo& info)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_)
+ override REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) {
for (size_t i = 0; i != count; ++i) {
AddRoot(roots[i]->AsMirrorPtr(), info);
@@ -1411,7 +1411,7 @@ jvmtiError HeapUtil::GetLoadedClasses(jvmtiEnv* env,
explicit ReportClassVisitor(art::Thread* self) : self_(self) {}
bool operator()(art::ObjPtr<art::mirror::Class> klass)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (klass->IsLoaded() || klass->IsErroneous()) {
classes_.push_back(self_->GetJniEnv()->AddLocalReference<jclass>(klass));
}
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 87d832caec..1588df4086 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -66,7 +66,7 @@ struct TiMethodCallback : public art::MethodCallback {
void RegisterNativeMethod(art::ArtMethod* method,
const void* cur_method,
/*out*/void** new_method)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kNativeMethodBind)) {
art::Thread* thread = art::Thread::Current();
art::JNIEnvExt* jnienv = thread->GetJniEnv();
@@ -550,7 +550,7 @@ class CommonLocalVariableClosure : public art::Closure {
CommonLocalVariableClosure(jint depth, jint slot)
: result_(ERR(INTERNAL)), depth_(depth), slot_(slot) {}
- void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES(art::Locks::mutator_lock_) {
art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
art::ScopedAssertNoThreadSuspension sants("CommonLocalVariableClosure::Run");
std::unique_ptr<art::Context> context(art::Context::Create());
@@ -702,7 +702,7 @@ class GetLocalVariableClosure : public CommonLocalVariableClosure {
jvmtiError GetTypeError(art::ArtMethod* method ATTRIBUTE_UNUSED,
art::Primitive::Type slot_type,
const std::string& descriptor ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
switch (slot_type) {
case art::Primitive::kPrimByte:
case art::Primitive::kPrimChar:
@@ -722,7 +722,7 @@ class GetLocalVariableClosure : public CommonLocalVariableClosure {
}
jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
switch (type_) {
case art::Primitive::kPrimNot: {
uint32_t ptr_val;
@@ -816,7 +816,7 @@ class SetLocalVariableClosure : public CommonLocalVariableClosure {
jvmtiError GetTypeError(art::ArtMethod* method,
art::Primitive::Type slot_type,
const std::string& descriptor)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
switch (slot_type) {
case art::Primitive::kPrimNot: {
if (type_ != art::Primitive::kPrimNot) {
@@ -852,7 +852,7 @@ class SetLocalVariableClosure : public CommonLocalVariableClosure {
}
jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
switch (type_) {
case art::Primitive::kPrimNot: {
uint32_t ptr_val;
@@ -941,7 +941,7 @@ class GetLocalInstanceClosure : public art::Closure {
depth_(depth),
val_(nullptr) {}
- void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES(art::Locks::mutator_lock_) {
art::ScopedAssertNoThreadSuspension sants("GetLocalInstanceClosure::Run");
art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
std::unique_ptr<art::Context> context(art::Context::Create());
diff --git a/openjdkjvmti/ti_phase.cc b/openjdkjvmti/ti_phase.cc
index 7157974c13..4fa97f10aa 100644
--- a/openjdkjvmti/ti_phase.cc
+++ b/openjdkjvmti/ti_phase.cc
@@ -56,7 +56,7 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback {
return soa.AddLocalReference<jthread>(soa.Self()->GetPeer());
}
- void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) override {
art::Thread* self = art::Thread::Current();
switch (phase) {
case RuntimePhase::kInitialAgents:
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index dd0428dfcf..2ec2f04e73 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -158,7 +158,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
obsoleted_methods_(obsoleted_methods),
obsolete_maps_(obsolete_maps) { }
- ~ObsoleteMethodStackVisitor() OVERRIDE {}
+ ~ObsoleteMethodStackVisitor() override {}
public:
// Returns true if we successfully installed obsolete methods on this thread, filling
@@ -177,7 +177,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
visitor.WalkStack();
}
- bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES(art::Locks::mutator_lock_) {
art::ScopedAssertNoThreadSuspension snts("Fixing up the stack for obsolete methods.");
art::ArtMethod* old_method = GetMethod();
if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
@@ -300,24 +300,22 @@ jvmtiError Redefiner::GetClassRedefinitionError(art::Handle<art::mirror::Class>
}
// Moves dex data to an anonymous, read-only mmap'd region.
-std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
- art::ArrayRef<const unsigned char> data,
- std::string* error_msg) {
- std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
+art::MemMap Redefiner::MoveDataToMemMap(const std::string& original_location,
+ art::ArrayRef<const unsigned char> data,
+ std::string* error_msg) {
+ art::MemMap map = art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
- nullptr,
+ /* addr */ nullptr,
data.size(),
PROT_READ|PROT_WRITE,
- /*low_4gb*/false,
- /*reuse*/false,
- error_msg));
- if (map == nullptr) {
- return map;
- }
- memcpy(map->Begin(), data.data(), data.size());
- // Make the dex files mmap read only. This matches how other DexFiles are mmaped and prevents
- // programs from corrupting it.
- map->Protect(PROT_READ);
+ /*low_4gb*/ false,
+ error_msg);
+ if (LIKELY(map.IsValid())) {
+ memcpy(map.Begin(), data.data(), data.size());
+ // Make the dex file's mmap read-only. This matches how other DexFiles are mmapped and prevents
+ // programs from corrupting it.
+ map.Protect(PROT_READ);
+ }
return map;
}
@@ -429,23 +427,22 @@ jvmtiError Redefiner::AddRedefinition(ArtJvmTiEnv* env, const ArtClassDefinition
}
JvmtiUniquePtr<char> generic_unique_ptr(MakeJvmtiUniquePtr(env, generic_ptr_unused));
JvmtiUniquePtr<char> signature_unique_ptr(MakeJvmtiUniquePtr(env, signature_ptr));
- std::unique_ptr<art::MemMap> map(MoveDataToMemMap(original_dex_location,
- def.GetDexData(),
- error_msg_));
+ art::MemMap map = MoveDataToMemMap(original_dex_location, def.GetDexData(), error_msg_);
std::ostringstream os;
- if (map.get() == nullptr) {
+ if (!map.IsValid()) {
os << "Failed to create anonymous mmap for modified dex file of class " << def.GetName()
<< "in dex file " << original_dex_location << " because: " << *error_msg_;
*error_msg_ = os.str();
return ERR(OUT_OF_MEMORY);
}
- if (map->Size() < sizeof(art::DexFile::Header)) {
+ if (map.Size() < sizeof(art::DexFile::Header)) {
*error_msg_ = "Could not read dex file header because dex_data was too short";
return ERR(INVALID_CLASS_FORMAT);
}
- uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
+ std::string name = map.GetName();
+ uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map.Begin())->checksum_;
const art::ArtDexFileLoader dex_file_loader;
- std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map->GetName(),
+ std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(name,
checksum,
std::move(map),
/*verify*/true,
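
One subtle detail in the AddRedefinition hunk: `map.GetName()` is hoisted into a local `name` before `std::move(map)` is passed to `Open()`. Reading from `map` in the same argument list that consumes it would be unsafe, because C++ leaves the evaluation order of function arguments unspecified. A standalone sketch (toy types, not the ArtDexFileLoader API) of the hazard:

```cpp
#include <iostream>
#include <string>
#include <utility>

struct Map {
  std::string name;
  std::string GetName() const { return name; }
};

void Open(const std::string& name, Map map) {
  std::cout << name << " / " << map.name << "\n";
}

int main() {
  Map map{"classes.dex-transformed"};

  // UNSAFE: GetName() may be evaluated after `map` has been moved from:
  //   Open(map.GetName(), std::move(map));

  // SAFE, as in the hunk above: read what you need from `map` first.
  std::string name = map.GetName();
  Open(name, std::move(map));
}
```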
diff --git a/openjdkjvmti/ti_redefine.h b/openjdkjvmti/ti_redefine.h
index 6d8f6bf0db..f4a4280aac 100644
--- a/openjdkjvmti/ti_redefine.h
+++ b/openjdkjvmti/ti_redefine.h
@@ -78,9 +78,9 @@ class Redefiner {
static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
- static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
- art::ArrayRef<const unsigned char> data,
- std::string* error_msg);
+ static art::MemMap MoveDataToMemMap(const std::string& original_location,
+ art::ArrayRef<const unsigned char> data,
+ std::string* error_msg);
// Helper for checking if redefinition/retransformation is allowed.
static jvmtiError GetClassRedefinitionError(jclass klass, /*out*/std::string* error_msg)
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index bcbab14cdd..1189b1dec5 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -186,7 +186,7 @@ static void Update() REQUIRES_SHARED(art::Locks::mutator_lock_) {
}
struct SearchCallback : public art::RuntimePhaseCallback {
- void NextRuntimePhase(RuntimePhase phase) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void NextRuntimePhase(RuntimePhase phase) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (phase == RuntimePhase::kStart) {
// It's time to update the system properties.
Update();
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 318d98d877..b6969afff1 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -128,7 +128,7 @@ struct GetStackTraceVectorClosure : public art::Closure {
start_result(0),
stop_result(0) {}
- void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
auto frames_fn = [&](jvmtiFrameInfo info) {
frames.push_back(info);
};
@@ -195,7 +195,7 @@ struct GetStackTraceDirectClosure : public art::Closure {
DCHECK_GE(start_input, 0u);
}
- void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
auto frames_fn = [&](jvmtiFrameInfo info) {
frame_buffer[index] = info;
++index;
@@ -287,7 +287,7 @@ struct GetAllStackTracesVectorClosure : public art::Closure {
GetAllStackTracesVectorClosure(size_t stop, Data* data_)
: barrier(0), stop_input(stop), data(data_) {}
- void Run(art::Thread* thread) OVERRIDE
+ void Run(art::Thread* thread) override
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!data->mutex) {
art::Thread* self = art::Thread::Current();
@@ -678,7 +678,7 @@ struct GetFrameCountClosure : public art::Closure {
public:
GetFrameCountClosure() : count(0) {}
- void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
GetFrameCountVisitor visitor(self);
visitor.WalkStack(false);
@@ -759,7 +759,7 @@ struct GetLocationClosure : public art::Closure {
public:
explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
- void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
GetLocationVisitor visitor(self, n);
visitor.WalkStack(false);
@@ -842,7 +842,7 @@ struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor
delete context_;
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
if (!GetMethod()->IsRuntimeMethod()) {
art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
@@ -867,7 +867,7 @@ struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor
}
void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
for (const art::Handle<art::mirror::Object>& m : monitors) {
if (m.Get() == obj) {
return;
@@ -889,7 +889,7 @@ struct MonitorInfoClosure : public art::Closure {
explicit MonitorInfoClosure(Fn handle_results)
: err_(OK), handle_results_(handle_results) {}
- void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
// Find the monitors on the stack.
MonitorVisitor visitor(target);
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 949b566860..e53309445d 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -82,7 +82,7 @@ struct ThreadCallback : public art::ThreadLifecycleCallback {
thread.get());
}
- void ThreadStart(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void ThreadStart(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (!started) {
// Runtime isn't started. We only expect at most the signal handler or JIT threads to be
// started here.
@@ -101,7 +101,7 @@ struct ThreadCallback : public art::ThreadLifecycleCallback {
Post<ArtJvmtiEvent::kThreadStart>(self);
}
- void ThreadDeath(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void ThreadDeath(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
Post<ArtJvmtiEvent::kThreadEnd>(self);
}
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index 8797553b07..d87ca56b85 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -68,7 +68,7 @@
namespace openjdkjvmti {
// A FaultHandler that will deal with initializing ClassDefinitions when they are actually needed.
-class TransformationFaultHandler FINAL : public art::FaultHandler {
+class TransformationFaultHandler final : public art::FaultHandler {
public:
explicit TransformationFaultHandler(art::FaultManager* manager)
: art::FaultHandler(manager),
@@ -84,7 +84,7 @@ class TransformationFaultHandler FINAL : public art::FaultHandler {
uninitialized_class_definitions_.clear();
}
- bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+ bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) override {
DCHECK_EQ(sig, SIGSEGV);
art::Thread* self = art::Thread::Current();
if (UNLIKELY(uninitialized_class_definitions_lock_.IsExclusiveHeld(self))) {
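
TransformationFaultHandler is the on-demand side of the PROT_NONE dex_data_mmap_ scheme set up in ti_class_definition.cc: the first touch of the protected pages raises SIGSEGV, and the handler materializes the de-quickened data before the faulting access retries. A self-contained POSIX sketch of the same lazy-initialization technique (not ART code; assumes Linux signal semantics and a 4 KiB page size):

```cpp
#include <csignal>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

static void* g_lazy_page = nullptr;
static const size_t kPageSize = 4096;  // Assumed; query sysconf() in real code.

static void Handler(int, siginfo_t* info, void*) {
  char* addr = static_cast<char*>(info->si_addr);
  char* page = static_cast<char*>(g_lazy_page);
  if (addr < page || addr >= page + kPageSize) {
    _exit(1);  // Not our page; a real handler would chain to the previous one.
  }
  // Materialize the data: make the page writable, fill it, drop to read-only.
  mprotect(g_lazy_page, kPageSize, PROT_READ | PROT_WRITE);
  memset(g_lazy_page, 0x2A, kPageSize);
  mprotect(g_lazy_page, kPageSize, PROT_READ);
  // Returning retries the faulting load, which now succeeds.
}

int main() {
  g_lazy_page = mmap(nullptr, kPageSize, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  struct sigaction sa = {};
  sa.sa_sigaction = Handler;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, nullptr);
  return static_cast<const unsigned char*>(g_lazy_page)[0];  // Faults once.
}
```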
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index a15f7b88d8..02fc92533f 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -538,7 +538,7 @@ bool PatchOat::Patch(const std::string& image_location,
ScopedObjectAccess soa(Thread::Current());
std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
- std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>> space_to_memmap_map;
+ std::map<gc::space::ImageSpace*, MemMap> space_to_memmap_map;
for (size_t i = 0; i < spaces.size(); ++i) {
t.NewTiming("Image Patching setup");
@@ -567,15 +567,15 @@ bool PatchOat::Patch(const std::string& image_location,
// Create the map where we will write the image patches to.
std::string error_msg;
- std::unique_ptr<MemMap> image(MemMap::MapFile(image_len,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- input_image->Fd(),
- 0,
- /*low_4gb*/false,
- input_image->GetPath().c_str(),
- &error_msg));
- if (image.get() == nullptr) {
+ MemMap image = MemMap::MapFile(image_len,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ input_image->Fd(),
+ 0,
+ /*low_4gb*/false,
+ input_image->GetPath().c_str(),
+ &error_msg);
+ if (!image.IsValid()) {
LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
return false;
}
@@ -583,7 +583,7 @@ bool PatchOat::Patch(const std::string& image_location,
space_to_memmap_map.emplace(space, std::move(image));
PatchOat p = PatchOat(isa,
- space_to_memmap_map[space].get(),
+ &space_to_memmap_map[space],
space->GetLiveBitmap(),
space->GetMemMap(),
delta,
@@ -636,22 +636,22 @@ bool PatchOat::Patch(const std::string& image_location,
LOG(ERROR) << "Error while getting input image size";
return false;
}
- std::unique_ptr<MemMap> original(MemMap::MapFile(input_image_size,
- PROT_READ,
- MAP_PRIVATE,
- input_image->Fd(),
- 0,
- /*low_4gb*/false,
- input_image->GetPath().c_str(),
- &error_msg));
- if (original.get() == nullptr) {
+ MemMap original = MemMap::MapFile(input_image_size,
+ PROT_READ,
+ MAP_PRIVATE,
+ input_image->Fd(),
+ 0,
+ /*low_4gb*/false,
+ input_image->GetPath().c_str(),
+ &error_msg);
+ if (!original.IsValid()) {
LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
return false;
}
const MemMap* relocated = p.image_;
- if (!WriteRelFile(*original, *relocated, image_relocation_filename, &error_msg)) {
+ if (!WriteRelFile(original, *relocated, image_relocation_filename, &error_msg)) {
LOG(ERROR) << "Failed to create image relocation file " << image_relocation_filename
<< ": " << error_msg;
return false;
@@ -815,7 +815,7 @@ class PatchOat::PatchOatArtFieldVisitor : public ArtFieldVisitor {
public:
explicit PatchOatArtFieldVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
- void Visit(ArtField* field) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dest = patch_oat_->RelocatedCopyOf(field);
dest->SetDeclaringClass(
patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass().Ptr()));
@@ -834,7 +834,7 @@ class PatchOat::PatchOatArtMethodVisitor : public ArtMethodVisitor {
public:
explicit PatchOatArtMethodVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
- void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* const dest = patch_oat_->RelocatedCopyOf(method);
patch_oat_->FixupMethod(method, dest);
}
@@ -877,7 +877,7 @@ class PatchOat::FixupRootVisitor : public RootVisitor {
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
*roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
}
@@ -885,7 +885,7 @@ class PatchOat::FixupRootVisitor : public RootVisitor {
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
}
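
The space_to_memmap_map hunks show the same MemMap migration applied to a container: `std::map<ImageSpace*, MemMap>` now stores the mappings by value, moved in with `emplace`, and handed out by address instead of via `.get()`. A small standalone sketch (with `std::unique_ptr` standing in for the move-only MemMap) of the idiom:

```cpp
#include <map>
#include <memory>

struct Space {};  // Stand-in for gc::space::ImageSpace.

int main() {
  // unique_ptr<int> stands in for the move-only MemMap value type.
  std::map<Space*, std::unique_ptr<int>> m;
  Space s;
  m.emplace(&s, std::unique_ptr<int>(new int(42)));  // Move the handle in.

  // operator[] is usable because the mapped type is default-constructible
  // (a default MemMap is simply invalid); take its address to borrow it.
  std::unique_ptr<int>* borrowed = &m[&s];
  return **borrowed;  // 42
}
```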
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 2b1210b5b1..ac2fdf594d 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -74,7 +74,7 @@ class PatchOat {
// All pointers are only borrowed.
PatchOat(InstructionSet isa, MemMap* image,
gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta,
- std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>>* map, TimingLogger* timings)
+ std::map<gc::space::ImageSpace*, MemMap>* map, TimingLogger* timings)
: image_(image), bitmap_(bitmap), heap_(heap),
delta_(delta), isa_(isa), space_map_(map), timings_(timings) {}
@@ -139,7 +139,7 @@ class PatchOat {
if (image_space->Contains(obj)) {
uintptr_t heap_off = reinterpret_cast<uintptr_t>(obj) -
reinterpret_cast<uintptr_t>(image_space->GetMemMap()->Begin());
- return reinterpret_cast<T*>(space_map_->find(image_space)->second->Begin() + heap_off);
+ return reinterpret_cast<T*>(space_map_->find(image_space)->second.Begin() + heap_off);
}
}
LOG(FATAL) << "Did not find object in boot image space " << obj;
@@ -195,7 +195,7 @@ class PatchOat {
// Active instruction set, used to know the entrypoint size.
const InstructionSet isa_;
- const std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>>* space_map_;
+ const std::map<gc::space::ImageSpace*, MemMap>* space_map_;
TimingLogger* timings_;
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 370f59dc8a..286b6867a3 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -40,7 +40,7 @@ static constexpr size_t kMaxMethodIds = 65535;
class ProfileAssistantTest : public CommonRuntimeTest {
public:
- void PostRuntimeCreate() OVERRIDE {
+ void PostRuntimeCreate() override {
allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
}
diff --git a/profman/profman.cc b/profman/profman.cc
index 9b470973c6..cecd3c2d63 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -185,7 +185,7 @@ NO_RETURN static void Abort(const char* msg) {
// TODO(calin): This class has grown too much from its initial design. Split the functionality
// into smaller, more contained pieces.
-class ProfMan FINAL {
+class ProfMan final {
public:
ProfMan() :
reference_profile_file_fd_(kInvalidFd),
diff --git a/runtime/aot_class_linker.h b/runtime/aot_class_linker.h
index 927b53302b..6a8133efc1 100644
--- a/runtime/aot_class_linker.h
+++ b/runtime/aot_class_linker.h
@@ -34,14 +34,14 @@ class AotClassLinker : public ClassLinker {
Handle<mirror::Class> klass,
verifier::HardFailLogMode log_level,
std::string* error_msg)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_);
bool InitializeClass(Thread *self,
Handle<mirror::Class> klass,
bool can_run_clinit,
bool can_init_parents)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_);
};
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index d4ceede07a..d4dbbf9541 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -46,7 +46,7 @@ namespace art {
class ArchTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use 64-bit ISA for runtime setup to make method size potentially larger
// than necessary (rather than smaller) during CreateCalleeSaveMethod
options->push_back(std::make_pair("imageinstructionset", "x86_64"));
@@ -55,7 +55,7 @@ class ArchTest : public CommonRuntimeTest {
// Do not do any of the finalization. We don't want to run any code, we don't need the heap
// prepared, it actually will be a problem with setting the instruction set to x86_64 in
// SetUpRuntimeOptions.
- void FinalizeSetup() OVERRIDE {
+ void FinalizeSetup() override {
ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
}
};
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index b9802967fe..845cdaa100 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -26,7 +26,7 @@
namespace art {
namespace arm {
-class ArmContext FINAL : public Context {
+class ArmContext final : public Context {
public:
ArmContext() {
Reset();
@@ -34,55 +34,55 @@ class ArmContext FINAL : public Context {
virtual ~ArmContext() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
SetGPR(PC, new_pc);
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(R0, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfSRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfSRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
private:
// Pointers to register locations, initialized to null or the specific registers below.
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index f82534b511..d964148900 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -25,7 +25,7 @@ class ArmInstructionSetFeatures;
using ArmFeaturesUniquePtr = std::unique_ptr<const ArmInstructionSetFeatures>;
// Instruction set features relevant to the ARM architecture.
-class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
+class ArmInstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
static ArmFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -47,18 +47,18 @@ class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static ArmFeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+ bool HasAtLeast(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kArm;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
// Return a string of the form "div,lpae" or "none".
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Is the divide instruction feature enabled?
bool HasDivideInstruction() const {
@@ -82,7 +82,7 @@ class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "div", "lpae" adding these to a new ArmInstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
ArmInstructionSetFeatures(bool has_div,
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 8f56430a00..1153a772ee 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -656,18 +656,21 @@ ENTRY art_quick_osr_stub
END art_quick_osr_stub
/*
- * On entry r0 is uint32_t* gprs_ and r1 is uint32_t* fprs_
+ * On entry r0 is uint32_t* gprs_ and r1 is uint32_t* fprs_.
+ * Both must reside on the stack, between current SP and target SP.
+ * Register r12 (IP) shall be clobbered rather than restored from gprs_.
*/
ARM_ENTRY art_quick_do_long_jump
- vldm r1, {s0-s31} @ load all fprs from argument fprs_
- ldr r2, [r0, #60] @ r2 = r15 (PC from gprs_ 60=4*15)
- ldr r14, [r0, #56] @ (LR from gprs_ 56=4*14)
- add r0, r0, #12 @ increment r0 to skip gprs_[0..2] 12=4*3
- ldm r0, {r3-r13} @ load remaining gprs from argument gprs_
+ vldm r1, {s0-s31} @ Load all fprs from argument fprs_.
+ mov sp, r0 @ Make SP point to gprs_.
+ @ Do not access fprs_ from now on, they may be below SP.
+ ldm sp, {r0-r11} @ Load r0-r11 from gprs_.
+ ldr r12, [sp, #60] @ Load the value of PC (r15) from gprs_ (60 = 4 * 15) into IP (r12).
+ ldr lr, [sp, #56] @ Load LR from gprs_, 56 = 4 * 14.
+ ldr sp, [sp, #52] @ Load SP from gprs_ 52 = 4 * 13.
+ @ Do not access gprs_ from now on, they are below SP.
REFRESH_MARKING_REGISTER
- ldr r0, [r0, #-12] @ load r0 value
- mov r1, #0 @ clear result register r1
- bx r2 @ do long jump
+ bx r12 @ Do long jump.
END art_quick_do_long_jump
/*
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index 0f0814a675..16f4792e98 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -23,6 +23,12 @@
#include "quick/quick_method_frame_info.h"
#include "thread-current-inl.h"
+#if __has_feature(hwaddress_sanitizer)
+#include <sanitizer/hwasan_interface.h>
+#else
+#define __hwasan_handle_longjmp(sp)
+#endif
+
namespace art {
namespace arm64 {
@@ -139,6 +145,8 @@ void Arm64Context::DoLongJump() {
}
// Ensure the Thread Register contains the address of the current thread.
DCHECK_EQ(reinterpret_cast<uintptr_t>(Thread::Current()), gprs[TR]);
+ // Tell HWASan about the new stack top.
+ __hwasan_handle_longjmp(reinterpret_cast<void*>(gprs[SP]));
// The Marking Register will be updated by art_quick_do_long_jump.
art_quick_do_long_jump(gprs, fprs);
}
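
The context_arm64.cc hunk teaches HWASan about the stack switch: a long jump that abandons part of the stack must report the new stack top via `__hwasan_handle_longjmp()` (a real entry point in sanitizer/hwasan_interface.h) so the sanitizer can untag the discarded frames; otherwise later stack reuse can trip false positives. The `#else` branch stubs the call out when HWASan is off. A sketch of the guard pattern, adding the `!defined(__has_feature)` fallback that non-Clang builds generally need (ART can omit it because it builds with Clang) and a `(void)` cast to silence unused-argument warnings, both my additions:

```cpp
// __has_feature is a Clang extension; define a fallback so the same header
// also parses under compilers that lack it.
#if !defined(__has_feature)
#define __has_feature(x) 0
#endif

#if __has_feature(hwaddress_sanitizer)
#include <sanitizer/hwasan_interface.h>
#else
// No-op stub keeps the call sites unconditional when HWASan is disabled.
#define __hwasan_handle_longjmp(sp) ((void)(sp))
#endif
```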
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index e64cfb86ea..95dac90ac7 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -26,7 +26,7 @@
namespace art {
namespace arm64 {
-class Arm64Context FINAL : public Context {
+class Arm64Context final : public Context {
public:
Arm64Context() {
Reset();
@@ -34,56 +34,56 @@ class Arm64Context FINAL : public Context {
~Arm64Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_lr) OVERRIDE {
+ void SetPC(uintptr_t new_lr) override {
SetGPR(kPC, new_lr);
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(X0, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, arraysize(gprs_));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, arraysize(gprs_));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
// Note: PC isn't an available GPR (outside of internals), so don't allow retrieving the value.
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfXRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfDRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfDRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
static constexpr size_t kPC = kNumberOfXRegisters;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index af2d4c79f9..163a2d8eba 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -25,7 +25,7 @@ class Arm64InstructionSetFeatures;
using Arm64FeaturesUniquePtr = std::unique_ptr<const Arm64InstructionSetFeatures>;
// Instruction set features relevant to the ARM64 architecture.
-class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+class Arm64InstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
static Arm64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -47,16 +47,16 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static Arm64FeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kArm64;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
// Return a string of the form "a53" or "none".
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Generate code addressing Cortex-A53 erratum 835769?
bool NeedFixCortexA53_835769() const {
@@ -74,7 +74,7 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "a53" adding these to a new ArmInstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
Arm64InstructionSetFeatures(bool needs_a53_835769_fix, bool needs_a53_843419_fix)
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 40a8dbc008..96ceecfe9b 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -83,12 +83,10 @@
* Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
*/
.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- // art::Runtime** xIP0 = &art::Runtime::instance_
- adrp xIP0, :got:_ZN3art7Runtime9instance_E
- ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
-
+ // art::Runtime* xIP0 = art::Runtime::instance_;
// Our registers aren't intermixed - just spill in order.
- ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_;
+ adrp xIP0, _ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
// ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveAllCalleeSaves];
ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
@@ -127,12 +125,10 @@
* Runtime::CreateCalleeSaveMethod(kSaveRefsOnly).
*/
.macro SETUP_SAVE_REFS_ONLY_FRAME
- // art::Runtime** xIP0 = &art::Runtime::instance_
- adrp xIP0, :got:_ZN3art7Runtime9instance_E
- ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
-
+ // art::Runtime* xIP0 = art::Runtime::instance_;
// Our registers aren't intermixed - just spill in order.
- ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_;
+ adrp xIP0, _ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
// ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefsOnly];
ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]
@@ -227,12 +223,10 @@
* TODO This is probably too conservative - saving FP & LR.
*/
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME
- // art::Runtime** xIP0 = &art::Runtime::instance_
- adrp xIP0, :got:_ZN3art7Runtime9instance_E
- ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
-
+ // art::Runtime* xIP0 = art::Runtime::instance_;
// Our registers aren't intermixed - just spill in order.
- ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_;
+ adrp xIP0, _ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
// ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefsAndArgs];
ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]
@@ -331,11 +325,9 @@
SAVE_TWO_REGS x25, x26, 464
SAVE_TWO_REGS x27, x28, 480
- // art::Runtime** xIP0 = &art::Runtime::instance_
- adrp xIP0, :got:_ZN3art7Runtime9instance_E
- ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
-
- ldr xIP0, [xIP0] // art::Runtime* xIP0 = art::Runtime::instance_;
+ // art::Runtime* xIP0 = art::Runtime::instance_;
+ adrp xIP0, _ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
// ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveEverything];
ldr xIP0, [xIP0, \runtime_method_offset]
@@ -1091,56 +1083,59 @@ SAVE_SIZE=14*8 // x3, x4, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, FP
END art_quick_osr_stub
/*
- * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_
+ * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_.
+ * Both must reside on the stack, between current SP and target SP.
+ * IP0 and IP1 shall be clobbered rather than retrieved from gprs_.
*/
ENTRY art_quick_do_long_jump
// Load FPRs
- ldp d0, d1, [x1], #16
- ldp d2, d3, [x1], #16
- ldp d4, d5, [x1], #16
- ldp d6, d7, [x1], #16
- ldp d8, d9, [x1], #16
- ldp d10, d11, [x1], #16
- ldp d12, d13, [x1], #16
- ldp d14, d15, [x1], #16
- ldp d16, d17, [x1], #16
- ldp d18, d19, [x1], #16
- ldp d20, d21, [x1], #16
- ldp d22, d23, [x1], #16
- ldp d24, d25, [x1], #16
- ldp d26, d27, [x1], #16
- ldp d28, d29, [x1], #16
- ldp d30, d31, [x1]
-
- // Load GPRs
- // TODO: lots of those are smashed, could optimize.
- add x0, x0, #30*8
- ldp x30, x1, [x0], #-16 // LR & SP
- ldp x28, x29, [x0], #-16
- ldp x26, x27, [x0], #-16
- ldp x24, x25, [x0], #-16
- ldp x22, x23, [x0], #-16
- ldp x20, x21, [x0], #-16
- ldp x18, x19, [x0], #-16 // X18 & xSELF
- ldp x16, x17, [x0], #-16
- ldp x14, x15, [x0], #-16
- ldp x12, x13, [x0], #-16
- ldp x10, x11, [x0], #-16
- ldp x8, x9, [x0], #-16
- ldp x6, x7, [x0], #-16
- ldp x4, x5, [x0], #-16
- ldp x2, x3, [x0], #-16
- mov sp, x1
+ ldp d0, d1, [x1, #0]
+ ldp d2, d3, [x1, #16]
+ ldp d4, d5, [x1, #32]
+ ldp d6, d7, [x1, #48]
+ ldp d8, d9, [x1, #64]
+ ldp d10, d11, [x1, #80]
+ ldp d12, d13, [x1, #96]
+ ldp d14, d15, [x1, #112]
+ ldp d16, d17, [x1, #128]
+ ldp d18, d19, [x1, #144]
+ ldp d20, d21, [x1, #160]
+ ldp d22, d23, [x1, #176]
+ ldp d24, d25, [x1, #192]
+ ldp d26, d27, [x1, #208]
+ ldp d28, d29, [x1, #224]
+ ldp d30, d31, [x1, #240]
+
+ // Load GPRs. Delay loading x0, x1 because x0 is used as gprs_.
+ ldp x2, x3, [x0, #16]
+ ldp x4, x5, [x0, #32]
+ ldp x6, x7, [x0, #48]
+ ldp x8, x9, [x0, #64]
+ ldp x10, x11, [x0, #80]
+ ldp x12, x13, [x0, #96]
+ ldp x14, x15, [x0, #112]
+ // Do not load IP0 (x16) and IP1 (x17), these shall be clobbered below.
+ ldp x18, x19, [x0, #144] // X18 and xSELF.
+ ldp x20, x21, [x0, #160] // For Baker RB, wMR (w20) is reloaded below.
+ ldp x22, x23, [x0, #176]
+ ldp x24, x25, [x0, #192]
+ ldp x26, x27, [x0, #208]
+ ldp x28, x29, [x0, #224]
+ ldp x30, xIP0, [x0, #240] // LR and SP, load SP to IP0.
+
+ // Load PC to IP1, it's at the end (after the space for the unused XZR).
+ ldr xIP1, [x0, #33*8]
+
+ // Load x0, x1.
+ ldp x0, x1, [x0, #0]
+
+ // Set SP. Do not access fprs_ and gprs_ from now, they are below SP.
+ mov sp, xIP0
REFRESH_MARKING_REGISTER
- // Need to load PC, it's at the end (after the space for the unused XZR). Use x1.
- ldr x1, [x0, #33*8]
- // And the value of x0.
- ldr x0, [x0]
-
- br x1
+ br xIP1
END art_quick_do_long_jump
/*
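Two things change in quick_entrypoints_arm64.S above: the runtime instance is now addressed directly with an adrp/ldr :lo12: pair instead of indirecting through the GOT (saving one load in each frame-setup macro), and art_quick_do_long_jump now restores registers at fixed positive offsets, deliberately clobbering the scratch registers IP0/IP1 to hold the target SP and PC. The C++ caller has to uphold the contract stated in the entry comment. A simplified, hypothetical sketch of that caller side (the real code is Arm64Context::DoLongJump; the array layout follows the offsets above, with SP at index 31 and PC at index 33):

#include <cstdint>

extern "C" void art_quick_do_long_jump(uintptr_t* gprs, uint64_t* fprs);  // does not return

void DoLongJumpSketch(uintptr_t* gpr_ptrs[34], uint64_t* fpr_ptrs[32]) {
  // Local arrays live on the current stack, so they sit between the current
  // SP and the target SP, as the stub requires; they become dead the moment
  // the stub installs the new SP.
  uintptr_t gprs[34];  // x0..x30, SP, a slot for the unused XZR, then PC at 33.
  uint64_t fprs[32];   // d0..d31.
  for (int i = 0; i < 34; ++i) {
    // Null entries (e.g. IP0/IP1, which the stub clobbers anyway) read as 0.
    gprs[i] = (gpr_ptrs[i] != nullptr) ? *gpr_ptrs[i] : 0;
  }
  for (int i = 0; i < 32; ++i) {
    fprs[i] = (fpr_ptrs[i] != nullptr) ? *fpr_ptrs[i] : 0;
  }
  art_quick_do_long_jump(gprs, fprs);  // never returns
}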
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index 7e073b288a..960aea1fcd 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -33,53 +33,53 @@ class MipsContext : public Context {
}
virtual ~MipsContext() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
SetGPR(T9, new_pc);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(A0, new_arg0_value);
}
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 76bc639277..ab5bb3c101 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -28,7 +28,7 @@ class MipsInstructionSetFeatures;
using MipsFeaturesUniquePtr = std::unique_ptr<const MipsInstructionSetFeatures>;
// Instruction set features relevant to the MIPS architecture.
-class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
+class MipsInstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "r4000" and create InstructionSetFeatures.
static MipsFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -50,15 +50,15 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static MipsFeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kMips;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Is this an ISA revision greater than or equal to 2, opening up new opcodes?
bool IsMipsIsaRevGreaterThanEqual2() const {
@@ -87,7 +87,7 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
MipsInstructionSetFeatures(bool fpu_32bit, bool mips_isa_gte2, bool r6, bool msa)
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index b2a6138471..857abfd2b8 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -33,53 +33,53 @@ class Mips64Context : public Context {
}
virtual ~Mips64Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(SP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
SetGPR(T9, new_pc);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(A0, new_arg0_value);
}
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
index 27e544ed91..e204d9de83 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ b/runtime/arch/mips64/instruction_set_features_mips64.h
@@ -25,7 +25,7 @@ class Mips64InstructionSetFeatures;
using Mips64FeaturesUniquePtr = std::unique_ptr<const Mips64InstructionSetFeatures>;
// Instruction set features relevant to the MIPS64 architecture.
-class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+class Mips64InstructionSetFeatures final : public InstructionSetFeatures {
public:
// Process a CPU variant string like "r4000" and create InstructionSetFeatures.
static Mips64FeaturesUniquePtr FromVariant(const std::string& variant,
@@ -48,15 +48,15 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static Mips64FeaturesUniquePtr FromAssembly();
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kMips64;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
// Does it have MSA (MIPS SIMD Architecture) support.
bool HasMsa() const {
@@ -69,7 +69,7 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
// Parse a vector of the form "fpu32", "mips2" adding these to a new Mips64InstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE;
+ std::string* error_msg) const override;
private:
explicit Mips64InstructionSetFeatures(bool msa) : InstructionSetFeatures(), msa_(msa) {
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index b0c0e43e35..e8df90eccd 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -37,7 +37,7 @@ namespace art {
class StubTest : public CommonRuntimeTest {
protected:
// We need callee-save methods set up in the Runtime for exceptions.
- void SetUp() OVERRIDE {
+ void SetUp() override {
// Do the normal setup.
CommonRuntimeTest::SetUp();
@@ -54,7 +54,7 @@ class StubTest : public CommonRuntimeTest {
}
}
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use a smaller heap
for (std::pair<std::string, const void*>& pair : *options) {
if (pair.first.find("-Xmx") == 0) {
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index 0ebb22bd6d..5b438c3623 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -26,62 +26,62 @@
namespace art {
namespace x86 {
-class X86Context FINAL : public Context {
+class X86Context final : public Context {
public:
X86Context() {
Reset();
}
virtual ~X86Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(ESP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
eip_ = new_pc;
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(EAX, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
private:
// Pretend XMM registers are made of uint32_t pieces, because they are manipulated
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 57cf4b2741..6bd626319e 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -49,17 +49,17 @@ class X86InstructionSetFeatures : public InstructionSetFeatures {
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
static X86FeaturesUniquePtr FromAssembly(bool x86_64 = false);
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+ bool Equals(const InstructionSetFeatures* other) const override;
- bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+ bool HasAtLeast(const InstructionSetFeatures* other) const override;
- virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kX86;
}
- uint32_t AsBitmap() const OVERRIDE;
+ uint32_t AsBitmap() const override;
- std::string GetFeatureString() const OVERRIDE;
+ std::string GetFeatureString() const override;
virtual ~X86InstructionSetFeatures() {}
@@ -69,9 +69,9 @@ class X86InstructionSetFeatures : public InstructionSetFeatures {
protected:
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
- virtual std::unique_ptr<const InstructionSetFeatures>
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
return AddFeaturesFromSplitString(features, false, error_msg);
}
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index d242693f81..ab38614c98 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -26,62 +26,62 @@
namespace art {
namespace x86_64 {
-class X86_64Context FINAL : public Context {
+class X86_64Context final : public Context {
public:
X86_64Context() {
Reset();
}
virtual ~X86_64Context() {}
- void Reset() OVERRIDE;
+ void Reset() override;
- void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
- void SetSP(uintptr_t new_sp) OVERRIDE {
+ void SetSP(uintptr_t new_sp) override {
SetGPR(RSP, new_sp);
}
- void SetPC(uintptr_t new_pc) OVERRIDE {
+ void SetPC(uintptr_t new_pc) override {
rip_ = new_pc;
}
- void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+ void SetArg0(uintptr_t new_arg0_value) override {
SetGPR(RDI, new_arg0_value);
}
- bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg] != nullptr;
}
- uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+ uintptr_t* GetGPRAddress(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
return gprs_[reg];
}
- uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetGPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
DCHECK(IsAccessibleGPR(reg));
return *gprs_[reg];
}
- void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetGPR(uint32_t reg, uintptr_t value) override;
- bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+ bool IsAccessibleFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
return fprs_[reg] != nullptr;
}
- uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+ uintptr_t GetFPR(uint32_t reg) override {
DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
DCHECK(IsAccessibleFPR(reg));
return *fprs_[reg];
}
- void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+ void SetFPR(uint32_t reg, uintptr_t value) override;
- void SmashCallerSaves() OVERRIDE;
- NO_RETURN void DoLongJump() OVERRIDE;
+ void SmashCallerSaves() override;
+ NO_RETURN void DoLongJump() override;
private:
// Pointers to register locations. Values are initialized to null or the special registers below.
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
index e76490ba13..76258fa5d4 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64.h
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -25,7 +25,7 @@ class X86_64InstructionSetFeatures;
using X86_64FeaturesUniquePtr = std::unique_ptr<const X86_64InstructionSetFeatures>;
// Instruction set features relevant to the X86_64 architecture.
-class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
+class X86_64InstructionSetFeatures final : public X86InstructionSetFeatures {
public:
// Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
static X86_64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg) {
@@ -59,7 +59,7 @@ class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
return Convert(X86InstructionSetFeatures::FromAssembly(true));
}
- InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const override {
return InstructionSet::kX86_64;
}
@@ -69,7 +69,7 @@ class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const std::vector<std::string>& features,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
return X86InstructionSetFeatures::AddFeaturesFromSplitString(features, true, error_msg);
}
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 123595c6fe..5afd000b05 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -40,7 +40,7 @@ class Object;
class String;
} // namespace mirror
-class ArtField FINAL {
+class ArtField final {
public:
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index ce08cb0bea..48ddc6992d 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -66,7 +66,7 @@ using MethodDexCachePair = NativeDexCachePair<ArtMethod>;
using MethodDexCacheType = std::atomic<MethodDexCachePair>;
} // namespace mirror
-class ArtMethod FINAL {
+class ArtMethod final {
public:
// Should the class state be checked on sensitive operations?
DECLARE_RUNTIME_DEBUG_FLAG(kCheckDeclaringClassState);
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 702f0e453b..851c23f1cb 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -31,29 +31,40 @@
namespace art {
-class MemMapArena FINAL : public Arena {
+class MemMapArena final : public Arena {
public:
MemMapArena(size_t size, bool low_4gb, const char* name);
virtual ~MemMapArena();
- void Release() OVERRIDE;
+ void Release() override;
private:
- std::unique_ptr<MemMap> map_;
+ static MemMap Allocate(size_t size, bool low_4gb, const char* name);
+
+ MemMap map_;
};
-MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
+MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name)
+ : map_(Allocate(size, low_4gb, name)) {
+ memory_ = map_.Begin();
+ static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
+ "Arena should not need stronger alignment than kPageSize.");
+ DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
+ size_ = map_.Size();
+}
+
+MemMap MemMapArena::Allocate(size_t size, bool low_4gb, const char* name) {
// Round up to a full page as that's the smallest unit of allocation for mmap()
// and we want to be able to use all memory that we actually allocate.
size = RoundUp(size, kPageSize);
std::string error_msg;
- map_.reset(MemMap::MapAnonymous(
- name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
- CHECK(map_.get() != nullptr) << error_msg;
- memory_ = map_->Begin();
- static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
- "Arena should not need stronger alignment than kPageSize.");
- DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
- size_ = map_->Size();
+ MemMap map = MemMap::MapAnonymous(name,
+ /* addr */ nullptr,
+ size,
+ PROT_READ | PROT_WRITE,
+ low_4gb,
+ &error_msg);
+ CHECK(map.IsValid()) << error_msg;
+ return map;
}
MemMapArena::~MemMapArena() {
@@ -62,7 +73,7 @@ MemMapArena::~MemMapArena() {
void MemMapArena::Release() {
if (bytes_allocated_ > 0) {
- map_->MadviseDontNeedAndZero();
+ map_.MadviseDontNeedAndZero();
bytes_allocated_ = 0;
}
}
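The MemMapArena hunks show art::MemMap migrating from a heap-allocated object behind std::unique_ptr to a movable value type with an explicit IsValid() check, produced by a static helper in the constructor's initializer list. A hedged sketch of that idiom, with a hypothetical Mapping type standing in for MemMap and std::malloc standing in for the real mmap():

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <string>

class Mapping {
 public:
  static Mapping Allocate(size_t size, std::string* error_msg) {
    void* mem = std::malloc(size);
    if (mem == nullptr) {
      *error_msg = "allocation failed";
    }
    return Mapping(static_cast<uint8_t*>(mem), size);
  }
  Mapping(Mapping&& other) : begin_(other.begin_), size_(other.size_) {
    other.begin_ = nullptr;  // a moved-from mapping becomes invalid
  }
  Mapping(const Mapping&) = delete;
  ~Mapping() { std::free(begin_); }
  bool IsValid() const { return begin_ != nullptr; }
  uint8_t* Begin() const { return begin_; }
  size_t Size() const { return size_; }
 private:
  Mapping(uint8_t* begin, size_t size) : begin_(begin), size_(size) {}
  uint8_t* begin_;
  size_t size_;
};

class ArenaSketch {
 public:
  explicit ArenaSketch(size_t size) : map_(Mapping::Allocate(size, &error_)) {
    // As in the hunk: members derived from the map are set only after the
    // initializer list has produced a mapping that can be validity-checked.
    memory_ = map_.IsValid() ? map_.Begin() : nullptr;
    size_ = map_.Size();
  }
 private:
  std::string error_;  // declared before map_ so &error_ is valid in time
  Mapping map_;
  uint8_t* memory_ = nullptr;
  size_t size_ = 0;
};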
diff --git a/runtime/base/mem_map_arena_pool.h b/runtime/base/mem_map_arena_pool.h
index 24e150e1e7..e98ef07ddb 100644
--- a/runtime/base/mem_map_arena_pool.h
+++ b/runtime/base/mem_map_arena_pool.h
@@ -21,17 +21,17 @@
namespace art {
-class MemMapArenaPool FINAL : public ArenaPool {
+class MemMapArenaPool final : public ArenaPool {
public:
explicit MemMapArenaPool(bool low_4gb = false, const char* name = "LinearAlloc");
virtual ~MemMapArenaPool();
- Arena* AllocArena(size_t size) OVERRIDE;
- void FreeArenaChain(Arena* first) OVERRIDE;
- size_t GetBytesAllocated() const OVERRIDE;
- void ReclaimMemory() OVERRIDE;
- void LockReclaimMemory() OVERRIDE;
+ Arena* AllocArena(size_t size) override;
+ void FreeArenaChain(Arena* first) override;
+ size_t GetBytesAllocated() const override;
+ void ReclaimMemory() override;
+ void LockReclaimMemory() override;
// Trim the maps in arenas by madvising, used by JIT to reduce memory usage.
- void TrimMaps() OVERRIDE;
+ void TrimMaps() override;
private:
const bool low_4gb_;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 7b888b18d9..28b29125cd 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -125,7 +125,7 @@ static void BackOff(uint32_t i) {
}
}
-class ScopedAllMutexesLock FINAL {
+class ScopedAllMutexesLock final {
public:
explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
for (uint32_t i = 0;
@@ -144,7 +144,7 @@ class ScopedAllMutexesLock FINAL {
const BaseMutex* const mutex_;
};
-class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
+class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
public:
explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
for (uint32_t i = 0;
@@ -166,7 +166,7 @@ class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
};
// Scoped class that generates events at the beginning and end of lock contention.
-class ScopedContentionRecorder FINAL : public ValueObject {
+class ScopedContentionRecorder final : public ValueObject {
public:
ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
: mutex_(kLogLockContentions ? mutex : nullptr),
@@ -1142,10 +1142,6 @@ void Locks::Init() {
DCHECK(subtype_check_lock_ == nullptr);
subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
- DCHECK(cha_lock_ == nullptr);
- cha_lock_ = new Mutex("CHA lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
DCHECK(classlinker_classes_lock_ == nullptr);
classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
@@ -1226,6 +1222,10 @@ void Locks::Init() {
DCHECK(custom_tls_lock_ == nullptr);
custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
+ DCHECK(cha_lock_ == nullptr);
+ cha_lock_ = new Mutex("CHA lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
DCHECK(native_debug_interface_lock_ == nullptr);
native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index af2e7b2763..d127d0f01f 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -72,6 +72,7 @@ enum LockLevel : uint8_t {
kJdwpSocketLock,
kRegionSpaceRegionLock,
kMarkSweepMarkStackLock,
+ kCHALock,
kJitCodeCacheLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
@@ -109,7 +110,6 @@ enum LockLevel : uint8_t {
kMonitorPoolLock,
kClassLinkerClassesLock, // TODO rename.
kDexToDexCompilerLock,
- kCHALock,
kSubtypeCheckLock,
kBreakpointLock,
kMonitorLock,
@@ -297,7 +297,7 @@ class LOCKABLE Mutex : public BaseMutex {
// For negative capabilities in clang annotations.
const Mutex& operator!() const { return *this; }
- void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+ void WakeupToRespondToEmptyCheckpoint() override;
private:
#if ART_USE_FUTEXES
@@ -418,7 +418,7 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
// For negative capabilities in clang annotations.
const ReaderWriterMutex& operator!() const { return *this; }
- void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+ void WakeupToRespondToEmptyCheckpoint() override;
private:
#if ART_USE_FUTEXES
@@ -661,14 +661,11 @@ class Locks {
// TODO: improve name, perhaps instrumentation_update_lock_.
static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
- // Guards Class Hierarchy Analysis (CHA).
- static Mutex* cha_lock_ ACQUIRED_AFTER(deoptimization_lock_);
-
// Guard the update of the SubtypeCheck data stores in each Class::status_ field.
// This lock is used in SubtypeCheck methods which are the interface for
// any SubtypeCheck-mutating methods.
// In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
- static Mutex* subtype_check_lock_ ACQUIRED_AFTER(cha_lock_);
+ static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);
// The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
// attaching and detaching.
@@ -745,11 +742,14 @@ class Locks {
// GetThreadLocalStorage.
static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
+ // Guards Class Hierarchy Analysis (CHA).
+ static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
// When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
// actually only encodes the mutex being below jni_function_table_lock_ although having
// kGenericBottomLock level is lower than this.
- #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::custom_tls_lock_)
+ #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
// Have an exclusive aborting thread.
static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
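The mutex hunks move kCHALock from the class-linker region of the LockLevel enum down to just above kJitCodeCacheLock, and re-point BOTTOM_MUTEX_ACQUIRED_AFTER at cha_lock_: the CHA lock becomes one of the lowest-level locks, so it may legitimately be taken while higher-level locks such as the JIT code-cache lock are held (the cha.cc change below explains why). A minimal sketch of the ACQUIRED_AFTER idiom with hypothetical locks; the macro definitions are assumptions modeled on clang's thread-safety attributes, not ART's exact macros, and clang may emit -Wthread-safety-attributes notes on such a toy type:

#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define ACQUIRED_AFTER(...)
#endif

struct CAPABILITY("mutex") FakeMutex {};

FakeMutex custom_tls_lock;
// Mirrors the hunk above: cha_lock is now ordered after custom_tls_lock.
FakeMutex cha_lock ACQUIRED_AFTER(custom_tls_lock);
// Annotalysis only understands pairwise edges, so the "bottom" anchor is
// re-pointed at whichever lock is currently lowest in the hierarchy.
#define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(cha_lock)
FakeMutex any_future_bottom_lock BOTTOM_MUTEX_ACQUIRED_AFTER;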
diff --git a/runtime/cha.cc b/runtime/cha.cc
index ccbe066ed6..3ea920dff1 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -181,7 +181,7 @@ void ClassHierarchyAnalysis::ResetSingleImplementationInHierarchy(ObjPtr<mirror:
// headers, sets the should_deoptimize flag on stack to 1.
// TODO: also set the register value to 1 when should_deoptimize is allocated in
// a register.
-class CHAStackVisitor FINAL : public StackVisitor {
+class CHAStackVisitor final : public StackVisitor {
public:
CHAStackVisitor(Thread* thread_in,
Context* context,
@@ -190,7 +190,7 @@ class CHAStackVisitor FINAL : public StackVisitor {
method_headers_(method_headers) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
// Avoid types of methods that do not have an oat quick method header.
if (method == nullptr ||
@@ -245,13 +245,13 @@ class CHAStackVisitor FINAL : public StackVisitor {
DISALLOW_COPY_AND_ASSIGN(CHAStackVisitor);
};
-class CHACheckpoint FINAL : public Closure {
+class CHACheckpoint final : public Closure {
public:
explicit CHACheckpoint(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
: barrier_(0),
method_headers_(method_headers) {}
- void Run(Thread* thread) OVERRIDE {
+ void Run(Thread* thread) override {
// Note thread and self may not be equal if thread was already suspended at
// the point of the request.
Thread* self = Thread::Current();
@@ -636,38 +636,54 @@ void ClassHierarchyAnalysis::InvalidateSingleImplementationMethods(
// We do this under cha_lock_. Committing code also grabs this lock to
// make sure the code is only committed when all single-implementation
// assumptions are still true.
- MutexLock cha_mu(self, *Locks::cha_lock_);
- // Invalidate compiled methods that assume some virtual calls have only
- // single implementations.
- for (ArtMethod* invalidated : invalidated_single_impl_methods) {
- if (!invalidated->HasSingleImplementation()) {
- // It might have been invalidated already when other class linking is
- // going on.
- continue;
- }
- invalidated->SetHasSingleImplementation(false);
- if (invalidated->IsAbstract()) {
- // Clear the single implementation method.
- invalidated->SetSingleImplementation(nullptr, image_pointer_size);
- }
+ std::vector<std::pair<ArtMethod*, OatQuickMethodHeader*>> headers;
+ {
+ MutexLock cha_mu(self, *Locks::cha_lock_);
+ // Invalidate compiled methods that assume some virtual calls have only
+ // single implementations.
+ for (ArtMethod* invalidated : invalidated_single_impl_methods) {
+ if (!invalidated->HasSingleImplementation()) {
+ // It might have been invalidated already when other class linking is
+ // going on.
+ continue;
+ }
+ invalidated->SetHasSingleImplementation(false);
+ if (invalidated->IsAbstract()) {
+ // Clear the single implementation method.
+ invalidated->SetSingleImplementation(nullptr, image_pointer_size);
+ }
- if (runtime->IsAotCompiler()) {
- // No need to invalidate any compiled code as the AotCompiler doesn't
- // run any code.
- continue;
- }
+ if (runtime->IsAotCompiler()) {
+ // No need to invalidate any compiled code as the AotCompiler doesn't
+ // run any code.
+ continue;
+ }
- // Invalidate all dependents.
- for (const auto& dependent : GetDependents(invalidated)) {
- ArtMethod* method = dependent.first;;
- OatQuickMethodHeader* method_header = dependent.second;
- VLOG(class_linker) << "CHA invalidated compiled code for " << method->PrettyMethod();
- DCHECK(runtime->UseJitCompilation());
- runtime->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor(
- method, method_header);
- dependent_method_headers.insert(method_header);
+ // Invalidate all dependents.
+ for (const auto& dependent : GetDependents(invalidated)) {
+ ArtMethod* method = dependent.first;
+ OatQuickMethodHeader* method_header = dependent.second;
+ VLOG(class_linker) << "CHA invalidated compiled code for " << method->PrettyMethod();
+ DCHECK(runtime->UseJitCompilation());
+ // We need to call JitCodeCache::InvalidateCompiledCodeFor but we cannot do it here
+ // since it would run into problems with lock-ordering. We don't want to re-order the
+ // locks since that would make code-commit racy.
+ headers.push_back({method, method_header});
+ dependent_method_headers.insert(method_header);
+ }
+ RemoveAllDependenciesFor(invalidated);
+ }
+ }
+ // Since we are still loading the class that invalidated the code, it is fine to do this after
+ // removing the dependency. Any calls would use the old version (the new one is not loaded yet),
+ // which still works fine. We will deoptimize just after this to ensure everything gets the new
+ // state.
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit::JitCodeCache* code_cache = jit->GetCodeCache();
+ for (const auto& pair : headers) {
+ code_cache->InvalidateCompiledCodeFor(pair.first, pair.second);
}
- RemoveAllDependenciesFor(invalidated);
}
}
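The cha.cc rewrite is a two-phase pattern: collect the (method, method header) pairs while holding cha_lock_, then call JitCodeCache::InvalidateCompiledCodeFor only after releasing it, because that call takes the code-cache lock, which the commit path acquires before cha_lock_. A self-contained sketch of the pattern with hypothetical types (std::mutex stands in for ART's Mutex):

#include <mutex>
#include <utility>
#include <vector>

struct Method {};
struct Header {};

struct CodeCache {
  std::mutex lock;
  void InvalidateCompiledCodeFor(Method* /*method*/, Header* /*header*/) {
    std::lock_guard<std::mutex> g(lock);
    // ... patch the method header, etc.
  }
};

void InvalidateSketch(std::mutex& cha_lock,
                      CodeCache& cache,
                      const std::vector<std::pair<Method*, Header*>>& dependents) {
  std::vector<std::pair<Method*, Header*>> headers;
  {
    std::lock_guard<std::mutex> mu(cha_lock);  // phase 1: update CHA state, record work.
    headers = dependents;                      // stand-in for walking GetDependents().
  }
  // Phase 2: cha_lock is released, so taking the code-cache lock here cannot
  // run against the commit path, which takes the code-cache lock first and
  // then cha_lock.
  for (const auto& p : headers) {
    cache.InvalidateCompiledCodeFor(p.first, p.second);
  }
}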
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f80d34ca2f..65f05d9362 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -884,7 +884,7 @@ class SetInterpreterEntrypointArtMethodVisitor : public ArtMethodVisitor {
explicit SetInterpreterEntrypointArtMethodVisitor(PointerSize image_pointer_size)
: image_pointer_size_(image_pointer_size) {}
- void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild && !method->IsRuntimeMethod()) {
CHECK(method->GetDeclaringClass() != nullptr);
}
@@ -1390,7 +1390,7 @@ bool ClassLinker::OpenImageDexFiles(gc::space::ImageSpace* space,
// Helper class for ArtMethod checks when adding an image. Keeps all required functionality
// together and caches some intermediate results.
-class ImageSanityChecks FINAL {
+class ImageSanityChecks final {
public:
static void CheckObjects(gc::Heap* heap, ClassLinker* class_linker)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1951,7 +1951,7 @@ class VisitClassLoaderClassesVisitor : public ClassLoaderVisitor {
done_(false) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
if (!done_ && class_table != nullptr) {
DefiningClassLoaderFilterVisitor visitor(class_loader, visitor_);
@@ -1972,7 +1972,7 @@ class VisitClassLoaderClassesVisitor : public ClassLoaderVisitor {
ClassVisitor* visitor)
: defining_class_loader_(defining_class_loader), visitor_(visitor) { }
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (klass->GetClassLoader() != defining_class_loader_) {
return true;
}
@@ -2009,7 +2009,7 @@ void ClassLinker::VisitClasses(ClassVisitor* visitor) {
class GetClassesInToVector : public ClassVisitor {
public:
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE {
+ bool operator()(ObjPtr<mirror::Class> klass) override {
classes_.push_back(klass);
return true;
}
@@ -2021,7 +2021,7 @@ class GetClassInToObjectArray : public ClassVisitor {
explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr)
: arr_(arr), index_(0) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
++index_;
if (index_ <= arr_->GetLength()) {
arr_->Set(index_ - 1, klass);
@@ -3845,7 +3845,7 @@ class MoveClassTableToPreZygoteVisitor : public ClassLoaderVisitor {
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES(Locks::classlinker_classes_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
class_table->FreezeSnapshot();
@@ -3871,7 +3871,7 @@ class LookupClassesVisitor : public ClassLoaderVisitor {
result_(result) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
ObjPtr<mirror::Class> klass = class_table->Lookup(descriptor_, hash_);
// Add `klass` only if `class_loader` is its defining (not just initiating) class loader.
@@ -5563,7 +5563,7 @@ bool ClassLinker::LinkMethods(Thread* self,
// Comparator for name and signature of a method, used in finding overriding methods. Implementation
// avoids the use of handles, if it didn't then rather than compare dex files we could compare dex
// caches in the implementation below.
-class MethodNameAndSignatureComparator FINAL : public ValueObject {
+class MethodNameAndSignatureComparator final : public ValueObject {
public:
explicit MethodNameAndSignatureComparator(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) :
@@ -8555,7 +8555,7 @@ class CountClassesVisitor : public ClassLoaderVisitor {
CountClassesVisitor() : num_zygote_classes(0), num_non_zygote_classes(0) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
num_zygote_classes += class_table->NumZygoteClasses(class_loader);
@@ -8825,7 +8825,7 @@ class GetResolvedClassesVisitor : public ClassVisitor {
extra_stats_(),
last_extra_stats_(extra_stats_.end()) { }
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (!klass->IsProxyClass() &&
!klass->IsArrayClass() &&
klass->IsResolved() &&
@@ -8913,7 +8913,7 @@ class ClassLinker::FindVirtualMethodHolderVisitor : public ClassVisitor {
: method_(method),
pointer_size_(pointer_size) {}
- bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) override {
if (klass->GetVirtualMethodsSliceUnchecked(pointer_size_).Contains(method_)) {
holder_ = klass;
}
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index e40f1dbcdf..ab7182a75e 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -442,7 +442,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
class TestRootVisitor : public SingleRootVisitor {
public:
- void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE {
+ void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) override {
EXPECT_TRUE(root != nullptr);
}
};
@@ -450,7 +450,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
class ClassLinkerMethodHandlesTest : public ClassLinkerTest {
protected:
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonRuntimeTest::SetUpRuntimeOptions(options);
}
};
diff --git a/runtime/class_root.h b/runtime/class_root.h
index 4aa9801ab4..19a78b1ffb 100644
--- a/runtime/class_root.h
+++ b/runtime/class_root.h
@@ -165,18 +165,19 @@ CLASS_ROOT_LIST(SPECIALIZE_CLASS_ROOT_SELECTOR)
template <class MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
inline ObjPtr<mirror::Class> GetClassRoot(ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetClassRoot<kWithReadBarrier>(detail::ClassRootSelector<MirrorType>::value, class_roots);
+ return GetClassRoot<kReadBarrierOption>(detail::ClassRootSelector<MirrorType>::value,
+ class_roots);
}
template <class MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
inline ObjPtr<mirror::Class> GetClassRoot(ClassLinker* linker)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetClassRoot<kWithReadBarrier>(detail::ClassRootSelector<MirrorType>::value, linker);
+ return GetClassRoot<kReadBarrierOption>(detail::ClassRootSelector<MirrorType>::value, linker);
}
template <class MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
inline ObjPtr<mirror::Class> GetClassRoot() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetClassRoot<kWithReadBarrier>(detail::ClassRootSelector<MirrorType>::value);
+ return GetClassRoot<kReadBarrierOption>(detail::ClassRootSelector<MirrorType>::value);
}
} // namespace art
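The class_root.h hunk fixes a template-forwarding bug: the convenience wrappers hardcoded kWithReadBarrier instead of forwarding their own kReadBarrierOption parameter, so a caller asking for the no-read-barrier path silently got the read-barrier one. A minimal hypothetical repro of the bug class, unrelated to ART's actual types:

#include <iostream>

enum ReadBarrierOption { kWithReadBarrier, kWithoutReadBarrier };

template <ReadBarrierOption kOpt>
int Load() { return kOpt == kWithReadBarrier ? 1 : 2; }

template <ReadBarrierOption kOpt = kWithReadBarrier>
int LoadViaWrapperBuggy() { return Load<kWithReadBarrier>(); }  // ignores kOpt

template <ReadBarrierOption kOpt = kWithReadBarrier>
int LoadViaWrapperFixed() { return Load<kOpt>(); }              // forwards kOpt

int main() {
  std::cout << LoadViaWrapperBuggy<kWithoutReadBarrier>()   // prints 1: wrong path
            << LoadViaWrapperFixed<kWithoutReadBarrier>();  // prints 2: as requested
}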
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 234b66a862..a5157df36b 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -157,11 +157,11 @@ class CommonRuntimeTestBase : public TestType, public CommonRuntimeTestImpl {
virtual ~CommonRuntimeTestBase() {}
protected:
- virtual void SetUp() OVERRIDE {
+ void SetUp() override {
CommonRuntimeTestImpl::SetUp();
}
- virtual void TearDown() OVERRIDE {
+ void TearDown() override {
CommonRuntimeTestImpl::TearDown();
}
};
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 60975b04f7..012ebcbe1c 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -25,7 +25,7 @@
namespace art {
-class CompilerFilter FINAL {
+class CompilerFilter final {
public:
// Note: Order here matters. Later filter choices are considered "as good
// as" earlier filter choices.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e607b31e68..366b5ec5e9 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -138,7 +138,7 @@ static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
return os;
}
-class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
+class DebugInstrumentationListener final : public instrumentation::InstrumentationListener {
public:
DebugInstrumentationListener() {}
virtual ~DebugInstrumentationListener() {}
@@ -147,7 +147,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
@@ -176,7 +176,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
ArtMethod* method,
uint32_t dex_pc,
const JValue& return_value)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
@@ -195,7 +195,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method,
uint32_t dex_pc)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected method unwind event in debugger " << ArtMethod::PrettyMethod(method)
<< " " << dex_pc;
@@ -205,7 +205,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t new_dex_pc)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
// We also listen to kMethodExited instrumentation event and the current instruction is a
// RETURN so we know the MethodExited method is going to be called right after us. Like in
@@ -229,7 +229,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
ArtMethod* method,
uint32_t dex_pc,
ArtField* field)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostFieldAccessEvent(method, dex_pc, this_object.Get(), field);
}
@@ -239,19 +239,19 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
uint32_t dex_pc,
ArtField* field,
const JValue& field_value)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object.Get(), field, &field_value);
}
void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
Handle<mirror::Throwable> exception_object)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostException(exception_object.Get());
}
// We only care about branches in the Jit.
void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected branch event in debugger " << ArtMethod::PrettyMethod(method)
<< " " << dex_pc << ", " << dex_pc_offset;
}
@@ -262,20 +262,20 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
ArtMethod* method,
uint32_t dex_pc,
ArtMethod* target ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected invoke event in debugger " << ArtMethod::PrettyMethod(method)
<< " " << dex_pc;
}
// TODO Might be worth it to post ExceptionCatch event.
void ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
- Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) OVERRIDE {
+ Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) override {
LOG(ERROR) << "Unexpected exception handled event in debugger";
}
// TODO Might be worth it to implement this.
void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED,
- const ShadowFrame& frame ATTRIBUTE_UNUSED) OVERRIDE {
+ const ShadowFrame& frame ATTRIBUTE_UNUSED) override {
LOG(ERROR) << "Unexpected WatchedFramePop event in debugger";
}
@@ -1087,7 +1087,7 @@ class ClassListCreator : public ClassVisitor {
public:
explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
- bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (!c->IsPrimitive()) {
classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
}
@@ -2450,7 +2450,7 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram
expandBufAdd4BE(buf_, frame_count_);
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true; // The debugger can't do anything useful with a frame that has no Method*.
}
@@ -2608,7 +2608,7 @@ JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame
}
// Walks the stack until we find the frame with the given FrameId.
-class FindFrameVisitor FINAL : public StackVisitor {
+class FindFrameVisitor final : public StackVisitor {
public:
FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -3040,7 +3040,7 @@ class CatchLocationFinder : public StackVisitor {
throw_dex_pc_(dex::kDexNoIndex) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
DCHECK(method != nullptr);
if (method->IsRuntimeMethod()) {
@@ -3693,7 +3693,7 @@ class NeedsDeoptimizationVisitor : public StackVisitor {
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
needs_deoptimization_(false) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
// The visitor is meant to be used when handling exception from compiled code only.
CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: "
<< ArtMethod::PrettyMethod(GetMethod());
diff --git a/runtime/debugger.h b/runtime/debugger.h
index e1de991812..33444f829c 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -54,20 +54,20 @@ class StackVisitor;
class Thread;
struct DebuggerActiveMethodInspectionCallback : public MethodInspectionCallback {
- bool IsMethodBeingInspected(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsMethodSafeToJit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool MethodNeedsDebugVersion(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsMethodBeingInspected(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsMethodSafeToJit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool MethodNeedsDebugVersion(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
};
struct DebuggerDdmCallback : public DdmCallback {
void DdmPublishChunk(uint32_t type, const ArrayRef<const uint8_t>& data)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ override REQUIRES_SHARED(Locks::mutator_lock_);
};
struct InternalDebuggerControlCallback : public DebuggerControlCallback {
- void StartDebugger() OVERRIDE;
- void StopDebugger() OVERRIDE;
- bool IsDebuggerConfigured() OVERRIDE;
+ void StartDebugger() override;
+ void StopDebugger() override;
+ bool IsDebuggerConfigured() override;
};
/*
@@ -831,15 +831,15 @@ class Dbg {
class DbgThreadLifecycleCallback : public ThreadLifecycleCallback {
public:
- void ThreadStart(Thread* self) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- void ThreadDeath(Thread* self) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
+ void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
};
class DbgClassLoadCallback : public ClassLoadCallback {
public:
- void ClassLoad(Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void ClassLoad(Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
void ClassPrepare(Handle<mirror::Class> temp_klass,
- Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
};
static DbgThreadLifecycleCallback thread_lifecycle_callback_;
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 00a95cc7bd..2cbf557c1f 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -42,7 +42,7 @@ namespace art {
// Test class that provides some helpers to set a test up for compilation using dex2oat.
class Dex2oatEnvironmentTest : public CommonRuntimeTest {
public:
- virtual void SetUp() OVERRIDE {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
const ArtDexFileLoader dex_file_loader;
@@ -106,7 +106,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
ASSERT_NE(multi1[1]->GetLocationChecksum(), multi2[1]->GetLocationChecksum());
}
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
// options->push_back(std::make_pair("-verbose:oat", nullptr));
// Set up the image location.
@@ -117,7 +117,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
callbacks_.reset();
}
- virtual void TearDown() OVERRIDE {
+ void TearDown() override {
ClearDirectory(odex_dir_.c_str());
ASSERT_EQ(0, rmdir(odex_dir_.c_str()));
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index f8388f315d..93af77fe23 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -20,6 +20,8 @@
#include <backtrace/BacktraceMap.h>
#include <gtest/gtest.h>
+#include "android-base/stringprintf.h"
+#include "android-base/strings.h"
#include "base/file_utils.h"
#include "base/mem_map.h"
#include "common_runtime_test.h"
@@ -27,6 +29,7 @@
#include "dex2oat_environment_test.h"
#include "dexopt_test.h"
#include "gc/space/image_space.h"
+#include "hidden_api.h"
namespace art {
void DexoptTest::SetUp() {
@@ -45,6 +48,46 @@ void DexoptTest::PostRuntimeCreate() {
ReserveImageSpace();
}
+static std::string ImageLocation() {
+ Runtime* runtime = Runtime::Current();
+ const std::vector<gc::space::ImageSpace*>& image_spaces =
+ runtime->GetHeap()->GetBootImageSpaces();
+ if (image_spaces.empty()) {
+ return "";
+ }
+ return image_spaces[0]->GetImageLocation();
+}
+
+bool DexoptTest::Dex2Oat(const std::vector<std::string>& args, std::string* error_msg) {
+ Runtime* runtime = Runtime::Current();
+
+ std::vector<std::string> argv;
+ argv.push_back(runtime->GetCompilerExecutable());
+ if (runtime->IsJavaDebuggable()) {
+ argv.push_back("--debuggable");
+ }
+ runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
+
+ if (runtime->GetHiddenApiEnforcementPolicy() != hiddenapi::EnforcementPolicy::kNoChecks) {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xhidden-api-checks");
+ }
+
+ if (!kIsTargetBuild) {
+ argv.push_back("--host");
+ }
+
+ argv.push_back("--boot-image=" + ImageLocation());
+
+ std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
+ argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
+
+ argv.insert(argv.end(), args.begin(), args.end());
+
+ std::string command_line(android::base::Join(argv, ' '));
+ return Exec(argv, error_msg);
+}
+
void DexoptTest::GenerateOatForTest(const std::string& dex_location,
const std::string& oat_location_in,
CompilerFilter::Filter filter,
@@ -96,7 +139,7 @@ void DexoptTest::GenerateOatForTest(const std::string& dex_location,
}
std::string error_msg;
- ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
+ ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
if (!relocate) {
// Restore the dalvik cache if needed.
@@ -108,11 +151,11 @@ void DexoptTest::GenerateOatForTest(const std::string& dex_location,
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
oat_location.c_str(),
oat_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
EXPECT_EQ(pic, odex_file->IsPic());
@@ -249,14 +292,16 @@ void DexoptTest::ReserveImageSpace() {
void DexoptTest::ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) {
if (start < end) {
std::string error_msg;
- image_reservation_.push_back(std::unique_ptr<MemMap>(
- MemMap::MapAnonymous("image reservation",
- reinterpret_cast<uint8_t*>(start), end - start,
- PROT_NONE, false, false, &error_msg)));
- ASSERT_TRUE(image_reservation_.back().get() != nullptr) << error_msg;
+ image_reservation_.push_back(MemMap::MapAnonymous("image reservation",
+ reinterpret_cast<uint8_t*>(start),
+ end - start,
+ PROT_NONE,
+ /* low_4gb */ false,
+ &error_msg));
+ ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
LOG(INFO) << "Reserved space for image " <<
- reinterpret_cast<void*>(image_reservation_.back()->Begin()) << "-" <<
- reinterpret_cast<void*>(image_reservation_.back()->End());
+ reinterpret_cast<void*>(image_reservation_.back().Begin()) << "-" <<
+ reinterpret_cast<void*>(image_reservation_.back().End());
}
}
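
The hunk above moves the dex2oat invocation out of OatFileAssistant and into the test itself: DexoptTest::Dex2Oat derives the argv from the current runtime (compiler executable, --debuggable, hidden-api checks, boot image) and shells out via Exec. A minimal sketch of how a test can drive the new helper; the dex and oat paths are hypothetical, not taken from the patch:

    std::vector<std::string> args;
    args.push_back("--dex-file=/data/local/tmp/Test.dex");    // hypothetical input
    args.push_back("--oat-file=/data/local/tmp/Test.odex");   // hypothetical output
    args.push_back("--compiler-filter=speed");
    std::string error_msg;
    ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;      // as in GenerateOatForTest above
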
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 6e8dc097d5..5dff379a32 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -26,11 +26,11 @@ namespace art {
class DexoptTest : public Dex2oatEnvironmentTest {
public:
- virtual void SetUp() OVERRIDE;
+ void SetUp() override;
virtual void PreRuntimeCreate();
- virtual void PostRuntimeCreate() OVERRIDE;
+ void PostRuntimeCreate() override;
// Generate an oat file for the purposes of test.
// The oat file will be generated for dex_location in the given oat_location
@@ -71,6 +71,8 @@ class DexoptTest : public Dex2oatEnvironmentTest {
// Generate a standard oat file in the oat location.
void GenerateOatForTest(const char* dex_location, CompilerFilter::Filter filter);
+ static bool Dex2Oat(const std::vector<std::string>& args, std::string* error_msg);
+
private:
// Pre-Relocate the image to a known non-zero offset so we don't have to
// deal with the runtime randomly relocating the image by 0 and messing up
@@ -91,7 +93,7 @@ class DexoptTest : public Dex2oatEnvironmentTest {
// before the image is loaded.
void UnreserveImageSpace();
- std::vector<std::unique_ptr<MemMap>> image_reservation_;
+ std::vector<MemMap> image_reservation_;
};
} // namespace art
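
With image_reservation_ now a std::vector<MemMap> instead of a vector of std::unique_ptr<MemMap>, mappings are handled by value and validity is queried with IsValid() rather than a null check. A minimal sketch of the new idiom, using the MapAnonymous signature shown in the dexopt_test.cc hunk above:

    std::vector<MemMap> reservations;
    std::string error_msg;
    MemMap map = MemMap::MapAnonymous("example reservation",
                                      /* addr */ nullptr,
                                      kPageSize,
                                      PROT_NONE,
                                      /* low_4gb */ false,
                                      &error_msg);
    if (map.IsValid()) {
      reservations.push_back(std::move(map));  // transfers ownership; `map` becomes invalid
    }
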
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 026b5da748..e7715c4934 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -37,9 +37,7 @@ namespace art {
using android::base::StringPrintf;
template <typename ElfTypes>
-ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable,
- bool program_header_only,
- uint8_t* requested_base)
+ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable, bool program_header_only)
: writable_(writable),
program_header_only_(program_header_only),
header_(nullptr),
@@ -54,8 +52,7 @@ ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable,
dynstr_section_start_(nullptr),
hash_section_start_(nullptr),
symtab_symbol_table_(nullptr),
- dynsym_symbol_table_(nullptr),
- requested_base_(requested_base) {
+ dynsym_symbol_table_(nullptr) {
CHECK(file != nullptr);
}
@@ -64,10 +61,9 @@ ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(File* file,
bool writable,
bool program_header_only,
bool low_4gb,
- std::string* error_msg,
- uint8_t* requested_base) {
- std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
- (file, writable, program_header_only, requested_base));
+ std::string* error_msg) {
+ std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(
+ new ElfFileImpl<ElfTypes>(file, writable, program_header_only));
int prot;
int flags;
if (writable) {
@@ -89,9 +85,8 @@ ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(File* file,
int flags,
bool low_4gb,
std::string* error_msg) {
- std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
- (file, (prot & PROT_WRITE) == PROT_WRITE, /*program_header_only*/false,
- /*requested_base*/nullptr));
+ std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(
+ new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only */ false));
if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) {
return nullptr;
}
@@ -283,7 +278,6 @@ bool ElfFileImpl<ElfTypes>::Setup(File* file,
template <typename ElfTypes>
ElfFileImpl<ElfTypes>::~ElfFileImpl() {
- STLDeleteElements(&segments_);
delete symtab_symbol_table_;
delete dynsym_symbol_table_;
}
@@ -418,17 +412,17 @@ template <typename ElfTypes>
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::SetMap(File* file, MemMap* map, std::string* error_msg) {
- if (map == nullptr) {
+bool ElfFileImpl<ElfTypes>::SetMap(File* file, MemMap&& map, std::string* error_msg) {
+ if (!map.IsValid()) {
// MemMap::Open should have already set an error.
DCHECK(!error_msg->empty());
return false;
}
- map_.reset(map);
- CHECK(map_.get() != nullptr) << file->GetPath();
- CHECK(map_->Begin() != nullptr) << file->GetPath();
+ map_ = std::move(map);
+ CHECK(map_.IsValid()) << file->GetPath();
+ CHECK(map_.Begin() != nullptr) << file->GetPath();
- header_ = reinterpret_cast<Elf_Ehdr*>(map_->Begin());
+ header_ = reinterpret_cast<Elf_Ehdr*>(map_.Begin());
if ((ELFMAG0 != header_->e_ident[EI_MAG0])
|| (ELFMAG1 != header_->e_ident[EI_MAG1])
|| (ELFMAG2 != header_->e_ident[EI_MAG2])
@@ -685,9 +679,7 @@ template <typename ElfTypes>
typename ElfTypes::Phdr* ElfFileImpl<ElfTypes>::GetProgramHeader(Elf_Word i) const {
CHECK_LT(i, GetProgramHeaderNum()) << file_path_; // Sanity check for caller.
uint8_t* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
- if (program_header >= End()) {
- return nullptr; // Failure condition.
- }
+ CHECK_LT(program_header, End());
return reinterpret_cast<Elf_Phdr*>(program_header);
}
@@ -1028,9 +1020,17 @@ typename ElfTypes::Rela& ElfFileImpl<ElfTypes>::GetRela(Elf_Shdr& section_header
return *(GetRelaSectionStart(section_header) + i);
}
-// Base on bionic phdr_table_get_load_size
template <typename ElfTypes>
bool ElfFileImpl<ElfTypes>::GetLoadedSize(size_t* size, std::string* error_msg) const {
+ uint8_t* vaddr_begin;
+ return GetLoadedAddressRange(&vaddr_begin, size, error_msg);
+}
+
+// Based on bionic phdr_table_get_load_size
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::GetLoadedAddressRange(/*out*/uint8_t** vaddr_begin,
+ /*out*/size_t* vaddr_size,
+ /*out*/std::string* error_msg) const {
Elf_Addr min_vaddr = static_cast<Elf_Addr>(-1);
Elf_Addr max_vaddr = 0u;
for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
@@ -1049,7 +1049,8 @@ bool ElfFileImpl<ElfTypes>::GetLoadedSize(size_t* size, std::string* error_msg)
<< program_header->p_vaddr << "+0x" << program_header->p_memsz << "=0x" << end_vaddr
<< " in ELF file \"" << file_path_ << "\"";
*error_msg = oss.str();
- *size = static_cast<size_t>(-1);
+ *vaddr_begin = nullptr;
+ *vaddr_size = static_cast<size_t>(-1);
return false;
}
if (end_vaddr > max_vaddr) {
@@ -1059,17 +1060,19 @@ bool ElfFileImpl<ElfTypes>::GetLoadedSize(size_t* size, std::string* error_msg)
min_vaddr = RoundDown(min_vaddr, kPageSize);
max_vaddr = RoundUp(max_vaddr, kPageSize);
CHECK_LT(min_vaddr, max_vaddr) << file_path_;
- Elf_Addr loaded_size = max_vaddr - min_vaddr;
- // Check that the loaded_size fits in size_t.
- if (UNLIKELY(loaded_size > std::numeric_limits<size_t>::max())) {
+ // Check that the range fits into the runtime address space.
+ if (UNLIKELY(max_vaddr - 1u > std::numeric_limits<size_t>::max())) {
std::ostringstream oss;
- oss << "Loaded size is 0x" << std::hex << loaded_size << " but maximum size_t is 0x"
- << std::numeric_limits<size_t>::max() << " for ELF file \"" << file_path_ << "\"";
+ oss << "Loaded range is 0x" << std::hex << min_vaddr << "-0x" << max_vaddr
+ << " but maximum size_t is 0x" << std::numeric_limits<size_t>::max()
+ << " for ELF file \"" << file_path_ << "\"";
*error_msg = oss.str();
- *size = static_cast<size_t>(-1);
+ *vaddr_begin = nullptr;
+ *vaddr_size = static_cast<size_t>(-1);
return false;
}
- *size = loaded_size;
+ *vaddr_begin = reinterpret_cast<uint8_t*>(min_vaddr);
+ *vaddr_size = dchecked_integral_cast<size_t>(max_vaddr - min_vaddr);
return true;
}
@@ -1100,7 +1103,8 @@ template <typename ElfTypes>
bool ElfFileImpl<ElfTypes>::Load(File* file,
bool executable,
bool low_4gb,
- std::string* error_msg) {
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
CHECK(program_header_only_) << file->GetPath();
if (executable) {
@@ -1116,11 +1120,6 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
bool reserved = false;
for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
Elf_Phdr* program_header = GetProgramHeader(i);
- if (program_header == nullptr) {
- *error_msg = StringPrintf("No program header for entry %d in ELF file %s.",
- i, file->GetPath().c_str());
- return false;
- }
// Record .dynamic header information for later use
if (program_header->p_type == PT_DYNAMIC) {
@@ -1151,42 +1150,39 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
}
size_t file_length = static_cast<size_t>(temp_file_length);
if (!reserved) {
- uint8_t* reserve_base = reinterpret_cast<uint8_t*>(program_header->p_vaddr);
- uint8_t* reserve_base_override = reserve_base;
- // Override the base (e.g. when compiling with --compile-pic)
- if (requested_base_ != nullptr) {
- reserve_base_override = requested_base_;
- }
- std::string reservation_name("ElfFile reservation for ");
- reservation_name += file->GetPath();
- size_t loaded_size;
- if (!GetLoadedSize(&loaded_size, error_msg)) {
+ uint8_t* vaddr_begin;
+ size_t vaddr_size;
+ if (!GetLoadedAddressRange(&vaddr_begin, &vaddr_size, error_msg)) {
DCHECK(!error_msg->empty());
return false;
}
- std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
- reserve_base_override,
- loaded_size,
- PROT_NONE,
- low_4gb,
- false,
- error_msg));
- if (reserve.get() == nullptr) {
+ std::string reservation_name = "ElfFile reservation for " + file->GetPath();
+ MemMap local_reservation = MemMap::MapAnonymous(
+ reservation_name.c_str(),
+ (reservation != nullptr) ? reservation->Begin() : nullptr,
+ vaddr_size,
+ PROT_NONE,
+ low_4gb,
+ /* reuse */ false,
+ reservation,
+ error_msg);
+ if (!local_reservation.IsValid()) {
*error_msg = StringPrintf("Failed to allocate %s: %s",
- reservation_name.c_str(), error_msg->c_str());
+ reservation_name.c_str(),
+ error_msg->c_str());
return false;
}
reserved = true;
- // Base address is the difference of actual mapped location and the p_vaddr
- base_address_ = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(reserve->Begin())
- - reinterpret_cast<uintptr_t>(reserve_base));
+ // Base address is the difference of actual mapped location and the vaddr_begin.
+ base_address_ = reinterpret_cast<uint8_t*>(
+ static_cast<uintptr_t>(local_reservation.Begin() - vaddr_begin));
// By adding the p_vaddr of a section/symbol to base_address_ we will always get the
// dynamic memory address of where that object is actually mapped
//
// TODO: base_address_ needs to be calculated in ::Open, otherwise
// FindDynamicSymbolAddress returns the wrong values until Load is called.
- segments_.push_back(reserve.release());
+ segments_.push_back(std::move(local_reservation));
}
// empty segment, nothing to map
if (program_header->p_memsz == 0) {
@@ -1234,50 +1230,54 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
return false;
}
if (program_header->p_filesz != 0u) {
- std::unique_ptr<MemMap> segment(
+ MemMap segment =
MemMap::MapFileAtAddress(p_vaddr,
program_header->p_filesz,
prot,
flags,
file->Fd(),
program_header->p_offset,
- /*low4_gb*/false,
- /*reuse*/true, // implies MAP_FIXED
+ /* low_4gb */ false,
file->GetPath().c_str(),
- error_msg));
- if (segment.get() == nullptr) {
+ /* reuse */ true, // implies MAP_FIXED
+ /* reservation */ nullptr,
+ error_msg);
+ if (!segment.IsValid()) {
*error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
i, file->GetPath().c_str(), error_msg->c_str());
return false;
}
- if (segment->Begin() != p_vaddr) {
+ if (segment.Begin() != p_vaddr) {
*error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, "
"instead mapped to %p",
- i, file->GetPath().c_str(), p_vaddr, segment->Begin());
+ i, file->GetPath().c_str(), p_vaddr, segment.Begin());
return false;
}
- segments_.push_back(segment.release());
+ segments_.push_back(std::move(segment));
}
if (program_header->p_filesz < program_header->p_memsz) {
std::string name = StringPrintf("Zero-initialized segment %" PRIu64 " of ELF file %s",
static_cast<uint64_t>(i), file->GetPath().c_str());
- std::unique_ptr<MemMap> segment(
- MemMap::MapAnonymous(name.c_str(),
- p_vaddr + program_header->p_filesz,
- program_header->p_memsz - program_header->p_filesz,
- prot, false, true /* reuse */, error_msg));
- if (segment == nullptr) {
+ MemMap segment = MemMap::MapAnonymous(name.c_str(),
+ p_vaddr + program_header->p_filesz,
+ program_header->p_memsz - program_header->p_filesz,
+ prot,
+ /* low_4gb */ false,
+ /* reuse */ true,
+ /* reservation */ nullptr,
+ error_msg);
+ if (!segment.IsValid()) {
*error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
i, file->GetPath().c_str(), error_msg->c_str());
return false;
}
- if (segment->Begin() != p_vaddr) {
+ if (segment.Begin() != p_vaddr) {
*error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s "
"at expected address %p, instead mapped to %p",
- i, file->GetPath().c_str(), p_vaddr, segment->Begin());
+ i, file->GetPath().c_str(), p_vaddr, segment.Begin());
return false;
}
- segments_.push_back(segment.release());
+ segments_.push_back(std::move(segment));
}
}
@@ -1343,9 +1343,8 @@ bool ElfFileImpl<ElfTypes>::Load(File* file,
template <typename ElfTypes>
bool ElfFileImpl<ElfTypes>::ValidPointer(const uint8_t* start) const {
- for (size_t i = 0; i < segments_.size(); ++i) {
- const MemMap* segment = segments_[i];
- if (segment->Begin() <= start && start < segment->End()) {
+ for (const MemMap& segment : segments_) {
+ if (segment.Begin() <= start && start < segment.End()) {
return true;
}
}
@@ -1705,32 +1704,30 @@ ElfFile* ElfFile::Open(File* file,
bool writable,
bool program_header_only,
bool low_4gb,
- std::string* error_msg,
- uint8_t* requested_base) {
+ /*out*/std::string* error_msg) {
if (file->GetLength() < EI_NIDENT) {
*error_msg = StringPrintf("File %s is too short to be a valid ELF file",
file->GetPath().c_str());
return nullptr;
}
- std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT,
- PROT_READ,
- MAP_PRIVATE,
- file->Fd(),
- 0,
- low_4gb,
- file->GetPath().c_str(),
- error_msg));
- if (map == nullptr || map->Size() != EI_NIDENT) {
+ MemMap map = MemMap::MapFile(EI_NIDENT,
+ PROT_READ,
+ MAP_PRIVATE,
+ file->Fd(),
+ 0,
+ low_4gb,
+ file->GetPath().c_str(),
+ error_msg);
+ if (!map.IsValid() || map.Size() != EI_NIDENT) {
return nullptr;
}
- uint8_t* header = map->Begin();
+ uint8_t* header = map.Begin();
if (header[EI_CLASS] == ELFCLASS64) {
ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file,
writable,
program_header_only,
low_4gb,
- error_msg,
- requested_base);
+ error_msg);
if (elf_file_impl == nullptr) {
return nullptr;
}
@@ -1740,8 +1737,7 @@ ElfFile* ElfFile::Open(File* file,
writable,
program_header_only,
low_4gb,
- error_msg,
- requested_base);
+ error_msg);
if (elf_file_impl == nullptr) {
return nullptr;
}
@@ -1755,7 +1751,7 @@ ElfFile* ElfFile::Open(File* file,
}
}
-ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, std::string* error_msg) {
+ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, /*out*/std::string* error_msg) {
// low_4gb support not required for this path.
constexpr bool low_4gb = false;
if (file->GetLength() < EI_NIDENT) {
@@ -1763,18 +1759,18 @@ ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, std::string* e
file->GetPath().c_str());
return nullptr;
}
- std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT,
- PROT_READ,
- MAP_PRIVATE,
- file->Fd(),
- 0,
- low_4gb,
- file->GetPath().c_str(),
- error_msg));
- if (map == nullptr || map->Size() != EI_NIDENT) {
+ MemMap map = MemMap::MapFile(EI_NIDENT,
+ PROT_READ,
+ MAP_PRIVATE,
+ file->Fd(),
+ /* start */ 0,
+ low_4gb,
+ file->GetPath().c_str(),
+ error_msg);
+ if (!map.IsValid() || map.Size() != EI_NIDENT) {
return nullptr;
}
- uint8_t* header = map->Begin();
+ uint8_t* header = map.Begin();
if (header[EI_CLASS] == ELFCLASS64) {
ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file,
mmap_prot,
@@ -1812,8 +1808,12 @@ ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, std::string* e
return elf32_->func(__VA_ARGS__); \
}
-bool ElfFile::Load(File* file, bool executable, bool low_4gb, std::string* error_msg) {
- DELEGATE_TO_IMPL(Load, file, executable, low_4gb, error_msg);
+bool ElfFile::Load(File* file,
+ bool executable,
+ bool low_4gb,
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
+ DELEGATE_TO_IMPL(Load, file, executable, low_4gb, reservation, error_msg);
}
const uint8_t* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
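
The core of the elf_file.cc change replaces the caller-supplied requested_base with an address range computed from the ELF file itself: GetLoadedSize() is now a thin wrapper over GetLoadedAddressRange(), which scans the PT_LOAD program headers for the minimum and maximum virtual addresses, page-aligns both ends, and reports them as a begin pointer plus size. Distilled from the hunks above (overflow and error handling omitted):

    Elf_Addr min_vaddr = static_cast<Elf_Addr>(-1);
    Elf_Addr max_vaddr = 0u;
    for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
      Elf_Phdr* ph = GetProgramHeader(i);
      if (ph->p_type != PT_LOAD) {
        continue;  // only loadable segments define the mapped range
      }
      min_vaddr = std::min<Elf_Addr>(min_vaddr, ph->p_vaddr);
      max_vaddr = std::max<Elf_Addr>(max_vaddr, ph->p_vaddr + ph->p_memsz);
    }
    min_vaddr = RoundDown(min_vaddr, kPageSize);
    max_vaddr = RoundUp(max_vaddr, kPageSize);
    *vaddr_begin = reinterpret_cast<uint8_t*>(min_vaddr);
    *vaddr_size = dchecked_integral_cast<size_t>(max_vaddr - min_vaddr);

Load() then anchors its PROT_NONE reservation at reservation->Begin() when the caller passed one, or lets the kernel choose otherwise, and records the delta to vaddr_begin as base_address_.
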
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index ab9e6fa2ec..8da7e1a29c 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -26,6 +26,9 @@
#include "./elf.h"
namespace art {
+
+class MemMap;
+
template <typename ElfTypes>
class ElfFileImpl;
@@ -42,18 +45,21 @@ class ElfFile {
bool writable,
bool program_header_only,
bool low_4gb,
- std::string* error_msg,
- uint8_t* requested_base = nullptr); // TODO: move arg to before error_msg.
+ /*out*/std::string* error_msg);
// Open with specific mmap flags, Always maps in the whole file, not just the
// program header sections.
static ElfFile* Open(File* file,
int mmap_prot,
int mmap_flags,
- std::string* error_msg);
+ /*out*/std::string* error_msg);
~ElfFile();
// Load segments into memory based on PT_LOAD program headers
- bool Load(File* file, bool executable, bool low_4gb, std::string* error_msg);
+ bool Load(File* file,
+ bool executable,
+ bool low_4gb,
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg);
const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name) const;
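
For callers, the new Load() contract is: pass a non-null /*inout*/ MemMap reservation to have the segments carved out of an existing mapping, or nullptr to keep the previous behavior. A hedged usage sketch built only from the signatures above (the `file` variable is assumed to be an open File*):

    std::string error_msg;
    std::unique_ptr<ElfFile> elf(ElfFile::Open(file,
                                               /* writable */ false,
                                               /* program_header_only */ true,
                                               /* low_4gb */ false,
                                               &error_msg));
    if (elf != nullptr &&
        elf->Load(file, /* executable */ false, /* low_4gb */ false,
                  /* reservation */ nullptr, &error_msg)) {
      // PT_LOAD segments are now mapped; with a non-null reservation they would
      // have been placed inside it instead of a fresh anonymous mapping.
    }
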
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index a5808e27ba..b55b60f2dc 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -48,13 +48,12 @@ class ElfFileImpl {
bool writable,
bool program_header_only,
bool low_4gb,
- std::string* error_msg,
- uint8_t* requested_base = nullptr);
+ /*out*/std::string* error_msg);
static ElfFileImpl* Open(File* file,
int mmap_prot,
int mmap_flags,
bool low_4gb,
- std::string* error_msg);
+ /*out*/std::string* error_msg);
~ElfFileImpl();
const std::string& GetFilePath() const {
@@ -62,15 +61,15 @@ class ElfFileImpl {
}
uint8_t* Begin() const {
- return map_->Begin();
+ return map_.Begin();
}
uint8_t* End() const {
- return map_->End();
+ return map_.End();
}
size_t Size() const {
- return map_->Size();
+ return map_.Size();
}
Elf_Ehdr& GetHeader() const;
@@ -115,7 +114,11 @@ class ElfFileImpl {
// Load segments into memory based on PT_LOAD program headers.
// executable is true at run time, false at compile time.
- bool Load(File* file, bool executable, bool low_4gb, std::string* error_msg);
+ bool Load(File* file,
+ bool executable,
+ bool low_4gb,
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg);
bool Fixup(Elf_Addr base_address);
bool FixupDynamic(Elf_Addr base_address);
@@ -131,11 +134,15 @@ class ElfFileImpl {
bool Strip(File* file, std::string* error_msg);
private:
- ElfFileImpl(File* file, bool writable, bool program_header_only, uint8_t* requested_base);
+ ElfFileImpl(File* file, bool writable, bool program_header_only);
+
+ bool GetLoadedAddressRange(/*out*/uint8_t** vaddr_begin,
+ /*out*/size_t* vaddr_size,
+ /*out*/std::string* error_msg) const;
bool Setup(File* file, int prot, int flags, bool low_4gb, std::string* error_msg);
- bool SetMap(File* file, MemMap* map, std::string* error_msg);
+ bool SetMap(File* file, MemMap&& map, std::string* error_msg);
uint8_t* GetProgramHeadersStart() const;
uint8_t* GetSectionHeadersStart() const;
@@ -193,9 +200,9 @@ class ElfFileImpl {
// ELF header mapping. If program_header_only_ is false, will
// actually point to the entire elf file.
- std::unique_ptr<MemMap> map_;
+ MemMap map_;
Elf_Ehdr* header_;
- std::vector<MemMap*> segments_;
+ std::vector<MemMap> segments_;
// Pointer to start of first PT_LOAD program segment after Load()
// when program_header_only_ is true.
@@ -217,9 +224,6 @@ class ElfFileImpl {
SymbolTable* symtab_symbol_table_;
SymbolTable* dynsym_symbol_table_;
- // Override the 'base' p_vaddr in the first LOAD segment with this value (if non-null).
- uint8_t* requested_base_;
-
DISALLOW_COPY_AND_ASSIGN(ElfFileImpl);
};
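
Internally, ElfFileImpl now stores the header mapping and the loaded segments by value (MemMap map_; std::vector<MemMap> segments_), so SetMap() takes its argument by rvalue reference and the destructor no longer needs STLDeleteElements. A condensed restatement of the new SetMap() from the elf_file.cc hunk:

    bool SetMap(File* file, MemMap&& map, std::string* error_msg) {
      if (!map.IsValid()) {
        return false;                // the MemMap factory already set *error_msg
      }
      map_ = std::move(map);         // value member replaces std::unique_ptr<MemMap>
      header_ = reinterpret_cast<Elf_Ehdr*>(map_.Begin());
      // ... ELF magic and remaining header validation continue as before ...
      return true;
    }
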
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index aca169b924..fccfce4589 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -615,13 +615,13 @@ extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
}
// Visits arguments on the stack placing them into the shadow frame.
-class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
+class BuildQuickShadowFrameVisitor final : public QuickArgumentVisitor {
public:
BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
private:
ShadowFrame* const sf_;
@@ -707,7 +707,7 @@ static void HandleDeoptimization(JValue* result,
explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
// Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
// logic. Just always say we want to continue.
return true;
@@ -824,13 +824,13 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
// to jobjects.
-class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
+class BuildQuickArgumentVisitor final : public QuickArgumentVisitor {
public:
BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
private:
ScopedObjectAccessUnchecked* const soa_;
@@ -959,7 +959,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
// Visitor returning a reference argument at a given position in a Quick stack frame.
// NOTE: Only used for testing purposes.
-class GetQuickReferenceArgumentAtVisitor FINAL : public QuickArgumentVisitor {
+class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor {
public:
GetQuickReferenceArgumentAtVisitor(ArtMethod** sp,
const char* shorty,
@@ -972,7 +972,7 @@ class GetQuickReferenceArgumentAtVisitor FINAL : public QuickArgumentVisitor {
CHECK_LT(arg_pos, shorty_len) << "Argument position greater than the number arguments";
}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
if (cur_pos_ == arg_pos_) {
Primitive::Type type = GetParamPrimitiveType();
CHECK_EQ(type, Primitive::kPrimNot) << "Argument at searched position is not a reference";
@@ -1014,7 +1014,7 @@ extern "C" StackReference<mirror::Object>* artQuickGetProxyReferenceArgumentAt(s
}
// Visitor returning all the reference arguments in a Quick stack frame.
-class GetQuickReferenceArgumentsVisitor FINAL : public QuickArgumentVisitor {
+class GetQuickReferenceArgumentsVisitor final : public QuickArgumentVisitor {
public:
GetQuickReferenceArgumentsVisitor(ArtMethod** sp,
bool is_static,
@@ -1022,7 +1022,7 @@ class GetQuickReferenceArgumentsVisitor FINAL : public QuickArgumentVisitor {
uint32_t shorty_len)
: QuickArgumentVisitor(sp, is_static, shorty, shorty_len) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
Primitive::Type type = GetParamPrimitiveType();
if (type == Primitive::kPrimNot) {
StackReference<mirror::Object>* ref_arg =
@@ -1059,13 +1059,13 @@ std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMetho
// Read object references held in arguments from quick frames and place in a JNI local references,
// so they don't get garbage collected.
-class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
+class RememberForGcArgumentVisitor final : public QuickArgumentVisitor {
public:
RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1957,7 +1957,7 @@ class ComputeNativeCallFrameSize {
uint32_t num_stack_entries_;
};
-class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
+class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
public:
explicit ComputeGenericJniFrameSize(bool critical_native)
: num_handle_scope_references_(0), critical_native_(critical_native) {}
@@ -2038,10 +2038,10 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
return sp8;
}
- uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
+ uintptr_t PushHandle(mirror::Object* /* ptr */) override;
// Add JNIEnv* and jobj/jclass before the shorty-derived elements.
- void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
+ void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -2117,7 +2117,7 @@ class FillNativeCall {
// Visits arguments on the stack placing them into a region lower down the stack for the benefit
// of transitioning into native code.
-class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
+class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
public:
BuildGenericJniFrameVisitor(Thread* self,
bool is_static,
@@ -2150,7 +2150,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
}
}
- void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -2168,7 +2168,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
private:
// A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
- class FillJniCall FINAL : public FillNativeCall {
+ class FillJniCall final : public FillNativeCall {
public:
FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
HandleScope* handle_scope, bool critical_native)
@@ -2177,7 +2177,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
cur_entry_(0),
critical_native_(critical_native) {}
- uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ uintptr_t PushHandle(mirror::Object* ref) override REQUIRES_SHARED(Locks::mutator_lock_);
void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 89694e351a..0f0fb69f4b 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -26,7 +26,7 @@ namespace art {
class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use 64-bit ISA for runtime setup to make method size potentially larger
// than necessary (rather than smaller) during CreateCalleeSaveMethod
options->push_back(std::make_pair("imageinstructionset", "x86_64"));
@@ -35,7 +35,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
// Do not do any of the finalization. We don't want to run any code, we don't need the heap
// prepared, it actually will be a problem with setting the instruction set to x86_64 in
// SetUpRuntimeOptions.
- void FinalizeSetup() OVERRIDE {
+ void FinalizeSetup() override {
ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
}
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 3e2664c7f9..02eeefe0a0 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -90,11 +90,11 @@ class FaultHandler {
DISALLOW_COPY_AND_ASSIGN(FaultHandler);
};
-class NullPointerHandler FINAL : public FaultHandler {
+class NullPointerHandler final : public FaultHandler {
public:
explicit NullPointerHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override;
static bool IsValidImplicitCheck(siginfo_t* siginfo) {
// Our implicit NPE checks always limit the range to a page.
@@ -108,31 +108,31 @@ class NullPointerHandler FINAL : public FaultHandler {
DISALLOW_COPY_AND_ASSIGN(NullPointerHandler);
};
-class SuspensionHandler FINAL : public FaultHandler {
+class SuspensionHandler final : public FaultHandler {
public:
explicit SuspensionHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override;
private:
DISALLOW_COPY_AND_ASSIGN(SuspensionHandler);
};
-class StackOverflowHandler FINAL : public FaultHandler {
+class StackOverflowHandler final : public FaultHandler {
public:
explicit StackOverflowHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override;
private:
DISALLOW_COPY_AND_ASSIGN(StackOverflowHandler);
};
-class JavaStackTraceHandler FINAL : public FaultHandler {
+class JavaStackTraceHandler final : public FaultHandler {
public:
explicit JavaStackTraceHandler(FaultManager* manager);
- bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+ bool Action(int sig, siginfo_t* siginfo, void* context) override NO_THREAD_SAFETY_ANALYSIS;
private:
DISALLOW_COPY_AND_ASSIGN(JavaStackTraceHandler);
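
The fault_handler.h hunks are part of the mechanical FINAL/OVERRIDE-to-keyword migration running through this patch: with the tree built as C++11 or newer, the compatibility macros can be replaced by the final and override keywords with identical semantics. The shape of the rewrite in miniature (ExampleHandler is illustrative, not from the patch):

    class ExampleHandler final : public FaultHandler {                   // was: FINAL
     public:
      explicit ExampleHandler(FaultManager* manager);
      bool Action(int sig, siginfo_t* siginfo, void* context) override;  // was: OVERRIDE
    };
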
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index e30fef4fc2..10af10d1a6 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -72,12 +72,12 @@ class AtomicStack {
~AtomicStack() {}
void Reset() {
- DCHECK(mem_map_.get() != nullptr);
+ DCHECK(mem_map_.IsValid());
DCHECK(begin_ != nullptr);
front_index_.store(0, std::memory_order_relaxed);
back_index_.store(0, std::memory_order_relaxed);
debug_is_sorted_ = true;
- mem_map_->MadviseDontNeedAndZero();
+ mem_map_.MadviseDontNeedAndZero();
}
// Beware: Mixing atomic pushes and atomic pops will cause ABA problem.
@@ -252,10 +252,14 @@ class AtomicStack {
// Size in number of elements.
void Init() {
std::string error_msg;
- mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
- PROT_READ | PROT_WRITE, false, false, &error_msg));
- CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
- uint8_t* addr = mem_map_->Begin();
+ mem_map_ = MemMap::MapAnonymous(name_.c_str(),
+ /* addr */ nullptr,
+ capacity_ * sizeof(begin_[0]),
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
+ uint8_t* addr = mem_map_.Begin();
CHECK(addr != nullptr);
debug_is_sorted_ = true;
begin_ = reinterpret_cast<StackReference<T>*>(addr);
@@ -265,7 +269,7 @@ class AtomicStack {
// Name of the mark stack.
std::string name_;
// Memory mapping of the atomic stack.
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
// Back index (index after the last element pushed).
AtomicInteger back_index_;
// Front index, used for implementing PopFront.
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index d45a0cc018..bb2beaa94c 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -27,47 +27,50 @@ namespace art {
namespace gc {
namespace accounting {
-Bitmap* Bitmap::CreateFromMemMap(MemMap* mem_map, size_t num_bits) {
- CHECK(mem_map != nullptr);
- return new Bitmap(mem_map, num_bits);
+Bitmap* Bitmap::CreateFromMemMap(MemMap&& mem_map, size_t num_bits) {
+ CHECK(mem_map.IsValid());
+ return new Bitmap(std::move(mem_map), num_bits);
}
-Bitmap::Bitmap(MemMap* mem_map, size_t bitmap_size)
- : mem_map_(mem_map), bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map->Begin())),
+Bitmap::Bitmap(MemMap&& mem_map, size_t bitmap_size)
+ : mem_map_(std::move(mem_map)),
+ bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map_.Begin())),
bitmap_size_(bitmap_size) {
CHECK(bitmap_begin_ != nullptr);
CHECK_NE(bitmap_size, 0U);
}
Bitmap::~Bitmap() {
- // Destroys MemMap via std::unique_ptr<>.
+ // Destroys member MemMap.
}
-MemMap* Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
+MemMap Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
const size_t bitmap_size = RoundUp(
RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
- PROT_READ | PROT_WRITE, false, false,
- &error_msg));
- if (UNLIKELY(mem_map.get() == nullptr)) {
+ MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+ /* addr */ nullptr,
+ bitmap_size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
- return nullptr;
}
- return mem_map.release();
+ return mem_map;
}
Bitmap* Bitmap::Create(const std::string& name, size_t num_bits) {
- auto* const mem_map = AllocateMemMap(name, num_bits);
- if (mem_map == nullptr) {
+ MemMap mem_map = AllocateMemMap(name, num_bits);
+ if (UNLIKELY(!mem_map.IsValid())) {
return nullptr;
}
- return CreateFromMemMap(mem_map, num_bits);
+ return CreateFromMemMap(std::move(mem_map), num_bits);
}
void Bitmap::Clear() {
if (bitmap_begin_ != nullptr) {
- mem_map_->MadviseDontNeedAndZero();
+ mem_map_.MadviseDontNeedAndZero();
}
}
@@ -83,14 +86,15 @@ MemoryRangeBitmap<kAlignment>* MemoryRangeBitmap<kAlignment>::Create(
CHECK_ALIGNED(cover_begin, kAlignment);
CHECK_ALIGNED(cover_end, kAlignment);
const size_t num_bits = (cover_end - cover_begin) / kAlignment;
- auto* const mem_map = Bitmap::AllocateMemMap(name, num_bits);
- return CreateFromMemMap(mem_map, cover_begin, num_bits);
+ MemMap mem_map = Bitmap::AllocateMemMap(name, num_bits);
+ CHECK(mem_map.IsValid());
+ return CreateFromMemMap(std::move(mem_map), cover_begin, num_bits);
}
template<size_t kAlignment>
MemoryRangeBitmap<kAlignment>* MemoryRangeBitmap<kAlignment>::CreateFromMemMap(
- MemMap* mem_map, uintptr_t begin, size_t num_bits) {
- return new MemoryRangeBitmap(mem_map, begin, num_bits);
+ MemMap&& mem_map, uintptr_t begin, size_t num_bits) {
+ return new MemoryRangeBitmap(std::move(mem_map), begin, num_bits);
}
template class MemoryRangeBitmap<CardTable::kCardSize>;
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index 2d83a8ad2e..ffef5662db 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -24,12 +24,11 @@
#include <vector>
#include "base/globals.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
namespace art {
-class MemMap;
-
namespace gc {
namespace accounting {
@@ -42,7 +41,7 @@ class Bitmap {
// Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
// mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
// Objects are kAlignement-aligned.
- static Bitmap* CreateFromMemMap(MemMap* mem_map, size_t num_bits);
+ static Bitmap* CreateFromMemMap(MemMap&& mem_map, size_t num_bits);
// offset is the difference from base to a index.
static ALWAYS_INLINE constexpr size_t BitIndexToWordIndex(uintptr_t offset) {
@@ -101,17 +100,17 @@ class Bitmap {
protected:
static constexpr size_t kBitsPerBitmapWord = sizeof(uintptr_t) * kBitsPerByte;
- Bitmap(MemMap* mem_map, size_t bitmap_size);
+ Bitmap(MemMap&& mem_map, size_t bitmap_size);
~Bitmap();
// Allocate the mem-map for a bitmap based on how many bits are required.
- static MemMap* AllocateMemMap(const std::string& name, size_t num_bits);
+ static MemMap AllocateMemMap(const std::string& name, size_t num_bits);
template<bool kSetBit>
ALWAYS_INLINE bool ModifyBit(uintptr_t bit_index);
// Backing storage for bitmap.
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
// This bitmap itself, word sized for efficiency in scanning.
uintptr_t* const bitmap_begin_;
@@ -127,10 +126,10 @@ class Bitmap {
template<size_t kAlignment>
class MemoryRangeBitmap : public Bitmap {
public:
- static MemoryRangeBitmap* Create(const std::string& name, uintptr_t cover_begin,
- uintptr_t cover_end);
- static MemoryRangeBitmap* CreateFromMemMap(MemMap* mem_map, uintptr_t cover_begin,
- size_t num_bits);
+ static MemoryRangeBitmap* Create(
+ const std::string& name, uintptr_t cover_begin, uintptr_t cover_end);
+ static MemoryRangeBitmap* CreateFromMemMap(
+ MemMap&& mem_map, uintptr_t cover_begin, size_t num_bits);
// Beginning of the memory range that the bitmap covers.
ALWAYS_INLINE uintptr_t CoverBegin() const {
@@ -177,9 +176,10 @@ class MemoryRangeBitmap : public Bitmap {
}
private:
- MemoryRangeBitmap(MemMap* mem_map, uintptr_t begin, size_t num_bits)
- : Bitmap(mem_map, num_bits), cover_begin_(begin), cover_end_(begin + kAlignment * num_bits) {
- }
+ MemoryRangeBitmap(MemMap&& mem_map, uintptr_t begin, size_t num_bits)
+ : Bitmap(std::move(mem_map), num_bits),
+ cover_begin_(begin),
+ cover_end_(begin + kAlignment * num_bits) {}
uintptr_t const cover_begin_;
uintptr_t const cover_end_;
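
bitmap.h follows the same ownership convention: factories and constructors that used to take MemMap* now take MemMap&&, making the transfer of the mapping explicit at every call site. Bitmap::Create, as shown in the bitmap.cc hunk above, reduces to:

    MemMap mem_map = AllocateMemMap(name, num_bits);         // protected static helper
    if (UNLIKELY(!mem_map.IsValid())) {
      return nullptr;
    }
    return CreateFromMemMap(std::move(mem_map), num_bits);   // Bitmap takes ownership
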
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 357a4985b6..1e7d76c97e 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -213,8 +213,8 @@ inline void CardTable::ModifyCardsAtomic(uint8_t* scan_begin,
inline void* CardTable::AddrFromCard(const uint8_t *card_addr) const {
DCHECK(IsValidCard(card_addr))
<< " card_addr: " << reinterpret_cast<const void*>(card_addr)
- << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
- << " end: " << reinterpret_cast<void*>(mem_map_->End());
+ << " begin: " << reinterpret_cast<void*>(mem_map_.Begin() + offset_)
+ << " end: " << reinterpret_cast<void*>(mem_map_.End());
uintptr_t offset = card_addr - biased_begin_;
return reinterpret_cast<void*>(offset << kCardShift);
}
@@ -228,16 +228,16 @@ inline uint8_t* CardTable::CardFromAddr(const void *addr) const {
}
inline bool CardTable::IsValidCard(const uint8_t* card_addr) const {
- uint8_t* begin = mem_map_->Begin() + offset_;
- uint8_t* end = mem_map_->End();
+ uint8_t* begin = mem_map_.Begin() + offset_;
+ uint8_t* end = mem_map_.End();
return card_addr >= begin && card_addr < end;
}
inline void CardTable::CheckCardValid(uint8_t* card) const {
DCHECK(IsValidCard(card))
<< " card_addr: " << reinterpret_cast<const void*>(card)
- << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
- << " end: " << reinterpret_cast<void*>(mem_map_->End());
+ << " begin: " << reinterpret_cast<void*>(mem_map_.Begin() + offset_)
+ << " end: " << reinterpret_cast<void*>(mem_map_.End());
}
} // namespace accounting
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 22104a30fe..7cddec6242 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -64,15 +64,18 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
size_t capacity = heap_capacity / kCardSize;
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(
- MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
- false, false, &error_msg));
- CHECK(mem_map.get() != nullptr) << "couldn't allocate card table: " << error_msg;
+ MemMap mem_map = MemMap::MapAnonymous("card table",
+ /* addr */ nullptr,
+ capacity + 256,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
// don't clear the card table to avoid unnecessary pages being allocated
static_assert(kCardClean == 0, "kCardClean must be 0");
- uint8_t* cardtable_begin = mem_map->Begin();
+ uint8_t* cardtable_begin = mem_map.Begin();
CHECK(cardtable_begin != nullptr);
// We allocated up to a bytes worth of extra space to allow `biased_begin`'s byte value to equal
@@ -87,11 +90,11 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
biased_begin += offset;
}
CHECK_EQ(reinterpret_cast<uintptr_t>(biased_begin) & 0xff, kCardDirty);
- return new CardTable(mem_map.release(), biased_begin, offset);
+ return new CardTable(std::move(mem_map), biased_begin, offset);
}
-CardTable::CardTable(MemMap* mem_map, uint8_t* biased_begin, size_t offset)
- : mem_map_(mem_map), biased_begin_(biased_begin), offset_(offset) {
+CardTable::CardTable(MemMap&& mem_map, uint8_t* biased_begin, size_t offset)
+ : mem_map_(std::move(mem_map)), biased_begin_(biased_begin), offset_(offset) {
}
CardTable::~CardTable() {
@@ -100,7 +103,7 @@ CardTable::~CardTable() {
void CardTable::ClearCardTable() {
static_assert(kCardClean == 0, "kCardClean must be 0");
- mem_map_->MadviseDontNeedAndZero();
+ mem_map_.MadviseDontNeedAndZero();
}
void CardTable::ClearCardRange(uint8_t* start, uint8_t* end) {
@@ -118,8 +121,8 @@ bool CardTable::AddrIsInCardTable(const void* addr) const {
void CardTable::CheckAddrIsInCardTable(const uint8_t* addr) const {
uint8_t* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
- uint8_t* begin = mem_map_->Begin() + offset_;
- uint8_t* end = mem_map_->End();
+ uint8_t* begin = mem_map_.Begin() + offset_;
+ uint8_t* end = mem_map_.End();
CHECK(AddrIsInCardTable(addr))
<< "Card table " << this
<< " begin: " << reinterpret_cast<void*>(begin)
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index b8520b7dc0..f163898177 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -20,12 +20,11 @@
#include <memory>
#include "base/globals.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
namespace art {
-class MemMap;
-
namespace mirror {
class Object;
} // namespace mirror
@@ -66,6 +65,11 @@ class CardTable {
return GetCard(obj) == kCardDirty;
}
+ // Is the object on a clean card?
+ bool IsClean(const mirror::Object* obj) const {
+ return GetCard(obj) == kCardClean;
+ }
+
// Return the state of the card at an address.
uint8_t GetCard(const mirror::Object* obj) const {
return *CardFromAddr(obj);
@@ -133,7 +137,7 @@ class CardTable {
bool AddrIsInCardTable(const void* addr) const;
private:
- CardTable(MemMap* begin, uint8_t* biased_begin, size_t offset);
+ CardTable(MemMap&& mem_map, uint8_t* biased_begin, size_t offset);
// Returns true iff the card table address is within the bounds of the card table.
bool IsValidCard(const uint8_t* card_addr) const ALWAYS_INLINE;
@@ -144,7 +148,7 @@ class CardTable {
void VerifyCardTable();
// Mmapped pages for the card table
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
// Value used to compute card table addresses from object addresses, see GetBiasedBegin
uint8_t* const biased_begin_;
// Card table doesn't begin at the beginning of the mem_map_, instead it is displaced by offset
diff --git a/runtime/gc/accounting/mod_union_table-inl.h b/runtime/gc/accounting/mod_union_table-inl.h
index 3a09634c0b..f0a82e0c88 100644
--- a/runtime/gc/accounting/mod_union_table-inl.h
+++ b/runtime/gc/accounting/mod_union_table-inl.h
@@ -33,7 +33,7 @@ class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache {
space::ContinuousSpace* space)
: ModUnionTableReferenceCache(name, heap, space) {}
- bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE ALWAYS_INLINE {
+ bool ShouldAddReference(const mirror::Object* ref) const override ALWAYS_INLINE {
return !space_->HasAddress(ref);
}
};
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 0dd05cd6f0..40dc6e146a 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -329,8 +329,8 @@ class ModUnionCheckReferences {
class EmptyMarkObjectVisitor : public MarkObjectVisitor {
public:
- mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {return obj;}
- void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {}
+ mirror::Object* MarkObject(mirror::Object* obj) override {return obj;}
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {}
};
void ModUnionTable::FilterCards() {
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 7a3c06a281..8c471bc6c2 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -125,33 +125,33 @@ class ModUnionTableReferenceCache : public ModUnionTable {
virtual ~ModUnionTableReferenceCache() {}
// Clear and store cards for a space.
- void ProcessCards() OVERRIDE;
+ void ProcessCards() override;
// Update table based on cleared cards and mark all references to the other spaces.
- void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+ void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
- virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+ void VisitObjects(ObjectCallback callback, void* arg) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
- void Verify() OVERRIDE
+ void Verify() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
- virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+ bool ContainsCardFor(uintptr_t addr) override;
- virtual void Dump(std::ostream& os) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) override REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void SetCards() OVERRIDE;
+ void SetCards() override;
- virtual void ClearTable() OVERRIDE;
+ void ClearTable() override;
protected:
// Cleared card array, used to update the mod-union table.
@@ -172,27 +172,27 @@ class ModUnionTableCardCache : public ModUnionTable {
virtual ~ModUnionTableCardCache() {}
// Clear and store cards for a space.
- virtual void ProcessCards() OVERRIDE;
+ void ProcessCards() override;
// Mark all references to the alloc space(s).
- virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+ void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+ void VisitObjects(ObjectCallback callback, void* arg) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Nothing to verify.
- virtual void Verify() OVERRIDE {}
+ void Verify() override {}
- virtual void Dump(std::ostream& os) OVERRIDE;
+ void Dump(std::ostream& os) override;
- virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+ bool ContainsCardFor(uintptr_t addr) override;
- virtual void SetCards() OVERRIDE;
+ void SetCards() override;
- virtual void ClearTable() OVERRIDE;
+ void ClearTable() override;
protected:
// Cleared card bitmap, used to update the mod-union table.
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index d59ff71676..2a382d7df1 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -97,13 +97,13 @@ class ModUnionTableTest : public CommonRuntimeTest {
class CollectVisitedVisitor : public MarkObjectVisitor {
public:
explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update ATTRIBUTE_UNUSED) OVERRIDE
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(ref != nullptr);
MarkObject(ref->AsMirrorPtr());
}
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ mirror::Object* MarkObject(mirror::Object* obj) override
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
out_->insert(obj);
@@ -122,7 +122,7 @@ class ModUnionTableRefCacheToSpace : public ModUnionTableReferenceCache {
space::ContinuousSpace* target_space)
: ModUnionTableReferenceCache(name, heap, space), target_space_(target_space) {}
- bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE {
+ bool ShouldAddReference(const mirror::Object* ref) const override {
return target_space_->HasAddress(ref);
}
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 4b5a8c61c1..8bdf6da6fe 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -39,11 +39,14 @@ class ReadBarrierTable {
DCHECK_EQ(kHeapCapacity / kRegionSize,
static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
std::string error_msg;
- MemMap* mem_map = MemMap::MapAnonymous("read barrier table", nullptr, capacity,
- PROT_READ | PROT_WRITE, false, false, &error_msg);
- CHECK(mem_map != nullptr && mem_map->Begin() != nullptr)
+ mem_map_ = MemMap::MapAnonymous("read barrier table",
+ /* addr */ nullptr,
+ capacity,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
<< "couldn't allocate read barrier table: " << error_msg;
- mem_map_.reset(mem_map);
}
void ClearForSpace(space::ContinuousSpace* space) {
uint8_t* entry_start = EntryFromAddr(space->Begin());
@@ -66,14 +69,14 @@ class ReadBarrierTable {
return entry_value == kSetEntryValue;
}
void ClearAll() {
- mem_map_->MadviseDontNeedAndZero();
+ mem_map_.MadviseDontNeedAndZero();
}
void SetAll() {
- memset(mem_map_->Begin(), kSetEntryValue, mem_map_->Size());
+ memset(mem_map_.Begin(), kSetEntryValue, mem_map_.Size());
}
bool IsAllCleared() const {
- for (uint32_t* p = reinterpret_cast<uint32_t*>(mem_map_->Begin());
- p < reinterpret_cast<uint32_t*>(mem_map_->End()); ++p) {
+ for (uint32_t* p = reinterpret_cast<uint32_t*>(mem_map_.Begin());
+ p < reinterpret_cast<uint32_t*>(mem_map_.End()); ++p) {
if (*p != 0) {
return false;
}
@@ -90,7 +93,7 @@ class ReadBarrierTable {
uint8_t* EntryFromAddr(const void* heap_addr) const {
DCHECK(IsValidHeapAddr(heap_addr)) << heap_addr;
- uint8_t* entry_addr = mem_map_->Begin() + reinterpret_cast<uintptr_t>(heap_addr) / kRegionSize;
+ uint8_t* entry_addr = mem_map_.Begin() + reinterpret_cast<uintptr_t>(heap_addr) / kRegionSize;
DCHECK(IsValidEntry(entry_addr)) << "heap_addr: " << heap_addr
<< " entry_addr: " << reinterpret_cast<void*>(entry_addr);
return entry_addr;
@@ -106,12 +109,12 @@ class ReadBarrierTable {
}
bool IsValidEntry(const uint8_t* entry_addr) const {
- uint8_t* begin = mem_map_->Begin();
- uint8_t* end = mem_map_->End();
+ uint8_t* begin = mem_map_.Begin();
+ uint8_t* end = mem_map_.End();
return entry_addr >= begin && entry_addr < end;
}
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
};
} // namespace accounting
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index ced62cd249..2946486dfb 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -49,21 +49,22 @@ size_t SpaceBitmap<kAlignment>::ComputeHeapSize(uint64_t bitmap_bytes) {
template<size_t kAlignment>
SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
- const std::string& name, MemMap* mem_map, uint8_t* heap_begin, size_t heap_capacity) {
- CHECK(mem_map != nullptr);
- uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map->Begin());
+ const std::string& name, MemMap&& mem_map, uint8_t* heap_begin, size_t heap_capacity) {
+ CHECK(mem_map.IsValid());
+ uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map.Begin());
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
- return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin, heap_capacity);
+ return new SpaceBitmap(
+ name, std::move(mem_map), bitmap_begin, bitmap_size, heap_begin, heap_capacity);
}
template<size_t kAlignment>
SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name,
- MemMap* mem_map,
+ MemMap&& mem_map,
uintptr_t* bitmap_begin,
size_t bitmap_size,
const void* heap_begin,
size_t heap_capacity)
- : mem_map_(mem_map),
+ : mem_map_(std::move(mem_map)),
bitmap_begin_(reinterpret_cast<Atomic<uintptr_t>*>(bitmap_begin)),
bitmap_size_(bitmap_size),
heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
@@ -83,14 +84,17 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
// (we represent one word as an `intptr_t`).
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
- PROT_READ | PROT_WRITE, false, false,
- &error_msg));
- if (UNLIKELY(mem_map.get() == nullptr)) {
+ MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+ /* addr */ nullptr,
+ bitmap_size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
return nullptr;
}
- return CreateFromMemMap(name, mem_map.release(), heap_begin, heap_capacity);
+ return CreateFromMemMap(name, std::move(mem_map), heap_begin, heap_capacity);
}
template<size_t kAlignment>
@@ -114,7 +118,7 @@ std::string SpaceBitmap<kAlignment>::Dump() const {
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::Clear() {
if (bitmap_begin_ != nullptr) {
- mem_map_->MadviseDontNeedAndZero();
+ mem_map_.MadviseDontNeedAndZero();
}
}
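
The factory signature change above illustrates the general recipe for transferring ownership of such a value: accept `MemMap&&`, validate with `IsValid()`, and forward with `std::move`. A hedged sketch of the same shape (MapLike and Bitmap are stand-ins, not ART types):

    #include <string>
    #include <utility>

    struct MapLike {
      bool IsValid() const { return valid; }
      bool valid = true;
    };

    class Bitmap {
     public:
      static Bitmap* CreateFromMemMap(const std::string& name, MapLike&& map) {
        if (!map.IsValid()) return nullptr;       // replaces the old nullptr check
        return new Bitmap(name, std::move(map));  // ownership moves exactly once
      }

     private:
      Bitmap(const std::string& name, MapLike&& map)
          : name_(name), map_(std::move(map)) {}
      std::string name_;
      MapLike map_;  // held by value; no release()/reset() bookkeeping
    };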
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 1237f6e8b5..6a3faefe08 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -24,6 +24,7 @@
#include <vector>
#include "base/globals.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
namespace art {
@@ -32,7 +33,6 @@ namespace mirror {
class Class;
class Object;
} // namespace mirror
-class MemMap;
namespace gc {
namespace accounting {
@@ -50,8 +50,10 @@ class SpaceBitmap {
// Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
// mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
// Objects are kAlignment-aligned.
- static SpaceBitmap* CreateFromMemMap(const std::string& name, MemMap* mem_map,
- uint8_t* heap_begin, size_t heap_capacity);
+ static SpaceBitmap* CreateFromMemMap(const std::string& name,
+ MemMap&& mem_map,
+ uint8_t* heap_begin,
+ size_t heap_capacity);
~SpaceBitmap();
@@ -215,7 +217,7 @@ class SpaceBitmap {
// TODO: heap_end_ is initialized so that the heap bitmap is empty; this doesn't require the -1.
// However, we document that this is expected on heap_end_
SpaceBitmap(const std::string& name,
- MemMap* mem_map,
+ MemMap&& mem_map,
uintptr_t* bitmap_begin,
size_t bitmap_size,
const void* heap_begin,
@@ -227,7 +229,7 @@ class SpaceBitmap {
bool Modify(const mirror::Object* obj);
// Backing storage for bitmap.
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
// This bitmap itself, word sized for efficiency in scanning.
Atomic<uintptr_t>* const bitmap_begin_;
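
Note why the `class MemMap;` forward declaration is dropped in favor of `#include "base/mem_map.h"`: a by-value member needs the complete type at the point of declaration, whereas the old smart-pointer member could live with an incomplete one. A minimal illustration:

    #include <memory>

    class T;  // forward declaration: T is incomplete here.

    struct ByPointer {
      std::unique_ptr<T> t;  // OK to declare; T can stay incomplete as long as
      ~ByPointer();          // the destructor is defined where T is complete.
    };

    // struct ByValue { T t; };  // would not compile: field has incomplete type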
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index a1d198652e..b9c1dc61b6 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -196,7 +196,7 @@ class AllocRecordStackVisitor : public StackVisitor {
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
- bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
if (trace_->GetDepth() >= max_depth_) {
return false;
}
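
The OVERRIDE-to-override conversion that runs through the rest of this diff is purely mechanical: with C++11 available, the macro indirection is no longer needed, and the plain keyword gives the same compile-time check. For example:

    struct Closure {
      virtual ~Closure() {}
      virtual void Run(int thread_id) {}
    };

    struct Checkpoint : Closure {
      void Run(int thread_id) override {}  // OK: matches the base signature
      // void Run(long thread_id) override {}  // error: does not override anything
    };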
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index a4095d815f..0dbafde2a5 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -91,11 +91,14 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
size_t num_of_pages = footprint_ / kPageSize;
size_t max_num_of_pages = max_capacity_ / kPageSize;
std::string error_msg;
- page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
- RoundUp(max_num_of_pages, kPageSize),
- PROT_READ | PROT_WRITE, false, false, &error_msg));
- CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
- page_map_ = page_map_mem_map_->Begin();
+ page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
+ /* addr */ nullptr,
+ RoundUp(max_num_of_pages, kPageSize),
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
+ page_map_ = page_map_mem_map_.Begin();
page_map_size_ = num_of_pages;
max_page_map_size_ = max_num_of_pages;
free_page_run_size_map_.resize(num_of_pages);
@@ -1364,8 +1367,8 @@ bool RosAlloc::Trim() {
// Zero out the tail of the page map.
uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
- DCHECK_LE(madvise_begin, page_map_mem_map_->End());
- size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
+ DCHECK_LE(madvise_begin, page_map_mem_map_.End());
+ size_t madvise_size = page_map_mem_map_.End() - madvise_begin;
if (madvise_size > 0) {
DCHECK_ALIGNED(madvise_begin, kPageSize);
DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
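
The rewritten MapAnonymous call also shows the argument-comment convention used throughout this change: opaque literals such as `nullptr` and `false` get a `/* name */` comment naming the parameter. A small sketch of the idiom (MapSomething is a stand-in):

    #include <cstddef>
    #include <string>

    void MapSomething(const char* name, void* addr, size_t size,
                      int prot, bool low_4gb, std::string* error_msg);

    void Caller(std::string* err) {
      // Without comments the call reads as a row of opaque literals:
      //   MapSomething("page map", nullptr, 4096, 3, false, err);
      MapSomething("page map",
                   /* addr */ nullptr,
                   4096,
                   /* prot */ 3,
                   /* low_4gb */ false,
                   err);
    }

The compiler ignores the comments, but readers (and tools that check argument comments against parameter names) do not.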
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 30213d55c5..056216724d 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -31,13 +31,12 @@
#include "base/allocator.h"
#include "base/bit_utils.h"
#include "base/globals.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
#include "thread.h"
namespace art {
-class MemMap;
-
namespace gc {
namespace allocator {
@@ -746,7 +745,7 @@ class RosAlloc {
volatile uint8_t* page_map_; // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
size_t page_map_size_;
size_t max_page_map_size_;
- std::unique_ptr<MemMap> page_map_mem_map_;
+ MemMap page_map_mem_map_;
// The table that indicates the size of free page runs. These sizes
// are stored here to avoid storing in the free page header and
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 783f2fc4bc..3095f9f679 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -25,6 +25,7 @@
#include "gc/space/region_space.h"
#include "gc/verification.h"
#include "lock_word.h"
+#include "mirror/class.h"
#include "mirror/object-readbarrier-inl.h"
namespace art {
@@ -35,6 +36,25 @@ inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
Thread* const self,
mirror::Object* ref,
accounting::ContinuousSpaceBitmap* bitmap) {
+ if (kEnableGenerationalConcurrentCopyingCollection
+ && young_gen_
+ && !done_scanning_.load(std::memory_order_acquire)) {
+ // Everything in the unevac space should be marked for generational CC except for large objects.
+ DCHECK(region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref)) << ref << " "
+ << ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->PrettyClass();
+ // Since the mark bitmap is still filled in from the last GC, we cannot use that or else the
+ // mutator may see references to the from space. Instead, use the Baker pointer itself as
+ // the mark bit.
+ if (ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
+ // TODO: We don't actually need to scan this object later; we just need to clear the gray
+ // bit.
+ // TODO: We could also set the mark bit here for "free" since this case comes from the
+ // read barrier.
+ PushOntoMarkStack(self, ref);
+ }
+ DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
+ return ref;
+ }
// For the Baker-style RB, in a rare case, we could incorrectly change the object from non-gray
// (black) to gray even though the object has already been marked through. This happens if a
// mutator thread gets preempted before the AtomicSetReadBarrierState below, GC marks through the
@@ -103,11 +123,13 @@ inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(Thread* const self,
return ref;
}
-template<bool kGrayImmuneObject, bool kFromGCThread>
+template<bool kGrayImmuneObject, bool kNoUnEvac, bool kFromGCThread>
inline mirror::Object* ConcurrentCopying::Mark(Thread* const self,
mirror::Object* from_ref,
mirror::Object* holder,
MemberOffset offset) {
+ // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
if (from_ref == nullptr) {
return nullptr;
}
@@ -149,6 +171,14 @@ inline mirror::Object* ConcurrentCopying::Mark(Thread* const self,
return to_ref;
}
case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
+ if (kEnableGenerationalConcurrentCopyingCollection
+ && kNoUnEvac
+ && !region_space_->IsLargeObject(from_ref)) {
+ if (!kFromGCThread) {
+ DCHECK(IsMarkedInUnevacFromSpace(from_ref)) << "Returning unmarked object to mutator";
+ }
+ return from_ref;
+ }
return MarkUnevacFromSpaceRegion(self, from_ref, region_space_bitmap_);
default:
// The reference is in an unused region. Remove memory protection from
@@ -179,7 +209,8 @@ inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* fr
if (UNLIKELY(mark_from_read_barrier_measurements_)) {
ret = MarkFromReadBarrierWithMeasurements(self, from_ref);
} else {
- ret = Mark(self, from_ref);
+ ret = Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self,
+ from_ref);
}
// Only set the mark bit for baker barrier.
if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
@@ -211,6 +242,11 @@ inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_re
// Use load-acquire on the read barrier pointer to ensure that we never see a black (non-gray)
// read barrier state with an unmarked bit due to reordering.
DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
+ if (kEnableGenerationalConcurrentCopyingCollection
+ && young_gen_
+ && !done_scanning_.load(std::memory_order_acquire)) {
+ return from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState();
+ }
if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
return true;
}
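
The gray-state transition used above is, in essence, an atomic test-and-set mark: whichever thread wins the compare-and-swap owns the object and pushes it onto the mark stack exactly once. A generic sketch of that pattern (simplified types, not ART's ReadBarrier API):

    #include <atomic>
    #include <cstdint>

    enum class RbState : uint8_t { kNonGray = 0, kGray = 1 };

    struct Obj {
      std::atomic<RbState> rb_state{RbState::kNonGray};
      // Returns true iff this call performed the transition, i.e. the caller
      // "won" the mark and must push the object exactly once.
      bool AtomicSetState(RbState expected, RbState desired) {
        return rb_state.compare_exchange_strong(expected, desired,
                                                std::memory_order_acq_rel);
      }
    };

    void MarkOnce(Obj* ref, void (*push)(Obj*)) {
      if (ref->AtomicSetState(RbState::kNonGray, RbState::kGray)) {
        push(ref);  // losers of the race observe kGray and skip the push
      }
    }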
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 7688b546d9..b4453d97b2 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -60,10 +60,13 @@ static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
// Slow path mark stack size, increase this if the stack is getting full and it is causing
// performance problems.
static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;
+// Size (in the number of objects) of the sweep array free buffer.
+static constexpr size_t kSweepArrayChunkFreeSize = 1024;
// Verify that there are no missing card marks.
static constexpr bool kVerifyNoMissingCardMarks = kIsDebugBuild;
ConcurrentCopying::ConcurrentCopying(Heap* heap,
+ bool young_gen,
const std::string& name_prefix,
bool measure_read_barrier_slow_path)
: GarbageCollector(heap,
@@ -90,6 +93,7 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
from_space_num_bytes_at_first_pause_(0),
mark_stack_mode_(kMarkStackModeOff),
weak_ref_access_enabled_(true),
+ young_gen_(young_gen),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
mark_from_read_barrier_measurements_(false),
@@ -107,6 +111,7 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
kMarkSweepMarkStackLock) {
static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
"The region space size and the read barrier table region size must match");
+ CHECK(kEnableGenerationalConcurrentCopyingCollection || !young_gen_);
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -125,6 +130,19 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
pooled_mark_stacks_.push_back(mark_stack);
}
}
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ // Allocate sweep array free buffer.
+ std::string error_msg;
+ sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
+ "concurrent copying sweep array free buffer",
+ /* addr */ nullptr,
+ RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ CHECK(sweep_array_free_buffer_mem_map_.IsValid())
+ << "Couldn't allocate sweep array free buffer: " << error_msg;
+ }
}
void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* field,
@@ -211,7 +229,7 @@ class ConcurrentCopying::ActivateReadBarrierEntrypointsCheckpoint : public Closu
explicit ActivateReadBarrierEntrypointsCheckpoint(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {}
- void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -232,7 +250,7 @@ class ConcurrentCopying::ActivateReadBarrierEntrypointsCallback : public Closure
explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a race with ThreadList::Register().
CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
@@ -268,11 +286,36 @@ void ConcurrentCopying::BindBitmaps() {
space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
CHECK(space->IsZygoteSpace() || space->IsImageSpace());
immune_spaces_.AddSpace(space);
- } else if (space == region_space_) {
- // It is OK to clear the bitmap with mutators running since the only place it is read is
- // VisitObjects which has exclusion with CC.
- region_space_bitmap_ = region_space_->GetMarkBitmap();
- region_space_bitmap_->Clear();
+ } else {
+ CHECK(!space->IsZygoteSpace());
+ CHECK(!space->IsImageSpace());
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ if (space == region_space_) {
+ region_space_bitmap_ = region_space_->GetMarkBitmap();
+ } else if (young_gen_ && space->IsContinuousMemMapAllocSpace()) {
+ DCHECK_EQ(space->GetGcRetentionPolicy(), space::kGcRetentionPolicyAlwaysCollect);
+ space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
+ }
+ // Age all of the cards for the region space so that we know which evac regions to scan.
+ Runtime::Current()->GetHeap()->GetCardTable()->ModifyCardsAtomic(
+ space->Begin(),
+ space->End(),
+ AgeCardVisitor(),
+ VoidFunctor());
+ } else {
+ if (space == region_space_) {
+ // It is OK to clear the bitmap with mutators running since the only place it is read is
+ // VisitObjects which has exclusion with CC.
+ region_space_bitmap_ = region_space_->GetMarkBitmap();
+ region_space_bitmap_->Clear();
+ }
+ }
+ }
+ }
+ if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
+ CHECK(space->IsLargeObjectSpace());
+ space->AsLargeObjectSpace()->CopyLiveToMarked();
}
}
}
@@ -304,12 +347,14 @@ void ConcurrentCopying::InitializePhase() {
bytes_moved_gc_thread_ = 0;
objects_moved_gc_thread_ = 0;
GcCause gc_cause = GetCurrentIteration()->GetGcCause();
- if (gc_cause == kGcCauseExplicit ||
- gc_cause == kGcCauseCollectorTransition ||
- GetCurrentIteration()->GetClearSoftReferences()) {
- force_evacuate_all_ = true;
- } else {
- force_evacuate_all_ = false;
+
+ force_evacuate_all_ = false;
+ if (!kEnableGenerationalConcurrentCopyingCollection || !young_gen_) {
+ if (gc_cause == kGcCauseExplicit ||
+ gc_cause == kGcCauseCollectorTransition ||
+ GetCurrentIteration()->GetClearSoftReferences()) {
+ force_evacuate_all_ = true;
+ }
}
if (kUseBakerReadBarrier) {
updated_all_immune_objects_.store(false, std::memory_order_relaxed);
@@ -320,9 +365,13 @@ void ConcurrentCopying::InitializePhase() {
DCHECK(immune_gray_stack_.empty());
}
}
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ done_scanning_.store(false, std::memory_order_release);
+ }
BindBitmaps();
if (kVerboseMode) {
- LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
+ LOG(INFO) << "young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha;
+ LOG(INFO) << "force_evacuate_all=" << std::boolalpha << force_evacuate_all_ << std::noboolalpha;
LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
<< "-" << immune_spaces_.GetLargestImmuneRegion().End();
for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
@@ -330,6 +379,9 @@ void ConcurrentCopying::InitializePhase() {
}
LOG(INFO) << "GC end of InitializePhase";
}
+ if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
+ region_space_bitmap_->Clear();
+ }
// Mark all of the zygote large objects without graying them.
MarkZygoteLargeObjects();
}
@@ -341,7 +393,7 @@ class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor
: concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
}
- virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -415,7 +467,7 @@ class ConcurrentCopying::FlipCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void Run(Thread* thread) override REQUIRES(Locks::mutator_lock_) {
ConcurrentCopying* cc = concurrent_copying_;
TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
// Note: self is not necessarily equal to thread since thread may be suspended.
@@ -425,9 +477,18 @@ class ConcurrentCopying::FlipCallback : public Closure {
}
CHECK_EQ(thread, self);
Locks::mutator_lock_->AssertExclusiveHeld(self);
+ space::RegionSpace::EvacMode evac_mode = space::RegionSpace::kEvacModeLivePercentNewlyAllocated;
+ if (cc->young_gen_) {
+ CHECK(!cc->force_evacuate_all_);
+ evac_mode = space::RegionSpace::kEvacModeNewlyAllocated;
+ } else if (cc->force_evacuate_all_) {
+ evac_mode = space::RegionSpace::kEvacModeForceAll;
+ }
{
TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
- cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
+ // Only change live bytes for full CC.
+ cc->region_space_->SetFromSpace(
+ cc->rb_table_, evac_mode, /*clear_live_bytes*/ !cc->young_gen_);
}
cc->SwapStacks();
if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -438,7 +499,7 @@ class ConcurrentCopying::FlipCallback : public Closure {
cc->is_marking_ = true;
cc->mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal,
std::memory_order_relaxed);
- if (kIsDebugBuild) {
+ if (kIsDebugBuild && !cc->young_gen_) {
cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
}
if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
@@ -596,7 +657,8 @@ void ConcurrentCopying::VerifyNoMissingCardMarks() {
auto visitor = [&](mirror::Object* obj)
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_) {
- // Objects not on dirty or aged cards should never have references to newly allocated regions.
+ // Objects on clean cards should never have references to newly allocated regions. Note
+ // that aged cards are also not clean.
if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder*/ obj);
obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
@@ -752,7 +814,13 @@ inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
DCHECK(obj != nullptr);
DCHECK(immune_spaces_.ContainsObject(obj));
// Update the fields without graying it or pushing it onto the mark stack.
- Scan(obj);
+ if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ // The young GC does not care about references to the unevac space. It is safe not to gray
+ // these, as long as immune objects are scanned after the dirty cards.
+ Scan<true>(obj);
+ } else {
+ Scan<false>(obj);
+ }
}
class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
@@ -803,7 +871,70 @@ void ConcurrentCopying::MarkingPhase() {
if (kUseBakerReadBarrier) {
gc_grays_immune_objects_ = false;
}
+ if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ if (kVerboseMode) {
+ LOG(INFO) << "GC ScanCardsForSpace";
+ }
+ TimingLogger::ScopedTiming split2("ScanCardsForSpace", GetTimings());
+ WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ CHECK(!done_scanning_.load(std::memory_order_relaxed));
+ if (kIsDebugBuild) {
+ // Sleep briefly to let mutators run ahead, to help expose races between the GC card
+ // scanning and mutator reads of references.
+ usleep(10 * 1000);
+ }
+ for (space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace() || space->IsZygoteSpace()) {
+ // Image and zygote spaces are already handled since we gray the objects in the pause.
+ continue;
+ }
+ // Scan all of the objects on dirty cards in the unevac from-space and the non-moving
+ // space. These are from previous GCs and may reference things in the from space.
+ //
+ // Note that we do not need to process the large-object space (the only discontinuous space)
+ // as it contains only large string objects and large primitive array objects, which have no
+ // references to other objects, except their class. There is no need to scan these large
+ // objects, as the String class and the primitive array classes are expected to never move
+ // during a minor (young-generation) collection:
+ // - In the case where we run with a boot image, these classes are part of the image space,
+ // which is an immune space.
+ // - In the case where we run without a boot image, these classes are allocated in the region
+ // space (main space), but they are not expected to move during a minor collection (this
+ // would only happen if those classes were allocated between a major and a minor
+ // collection, which is unlikely -- we don't expect any GC to happen before these
+ // fundamental classes are initialized). Note that these classes could move during a major
+ // collection, but this is fine: in that case, the whole heap is traced and the card
+ // table logic below is not used.
+ Runtime::Current()->GetHeap()->GetCardTable()->Scan<false>(
+ space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ [this, space](mirror::Object* obj)
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Don't push or gray unevac refs.
+ if (kIsDebugBuild && space == region_space_) {
+ // We may get unevac large objects.
+ if (!region_space_->IsInUnevacFromSpace(obj)) {
+ CHECK(region_space_bitmap_->Test(obj));
+ region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
+ LOG(FATAL) << "Scanning " << obj << " not in unevac space";
+ }
+ }
+ Scan<true>(obj);
+ },
+ accounting::CardTable::kCardDirty - 1);
+ }
+ // Done scanning unevac space.
+ done_scanning_.store(true, std::memory_order_release);
+ if (kVerboseMode) {
+ LOG(INFO) << "GC end of ScanCardsForSpace";
+ }
+ }
{
+ // For a sticky-bit collection, this phase needs to be after the card scanning since the
+ // mutator may read an unevac space object out of an image object. If the image object is no
+ // longer gray it will trigger a read barrier for the unevac space object.
TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
for (auto& space : immune_spaces_.GetSpaces()) {
DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
@@ -956,7 +1087,7 @@ class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -980,7 +1111,7 @@ class ConcurrentCopying::DisableMarkingCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a race with ThreadList::Register().
CHECK(concurrent_copying_->is_marking_);
@@ -1175,7 +1306,7 @@ class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor
}
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(root != nullptr);
operator()(root);
}
@@ -1202,7 +1333,7 @@ class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
- this->operator()(ref, mirror::Reference::ReferentOffset(), false);
+ this->operator()(ObjPtr<mirror::Object>(ref), mirror::Reference::ReferentOffset(), false);
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
@@ -1341,7 +1472,7 @@ class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
disable_weak_ref_access_(disable_weak_ref_access) {
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -1520,18 +1651,40 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
<< " " << to_ref << " " << to_ref->GetReadBarrierState()
<< " is_marked=" << IsMarked(to_ref);
}
+ space::RegionSpace::RegionType rtype = region_space_->GetRegionType(to_ref);
bool add_to_live_bytes = false;
- if (region_space_->IsInUnevacFromSpace(to_ref)) {
+ // Invariant: There should be no object from a newly-allocated
+ // region (either large or non-large) on the mark stack.
+ DCHECK(!region_space_->IsInNewlyAllocatedRegion(to_ref)) << to_ref;
+ if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
// Mark the bitmap only in the GC thread here so that we don't need a CAS.
- if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
+ if (!kUseBakerReadBarrier ||
+ !region_space_bitmap_->Set(to_ref)) {
// It may be already marked if we accidentally pushed the same object twice due to the racy
// bitmap read in MarkUnevacFromSpaceRegion.
- Scan(to_ref);
- // Only add to the live bytes if the object was not already marked.
+ if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ CHECK(region_space_->IsLargeObject(to_ref));
+ region_space_->ZeroLiveBytesForLargeObject(to_ref);
+ Scan<true>(to_ref);
+ } else {
+ Scan<false>(to_ref);
+ }
+ // Only add to the live bytes if the object was not already marked and this is not a
+ // young-generation GC.
add_to_live_bytes = true;
}
} else {
- Scan(to_ref);
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
+ // Copied to to-space, set the bit so that the next GC can scan objects.
+ region_space_bitmap_->Set(to_ref);
+ }
+ }
+ if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ Scan<true>(to_ref);
+ } else {
+ Scan<false>(to_ref);
+ }
}
if (kUseBakerReadBarrier) {
DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
@@ -1589,7 +1742,7 @@ class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
: concurrent_copying_(concurrent_copying) {
}
- void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
// This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
// to avoid a deadlock b/31500969.
CHECK(concurrent_copying_->weak_ref_access_enabled_);
@@ -1674,29 +1827,126 @@ void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
}
void ConcurrentCopying::Sweep(bool swap_bitmaps) {
- {
- TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
- accounting::ObjectStack* live_stack = heap_->GetLiveStack();
- if (kEnableFromSpaceAccountingCheck) {
- CHECK_GE(live_stack_freeze_size_, live_stack->Size());
+ if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ // Only sweep objects on the live stack.
+ SweepArray(heap_->GetLiveStack(), /* swap_bitmaps */ false);
+ } else {
+ {
+ TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
+ accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+ if (kEnableFromSpaceAccountingCheck) {
+ // Ensure that nobody inserted items in the live stack after we swapped the stacks.
+ CHECK_GE(live_stack_freeze_size_, live_stack->Size());
+ }
+ heap_->MarkAllocStackAsLive(live_stack);
+ live_stack->Reset();
+ }
+ CheckEmptyMarkStack();
+ TimingLogger::ScopedTiming split("Sweep", GetTimings());
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (space->IsContinuousMemMapAllocSpace()) {
+ space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
+ if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
+ continue;
+ }
+ TimingLogger::ScopedTiming split2(
+ alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
+ RecordFree(alloc_space->Sweep(swap_bitmaps));
+ }
}
- heap_->MarkAllocStackAsLive(live_stack);
- live_stack->Reset();
+ SweepLargeObjects(swap_bitmaps);
}
+}
+
+// Copied and adapted from MarkSweep::SweepArray.
+void ConcurrentCopying::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
+ // This method is only used when Generational CC collection is enabled.
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection);
CheckEmptyMarkStack();
- TimingLogger::ScopedTiming split("Sweep", GetTimings());
- for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (space->IsContinuousMemMapAllocSpace()) {
- space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
- if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
+ TimingLogger::ScopedTiming t("SweepArray", GetTimings());
+ Thread* self = Thread::Current();
+ mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
+ sweep_array_free_buffer_mem_map_.BaseBegin());
+ size_t chunk_free_pos = 0;
+ ObjectBytePair freed;
+ ObjectBytePair freed_los;
+ // How many objects are left in the array, modified after each space is swept.
+ StackReference<mirror::Object>* objects = allocations->Begin();
+ size_t count = allocations->Size();
+ // Start by sweeping the continuous spaces.
+ for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
+ if (!space->IsAllocSpace() ||
+ space == region_space_ ||
+ immune_spaces_.ContainsSpace(space) ||
+ space->GetLiveBitmap() == nullptr) {
+ continue;
+ }
+ space::AllocSpace* alloc_space = space->AsAllocSpace();
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ if (swap_bitmaps) {
+ std::swap(live_bitmap, mark_bitmap);
+ }
+ StackReference<mirror::Object>* out = objects;
+ for (size_t i = 0; i < count; ++i) {
+ mirror::Object* const obj = objects[i].AsMirrorPtr();
+ if (kUseThreadLocalAllocationStack && obj == nullptr) {
continue;
}
- TimingLogger::ScopedTiming split2(
- alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
- RecordFree(alloc_space->Sweep(swap_bitmaps));
+ if (space->HasAddress(obj)) {
+ // This object is in the space; remove it from the array and add it to the sweep buffer
+ // if needed.
+ if (!mark_bitmap->Test(obj)) {
+ if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
+ TimingLogger::ScopedTiming t2("FreeList", GetTimings());
+ freed.objects += chunk_free_pos;
+ freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
+ chunk_free_pos = 0;
+ }
+ chunk_free_buffer[chunk_free_pos++] = obj;
+ }
+ } else {
+ (out++)->Assign(obj);
+ }
+ }
+ if (chunk_free_pos > 0) {
+ TimingLogger::ScopedTiming t2("FreeList", GetTimings());
+ freed.objects += chunk_free_pos;
+ freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
+ chunk_free_pos = 0;
}
+ // All of the references this space contained are no longer in the allocation stack; update
+ // the count.
+ count = out - objects;
}
- SweepLargeObjects(swap_bitmaps);
+ // Handle the large object space.
+ space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ if (large_object_space != nullptr) {
+ accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
+ accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
+ if (swap_bitmaps) {
+ std::swap(large_live_objects, large_mark_objects);
+ }
+ for (size_t i = 0; i < count; ++i) {
+ mirror::Object* const obj = objects[i].AsMirrorPtr();
+ // Handle large objects.
+ if (kUseThreadLocalAllocationStack && obj == nullptr) {
+ continue;
+ }
+ if (!large_mark_objects->Test(obj)) {
+ ++freed_los.objects;
+ freed_los.bytes += large_object_space->Free(self, obj);
+ }
+ }
+ }
+ {
+ TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
+ RecordFree(freed);
+ RecordFreeLOS(freed_los);
+ t2.NewTiming("ResetStack");
+ allocations->Reset();
+ }
+ sweep_array_free_buffer_mem_map_.MadviseDontNeedAndZero();
}
void ConcurrentCopying::MarkZygoteLargeObjects() {
@@ -1806,7 +2056,7 @@ void ConcurrentCopying::ReclaimPhase() {
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- Sweep(false);
+ Sweep(/* swap_bitmaps */ false);
SwapBitmaps();
heap_->UnBindBitmaps();
@@ -1876,6 +2126,7 @@ void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj,
// Remove memory protection from the region space and log debugging information.
region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
+ Thread::Current()->DumpJavaStack(LOG_STREAM(FATAL_WITHOUT_ABORT));
}
CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
} else {
@@ -1890,12 +2141,33 @@ void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj,
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
if (obj != nullptr) {
LogFromSpaceRefHolder(obj, offset);
+ LOG(FATAL_WITHOUT_ABORT) << "UNEVAC " << region_space_->IsInUnevacFromSpace(obj) << " "
+ << obj << " " << obj->GetMarkBit();
+ if (region_space_->HasAddress(obj)) {
+ region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
+ }
+ LOG(FATAL_WITHOUT_ABORT) << "CARD " << static_cast<size_t>(
+ *Runtime::Current()->GetHeap()->GetCardTable()->CardFromAddr(
+ reinterpret_cast<uint8_t*>(obj)));
+ if (region_space_->HasAddress(obj)) {
+ LOG(FATAL_WITHOUT_ABORT) << "BITMAP " << region_space_bitmap_->Test(obj);
+ } else {
+ accounting::ContinuousSpaceBitmap* mark_bitmap =
+ heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
+ if (mark_bitmap != nullptr) {
+ LOG(FATAL_WITHOUT_ABORT) << "BITMAP " << mark_bitmap->Test(obj);
+ } else {
+ accounting::LargeObjectBitmap* los_bitmap =
+ heap_mark_bitmap_->GetLargeObjectBitmap(obj);
+ LOG(FATAL_WITHOUT_ABORT) << "BITMAP " << los_bitmap->Test(obj);
+ }
+ }
}
ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
LOG(FATAL) << "Invalid reference " << ref
<< " referenced from object " << obj << " at offset " << offset;
}
@@ -1988,7 +2260,7 @@ void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
LOG(FATAL) << "Invalid reference " << ref;
}
} else {
@@ -2076,24 +2348,31 @@ void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* o
// If `ref` is on the allocation stack, then it may not be
// marked live, but considered marked/alive (but not
// necessarily on the live stack).
- CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack."
- << " obj=" << obj
- << " ref=" << ref
- << " is_los=" << std::boolalpha << is_los << std::noboolalpha;
+ CHECK(IsOnAllocStack(ref))
+ << "Unmarked ref that's not on the allocation stack."
+ << " obj=" << obj
+ << " ref=" << ref
+ << " is_los=" << std::boolalpha << is_los << std::noboolalpha
+ << " is_marking=" << std::boolalpha << is_marking_ << std::noboolalpha
+ << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha;
}
}
}
// Used to scan ref fields of an object.
+template <bool kNoUnEvac>
class ConcurrentCopying::RefFieldsVisitor {
public:
explicit RefFieldsVisitor(ConcurrentCopying* collector, Thread* const thread)
- : collector_(collector), thread_(thread) {}
+ : collector_(collector), thread_(thread) {
+ // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+ }
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
- collector_->Process(obj, offset);
+ collector_->Process<kNoUnEvac>(obj, offset);
}
void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
@@ -2121,7 +2400,10 @@ class ConcurrentCopying::RefFieldsVisitor {
Thread* const thread_;
};
+template <bool kNoUnEvac>
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
+ // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
// Avoid all read barriers during visit references to help performance.
// Don't do this in transaction mode because we may read the old value of an field which may
@@ -2130,7 +2412,7 @@ inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
}
DCHECK(!region_space_->IsInFromSpace(to_ref));
DCHECK_EQ(Thread::Current(), thread_running_gc_);
- RefFieldsVisitor visitor(this, thread_running_gc_);
+ RefFieldsVisitor<kNoUnEvac> visitor(this, thread_running_gc_);
// Disable the read barrier for a performance reason.
to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
@@ -2139,11 +2421,14 @@ inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
}
}
+template <bool kNoUnEvac>
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
+ // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
DCHECK_EQ(Thread::Current(), thread_running_gc_);
mirror::Object* ref = obj->GetFieldObject<
mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
- mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, /*kFromGCThread*/true>(
+ mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, kNoUnEvac, /*kFromGCThread*/true>(
thread_running_gc_,
ref,
/*holder*/ obj,
@@ -2423,7 +2708,8 @@ mirror::Object* ConcurrentCopying::Copy(Thread* const self,
accounting::ContinuousSpaceBitmap* mark_bitmap =
heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
CHECK(mark_bitmap != nullptr);
- CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
+ bool previously_marked_in_bitmap = mark_bitmap->AtomicTestAndSet(to_ref);
+ CHECK(!previously_marked_in_bitmap);
}
}
DCHECK(to_ref != nullptr);
@@ -2620,6 +2906,28 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self,
accounting::LargeObjectBitmap* los_bitmap =
heap_mark_bitmap_->GetLargeObjectBitmap(ref);
bool is_los = mark_bitmap == nullptr;
+ if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ // The sticky-bit CC collector is only compatible with Baker-style read barriers.
+ DCHECK(kUseBakerReadBarrier);
+ // Not done scanning: use the read barrier state (AtomicSetReadBarrierState) as the mark bit.
+ if (!done_scanning_.load(std::memory_order_acquire)) {
+ // Since the mark bitmap is still filled in from the last GC, we cannot use that or else the
+ // mutator may see references to the from space. Instead, use the Baker pointer itself as
+ // the mark bit.
+ if (ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
+ // TODO: We don't actually need to scan this object later; we just need to clear the gray
+ // bit.
+ // Also make sure the object is marked.
+ if (is_los) {
+ los_bitmap->AtomicTestAndSet(ref);
+ } else {
+ mark_bitmap->AtomicTestAndSet(ref);
+ }
+ PushOntoMarkStack(self, ref);
+ }
+ return ref;
+ }
+ }
if (!is_los && mark_bitmap->Test(ref)) {
// Already marked.
} else if (is_los && los_bitmap->Test(ref)) {
@@ -2696,7 +3004,7 @@ void ConcurrentCopying::FinishPhase() {
}
// kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
// positives.
- if (!kVerifyNoMissingCardMarks) {
+ if (!kEnableGenerationalConcurrentCopyingCollection && !kVerifyNoMissingCardMarks) {
TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
// We do not currently use the region space cards at all, madvise them away to save ram.
heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
@@ -2804,7 +3112,8 @@ mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(Thread* c
}
ScopedTrace tr(__FUNCTION__);
const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
- mirror::Object* ret = Mark(self, from_ref);
+ mirror::Object* ret =
+ Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self, from_ref);
if (measure_read_barrier_slow_path_) {
rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed);
}
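
SweepArray's free buffer amortizes free-list locking by batching dead objects into fixed-size chunks (kSweepArrayChunkFreeSize objects backed by the new mem map) and releasing each chunk in one call. The core loop, reduced to a sketch (FreeList and the sizes here are illustrative stand-ins):

    #include <cstddef>

    constexpr size_t kChunkSize = 1024;

    // Stand-in for a batch free on the space's free list; assumed to return
    // the number of bytes released.
    size_t FreeList(void** objs, size_t count);

    size_t SweepDead(void** dead, size_t n) {
      void* chunk[kChunkSize];
      size_t pos = 0;
      size_t freed_bytes = 0;
      for (size_t i = 0; i < n; ++i) {
        if (pos == kChunkSize) {  // buffer full: flush one batch
          freed_bytes += FreeList(chunk, pos);
          pos = 0;
        }
        chunk[pos++] = dead[i];
      }
      if (pos > 0) {  // flush the tail
        freed_bytes += FreeList(chunk, pos);
      }
      return freed_bytes;
    }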
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 448525d013..1a7464a05f 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -66,11 +66,12 @@ class ConcurrentCopying : public GarbageCollector {
static constexpr bool kGrayDirtyImmuneObjects = true;
explicit ConcurrentCopying(Heap* heap,
+ bool young_gen,
const std::string& name_prefix = "",
bool measure_read_barrier_slow_path = false);
~ConcurrentCopying();
- virtual void RunPhases() OVERRIDE
+ void RunPhases() override
REQUIRES(!immune_gray_stack_lock_,
!mark_stack_lock_,
!rb_slow_path_histogram_lock_,
@@ -86,13 +87,15 @@ class ConcurrentCopying : public GarbageCollector {
void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
- virtual GcType GetGcType() const OVERRIDE {
- return kGcTypePartial;
+ GcType GetGcType() const override {
+ return (kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+ ? kGcTypeSticky
+ : kGcTypePartial;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ CollectorType GetCollectorType() const override {
return kCollectorTypeCC;
}
- virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
+ void RevokeAllThreadLocalBuffers() override;
void SetRegionSpace(space::RegionSpace* region_space) {
DCHECK(region_space != nullptr);
region_space_ = region_space;
@@ -110,8 +113,8 @@ class ConcurrentCopying : public GarbageCollector {
DCHECK(ref != nullptr);
return IsMarked(ref) == ref;
}
- template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
// Mark object `from_ref`, copying it to the to-space if needed.
+ template<bool kGrayImmuneObject = true, bool kNoUnEvac = false, bool kFromGCThread = false>
ALWAYS_INLINE mirror::Object* Mark(Thread* const self,
mirror::Object* from_ref,
mirror::Object* holder = nullptr,
@@ -141,7 +144,7 @@ class ConcurrentCopying : public GarbageCollector {
void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
+ mirror::Object* IsMarked(mirror::Object* from_ref) override
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -155,27 +158,30 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
// Scan the reference fields of object `to_ref`.
+ template <bool kNoUnEvac>
void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
// Process a field.
+ template <bool kNoUnEvac>
void Process(mirror::Object* obj, MemberOffset offset)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_, !immune_gray_stack_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
template<bool kGrayImmuneObject>
void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info) override
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
accounting::ObjectStack* GetAllocationStack();
accounting::ObjectStack* GetLiveStack();
- virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -199,26 +205,32 @@ class ConcurrentCopying : public GarbageCollector {
void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
- ObjPtr<mirror::Reference> reference) OVERRIDE
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) override
REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
- virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
+ mirror::Object* MarkObject(mirror::Object* from_ref) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
- bool do_atomic_update) OVERRIDE
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
+ bool do_atomic_update) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
- bool do_atomic_update) OVERRIDE
+ bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) override
REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
+ // Sweep unmarked objects to complete the garbage collection. Full GCs sweep
+ // all allocation spaces (except the region space). Sticky-bit GCs just sweep
+ // a subset of the heap.
void Sweep(bool swap_bitmaps)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+ // Sweep only the objects recorded in the given allocation stack (used by sticky-bit GCs).
+ void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
void SweepLargeObjects(bool swap_bitmaps)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
void MarkZygoteLargeObjects()
@@ -282,7 +294,7 @@ class ConcurrentCopying : public GarbageCollector {
mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
+ void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
// Set the read barrier mark entrypoints to non-null.
void ActivateReadBarrierEntrypoints();
@@ -347,6 +359,12 @@ class ConcurrentCopying : public GarbageCollector {
Atomic<uint64_t> cumulative_bytes_moved_;
Atomic<uint64_t> cumulative_objects_moved_;
+ // Generational ("sticky") collection: only trace through dirty objects in the region space.
+ const bool young_gen_;
+ // If true, the GC thread is done scanning marked objects on dirty and aged
+ // cards (see ConcurrentCopying::MarkingPhase).
+ Atomic<bool> done_scanning_;
+
// The skipped blocks are memory blocks/chunks that were copies of
// objects that were unused due to lost races (CAS failures) at
// object copy/forward pointer install. They are reused.
@@ -381,6 +399,11 @@ class ConcurrentCopying : public GarbageCollector {
// ObjPtr since the GC may transition to suspended and runnable between phases.
mirror::Class* java_lang_Object_;
+ // Sweep array free buffer, used by SweepArray to sweep the spaces more
+ // efficiently by batching dead objects into chunks before freeing them (see
+ // ConcurrentCopying::SweepArray).
+ MemMap sweep_array_free_buffer_mem_map_;
+
class ActivateReadBarrierEntrypointsCallback;
class ActivateReadBarrierEntrypointsCheckpoint;
class AssertToSpaceInvariantFieldVisitor;
@@ -394,7 +417,7 @@ class ConcurrentCopying : public GarbageCollector {
template <bool kConcurrent> class GrayImmuneObjectVisitor;
class ImmuneSpaceScanObjVisitor;
class LostCopyVisitor;
- class RefFieldsVisitor;
+ template <bool kNoUnEvac> class RefFieldsVisitor;
class RevokeThreadLocalMarkStackCheckpoint;
class ScopedGcGraysImmuneObjects;
class ThreadFlipVisitor;
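
The new `kNoUnEvac` template parameter threads a compile-time decision through Mark(), Scan(), Process() and RefFieldsVisitor so the young-generation fast path pays no runtime branch. The diff guards each instantiation with a DCHECK tying the flag to the generational build option; a sketch of the same technique, with a static_assert as the compile-time analogue:

    constexpr bool kFeatureEnabled = true;  // stand-in for the generational flag

    template <bool kNoUnEvac>
    int Process(int v) {
      static_assert(!kNoUnEvac || kFeatureEnabled,
                    "kNoUnEvac requires the feature to be compiled in");
      // kNoUnEvac is a compile-time constant, so this branch folds away in
      // each instantiation; the hot path carries no runtime check.
      if (kNoUnEvac) {
        return v;      // skip the unevac-from-space handling
      }
      return v + 1;    // full handling
    }

    int Dispatch(int v, bool young_gen) {
      // The runtime choice is made once, at the dispatch point:
      return young_gen ? Process<true>(v) : Process<false>(v);
    }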
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 9767807fb8..7bd87bda7a 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -40,22 +40,22 @@ class DummyOatFile : public OatFile {
class DummyImageSpace : public space::ImageSpace {
public:
- DummyImageSpace(MemMap* map,
- accounting::ContinuousSpaceBitmap* live_bitmap,
+ DummyImageSpace(MemMap&& map,
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
std::unique_ptr<DummyOatFile>&& oat_file,
- std::unique_ptr<MemMap>&& oat_map)
+ MemMap&& oat_map)
: ImageSpace("DummyImageSpace",
/*image_location*/"",
- map,
- live_bitmap,
- map->End()),
+ std::move(map),
+ std::move(live_bitmap),
+ map.End()),
oat_map_(std::move(oat_map)) {
oat_file_ = std::move(oat_file);
oat_file_non_owned_ = oat_file_.get();
}
private:
- std::unique_ptr<MemMap> oat_map_;
+ MemMap oat_map_;
};
class ImmuneSpacesTest : public CommonRuntimeTest {
@@ -83,39 +83,37 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
uint8_t* oat_begin,
size_t oat_size) {
std::string error_str;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("DummyImageSpace",
- image_begin,
- image_size,
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- /*reuse*/false,
- &error_str));
- if (map == nullptr) {
+ MemMap map = MemMap::MapAnonymous("DummyImageSpace",
+ image_begin,
+ image_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ &error_str);
+ if (!map.IsValid()) {
LOG(ERROR) << error_str;
return nullptr;
}
CHECK(!live_bitmaps_.empty());
std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
live_bitmaps_.pop_back();
- std::unique_ptr<MemMap> oat_map(MemMap::MapAnonymous("OatMap",
- oat_begin,
- oat_size,
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- /*reuse*/false,
- &error_str));
- if (oat_map == nullptr) {
+ MemMap oat_map = MemMap::MapAnonymous("OatMap",
+ oat_begin,
+ oat_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ &error_str);
+ if (!oat_map.IsValid()) {
LOG(ERROR) << error_str;
return nullptr;
}
- std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map->Begin(), oat_map->End()));
+ std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End()));
// Create image header.
ImageSection sections[ImageHeader::kSectionCount];
- new (map->Begin()) ImageHeader(
- /*image_begin*/PointerToLowMemUInt32(map->Begin()),
- /*image_size*/map->Size(),
+ new (map.Begin()) ImageHeader(
+ /*image_begin*/PointerToLowMemUInt32(map.Begin()),
+ /*image_size*/map.Size(),
sections,
- /*image_roots*/PointerToLowMemUInt32(map->Begin()) + 1,
+ /*image_roots*/PointerToLowMemUInt32(map.Begin()) + 1,
/*oat_checksum*/0u,
// The oat file data in the header is always right after the image space.
/*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
@@ -131,8 +129,8 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
/*is_pic*/false,
ImageHeader::kStorageModeUncompressed,
/*storage_size*/0u);
- return new DummyImageSpace(map.release(),
- live_bitmap.release(),
+ return new DummyImageSpace(std::move(map),
+ std::move(live_bitmap),
std::move(oat_file),
std::move(oat_map));
}
@@ -141,18 +139,17 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
// returned address.
static uint8_t* GetContinuousMemoryRegion(size_t size) {
std::string error_str;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("reserve",
- nullptr,
- size,
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- /*reuse*/false,
- &error_str));
- if (map == nullptr) {
+ MemMap map = MemMap::MapAnonymous("reserve",
+ /* addr */ nullptr,
+ size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ true,
+ &error_str);
+ if (!map.IsValid()) {
LOG(ERROR) << "Failed to allocate memory region " << error_str;
return nullptr;
}
- return map->Begin();
+ return map.Begin();
}
private:
@@ -170,19 +167,19 @@ class DummySpace : public space::ContinuousSpace {
end,
/*limit*/end) {}
- space::SpaceType GetType() const OVERRIDE {
+ space::SpaceType GetType() const override {
return space::kSpaceTypeMallocSpace;
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return nullptr;
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return nullptr;
}
};
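
One subtlety in the DummyImageSpace constructor above: `map.End()` is read in the same initializer that passes `std::move(map)`. This is well-defined as long as the base-class parameter is `MemMap&&` (as elsewhere in this change): `std::move` is only a cast, binding the reference does not modify `map`, and the actual move happens inside the base constructor after all arguments are evaluated. A reduced illustration:

    #include <utility>

    struct Map {
      int* end = nullptr;
      int* End() const { return end; }
    };

    struct Base {
      // Map&& binds a reference; nothing is moved until the init-list does it.
      Base(Map&& m, int* cached_end) : m_(std::move(m)), cached_end_(cached_end) {}
      Map m_;
      int* cached_end_;
    };

    struct Derived : Base {
      explicit Derived(Map&& m)
          // Safe because Base takes Map&&. If Base took Map by value, the
          // unspecified argument evaluation order could make m.End() read a
          // moved-from object.
          : Base(std::move(m), m.End()) {}
    };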
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 23359640fe..23b2719bf0 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -103,12 +103,15 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
is_concurrent_(is_concurrent),
live_stack_freeze_size_(0) {
std::string error_msg;
- MemMap* mem_map = MemMap::MapAnonymous(
- "mark sweep sweep array free buffer", nullptr,
+ sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
+ "mark sweep sweep array free buffer",
+ /* addr */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
- PROT_READ | PROT_WRITE, false, false, &error_msg);
- CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
- sweep_array_free_buffer_mem_map_.reset(mem_map);
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ CHECK(sweep_array_free_buffer_mem_map_.IsValid())
+ << "Couldn't allocate sweep array free buffer: " << error_msg;
}
void MarkSweep::InitializePhase() {
@@ -443,7 +446,7 @@ class MarkSweep::MarkObjectSlowPath {
!large_object_space->Contains(obj)))) {
// Lowest priority logging first:
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
// Buffer the output in the string stream since it is more important than the stack traces
// and we want it to have log priority. The stack traces are printed from Runtime::Abort
// which is called from LOG(FATAL) but before the abort message.
@@ -575,7 +578,7 @@ class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor {
public:
explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
- void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+ void VisitRoot(mirror::Object* root, const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
}
@@ -604,7 +607,7 @@ class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
public:
explicit VerifyRootVisitor(std::ostream& os) : os_(os) {}
- void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+ void VisitRoot(mirror::Object* root, const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// See if the root is on any space bitmap.
auto* heap = Runtime::Current()->GetHeap();
@@ -1106,8 +1109,7 @@ class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor {
public:
explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
- virtual mirror::Object* IsMarked(mirror::Object* obj)
- OVERRIDE
+ mirror::Object* IsMarked(mirror::Object* obj) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
mark_sweep_->VerifyIsLive(obj);
return obj;
@@ -1141,7 +1143,7 @@ class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
@@ -1151,14 +1153,14 @@ class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
}
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
ScopedTrace trace("Marking thread roots");
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* const self = Thread::Current();
@@ -1207,7 +1209,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Thread* self = Thread::Current();
mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
- sweep_array_free_buffer_mem_map_->BaseBegin());
+ sweep_array_free_buffer_mem_map_.BaseBegin());
size_t chunk_free_pos = 0;
ObjectBytePair freed;
ObjectBytePair freed_los;
@@ -1300,7 +1302,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
t2.NewTiming("ResetStack");
allocations->Reset();
}
- sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
+ sweep_array_free_buffer_mem_map_.MadviseDontNeedAndZero();
}
void MarkSweep::Sweep(bool swap_bitmaps) {
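Reviewer note on the MadviseDontNeedAndZero() call a few lines up: for a private anonymous mapping it reduces to madvise(MADV_DONTNEED), which lets the kernel reclaim the physical pages; the next touch faults in zero-filled pages, so the scratch buffer costs no resident memory between collections. A standalone equivalent, assuming Linux semantics:

#include <sys/mman.h>
#include <cstddef>

// Release the physical pages backing [begin, begin + size). The range stays
// mapped, and for private anonymous memory any later read returns zeroes.
void ReleasePages(void* begin, size_t size) {
  madvise(begin, size, MADV_DONTNEED);
}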
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 5e0fe0607f..ff9597cfe7 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -57,7 +57,7 @@ class MarkSweep : public GarbageCollector {
~MarkSweep() {}
- virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
+ void RunPhases() override REQUIRES(!mark_stack_lock_);
void InitializePhase();
void MarkingPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
@@ -72,11 +72,11 @@ class MarkSweep : public GarbageCollector {
return is_concurrent_;
}
- virtual GcType GetGcType() const OVERRIDE {
+ GcType GetGcType() const override {
return kGcTypeFull;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ CollectorType GetCollectorType() const override {
return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
}
@@ -187,25 +187,25 @@ class MarkSweep : public GarbageCollector {
void VerifyIsLive(const mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update) OVERRIDE
+ bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
- size_t count,
- const RootInfo& info) OVERRIDE
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Marks an object.
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ mirror::Object* MarkObject(mirror::Object* obj) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -215,8 +215,8 @@ class MarkSweep : public GarbageCollector {
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
- bool do_atomic_update) OVERRIDE
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -231,7 +231,7 @@ class MarkSweep : public GarbageCollector {
protected:
// Returns object if the object is marked in the heap bitmap, otherwise null.
- virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void MarkObjectNonNull(mirror::Object* obj,
@@ -278,8 +278,7 @@ class MarkSweep : public GarbageCollector {
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void ProcessMarkStack()
- OVERRIDE
+ void ProcessMarkStack() override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -351,7 +350,10 @@ class MarkSweep : public GarbageCollector {
// Verification.
size_t live_stack_freeze_size_;
- std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;
+ // Sweep array free buffer, used to sweep the spaces based on an array more
+ // efficiently, by recording dead objects to be freed in batches (see
+ // MarkSweep::SweepArray).
+ MemMap sweep_array_free_buffer_mem_map_;
private:
class CardScanTask;
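Reviewer note: the new comment on sweep_array_free_buffer_mem_map_ describes batching. Roughly, SweepArray copies dead object pointers into the page-backed buffer and frees one full chunk at a time, amortizing per-free overhead. A simplified sketch of that control flow (kChunkSize and BulkFree are stand-ins, not the actual ART names):

#include <cstddef>

constexpr size_t kChunkSize = 256;  // stand-in for kSweepArrayChunkFreeSize

template <typename Object, typename Allocator>
void SweepDeadObjects(Object** dead, size_t count, Allocator* allocator,
                      Object** chunk_buffer /* page-backed scratch */) {
  size_t chunk_pos = 0;
  for (size_t i = 0; i < count; ++i) {
    chunk_buffer[chunk_pos++] = dead[i];
    if (chunk_pos == kChunkSize) {
      // Buffer full: release one batch in a single allocator call.
      allocator->BulkFree(chunk_buffer, chunk_pos);
      chunk_pos = 0;
    }
  }
  if (chunk_pos != 0) {
    allocator->BulkFree(chunk_buffer, chunk_pos);  // flush the partial batch
  }
}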
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 8b0d3ddf42..76c44a35bb 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -26,7 +26,7 @@ namespace collector {
class PartialMarkSweep : public MarkSweep {
public:
// Virtual as overridden by StickyMarkSweep.
- virtual GcType GetGcType() const OVERRIDE {
+ GcType GetGcType() const override {
return kGcTypePartial;
}
@@ -37,7 +37,7 @@ class PartialMarkSweep : public MarkSweep {
// Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
// collections, i.e. the Zygote space. Also mark this space as immune. Virtual as overridden by
// StickyMarkSweep.
- virtual void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index d1d45c8df6..bb42be6cd4 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -63,7 +63,7 @@ class SemiSpace : public GarbageCollector {
~SemiSpace() {}
- virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+ void RunPhases() override NO_THREAD_SAFETY_ANALYSIS;
virtual void InitializePhase();
virtual void MarkingPhase() REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
@@ -72,10 +72,10 @@ class SemiSpace : public GarbageCollector {
virtual void FinishPhase() REQUIRES(Locks::mutator_lock_);
void MarkReachableObjects()
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual GcType GetGcType() const OVERRIDE {
+ GcType GetGcType() const override {
return kGcTypePartial;
}
- virtual CollectorType GetCollectorType() const OVERRIDE {
+ CollectorType GetCollectorType() const override {
return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
}
@@ -106,11 +106,11 @@ class SemiSpace : public GarbageCollector {
void MarkObjectIfNotInToSpace(CompressedReferenceType* obj_ptr)
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
+ mirror::Object* MarkObject(mirror::Object* root) override
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
- bool do_atomic_update) OVERRIDE
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
+ bool do_atomic_update) override
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void ScanObject(mirror::Object* obj)
@@ -145,11 +145,12 @@ class SemiSpace : public GarbageCollector {
void SweepSystemWeaks()
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info) OVERRIDE
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info) override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
@@ -162,12 +163,12 @@ class SemiSpace : public GarbageCollector {
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
- virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
- virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
- bool do_atomic_update) OVERRIDE
+ bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ bool do_atomic_update) override
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
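Reviewer note: the comment above pins down the IsMarked contract for a copying collector, namely null for an unmarked object and otherwise the forwarding address, which is the object itself when it was not moved. A toy model of that contract (types are simplified stand-ins):

struct Object {
  Object* forwarding = nullptr;  // set during evacuation if the object moved
  bool marked = false;
};

Object* IsMarked(Object* obj) {
  if (!obj->marked) {
    return nullptr;  // unreachable this cycle
  }
  // Moved objects report their new location; unmoved objects report themselves.
  return obj->forwarding != nullptr ? obj->forwarding : obj;
}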
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 45f912f63a..f65413d153 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -24,17 +24,16 @@ namespace art {
namespace gc {
namespace collector {
-class StickyMarkSweep FINAL : public PartialMarkSweep {
+class StickyMarkSweep final : public PartialMarkSweep {
public:
- GcType GetGcType() const OVERRIDE {
+ GcType GetGcType() const override {
return kGcTypeSticky;
}
StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
~StickyMarkSweep() {}
- virtual void MarkConcurrentRoots(VisitRootFlags flags)
- OVERRIDE
+ void MarkConcurrentRoots(VisitRootFlags flags) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -42,15 +41,15 @@ class StickyMarkSweep FINAL : public PartialMarkSweep {
protected:
// Bind the live bits to the mark bits of bitmaps for all spaces; all spaces other than the
// alloc space will be marked as immune.
- void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
void MarkReachableObjects()
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void Sweep(bool swap_bitmaps)
- OVERRIDE
+ override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
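Reviewer note: the mechanical OVERRIDE/FINAL edits throughout these files replace ART's compatibility macros, which by this point expanded to the plain keywords, with override and final spelled directly; the redundant leading virtual is dropped at the same time, since override already implies it. Before/after in miniature:

struct Collector {
  virtual ~Collector() {}
  virtual void RunPhases() = 0;
};

// Before: class Sticky FINAL : public Collector {
//           virtual void RunPhases() OVERRIDE { /* ... */ }
//         };
struct Sticky final : public Collector {
  void RunPhases() override {}  // `override` implies `virtual`
};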
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 222be142a1..589e9a4826 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -102,7 +102,8 @@ static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
-static constexpr double kStickyGcThroughputAdjustment = 1.0;
+static constexpr double kStickyGcThroughputAdjustment =
+ kEnableGenerationalConcurrentCopyingCollection ? 0.5 : 1.0;
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack, these are only needed if the
@@ -260,6 +261,8 @@ Heap::Heap(size_t initial_size,
verify_object_mode_(kVerifyObjectModeDisabled),
disable_moving_gc_count_(0),
semi_space_collector_(nullptr),
+ active_concurrent_copying_collector_(nullptr),
+ young_concurrent_copying_collector_(nullptr),
concurrent_copying_collector_(nullptr),
is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
use_tlab_(use_tlab),
@@ -309,24 +312,63 @@ Heap::Heap(size_t initial_size,
ChangeCollector(desired_collector_type_);
live_bitmap_.reset(new accounting::HeapBitmap(this));
mark_bitmap_.reset(new accounting::HeapBitmap(this));
- // Requested begin for the alloc space, to follow the mapped image and oat files
- uint8_t* requested_alloc_space_begin = nullptr;
- if (foreground_collector_type_ == kCollectorTypeCC) {
- // Need to use a low address so that we can allocate a contiguous 2 * Xmx space when there's no
- // image (dex2oat for target).
- requested_alloc_space_begin = kPreferredAllocSpaceBegin;
+
+ // We don't have hspace compaction enabled with GSS or CC.
+ if (foreground_collector_type_ == kCollectorTypeGSS ||
+ foreground_collector_type_ == kCollectorTypeCC) {
+ use_homogeneous_space_compaction_for_oom_ = false;
+ }
+ bool support_homogeneous_space_compaction =
+ background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
+ use_homogeneous_space_compaction_for_oom_;
+ // We may use the main space for the non moving space if we don't need to compact from the
+ // main space.
+ // This is not the case if we support homogeneous compaction or have a moving background
+ // collector type.
+ bool separate_non_moving_space = is_zygote ||
+ support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
+ IsMovingGc(background_collector_type_);
+ if (foreground_collector_type_ == kCollectorTypeGSS) {
+ separate_non_moving_space = false;
}
+ // Requested begin for the alloc space, to follow the mapped image and oat files
+ uint8_t* request_begin = nullptr;
+ // Calculate the extra space required after the boot image, see allocations below.
+ size_t heap_reservation_size = separate_non_moving_space
+ ? non_moving_space_capacity
+ : ((is_zygote && foreground_collector_type_ != kCollectorTypeCC) ? capacity_ : 0u);
+ heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
// Load image space(s).
std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
+ MemMap heap_reservation;
if (space::ImageSpace::LoadBootImage(image_file_name,
image_instruction_set,
+ heap_reservation_size,
&boot_image_spaces,
- &requested_alloc_space_begin)) {
+ &heap_reservation)) {
+ DCHECK_EQ(heap_reservation_size, heap_reservation.IsValid() ? heap_reservation.Size() : 0u);
+ DCHECK(!boot_image_spaces.empty());
+ request_begin = boot_image_spaces.back()->GetImageHeader().GetOatFileEnd();
+ DCHECK(!heap_reservation.IsValid() || request_begin == heap_reservation.Begin())
+ << "request_begin=" << static_cast<const void*>(request_begin)
+ << " heap_reservation.Begin()=" << static_cast<const void*>(heap_reservation.Begin());
for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
boot_image_spaces_.push_back(space.get());
AddSpace(space.release());
}
+ } else {
+ if (foreground_collector_type_ == kCollectorTypeCC) {
+ // Need to use a low address so that we can allocate a contiguous 2 * Xmx space
+ // when there's no image (dex2oat for target).
+ request_begin = kPreferredAllocSpaceBegin;
+ }
+ // Gross hack to make dex2oat deterministic.
+ if (foreground_collector_type_ == kCollectorTypeMS && Runtime::Current()->IsAotCompiler()) {
+ // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
+ // b/26849108
+ request_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
+ }
}
/*
@@ -342,41 +384,12 @@ Heap::Heap(size_t initial_size,
+-main alloc space2 / bump space 2 (capacity_)+-
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
*/
- // We don't have hspace compaction enabled with GSS or CC.
- if (foreground_collector_type_ == kCollectorTypeGSS ||
- foreground_collector_type_ == kCollectorTypeCC) {
- use_homogeneous_space_compaction_for_oom_ = false;
- }
- bool support_homogeneous_space_compaction =
- background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
- use_homogeneous_space_compaction_for_oom_;
- // We may use the same space the main space for the non moving space if we don't need to compact
- // from the main space.
- // This is not the case if we support homogeneous compaction or have a moving background
- // collector type.
- bool separate_non_moving_space = is_zygote ||
- support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
- IsMovingGc(background_collector_type_);
- if (foreground_collector_type_ == kCollectorTypeGSS) {
- separate_non_moving_space = false;
- }
- std::unique_ptr<MemMap> main_mem_map_1;
- std::unique_ptr<MemMap> main_mem_map_2;
- // Gross hack to make dex2oat deterministic.
- if (foreground_collector_type_ == kCollectorTypeMS &&
- requested_alloc_space_begin == nullptr &&
- Runtime::Current()->IsAotCompiler()) {
- // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
- // b/26849108
- requested_alloc_space_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
- }
- uint8_t* request_begin = requested_alloc_space_begin;
- if (request_begin != nullptr && separate_non_moving_space) {
- request_begin += non_moving_space_capacity;
- }
+ MemMap main_mem_map_1;
+ MemMap main_mem_map_2;
+
std::string error_str;
- std::unique_ptr<MemMap> non_moving_space_mem_map;
+ MemMap non_moving_space_mem_map;
if (separate_non_moving_space) {
ScopedTrace trace2("Create separate non moving space");
// If we are the zygote, the non moving space becomes the zygote space when we run
@@ -385,11 +398,16 @@ Heap::Heap(size_t initial_size,
const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
// Reserve the non moving mem map before the other two since it needs to be at a specific
// address.
- non_moving_space_mem_map.reset(MapAnonymousPreferredAddress(space_name,
- requested_alloc_space_begin,
- non_moving_space_capacity,
- &error_str));
- CHECK(non_moving_space_mem_map != nullptr) << error_str;
+ DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
+ if (heap_reservation.IsValid()) {
+ non_moving_space_mem_map = heap_reservation.RemapAtEnd(
+ heap_reservation.Begin(), space_name, PROT_READ | PROT_WRITE, &error_str);
+ } else {
+ non_moving_space_mem_map = MapAnonymousPreferredAddress(
+ space_name, request_begin, non_moving_space_capacity, &error_str);
+ }
+ CHECK(non_moving_space_mem_map.IsValid()) << error_str;
+ DCHECK(!heap_reservation.IsValid());
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
}
@@ -397,27 +415,33 @@ Heap::Heap(size_t initial_size,
if (foreground_collector_type_ != kCollectorTypeCC) {
ScopedTrace trace2("Create main mem map");
if (separate_non_moving_space || !is_zygote) {
- main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
- request_begin,
- capacity_,
- &error_str));
+ main_mem_map_1 = MapAnonymousPreferredAddress(
+ kMemMapSpaceName[0], request_begin, capacity_, &error_str);
} else {
// If no separate non-moving space and we are the zygote, the main space must come right
// after the image space to avoid a gap. This is required since we want the zygote space to
// be adjacent to the image space.
- main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
- PROT_READ | PROT_WRITE, true, false,
- &error_str));
- }
- CHECK(main_mem_map_1.get() != nullptr) << error_str;
+ DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
+ main_mem_map_1 = MemMap::MapAnonymous(
+ kMemMapSpaceName[0],
+ request_begin,
+ capacity_,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ heap_reservation.IsValid() ? &heap_reservation : nullptr,
+ &error_str);
+ }
+ CHECK(main_mem_map_1.IsValid()) << error_str;
+ DCHECK(!heap_reservation.IsValid());
}
if (support_homogeneous_space_compaction ||
background_collector_type_ == kCollectorTypeSS ||
foreground_collector_type_ == kCollectorTypeSS) {
ScopedTrace trace2("Create main mem map 2");
- main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
- capacity_, &error_str));
- CHECK(main_mem_map_2.get() != nullptr) << error_str;
+ main_mem_map_2 = MapAnonymousPreferredAddress(
+ kMemMapSpaceName[1], main_mem_map_1.End(), capacity_, &error_str);
+ CHECK(main_mem_map_2.IsValid()) << error_str;
}
// Create the non moving space first so that bitmaps don't take up the address range.
@@ -425,24 +449,27 @@ Heap::Heap(size_t initial_size,
ScopedTrace trace2("Add non moving space");
// Non moving space is always dlmalloc since we currently don't have support for multiple
// active rosalloc spaces.
- const size_t size = non_moving_space_mem_map->Size();
- non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
- non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
- initial_size, size, size, false);
+ const size_t size = non_moving_space_mem_map.Size();
+ non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
+ "zygote / non moving space",
+ kDefaultStartingSize,
+ initial_size,
+ size,
+ size,
+ /* can_move_objects */ false);
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
- << requested_alloc_space_begin;
+ << non_moving_space_mem_map.Begin();
AddSpace(non_moving_space_);
}
// Create other spaces based on whether or not we have a moving GC.
if (foreground_collector_type_ == kCollectorTypeCC) {
CHECK(separate_non_moving_space);
// Reserve twice the capacity, to allow evacuating every region for explicit GCs.
- MemMap* region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName,
- capacity_ * 2,
- request_begin);
- CHECK(region_space_mem_map != nullptr) << "No region space mem map";
- region_space_ = space::RegionSpace::Create(kRegionSpaceName, region_space_mem_map);
+ MemMap region_space_mem_map =
+ space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
+ CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
+ region_space_ = space::RegionSpace::Create(kRegionSpaceName, std::move(region_space_mem_map));
AddSpace(region_space_);
} else if (IsMovingGc(foreground_collector_type_) &&
foreground_collector_type_ != kCollectorTypeGSS) {
@@ -450,16 +477,16 @@ Heap::Heap(size_t initial_size,
// We only create the bump pointer spaces if the foreground collector is a compacting GC.
// TODO: Place bump-pointer spaces somewhere to minimize size of card table.
bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
- main_mem_map_1.release());
+ std::move(main_mem_map_1));
CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
AddSpace(bump_pointer_space_);
temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
- main_mem_map_2.release());
+ std::move(main_mem_map_2));
CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
AddSpace(temp_space_);
CHECK(separate_non_moving_space);
} else {
- CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
+ CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
CHECK(main_space_ != nullptr);
AddSpace(main_space_);
if (!separate_non_moving_space) {
@@ -469,19 +496,23 @@ Heap::Heap(size_t initial_size,
if (foreground_collector_type_ == kCollectorTypeGSS) {
CHECK_EQ(foreground_collector_type_, background_collector_type_);
// Create bump pointer spaces instead of a backup space.
- main_mem_map_2.release();
- bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
- kGSSBumpPointerSpaceCapacity, nullptr);
+ main_mem_map_2.Reset();
+ bump_pointer_space_ = space::BumpPointerSpace::Create(
+ "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
CHECK(bump_pointer_space_ != nullptr);
AddSpace(bump_pointer_space_);
- temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
- kGSSBumpPointerSpaceCapacity, nullptr);
+ temp_space_ = space::BumpPointerSpace::Create(
+ "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
CHECK(temp_space_ != nullptr);
AddSpace(temp_space_);
- } else if (main_mem_map_2.get() != nullptr) {
+ } else if (main_mem_map_2.IsValid()) {
const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
- main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
- growth_limit_, capacity_, name, true));
+ main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
+ initial_size,
+ growth_limit_,
+ capacity_,
+ name,
+ /* can_move_objects */ true));
CHECK(main_space_backup_.get() != nullptr);
// Add the space so its accounted for in the heap_begin and heap_end.
AddSpace(main_space_backup_.get());
@@ -596,11 +627,26 @@ Heap::Heap(size_t initial_size,
}
if (MayUseCollector(kCollectorTypeCC)) {
concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
+ /*young_gen*/false,
"",
measure_gc_performance);
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
+ this,
+ /*young_gen*/true,
+ "young",
+ measure_gc_performance);
+ }
+ active_concurrent_copying_collector_ = concurrent_copying_collector_;
DCHECK(region_space_ != nullptr);
concurrent_copying_collector_->SetRegionSpace(region_space_);
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ young_concurrent_copying_collector_->SetRegionSpace(region_space_);
+ }
garbage_collectors_.push_back(concurrent_copying_collector_);
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ garbage_collectors_.push_back(young_concurrent_copying_collector_);
+ }
}
}
if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
@@ -615,10 +661,10 @@ Heap::Heap(size_t initial_size,
first_space = space;
}
}
- bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
+ bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
if (!no_gap) {
PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
- MemMap::DumpMaps(LOG_STREAM(ERROR), true);
+ MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse */ true);
LOG(FATAL) << "There's a gap between the image space and the non-moving space";
}
}
@@ -634,14 +680,18 @@ Heap::Heap(size_t initial_size,
}
}
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
- uint8_t* request_begin,
- size_t capacity,
- std::string* out_error_str) {
+MemMap Heap::MapAnonymousPreferredAddress(const char* name,
+ uint8_t* request_begin,
+ size_t capacity,
+ std::string* out_error_str) {
while (true) {
- MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
- PROT_READ | PROT_WRITE, true, false, out_error_str);
- if (map != nullptr || request_begin == nullptr) {
+ MemMap map = MemMap::MapAnonymous(name,
+ request_begin,
+ capacity,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ out_error_str);
+ if (map.IsValid() || request_begin == nullptr) {
return map;
}
// Retry a second time with no specified request begin.
@@ -653,7 +703,7 @@ bool Heap::MayUseCollector(CollectorType type) const {
return foreground_collector_type_ == type || background_collector_type_ == type;
}
-space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
+space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
size_t initial_size,
size_t growth_limit,
size_t capacity,
@@ -662,12 +712,21 @@ space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
space::MallocSpace* malloc_space = nullptr;
if (kUseRosAlloc) {
// Create rosalloc space.
- malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
- initial_size, growth_limit, capacity,
- low_memory_mode_, can_move_objects);
+ malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map),
+ name,
+ kDefaultStartingSize,
+ initial_size,
+ growth_limit,
+ capacity,
+ low_memory_mode_,
+ can_move_objects);
} else {
- malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
- initial_size, growth_limit, capacity,
+ malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map),
+ name,
+ kDefaultStartingSize,
+ initial_size,
+ growth_limit,
+ capacity,
can_move_objects);
}
if (collector::SemiSpace::kUseRememberedSet) {
@@ -681,7 +740,9 @@ space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
return malloc_space;
}
-void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
+void Heap::CreateMainMallocSpace(MemMap&& mem_map,
+ size_t initial_size,
+ size_t growth_limit,
size_t capacity) {
// Is background compaction enabled?
bool can_move_objects = IsMovingGc(background_collector_type_) !=
@@ -700,7 +761,10 @@ void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t gr
RemoveRememberedSet(main_space_);
}
const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
- main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
+ main_space_ = CreateMallocSpaceFromMemMap(std::move(mem_map),
+ initial_size,
+ growth_limit,
+ capacity,
+ name,
can_move_objects);
SetSpaceAsDefault(main_space_);
VLOG(heap) << "Created main space " << main_space_;
@@ -1285,7 +1349,7 @@ class TrimIndirectReferenceTableClosure : public Closure {
public:
explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
}
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
thread->GetJniEnv()->TrimLocals();
// If thread is a running mutator, then act on behalf of the trim thread.
// See the code in ThreadList::RunCheckpoint.
@@ -2014,17 +2078,17 @@ void Heap::TransitionCollector(CollectorType collector_type) {
if (!IsMovingGc(collector_type_)) {
// Create the bump pointer space from the backup space.
CHECK(main_space_backup_ != nullptr);
- std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
+ MemMap mem_map = main_space_backup_->ReleaseMemMap();
// We are transitioning from non moving GC -> moving GC; since we copied from the bump
// pointer space last transition, it will be protected.
- CHECK(mem_map != nullptr);
- mem_map->Protect(PROT_READ | PROT_WRITE);
+ CHECK(mem_map.IsValid());
+ mem_map.Protect(PROT_READ | PROT_WRITE);
bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
- mem_map.release());
+ std::move(mem_map));
AddSpace(bump_pointer_space_);
collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
// Use the now empty main space mem map for the bump pointer temp space.
- mem_map.reset(main_space_->ReleaseMemMap());
+ mem_map = main_space_->ReleaseMemMap();
// Unset the pointers just in case.
if (dlmalloc_space_ == main_space_) {
dlmalloc_space_ = nullptr;
@@ -2040,7 +2104,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
RemoveRememberedSet(main_space_backup_.get());
main_space_backup_.reset(nullptr); // Deletes the space.
temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
- mem_map.release());
+ std::move(mem_map));
AddSpace(temp_space_);
}
break;
@@ -2050,37 +2114,35 @@ void Heap::TransitionCollector(CollectorType collector_type) {
case kCollectorTypeCMS: {
if (IsMovingGc(collector_type_)) {
CHECK(temp_space_ != nullptr);
- std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
+ MemMap mem_map = temp_space_->ReleaseMemMap();
RemoveSpace(temp_space_);
temp_space_ = nullptr;
- mem_map->Protect(PROT_READ | PROT_WRITE);
- CreateMainMallocSpace(mem_map.get(),
+ mem_map.Protect(PROT_READ | PROT_WRITE);
+ CreateMainMallocSpace(std::move(mem_map),
kDefaultInitialSize,
- std::min(mem_map->Size(), growth_limit_),
- mem_map->Size());
- mem_map.release();
+ std::min(mem_map.Size(), growth_limit_),
+ mem_map.Size());
// Compact to the main space from the bump pointer space; we don't need to swap semispaces.
AddSpace(main_space_);
collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
- mem_map.reset(bump_pointer_space_->ReleaseMemMap());
+ mem_map = bump_pointer_space_->ReleaseMemMap();
RemoveSpace(bump_pointer_space_);
bump_pointer_space_ = nullptr;
const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
// Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
if (kIsDebugBuild && kUseRosAlloc) {
- mem_map->Protect(PROT_READ | PROT_WRITE);
+ mem_map.Protect(PROT_READ | PROT_WRITE);
}
main_space_backup_.reset(CreateMallocSpaceFromMemMap(
- mem_map.get(),
+ std::move(mem_map),
kDefaultInitialSize,
- std::min(mem_map->Size(), growth_limit_),
- mem_map->Size(),
+ std::min(mem_map.Size(), growth_limit_),
+ mem_map.Size(),
name,
true));
if (kIsDebugBuild && kUseRosAlloc) {
- mem_map->Protect(PROT_NONE);
+ main_space_backup_->GetMemMap()->Protect(PROT_NONE);
}
- mem_map.release();
}
break;
}
@@ -2122,6 +2184,9 @@ void Heap::ChangeCollector(CollectorType collector_type) {
gc_plan_.clear();
switch (collector_type_) {
case kCollectorTypeCC: {
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ gc_plan_.push_back(collector::kGcTypeSticky);
+ }
gc_plan_.push_back(collector::kGcTypeFull);
if (use_tlab_) {
ChangeAllocator(kAllocatorTypeRegionTLAB);
@@ -2161,7 +2226,8 @@ void Heap::ChangeCollector(CollectorType collector_type) {
}
if (IsGcConcurrent()) {
concurrent_start_bytes_ =
- std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
+ std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) -
+ kMinConcurrentRemainingBytes;
} else {
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
@@ -2169,7 +2235,7 @@ void Heap::ChangeCollector(CollectorType collector_type) {
}
// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
-class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
+class ZygoteCompactingCollector final : public collector::SemiSpace {
public:
ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
: SemiSpace(heap, false, "zygote collector"),
@@ -2325,11 +2391,13 @@ void Heap::PreZygoteFork() {
if (reset_main_space) {
main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
- MemMap* mem_map = main_space_->ReleaseMemMap();
+ MemMap mem_map = main_space_->ReleaseMemMap();
RemoveSpace(main_space_);
space::Space* old_main_space = main_space_;
- CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
- mem_map->Size());
+ CreateMainMallocSpace(std::move(mem_map),
+ kDefaultInitialSize,
+ std::min(mem_map.Size(), growth_limit_),
+ mem_map.Size());
delete old_main_space;
AddSpace(main_space_);
} else {
@@ -2569,12 +2637,19 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
collector = semi_space_collector_;
break;
case kCollectorTypeCC:
- collector = concurrent_copying_collector_;
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ // TODO: Other threads must do the flip checkpoint before they start poking at
+ // active_concurrent_copying_collector_. So there should be no concurrent access here.
+ active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ?
+ young_concurrent_copying_collector_ : concurrent_copying_collector_;
+ active_concurrent_copying_collector_->SetRegionSpace(region_space_);
+ }
+ collector = active_concurrent_copying_collector_;
break;
default:
LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
}
- if (collector != concurrent_copying_collector_) {
+ if (collector != active_concurrent_copying_collector_) {
temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
if (kIsDebugBuild) {
// Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
@@ -2716,7 +2791,7 @@ class RootMatchesObjectVisitor : public SingleRootVisitor {
explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
void VisitRoot(mirror::Object* root, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == obj_) {
LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
}
@@ -2773,7 +2848,7 @@ class VerifyReferenceVisitor : public SingleRootVisitor {
root->AsMirrorPtr(), RootInfo(kRootVMInternal));
}
- virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
+ void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == nullptr) {
LOG(ERROR) << "Root is null with info " << root_info.GetType();
@@ -3206,10 +3281,10 @@ void Heap::ProcessCards(TimingLogger* timings,
}
struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
- virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
+ mirror::Object* MarkObject(mirror::Object* obj) override {
return obj;
}
- virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
}
};
@@ -3437,7 +3512,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
uint64_t target_size;
collector::GcType gc_type = collector_ran->GetGcType();
// Use the multiplier to grow more for foreground.
const double multiplier = HeapGrowthMultiplier();
const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
if (gc_type != collector::kGcTypeSticky) {
@@ -3453,6 +3529,12 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
collector::GcType non_sticky_gc_type = NonStickyGcType();
// Find what the next non sticky collector will be.
collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ if (non_sticky_collector == nullptr) {
+ non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial);
+ }
+ CHECK(non_sticky_collector != nullptr);
+ }
// If the throughput of the current sticky GC >= throughput of the non sticky collector, then
// do another sticky collection next.
// We also check that the bytes allocated aren't over the footprint limit in order to prevent a
@@ -3573,7 +3655,7 @@ class Heap::ConcurrentGCTask : public HeapTask {
public:
ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
: HeapTask(target_time), cause_(cause), force_full_(force_full) {}
- virtual void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->ConcurrentGC(self, cause_, force_full_);
heap->ClearConcurrentGCRequest();
@@ -3631,7 +3713,7 @@ class Heap::CollectorTransitionTask : public HeapTask {
public:
explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
- virtual void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->DoPendingCollectorTransition();
heap->ClearPendingCollectorTransition(self);
@@ -3673,7 +3755,7 @@ void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint
class Heap::HeapTrimTask : public HeapTask {
public:
explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
- virtual void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->Trim(self);
heap->ClearPendingTrim(self);
@@ -4116,7 +4198,7 @@ void Heap::VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, si
class Heap::TriggerPostForkCCGcTask : public HeapTask {
public:
explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
// Trigger a GC, if not already done. The first GC after fork, whenever it
// takes place, will adjust the thresholds to normal levels.
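Reviewer note: MapAnonymousPreferredAddress, rewritten above to return MemMap by value, keeps its retry shape: try the preferred address once, and on failure fall back to letting the kernel choose. A standalone model against raw mmap(2); the real MemMap additionally verifies that the granted address matches the request, so the loop below models only the control flow:

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

void* MapPreferred(uint8_t* request_begin, size_t capacity) {
  while (true) {
    void* addr = mmap(request_begin, capacity, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr != MAP_FAILED || request_begin == nullptr) {
      return addr == MAP_FAILED ? nullptr : addr;
    }
    request_begin = nullptr;  // retry exactly once, anywhere in address space
  }
}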
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5c34c56e09..90bac20e8e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -709,8 +709,15 @@ class Heap {
return zygote_space_ != nullptr;
}
+ // Returns the active concurrent copying collector.
collector::ConcurrentCopying* ConcurrentCopyingCollector() {
- return concurrent_copying_collector_;
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ DCHECK((active_concurrent_copying_collector_ == concurrent_copying_collector_) ||
+ (active_concurrent_copying_collector_ == young_concurrent_copying_collector_));
+ } else {
+ DCHECK_EQ(active_concurrent_copying_collector_, concurrent_copying_collector_);
+ }
+ return active_concurrent_copying_collector_;
}
CollectorType CurrentCollectorType() {
@@ -835,8 +842,10 @@ class Heap {
void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
// Create a mem map with a preferred base address.
- static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
- size_t capacity, std::string* out_error_str);
+ static MemMap MapAnonymousPreferredAddress(const char* name,
+ uint8_t* request_begin,
+ size_t capacity,
+ std::string* out_error_str);
bool SupportHSpaceCompaction() const {
// Returns true if we can do hspace compaction
@@ -979,13 +988,13 @@ class Heap {
collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
// Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
- void CreateMainMallocSpace(MemMap* mem_map,
+ void CreateMainMallocSpace(MemMap&& mem_map,
size_t initial_size,
size_t growth_limit,
size_t capacity);
// Create a malloc space based on a mem map. Does not set the space as default.
- space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map,
+ space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap&& mem_map,
size_t initial_size,
size_t growth_limit,
size_t capacity,
@@ -1335,6 +1344,8 @@ class Heap {
std::vector<collector::GarbageCollector*> garbage_collectors_;
collector::SemiSpace* semi_space_collector_;
+ collector::ConcurrentCopying* active_concurrent_copying_collector_;
+ collector::ConcurrentCopying* young_concurrent_copying_collector_;
collector::ConcurrentCopying* concurrent_copying_collector_;
const bool is_running_on_memory_tool_;
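Reviewer note: heap.h now tracks three concurrent copying collector pointers, and CollectGarbageInternal in heap.cc (above) picks between them each cycle. The selection logic, reduced to its essentials with simplified stand-in types:

enum class GcType { kSticky, kPartial, kFull };

struct ConcurrentCopying;  // opaque stand-in for the real collector class

ConcurrentCopying* SelectCcCollector(GcType gc_type,
                                     bool generational_cc,
                                     ConcurrentCopying* young_cc,
                                     ConcurrentCopying* full_cc) {
  if (generational_cc && gc_type == GcType::kSticky) {
    return young_cc;  // minor collection: only young regions are evacuated
  }
  return full_cc;     // full collection; the only option without gen CC
}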
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index c6b2120f5b..7cbad3b523 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -29,23 +29,22 @@ namespace gc {
class HeapTest : public CommonRuntimeTest {
public:
- void SetUp() OVERRIDE {
+ void SetUp() override {
MemMap::Init();
std::string error_msg;
// Reserve the preferred address to force the heap to use another one for testing.
- reserved_.reset(MemMap::MapAnonymous("ReserveMap",
- gc::Heap::kPreferredAllocSpaceBegin,
- 16 * KB,
- PROT_READ,
- /*low_4gb*/ true,
- /*reuse*/ false,
- &error_msg));
- ASSERT_TRUE(reserved_ != nullptr) << error_msg;
+ reserved_ = MemMap::MapAnonymous("ReserveMap",
+ gc::Heap::kPreferredAllocSpaceBegin,
+ 16 * KB,
+ PROT_READ,
+ /*low_4gb*/ true,
+ &error_msg);
+ ASSERT_TRUE(reserved_.IsValid()) << error_msg;
CommonRuntimeTest::SetUp();
}
private:
- std::unique_ptr<MemMap> reserved_;
+ MemMap reserved_;
};
TEST_F(HeapTest, ClearGrowthLimit) {
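Reviewer note: the SetUp above deliberately occupies kPreferredAllocSpaceBegin so the heap under test must fall back to a different address. The same reservation idea in isolation; kPreferredBegin is a made-up stand-in address, and without MAP_FIXED the hint is best-effort, so a real test would assert on the returned address:

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

uint8_t* const kPreferredBegin = reinterpret_cast<uint8_t*>(0x40000000);

// Park a read-only anonymous mapping on the preferred address so that a later
// hinted allocation cannot land there and must pick somewhere else.
void* ReservePreferredRegion(size_t size) {
  void* p = mmap(kPreferredBegin, size, PROT_READ,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? nullptr : p;
}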
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index e95da01d8c..42453f581a 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -28,23 +28,30 @@ BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capac
uint8_t* requested_begin) {
capacity = RoundUp(capacity, kPageSize);
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
- PROT_READ | PROT_WRITE, true, false,
- &error_msg));
- if (mem_map.get() == nullptr) {
+ MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+ requested_begin,
+ capacity,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ &error_msg);
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity) << " with message " << error_msg;
return nullptr;
}
- return new BumpPointerSpace(name, mem_map.release());
+ return new BumpPointerSpace(name, std::move(mem_map));
}
-BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap* mem_map) {
- return new BumpPointerSpace(name, mem_map);
+BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap&& mem_map) {
+ return new BumpPointerSpace(name, std::move(mem_map));
}
BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
- : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
+ : ContinuousMemMapAllocSpace(name,
+ MemMap::Invalid(),
+ begin,
+ begin,
+ limit,
kGcRetentionPolicyAlwaysCollect),
growth_end_(limit),
objects_allocated_(0), bytes_allocated_(0),
@@ -53,10 +60,14 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint
num_blocks_(0) {
}
-BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
- : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
+BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
+ : ContinuousMemMapAllocSpace(name,
+ std::move(mem_map),
+ mem_map.Begin(),
+ mem_map.Begin(),
+ mem_map.End(),
kGcRetentionPolicyAlwaysCollect),
- growth_end_(mem_map->End()),
+ growth_end_(mem_map_.End()),
objects_allocated_(0), bytes_allocated_(0),
block_lock_("Block lock", kBumpPointerSpaceBlockLock),
main_block_size_(0),
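Reviewer note on the constructor above: passing std::move(mem_map) to the base class and then reading mem_map.Begin() in the same initializer list is safe, because std::move is only a cast and binding a MemMap&& parameter moves nothing. The actual move happens inside the base class's member initializer, after all arguments have been evaluated, and growth_end_ then reads the already-populated member mem_map_. In miniature:

#include <utility>

struct Buf {
  int* data = nullptr;
  Buf() = default;
  Buf(Buf&& o) noexcept : data(o.data) { o.data = nullptr; }
};

struct Base {
  Buf buf_;
  int* begin_;
  // Binding `b` moves nothing; the move happens in buf_(std::move(b)).
  Base(Buf&& b, int* begin) : buf_(std::move(b)), begin_(begin) {}
};

struct Derived : Base {
  int* end_;
  explicit Derived(Buf&& b)
      : Base(std::move(b), b.data),  // b is still intact while args evaluate
        end_(buf_.data) {}           // member already moved-to; safe to read
};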
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 5ba13ca3ff..02e84b509e 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -35,11 +35,11 @@ namespace space {
// A bump pointer space allocates by incrementing a pointer; it doesn't provide a free
// implementation, as it's intended to be evacuated.
-class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
+class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeBumpPointerSpace;
}
@@ -47,31 +47,31 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// guaranteed to be granted; if it is required, the caller should call Begin on the returned
// space to confirm the request was granted.
static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
- static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
+ static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);
// Allocate num_bytes; returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_);
+ override REQUIRES(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(size_t num_bytes);
mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES_SHARED(Locks::mutator_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
// NOPS unless we support free lists.
- size_t Free(Thread*, mirror::Object*) OVERRIDE {
+ size_t Free(Thread*, mirror::Object*) override {
return 0;
}
- size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+ size_t FreeList(Thread*, size_t, mirror::Object**) override {
return 0;
}
@@ -94,16 +94,16 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return GetMemMap()->Size();
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return nullptr;
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return nullptr;
}
// Reset the space to empty.
- void Clear() OVERRIDE REQUIRES(!block_lock_);
+ void Clear() override REQUIRES(!block_lock_);
void Dump(std::ostream& os) const;
@@ -122,7 +122,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return Begin() == End();
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return true;
}
@@ -141,7 +141,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// Allocate a new TLAB; returns false if the allocation failed.
bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
- BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
+ BumpPointerSpace* AsBumpPointerSpace() override {
return this;
}
@@ -151,7 +151,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!block_lock_);
- accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;
// Record objects / bytes freed.
void RecordFree(int32_t objects, int32_t bytes) {
@@ -159,14 +159,14 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
bytes_allocated_.fetch_sub(bytes, std::memory_order_seq_cst);
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
// Object alignment within the space.
static constexpr size_t kAlignment = 8;
protected:
- BumpPointerSpace(const std::string& name, MemMap* mem_map);
+ BumpPointerSpace(const std::string& name, MemMap&& mem_map);
// Allocate a raw block of bytes.
uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
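Reviewer note: as the header comment says, allocation in this space is a pointer bump. The fast path amounts to an aligned size, a bounds check, and an increment; a simplified single-threaded sketch (the real AllocNonvirtual path uses atomic updates so mutators can allocate concurrently):

#include <cstddef>
#include <cstdint>

constexpr size_t kAlignment = 8;  // matches the space's object alignment

uint8_t* BumpAlloc(uint8_t** pos, uint8_t* growth_end, size_t num_bytes) {
  num_bytes = (num_bytes + kAlignment - 1) & ~(kAlignment - 1);  // round up
  if (static_cast<size_t>(growth_end - *pos) < num_bytes) {
    return nullptr;  // space exhausted; caller must GC or grow the space
  }
  uint8_t* result = *pos;
  *pos += num_bytes;  // the "bump"
  return result;
}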
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 025c3f0ead..36d2161262 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -38,41 +38,73 @@ namespace space {
static constexpr bool kPrefetchDuringDlMallocFreeList = true;
-DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
- void* mspace, uint8_t* begin, uint8_t* end, uint8_t* limit,
- size_t growth_limit, bool can_move_objects, size_t starting_size)
- : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
+DlMallocSpace::DlMallocSpace(MemMap&& mem_map,
+ size_t initial_size,
+ const std::string& name,
+ void* mspace,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects,
+ size_t starting_size)
+ : MallocSpace(name,
+ std::move(mem_map),
+ begin,
+ end,
+ limit,
+ growth_limit,
+ /* create_bitmaps */ true,
+ can_move_objects,
starting_size, initial_size),
mspace_(mspace) {
CHECK(mspace != nullptr);
}
-DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
- size_t starting_size, size_t initial_size,
- size_t growth_limit, size_t capacity,
+DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap&& mem_map,
+ const std::string& name,
+ size_t starting_size,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
bool can_move_objects) {
- DCHECK(mem_map != nullptr);
- void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
+ DCHECK(mem_map.IsValid());
+ void* mspace = CreateMspace(mem_map.Begin(), starting_size, initial_size);
if (mspace == nullptr) {
LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
return nullptr;
}
// Protect memory beyond the starting size. morecore will add r/w permissions when necessary.
- uint8_t* end = mem_map->Begin() + starting_size;
+ uint8_t* end = mem_map.Begin() + starting_size;
if (capacity - starting_size > 0) {
CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
}
// Everything is set, so record it in the immutable structure and leave.
- uint8_t* begin = mem_map->Begin();
+ uint8_t* begin = mem_map.Begin();
if (Runtime::Current()->IsRunningOnMemoryTool()) {
return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
- mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
- can_move_objects, starting_size);
+ std::move(mem_map),
+ initial_size,
+ name,
+ mspace,
+ begin,
+ end,
+ begin + capacity,
+ growth_limit,
+ can_move_objects,
+ starting_size);
} else {
- return new DlMallocSpace(mem_map, initial_size, name, mspace, begin, end, begin + capacity,
- growth_limit, can_move_objects, starting_size);
+ return new DlMallocSpace(std::move(mem_map),
+ initial_size,
+ name,
+ mspace,
+ begin,
+ end,
+ begin + capacity,
+ growth_limit,
+ can_move_objects,
+ starting_size);
}
}
@@ -94,15 +126,20 @@ DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_siz
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
size_t starting_size = kPageSize;
- MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
- requested_begin);
- if (mem_map == nullptr) {
+ MemMap mem_map =
+ CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
return nullptr;
}
- DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
- growth_limit, capacity, can_move_objects);
+ DlMallocSpace* space = CreateFromMemMap(std::move(mem_map),
+ name,
+ starting_size,
+ initial_size,
+ growth_limit,
+ capacity,
+ can_move_objects);
// We start out with only the initial size possibly containing objects.
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
@@ -152,17 +189,37 @@ mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
return result;
}
-MallocSpace* DlMallocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
- void* allocator, uint8_t* begin, uint8_t* end,
- uint8_t* limit, size_t growth_limit,
+MallocSpace* DlMallocSpace::CreateInstance(MemMap&& mem_map,
+ const std::string& name,
+ void* allocator,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
bool can_move_objects) {
if (Runtime::Current()->IsRunningOnMemoryTool()) {
return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
- mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
- can_move_objects, starting_size_);
+ std::move(mem_map),
+ initial_size_,
+ name,
+ allocator,
+ begin,
+ end,
+ limit,
+ growth_limit,
+ can_move_objects,
+ starting_size_);
} else {
- return new DlMallocSpace(mem_map, initial_size_, name, allocator, begin, end, limit,
- growth_limit, can_move_objects, starting_size_);
+ return new DlMallocSpace(std::move(mem_map),
+ initial_size_,
+ name,
+ allocator,
+ begin,
+ end,
+ limit,
+ growth_limit,
+ can_move_objects,
+ starting_size_);
}
}
@@ -283,7 +340,7 @@ void DlMallocSpace::Clear() {
live_bitmap_->Clear();
mark_bitmap_->Clear();
SetEnd(Begin() + starting_size_);
- mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
+ mspace_ = CreateMspace(mem_map_.Begin(), starting_size_, initial_size_);
SetFootprintLimit(footprint_limit);
}
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 4c7fcfdeb9..c63ff71849 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -34,9 +34,12 @@ namespace space {
class DlMallocSpace : public MallocSpace {
public:
// Create a DlMallocSpace from an existing mem_map.
- static DlMallocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
- size_t starting_size, size_t initial_size,
- size_t growth_limit, size_t capacity,
+ static DlMallocSpace* CreateFromMemMap(MemMap&& mem_map,
+ const std::string& name,
+ size_t starting_size,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
bool can_move_objects);
// Create a DlMallocSpace with the requested sizes. The requested
@@ -47,39 +50,42 @@ class DlMallocSpace : public MallocSpace {
size_t capacity, uint8_t* requested_begin, bool can_move_objects);
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
+ mirror::Object* AllocWithGrowth(Thread* self,
+ size_t num_bytes,
+ size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) override REQUIRES(!lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_) {
+ mirror::Object* Alloc(Thread* self,
+ size_t num_bytes,
+ size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) override REQUIRES(!lock_) {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return AllocationSizeNonvirtual(obj, usable_size);
}
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
return num_bytes;
}
// DlMallocSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
@@ -100,48 +106,60 @@ class DlMallocSpace : public MallocSpace {
return mspace_;
}
- size_t Trim() OVERRIDE;
+ size_t Trim() override;
// Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
// in use, indicated by num_bytes equaling zero.
- void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+ void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
// Returns the number of bytes that the space has currently obtained from the system. This is
// greater or equal to the amount of live data in the space.
- size_t GetFootprint() OVERRIDE;
+ size_t GetFootprint() override;
// Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
- size_t GetFootprintLimit() OVERRIDE;
+ size_t GetFootprintLimit() override;
// Set the maximum number of bytes that the heap is allowed to obtain from the system via
// MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
// allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
- void SetFootprintLimit(size_t limit) OVERRIDE;
+ void SetFootprintLimit(size_t limit) override;
- MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
- bool can_move_objects);
+ MallocSpace* CreateInstance(MemMap&& mem_map,
+ const std::string& name,
+ void* allocator,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects) override;
- uint64_t GetBytesAllocated() OVERRIDE;
- uint64_t GetObjectsAllocated() OVERRIDE;
+ uint64_t GetBytesAllocated() override;
+ uint64_t GetObjectsAllocated() override;
- virtual void Clear() OVERRIDE;
+ void Clear() override;
- bool IsDlMallocSpace() const OVERRIDE {
+ bool IsDlMallocSpace() const override {
return true;
}
- DlMallocSpace* AsDlMallocSpace() OVERRIDE {
+ DlMallocSpace* AsDlMallocSpace() override {
return this;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
- DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
- bool can_move_objects, size_t starting_size);
+ DlMallocSpace(MemMap&& mem_map,
+ size_t initial_size,
+ const std::string& name,
+ void* mspace,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects,
+ size_t starting_size);
private:
mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -150,7 +168,7 @@ class DlMallocSpace : public MallocSpace {
REQUIRES(lock_);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
- size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE {
+ size_t /*maximum_size*/, bool /*low_memory_mode*/) override {
return CreateMspace(base, morecore_start, initial_size);
}
static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 985eff3025..f308f63386 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -62,19 +62,19 @@ Atomic<uint32_t> ImageSpace::bitmap_index_(0);
ImageSpace::ImageSpace(const std::string& image_filename,
const char* image_location,
- MemMap* mem_map,
- accounting::ContinuousSpaceBitmap* live_bitmap,
+ MemMap&& mem_map,
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
uint8_t* end)
: MemMapSpace(image_filename,
- mem_map,
- mem_map->Begin(),
+ std::move(mem_map),
+ mem_map.Begin(),
end,
end,
kGcRetentionPolicyNeverCollect),
+ live_bitmap_(std::move(live_bitmap)),
oat_file_non_owned_(nullptr),
image_location_(image_location) {
- DCHECK(live_bitmap != nullptr);
- live_bitmap_.reset(live_bitmap);
+ DCHECK(live_bitmap_ != nullptr);
}
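
One subtlety worth spelling out: the initializer passes std::move(mem_map) to the base class and still reads mem_map.Begin() in the same argument list. This is safe only because the callee takes MemMap&&: std::move is just a cast, and the actual move happens inside the base constructor, after every argument has been evaluated. A small standalone sketch of why the ordering works (illustrative Holder type, not ART code):

    #include <cstdio>
    #include <string>
    #include <utility>

    struct Holder {
      // Takes an rvalue reference; binding it does not move anything yet.
      Holder(std::string&& s, size_t length_before_move) : s_(std::move(s)) {
        std::printf("caller saw length %zu, stored length %zu\n",
                    length_before_move, s_.size());
      }
      std::string s_;
    };

    int main() {
      std::string name = "boot.art";
      // std::move(name) is only a cast here; name.size() is evaluated before
      // Holder's constructor body runs, so both arguments see the original
      // string. Had Holder taken std::string by value, the two argument
      // evaluations would be unsequenced and name.size() could observe a
      // moved-from string.
      Holder h(std::move(name), name.size());
      return 0;
    }

The same pattern appears again in FreeListSpace::Create further below, which passes std::move(mem_map) alongside mem_map.Begin() and mem_map.End().
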
static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta) {
@@ -480,52 +480,16 @@ std::ostream& operator<<(std::ostream& os, const RelocationRange& reloc) {
}
// Helper class encapsulating loading, so we can access private ImageSpace members (this is a
-// friend class), but not declare functions in the header.
+// nested class), but not declare functions in the header.
class ImageSpace::Loader {
public:
- static std::unique_ptr<ImageSpace> Load(const std::string& image_location,
- const std::string& image_filename,
- bool is_zygote,
- bool is_global_cache,
- bool validate_oat_file,
- std::string* error_msg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Should this be a RDWR lock? This is only a defensive measure, as at
- // this point the image should exist.
- // However, only the zygote can write into the global dalvik-cache, so
- // restrict to zygote processes, or any process that isn't using
- // /data/dalvik-cache (which we assume to be allowed to write there).
- const bool rw_lock = is_zygote || !is_global_cache;
-
- // Note that we must not use the file descriptor associated with
- // ScopedFlock::GetFile to Init the image file. We want the file
- // descriptor (and the associated exclusive lock) to be released when
- // we leave Create.
- ScopedFlock image = LockedFile::Open(image_filename.c_str(),
- rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
- true /* block */,
- error_msg);
-
- VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
- << image_location;
- // If we are in /system we can assume the image is good. We can also
- // assume this if we are using a relocated image (i.e. image checksum
- // matches) since this is only different by the offset. We need this to
- // make sure that host tests continue to work.
- // Since we are the boot image, pass null since we load the oat file from the boot image oat
- // file name.
- return Init(image_filename.c_str(),
- image_location.c_str(),
- validate_oat_file,
- /* oat_file */nullptr,
- error_msg);
- }
-
static std::unique_ptr<ImageSpace> Init(const char* image_filename,
const char* image_location,
bool validate_oat_file,
const OatFile* oat_file,
- std::string* error_msg)
+ /*inout*/MemMap* image_reservation,
+ /*inout*/MemMap* oat_reservation,
+ /*out*/std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(image_filename != nullptr);
CHECK(image_location != nullptr);
@@ -610,53 +574,56 @@ class ImageSpace::Loader {
return nullptr;
}
- std::unique_ptr<MemMap> map;
+ MemMap map;
// GetImageBegin is the preferred address to map the image. If we manage to map the
// image at the image begin, the amount of fixup work required is minimized.
// If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
// avoid reading proc maps for a mapping failure and slowing everything down.
- map.reset(LoadImageFile(image_filename,
- image_location,
- *image_header,
- image_header->GetImageBegin(),
- file->Fd(),
- logger,
- image_header->IsPic() ? nullptr : error_msg));
+ // For the boot image, we have already reserved the memory and we load the image
+ // into the `image_reservation`.
+ map = LoadImageFile(
+ image_filename,
+ image_location,
+ *image_header,
+ image_header->GetImageBegin(),
+ file->Fd(),
+ logger,
+ image_reservation,
+ (image_reservation == nullptr && image_header->IsPic()) ? nullptr : error_msg);
// If the header specifies PIC mode, we can also map at a random low_4gb address since we can
// relocate in-place.
- if (map == nullptr && image_header->IsPic()) {
- map.reset(LoadImageFile(image_filename,
- image_location,
- *image_header,
- /* address */ nullptr,
- file->Fd(),
- logger,
- error_msg));
+ if (!map.IsValid() && image_reservation == nullptr && image_header->IsPic()) {
+ map = LoadImageFile(image_filename,
+ image_location,
+ *image_header,
+ /* address */ nullptr,
+ file->Fd(),
+ logger,
+ /* image_reservation */ nullptr,
+ error_msg);
}
// Were we able to load something and continue?
- if (map == nullptr) {
+ if (!map.IsValid()) {
DCHECK(!error_msg->empty());
return nullptr;
}
- DCHECK_EQ(0, memcmp(image_header, map->Begin(), sizeof(ImageHeader)));
-
- std::unique_ptr<MemMap> image_bitmap_map(MemMap::MapFileAtAddress(nullptr,
- bitmap_section.Size(),
- PROT_READ, MAP_PRIVATE,
- file->Fd(),
- image_bitmap_offset,
- /*low_4gb*/false,
- /*reuse*/false,
- image_filename,
- error_msg));
- if (image_bitmap_map == nullptr) {
+ DCHECK_EQ(0, memcmp(image_header, map.Begin(), sizeof(ImageHeader)));
+
+ MemMap image_bitmap_map = MemMap::MapFile(bitmap_section.Size(),
+ PROT_READ, MAP_PRIVATE,
+ file->Fd(),
+ image_bitmap_offset,
+ /* low_4gb */ false,
+ image_filename,
+ error_msg);
+ if (!image_bitmap_map.IsValid()) {
*error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
return nullptr;
}
// Loaded the map, use the image header from the file now in case we patch it with
// RelocateInPlace.
- image_header = reinterpret_cast<ImageHeader*>(map->Begin());
+ image_header = reinterpret_cast<ImageHeader*>(map.Begin());
const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1, std::memory_order_seq_cst);
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
image_filename,
@@ -664,15 +631,15 @@ class ImageSpace::Loader {
// Bitmap only needs to cover until the end of the mirror objects section.
const ImageSection& image_objects = image_header->GetObjectsSection();
// We only want the mirror object, not the ArtFields and ArtMethods.
- uint8_t* const image_end = map->Begin() + image_objects.End();
+ uint8_t* const image_end = map.Begin() + image_objects.End();
std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap;
{
TimingLogger::ScopedTiming timing("CreateImageBitmap", &logger);
bitmap.reset(
accounting::ContinuousSpaceBitmap::CreateFromMemMap(
bitmap_name,
- image_bitmap_map.release(),
- reinterpret_cast<uint8_t*>(map->Begin()),
+ std::move(image_bitmap_map),
+ reinterpret_cast<uint8_t*>(map.Begin()),
// Make sure the bitmap is aligned to card size instead of just bitmap word size.
RoundUp(image_objects.End(), gc::accounting::CardTable::kCardSize)));
if (bitmap == nullptr) {
@@ -683,7 +650,7 @@ class ImageSpace::Loader {
{
TimingLogger::ScopedTiming timing("RelocateImage", &logger);
if (!RelocateInPlace(*image_header,
- map->Begin(),
+ map.Begin(),
bitmap.get(),
oat_file,
error_msg)) {
@@ -693,8 +660,8 @@ class ImageSpace::Loader {
// We only want the mirror object, not the ArtFields and ArtMethods.
std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
image_location,
- map.release(),
- bitmap.release(),
+ std::move(map),
+ std::move(bitmap),
image_end));
// VerifyImageAllocations() will be called later in Runtime::Init()
@@ -704,7 +671,7 @@ class ImageSpace::Loader {
// set yet at this point.
if (oat_file == nullptr) {
TimingLogger::ScopedTiming timing("OpenOatFile", &logger);
- space->oat_file_ = OpenOatFile(*space, image_filename, error_msg);
+ space->oat_file_ = OpenOatFile(*space, image_filename, oat_reservation, error_msg);
if (space->oat_file_ == nullptr) {
DCHECK(!error_msg->empty());
return nullptr;
@@ -781,13 +748,14 @@ class ImageSpace::Loader {
}
private:
- static MemMap* LoadImageFile(const char* image_filename,
- const char* image_location,
- const ImageHeader& image_header,
- uint8_t* address,
- int fd,
- TimingLogger& logger,
- std::string* error_msg) {
+ static MemMap LoadImageFile(const char* image_filename,
+ const char* image_location,
+ const ImageHeader& image_header,
+ uint8_t* address,
+ int fd,
+ TimingLogger& logger,
+ /*inout*/MemMap* image_reservation,
+ /*out*/std::string* error_msg) {
TimingLogger::ScopedTiming timing("MapImageFile", &logger);
const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
if (storage_mode == ImageHeader::kStorageModeUncompressed) {
@@ -796,10 +764,11 @@ class ImageSpace::Loader {
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
fd,
- 0,
- /*low_4gb*/true,
- /*reuse*/false,
+ /* start */ 0,
+ /* low_4gb */ true,
image_filename,
+ /* reuse */ false,
+ image_reservation,
error_msg);
}
@@ -809,45 +778,46 @@ class ImageSpace::Loader {
*error_msg = StringPrintf("Invalid storage mode in image header %d",
static_cast<int>(storage_mode));
}
- return nullptr;
+ return MemMap::Invalid();
}
// Reserve output and decompress into it.
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous(image_location,
- address,
- image_header.GetImageSize(),
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- /*reuse*/false,
- error_msg));
- if (map != nullptr) {
+ MemMap map = MemMap::MapAnonymous(image_location,
+ address,
+ image_header.GetImageSize(),
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ image_reservation,
+ error_msg);
+ if (map.IsValid()) {
const size_t stored_size = image_header.GetDataSize();
const size_t decompress_offset = sizeof(ImageHeader); // Skip the header.
- std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size,
- PROT_READ,
- MAP_PRIVATE,
- fd,
- /*offset*/0,
- /*low_4gb*/false,
- image_filename,
- error_msg));
- if (temp_map == nullptr) {
+ MemMap temp_map = MemMap::MapFile(sizeof(ImageHeader) + stored_size,
+ PROT_READ,
+ MAP_PRIVATE,
+ fd,
+ /* offset */ 0,
+ /* low_4gb */ false,
+ image_filename,
+ error_msg);
+ if (!temp_map.IsValid()) {
DCHECK(error_msg == nullptr || !error_msg->empty());
- return nullptr;
+ return MemMap::Invalid();
}
- memcpy(map->Begin(), &image_header, sizeof(ImageHeader));
+ memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
const uint64_t start = NanoTime();
// LZ4HC and LZ4 have same internal format, both use LZ4_decompress.
TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
const size_t decompressed_size = LZ4_decompress_safe(
- reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
- reinterpret_cast<char*>(map->Begin()) + decompress_offset,
+ reinterpret_cast<char*>(temp_map.Begin()) + sizeof(ImageHeader),
+ reinterpret_cast<char*>(map.Begin()) + decompress_offset,
stored_size,
- map->Size() - decompress_offset);
+ map.Size() - decompress_offset);
const uint64_t time = NanoTime() - start;
// Add 1 ns to prevent a possible divide by 0.
VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
- << PrettySize(static_cast<uint64_t>(map->Size()) * MsToNs(1000) / (time + 1))
+ << PrettySize(static_cast<uint64_t>(map.Size()) * MsToNs(1000) / (time + 1))
<< "/s)";
if (decompressed_size + sizeof(ImageHeader) != image_header.GetImageSize()) {
if (error_msg != nullptr) {
@@ -856,11 +826,11 @@ class ImageSpace::Loader {
decompressed_size + sizeof(ImageHeader),
image_header.GetImageSize());
}
- return nullptr;
+ return MemMap::Invalid();
}
}
- return map.release();
+ return map;
}
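
The compressed path above maps the file read-only, decompresses into the anonymous destination map, and rejects any result whose size does not match the header. A minimal round-trip sketch of that size check using the public LZ4 C API (link with -llz4; buffer contents and sizes are illustrative):

    #include <lz4.h>

    #include <cassert>
    #include <cstring>
    #include <string>
    #include <vector>

    int main() {
      std::string input(4096, 'a');  // stands in for the image data
      std::vector<char> compressed(LZ4_compressBound(static_cast<int>(input.size())));
      int stored_size = LZ4_compress_default(input.data(), compressed.data(),
                                             static_cast<int>(input.size()),
                                             static_cast<int>(compressed.size()));
      assert(stored_size > 0);

      std::vector<char> output(input.size());
      int decompressed_size = LZ4_decompress_safe(compressed.data(), output.data(),
                                                  stored_size,
                                                  static_cast<int>(output.size()));
      // Mirrors the check above: a short or failed decompression means the
      // destination map must be discarded rather than used.
      assert(decompressed_size == static_cast<int>(input.size()));
      assert(std::memcmp(output.data(), input.data(), input.size()) == 0);
      return 0;
    }
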
class FixupVisitor : public ValueObject {
@@ -1397,6 +1367,7 @@ class ImageSpace::Loader {
static std::unique_ptr<OatFile> OpenOatFile(const ImageSpace& image,
const char* image_path,
+ /*inout*/MemMap* oat_reservation,
std::string* error_msg) {
const ImageHeader& image_header = image.GetImageHeader();
std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_path);
@@ -1407,10 +1378,10 @@ class ImageSpace::Loader {
oat_filename,
oat_filename,
image_header.GetOatDataBegin(),
- image_header.GetOatFileBegin(),
!Runtime::Current()->IsAotCompiler(),
- /*low_4gb*/false,
- nullptr,
+ /* low_4gb */ false,
+ /* abs_dex_location */ nullptr,
+ oat_reservation,
error_msg));
if (oat_file == nullptr) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
@@ -1454,6 +1425,7 @@ class ImageSpace::BootImageLoader {
has_system_(false),
has_cache_(false),
is_global_cache_(true),
+ dalvik_cache_exists_(false),
dalvik_cache_(),
cache_filename_() {
}
@@ -1462,59 +1434,88 @@ class ImageSpace::BootImageLoader {
void FindImageFiles() {
std::string system_filename;
- bool dalvik_cache_exists = false;
bool found_image = FindImageFilenameImpl(image_location_.c_str(),
image_isa_,
&has_system_,
&system_filename,
- &dalvik_cache_exists,
+ &dalvik_cache_exists_,
&dalvik_cache_,
&is_global_cache_,
&has_cache_,
&cache_filename_);
- DCHECK_EQ(dalvik_cache_exists, !dalvik_cache_.empty());
+ DCHECK(!dalvik_cache_exists_ || !dalvik_cache_.empty());
DCHECK_EQ(found_image, has_system_ || has_cache_);
}
bool HasSystem() const { return has_system_; }
bool HasCache() const { return has_cache_; }
- bool DalvikCacheExists() const { return !dalvik_cache_.empty(); }
+ bool DalvikCacheExists() const { return dalvik_cache_exists_; }
bool IsGlobalCache() const { return is_global_cache_; }
const std::string& GetDalvikCache() const {
- DCHECK(DalvikCacheExists());
return dalvik_cache_;
}
const std::string& GetCacheFilename() const {
- DCHECK(DalvikCacheExists());
return cache_filename_;
}
- bool LoadFromSystem(/*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
- /*out*/ uint8_t** oat_file_end,
- /*out*/ std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool LoadFromSystem(size_t extra_reservation_size,
+ /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+ /*out*/MemMap* extra_reservation,
+ /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
std::vector<std::string> locations;
if (!GetBootClassPathImageLocations(image_location_, filename, &locations, error_msg)) {
return false;
}
+ uint32_t image_start;
+ uint32_t image_end;
+ uint32_t oat_end;
+ if (!GetBootImageAddressRange(filename, &image_start, &image_end, &oat_end, error_msg)) {
+ return false;
+ }
+ if (locations.size() > 1u) {
+ std::string last_filename = GetSystemImageFilename(locations.back().c_str(), image_isa_);
+ uint32_t dummy;
+ if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, &oat_end, error_msg)) {
+ return false;
+ }
+ }
+ MemMap image_reservation;
+ MemMap oat_reservation;
+ MemMap local_extra_reservation;
+ if (!ReserveBootImageMemory(image_start,
+ image_end,
+ oat_end,
+ extra_reservation_size,
+ &image_reservation,
+ &oat_reservation,
+ &local_extra_reservation,
+ error_msg)) {
+ return false;
+ }
+
std::vector<std::unique_ptr<ImageSpace>> spaces;
spaces.reserve(locations.size());
for (const std::string& location : locations) {
filename = GetSystemImageFilename(location.c_str(), image_isa_);
- spaces.push_back(Loader::Load(location,
- filename,
- is_zygote_,
- is_global_cache_,
- /* validate_oat_file */ false,
- error_msg));
+ spaces.push_back(Load(location,
+ filename,
+ /* validate_oat_file */ false,
+ &image_reservation,
+ &oat_reservation,
+ error_msg));
if (spaces.back() == nullptr) {
return false;
}
}
- *oat_file_end = GetOatFileEnd(spaces);
+ if (!CheckReservationsExhausted(image_reservation, oat_reservation, error_msg)) {
+ return false;
+ }
+
+ *extra_reservation = std::move(local_extra_reservation);
boot_image_spaces->swap(spaces);
return true;
}
@@ -1522,14 +1523,48 @@ class ImageSpace::BootImageLoader {
bool LoadFromDalvikCache(
bool validate_system_checksums,
bool validate_oat_file,
- /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
- /*out*/ uint8_t** oat_file_end,
- /*out*/ std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+ size_t extra_reservation_size,
+ /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+ /*out*/MemMap* extra_reservation,
+ /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(DalvikCacheExists());
std::vector<std::string> locations;
if (!GetBootClassPathImageLocations(image_location_, cache_filename_, &locations, error_msg)) {
return false;
}
+ uint32_t image_start;
+ uint32_t image_end;
+ uint32_t oat_end;
+ if (!GetBootImageAddressRange(cache_filename_, &image_start, &image_end, &oat_end, error_msg)) {
+ return false;
+ }
+ if (locations.size() > 1u) {
+ std::string last_filename;
+ if (!GetDalvikCacheFilename(locations.back().c_str(),
+ dalvik_cache_.c_str(),
+ &last_filename,
+ error_msg)) {
+ return false;
+ }
+ uint32_t dummy;
+ if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, &oat_end, error_msg)) {
+ return false;
+ }
+ }
+ MemMap image_reservation;
+ MemMap oat_reservation;
+ MemMap local_extra_reservation;
+ if (!ReserveBootImageMemory(image_start,
+ image_end,
+ oat_end,
+ extra_reservation_size,
+ &image_reservation,
+ &oat_reservation,
+ &local_extra_reservation,
+ error_msg)) {
+ return false;
+ }
+
std::vector<std::unique_ptr<ImageSpace>> spaces;
spaces.reserve(locations.size());
for (const std::string& location : locations) {
@@ -1537,12 +1572,12 @@ class ImageSpace::BootImageLoader {
if (!GetDalvikCacheFilename(location.c_str(), dalvik_cache_.c_str(), &filename, error_msg)) {
return false;
}
- spaces.push_back(Loader::Load(location,
- filename,
- is_zygote_,
- is_global_cache_,
- validate_oat_file,
- error_msg));
+ spaces.push_back(Load(location,
+ filename,
+ validate_oat_file,
+ &image_reservation,
+ &oat_reservation,
+ error_msg));
if (spaces.back() == nullptr) {
return false;
}
@@ -1563,12 +1598,56 @@ class ImageSpace::BootImageLoader {
}
}
}
- *oat_file_end = GetOatFileEnd(spaces);
+ if (!CheckReservationsExhausted(image_reservation, oat_reservation, error_msg)) {
+ return false;
+ }
+
+ *extra_reservation = std::move(local_extra_reservation);
boot_image_spaces->swap(spaces);
return true;
}
private:
+ std::unique_ptr<ImageSpace> Load(const std::string& image_location,
+ const std::string& image_filename,
+ bool validate_oat_file,
+ /*inout*/MemMap* image_reservation,
+ /*inout*/MemMap* oat_reservation,
+ /*out*/std::string* error_msg)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Should this be an RDWR lock? This is only a defensive measure, as at
+ // this point the image should exist.
+ // However, only the zygote can write into the global dalvik-cache, so
+ // restrict to zygote processes, or any process that isn't using
+ // /data/dalvik-cache (which we assume to be allowed to write there).
+ const bool rw_lock = is_zygote_ || !is_global_cache_;
+
+ // Note that we must not use the file descriptor associated with
+ // ScopedFlock::GetFile to Init the image file. We want the file
+ // descriptor (and the associated exclusive lock) to be released when
+ // we leave Create.
+ ScopedFlock image = LockedFile::Open(image_filename.c_str(),
+ rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
+ true /* block */,
+ error_msg);
+
+ VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
+ << image_location;
+ // If we are in /system we can assume the image is good. We can also
+ // assume this if we are using a relocated image (i.e. image checksum
+ // matches) since this is only different by the offset. We need this to
+ // make sure that host tests continue to work.
+ // Since this is the boot image, pass null: the oat file is loaded from the
+ // boot image oat file name.
+ return Loader::Init(image_filename.c_str(),
+ image_location.c_str(),
+ validate_oat_file,
+ /* oat_file */ nullptr,
+ image_reservation,
+ oat_reservation,
+ error_msg);
+ }
+
// Extract boot class path from oat file associated with `image_filename`
// and list all associated image locations.
static bool GetBootClassPathImageLocations(const std::string& image_location,
@@ -1580,10 +1659,10 @@ class ImageSpace::BootImageLoader {
oat_filename,
oat_filename,
/* requested_base */ nullptr,
- /* oat_file_begin */ nullptr,
/* executable */ false,
/* low_4gb */ false,
/* abs_dex_location */ nullptr,
+ /* reservation */ nullptr,
error_msg));
if (oat_file == nullptr) {
*error_msg = StringPrintf("Failed to open oat file '%s' for image file %s: %s",
@@ -1594,26 +1673,94 @@ class ImageSpace::BootImageLoader {
}
const OatHeader& oat_header = oat_file->GetOatHeader();
const char* boot_classpath = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
- if (boot_classpath == nullptr || boot_classpath[0] == 0) {
- *error_msg = StringPrintf("No boot class path in oat file '%s' for image file %s",
- oat_filename.c_str(),
- image_filename.c_str());
+ all_locations->push_back(image_location);
+ if (boot_classpath != nullptr && boot_classpath[0] != 0) {
+ ExtractMultiImageLocations(image_location, boot_classpath, all_locations);
+ }
+ return true;
+ }
+
+ bool GetBootImageAddressRange(const std::string& filename,
+ /*out*/uint32_t* start,
+ /*out*/uint32_t* end,
+ /*out*/uint32_t* oat_end,
+ /*out*/std::string* error_msg) {
+ ImageHeader system_hdr;
+ if (!ReadSpecificImageHeader(filename.c_str(), &system_hdr)) {
+ *error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
+ return false;
+ }
+ *start = reinterpret_cast32<uint32_t>(system_hdr.GetImageBegin());
+ CHECK_ALIGNED(*start, kPageSize);
+ *end = RoundUp(*start + system_hdr.GetImageSize(), kPageSize);
+ *oat_end = RoundUp(reinterpret_cast32<uint32_t>(system_hdr.GetOatFileEnd()), kPageSize);
+ return true;
+ }
+
+ bool ReserveBootImageMemory(uint32_t image_start,
+ uint32_t image_end,
+ uint32_t oat_end,
+ size_t extra_reservation_size,
+ /*out*/MemMap* image_reservation,
+ /*out*/MemMap* oat_reservation,
+ /*out*/MemMap* extra_reservation,
+ /*out*/std::string* error_msg) {
+ DCHECK(!image_reservation->IsValid());
+ size_t total_size =
+ dchecked_integral_cast<size_t>(oat_end - image_start) + extra_reservation_size;
+ *image_reservation =
+ MemMap::MapAnonymous("Boot image reservation",
+ reinterpret_cast32<uint8_t*>(image_start),
+ total_size,
+ PROT_NONE,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ /* reservation */ nullptr,
+ error_msg);
+ if (!image_reservation->IsValid()) {
+ return false;
+ }
+ DCHECK(!extra_reservation->IsValid());
+ if (extra_reservation_size != 0u) {
+ DCHECK_ALIGNED(extra_reservation_size, kPageSize);
+ DCHECK_LT(extra_reservation_size, image_reservation->Size());
+ uint8_t* split = image_reservation->End() - extra_reservation_size;
+ *extra_reservation = image_reservation->RemapAtEnd(split,
+ "Boot image extra reservation",
+ PROT_NONE,
+ error_msg);
+ if (!extra_reservation->IsValid()) {
+ return false;
+ }
+ }
+ DCHECK(!oat_reservation->IsValid());
+ *oat_reservation = image_reservation->RemapAtEnd(reinterpret_cast32<uint8_t*>(image_end),
+ "Boot image oat reservation",
+ PROT_NONE,
+ error_msg);
+ if (!oat_reservation->IsValid()) {
return false;
}
- all_locations->push_back(image_location);
- ExtractMultiImageLocations(image_location, boot_classpath, all_locations);
return true;
}
- uint8_t* GetOatFileEnd(const std::vector<std::unique_ptr<ImageSpace>>& spaces) {
- DCHECK(std::is_sorted(
- spaces.begin(),
- spaces.end(),
- [](const std::unique_ptr<ImageSpace>& lhs, const std::unique_ptr<ImageSpace>& rhs) {
- return lhs->GetOatFileEnd() < rhs->GetOatFileEnd();
- }));
- return AlignUp(spaces.back()->GetOatFileEnd(), kPageSize);
+ bool CheckReservationsExhausted(const MemMap& image_reservation,
+ const MemMap& oat_reservation,
+ /*out*/std::string* error_msg) {
+ if (image_reservation.IsValid()) {
+ *error_msg = StringPrintf("Excessive image reservation after loading boot image: %p-%p",
+ image_reservation.Begin(),
+ image_reservation.End());
+ return false;
+ }
+ if (oat_reservation.IsValid()) {
+ *error_msg = StringPrintf("Excessive oat reservation after loading boot image: %p-%p",
+ oat_reservation.Begin(),
+ oat_reservation.End());
+ return false;
+ }
+ return true;
}
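
ReserveBootImageMemory above makes a single PROT_NONE reservation covering the whole image+oat range plus the requested extra, then carves it with RemapAtEnd so each consumer owns its own piece; CheckReservationsExhausted then verifies nothing was left over. A simplified standalone sketch of the layout arithmetic (plain POSIX with hypothetical sizes; it only computes the three regions rather than transferring ownership the way RemapAtEnd does):

    #include <sys/mman.h>

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const size_t kPage = 4096;
      const size_t image_size = 16 * kPage;  // [image_start, image_end)
      const size_t oat_size = 8 * kPage;     // [image_end, oat_end)
      const size_t extra_size = 4 * kPage;   // heap reservation after the oat files
      const size_t total = image_size + oat_size + extra_size;

      // One reservation; PROT_NONE so nothing touches it until files are mapped in.
      void* raw = mmap(nullptr, total, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(raw != MAP_FAILED);
      uint8_t* reservation = static_cast<uint8_t*>(raw);

      uint8_t* image_begin = reservation;             // image files load here
      uint8_t* oat_begin = image_begin + image_size;  // oat files load here
      uint8_t* extra_begin = oat_begin + oat_size;    // handed back to the heap

      std::printf("image %p oat %p extra %p\n",
                  static_cast<void*>(image_begin),
                  static_cast<void*>(oat_begin),
                  static_cast<void*>(extra_begin));
      munmap(reservation, total);
      return 0;
    }
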
const std::string& image_location_;
@@ -1622,6 +1769,7 @@ class ImageSpace::BootImageLoader {
bool has_system_;
bool has_cache_;
bool is_global_cache_;
+ bool dalvik_cache_exists_;
std::string dalvik_cache_;
std::string cache_filename_;
};
@@ -1664,13 +1812,15 @@ static bool CheckSpace(const std::string& cache_filename, std::string* error_msg
bool ImageSpace::LoadBootImage(
const std::string& image_location,
const InstructionSet image_isa,
- /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
- /*out*/ uint8_t** oat_file_end) {
+ size_t extra_reservation_size,
+ /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+ /*out*/MemMap* extra_reservation) {
ScopedTrace trace(__FUNCTION__);
DCHECK(boot_image_spaces != nullptr);
DCHECK(boot_image_spaces->empty());
- DCHECK(oat_file_end != nullptr);
+ DCHECK_ALIGNED(extra_reservation_size, kPageSize);
+ DCHECK(extra_reservation != nullptr);
DCHECK_NE(image_isa, InstructionSet::kNone);
if (image_location.empty()) {
@@ -1727,8 +1877,9 @@ bool ImageSpace::LoadBootImage(
// If we have system image, validate system image checksums, otherwise validate the oat file.
if (loader.LoadFromDalvikCache(/* validate_system_checksums */ loader.HasSystem(),
/* validate_oat_file */ !loader.HasSystem(),
+ extra_reservation_size,
boot_image_spaces,
- oat_file_end,
+ extra_reservation,
&local_error_msg)) {
return true;
}
@@ -1742,7 +1893,10 @@ bool ImageSpace::LoadBootImage(
if (loader.HasSystem() && !relocate) {
std::string local_error_msg;
- if (loader.LoadFromSystem(boot_image_spaces, oat_file_end, &local_error_msg)) {
+ if (loader.LoadFromSystem(extra_reservation_size,
+ boot_image_spaces,
+ extra_reservation,
+ &local_error_msg)) {
return true;
}
error_msgs.push_back(local_error_msg);
@@ -1759,8 +1913,9 @@ bool ImageSpace::LoadBootImage(
if (patch_success) {
if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
/* validate_oat_file */ false,
+ extra_reservation_size,
boot_image_spaces,
- oat_file_end,
+ extra_reservation,
&local_error_msg)) {
return true;
}
@@ -1774,7 +1929,7 @@ bool ImageSpace::LoadBootImage(
// Step 3: We do not have an existing image in /system,
// so generate an image into the dalvik cache.
- if (!loader.HasSystem()) {
+ if (!loader.HasSystem() && loader.DalvikCacheExists()) {
std::string local_error_msg;
if (!dex2oat_enabled) {
local_error_msg = "Image compilation disabled.";
@@ -1784,8 +1939,9 @@ bool ImageSpace::LoadBootImage(
if (compilation_success) {
if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
/* validate_oat_file */ false,
+ extra_reservation_size,
boot_image_spaces,
- oat_file_end,
+ extra_reservation,
&local_error_msg)) {
return true;
}
@@ -1841,7 +1997,13 @@ ImageSpace::~ImageSpace() {
std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(const char* image,
const OatFile* oat_file,
std::string* error_msg) {
- return Loader::Init(image, image, /*validate_oat_file*/false, oat_file, /*out*/error_msg);
+ return Loader::Init(image,
+ image,
+ /* validate_oat_file */ false,
+ oat_file,
+ /* image_reservation */ nullptr,
+ /* oat_reservation */ nullptr,
+ error_msg);
}
const OatFile* ImageSpace::GetOatFile() const {
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 771ba2acb8..a2490acdbb 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -44,8 +44,9 @@ class ImageSpace : public MemMapSpace {
static bool LoadBootImage(
const std::string& image_location,
const InstructionSet image_isa,
- /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
- /*out*/ uint8_t** oat_file_end) REQUIRES_SHARED(Locks::mutator_lock_);
+ size_t extra_reservation_size,
+ /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+ /*out*/MemMap* extra_reservation) REQUIRES_SHARED(Locks::mutator_lock_);
// Try to open an existing app image space.
static std::unique_ptr<ImageSpace> CreateFromAppImage(const char* image,
@@ -86,11 +87,11 @@ class ImageSpace : public MemMapSpace {
return image_location_;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
// ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
// special cases to test against.
return live_bitmap_.get();
@@ -102,7 +103,7 @@ class ImageSpace : public MemMapSpace {
void Sweep(bool /* swap_bitmaps */, size_t* /* freed_objects */, size_t* /* freed_bytes */) {
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
@@ -182,8 +183,8 @@ class ImageSpace : public MemMapSpace {
ImageSpace(const std::string& name,
const char* image_location,
- MemMap* mem_map,
- accounting::ContinuousSpaceBitmap* live_bitmap,
+ MemMap&& mem_map,
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
uint8_t* end);
// The OatFile associated with the image during early startup to
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index f202a43be9..299a413432 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -41,16 +41,16 @@ TEST_F(DexoptTest, ValidateOatFile) {
args.push_back("--dex-file=" + multidex1);
args.push_back("--dex-file=" + dex2);
args.push_back("--oat-file=" + oat_location);
- ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
+ ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
std::unique_ptr<OatFile> oat(OatFile::Open(/* zip_fd */ -1,
oat_location.c_str(),
oat_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- nullptr,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
+ /* abs_dex_location */ nullptr,
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(oat != nullptr) << error_msg;
@@ -113,7 +113,7 @@ TEST_F(DexoptTest, ValidateOatFile) {
template <bool kImage, bool kRelocate, bool kPatchoat, bool kImageDex2oat>
class ImageSpaceLoadingTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
if (kImage) {
options->emplace_back(android::base::StringPrintf("-Ximage:%s", GetCoreArtLocation().c_str()),
nullptr);
@@ -150,6 +150,48 @@ TEST_F(ImageSpaceNoRelocateNoDex2oatNoPatchoatTest, Test) {
EXPECT_FALSE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
}
+class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false, true> {
+ protected:
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
+ const char* android_data = getenv("ANDROID_DATA");
+ CHECK(android_data != nullptr);
+ old_android_data_ = android_data;
+ bad_android_data_ = old_android_data_ + "/no-android-data";
+ int result = setenv("ANDROID_DATA", bad_android_data_.c_str(), /* replace */ 1);
+ CHECK_EQ(result, 0) << strerror(errno);
+ result = mkdir(bad_android_data_.c_str(), /* mode */ 0700);
+ CHECK_EQ(result, 0) << strerror(errno);
+ // Create a regular file "dalvik-cache". GetDalvikCache() will then get EEXIST
+ // when trying to create a directory with the same name, and creating a
+ // subdirectory for a particular architecture will fail.
+ bad_dalvik_cache_ = bad_android_data_ + "/dalvik-cache";
+ int fd = creat(bad_dalvik_cache_.c_str(), /* mode */ 0);
+ CHECK_NE(fd, -1) << strerror(errno);
+ result = close(fd);
+ CHECK_EQ(result, 0) << strerror(errno);
+ ImageSpaceLoadingTest<false, true, false, true>::SetUpRuntimeOptions(options);
+ }
+
+ void TearDown() override {
+ int result = unlink(bad_dalvik_cache_.c_str());
+ CHECK_EQ(result, 0) << strerror(errno);
+ result = rmdir(bad_android_data_.c_str());
+ CHECK_EQ(result, 0) << strerror(errno);
+ result = setenv("ANDROID_DATA", old_android_data_.c_str(), /* replace */ 1);
+ CHECK_EQ(result, 0) << strerror(errno);
+ ImageSpaceLoadingTest<false, true, false, true>::TearDown();
+ }
+
+ private:
+ std::string old_android_data_;
+ std::string bad_android_data_;
+ std::string bad_dalvik_cache_;
+};
+
+TEST_F(NoAccessAndroidDataTest, Test) {
+ EXPECT_TRUE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
+}
+
} // namespace space
} // namespace gc
} // namespace art
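
NoAccessAndroidDataTest forces the dalvik-cache setup to fail by planting a regular file where a directory is expected. A standalone sketch of that failure-injection trick (the path is hypothetical):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    #include <cassert>
    #include <cerrno>

    int main() {
      const char* path = "/tmp/dalvik-cache-collision";  // hypothetical path

      int fd = creat(path, /* mode */ 0);  // plain file with unusable permissions
      assert(fd != -1);
      close(fd);

      // Directory creation on the same path now fails with EEXIST, which is
      // exactly the condition the test provokes for GetDalvikCache().
      int rc = mkdir(path, 0700);
      assert(rc == -1 && errno == EEXIST);

      unlink(path);
      return 0;
    }
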
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a24ca32314..09d02518a3 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -39,24 +39,20 @@ namespace art {
namespace gc {
namespace space {
-class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+class MemoryToolLargeObjectMapSpace final : public LargeObjectMapSpace {
public:
explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
}
- ~MemoryToolLargeObjectMapSpace() OVERRIDE {
+ ~MemoryToolLargeObjectMapSpace() override {
// Historical note: We were deleting large objects to keep Valgrind happy if there were
// any large objects such as Dex cache arrays which weren't freed since they were held live
// by the class linker.
- MutexLock mu(Thread::Current(), lock_);
- for (auto& m : large_objects_) {
- delete m.second.mem_map;
- }
}
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE {
+ override {
mirror::Object* obj =
LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
@@ -72,21 +68,21 @@ class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
return object_without_rdz;
}
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
}
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE {
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override {
return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
}
- size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
+ size_t Free(Thread* self, mirror::Object* obj) override {
mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
return LargeObjectMapSpace::Free(self, object_with_rdz);
}
- bool Contains(const mirror::Object* obj) const OVERRIDE {
+ bool Contains(const mirror::Object* obj) const override {
return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
}
@@ -139,16 +135,20 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
std::string error_msg;
- MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
- PROT_READ | PROT_WRITE, true, false, &error_msg);
- if (UNLIKELY(mem_map == nullptr)) {
+ MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
+ /* addr */ nullptr,
+ num_bytes,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ &error_msg);
+ if (UNLIKELY(!mem_map.IsValid())) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
return nullptr;
}
- mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
+ mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map.Begin());
+ const size_t allocation_size = mem_map.BaseSize();
MutexLock mu(self, lock_);
- large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
- const size_t allocation_size = mem_map->BaseSize();
+ large_objects_.Put(obj, LargeObject {std::move(mem_map), false /* not zygote */});
DCHECK(bytes_allocated != nullptr);
if (begin_ == nullptr || begin_ > reinterpret_cast<uint8_t*>(obj)) {
@@ -191,13 +191,11 @@ size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
}
- MemMap* mem_map = it->second.mem_map;
- const size_t map_size = mem_map->BaseSize();
+ const size_t map_size = it->second.mem_map.BaseSize();
DCHECK_GE(num_bytes_allocated_, map_size);
size_t allocation_size = map_size;
num_bytes_allocated_ -= allocation_size;
--num_objects_allocated_;
- delete mem_map;
large_objects_.erase(it);
return allocation_size;
}
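
The Alloc change above also reorders the bookkeeping: mem_map.BaseSize() is read before the map is moved into large_objects_, because after the move the local object no longer owns anything. A tiny sketch of the capture-before-move rule, with a move-only stand-in for MemMap:

    #include <cassert>
    #include <map>
    #include <memory>
    #include <utility>

    int main() {
      std::map<int, std::unique_ptr<int>> table;  // stands in for large_objects_
      auto value = std::make_unique<int>(42);     // stands in for the MemMap

      int size = *value;                   // capture what we need first...
      table.emplace(0, std::move(value));  // ...then hand off ownership
      assert(size == 42);
      assert(value == nullptr);            // moved-from: must not be used
      return 0;
    }
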
@@ -206,7 +204,7 @@ size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_s
MutexLock mu(Thread::Current(), lock_);
auto it = large_objects_.find(obj);
CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
- size_t alloc_size = it->second.mem_map->BaseSize();
+ size_t alloc_size = it->second.mem_map.BaseSize();
if (usable_size != nullptr) {
*usable_size = alloc_size;
}
@@ -227,7 +225,7 @@ size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object*
void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
MutexLock mu(Thread::Current(), lock_);
for (auto& pair : large_objects_) {
- MemMap* mem_map = pair.second.mem_map;
+ MemMap* mem_map = &pair.second.mem_map;
callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
callback(nullptr, nullptr, 0, arg);
}
@@ -326,7 +324,7 @@ class AllocationInfo {
size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
DCHECK_GE(info, allocation_info_);
- DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
+ DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_.End()));
return info - allocation_info_;
}
@@ -350,28 +348,37 @@ inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
CHECK_EQ(size % kAlignment, 0U);
std::string error_msg;
- MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
- PROT_READ | PROT_WRITE, true, false, &error_msg);
- CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
- return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
+ MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+ requested_begin,
+ size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ &error_msg);
+ CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
+ return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
}
-FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
+FreeListSpace::FreeListSpace(const std::string& name,
+ MemMap&& mem_map,
+ uint8_t* begin,
+ uint8_t* end)
: LargeObjectSpace(name, begin, end),
- mem_map_(mem_map),
+ mem_map_(std::move(mem_map)),
lock_("free list space lock", kAllocSpaceLock) {
const size_t space_capacity = end - begin;
free_end_ = space_capacity;
CHECK_ALIGNED(space_capacity, kAlignment);
const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
std::string error_msg;
- allocation_info_map_.reset(
+ allocation_info_map_ =
MemMap::MapAnonymous("large object free list space allocation info map",
- nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
- false, false, &error_msg));
- CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map"
- << error_msg;
- allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
+ /* addr */ nullptr,
+ alloc_info_size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map: " << error_msg;
+ allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
}
FreeListSpace::~FreeListSpace() {}
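
FreeListSpace keeps its allocation metadata out of line: one AllocationInfo record per kAlignment-sized slot, so an address maps to its record by pointer arithmetic rather than a lookup. A standalone sketch of that indexing (illustrative names and sizes, not ART's types):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct AllocationInfo {
      uint32_t num_slots = 0;  // length of the allocation, in slots
    };

    int main() {
      constexpr size_t kAlignment = 4096;            // one record per page
      constexpr size_t kCapacity = 64 * kAlignment;  // space size
      const uintptr_t begin = 0x10000000;            // pretend space base address

      std::vector<AllocationInfo> table(kCapacity / kAlignment);

      uintptr_t obj = begin + 5 * kAlignment;    // some allocation's address
      size_t slot = (obj - begin) / kAlignment;  // cf. GetSlotIndexForAddress
      table[slot].num_slots = 3;                 // a 3-page allocation

      // cf. GetSlotIndexForAllocationInfo: record address back to slot index.
      assert(static_cast<size_t>(&table[slot] - &table[0]) == slot);
      return 0;
    }
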
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index f37d814ffe..26c6463319 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -41,7 +41,7 @@ enum class LargeObjectSpaceType {
// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
public:
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeLargeObjectSpace;
}
void SwapBitmaps();
@@ -49,10 +49,10 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
virtual ~LargeObjectSpace() {}
- uint64_t GetBytesAllocated() OVERRIDE {
+ uint64_t GetBytesAllocated() override {
return num_bytes_allocated_;
}
- uint64_t GetObjectsAllocated() OVERRIDE {
+ uint64_t GetObjectsAllocated() override {
return num_objects_allocated_;
}
uint64_t GetTotalBytesAllocated() const {
@@ -61,22 +61,22 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
uint64_t GetTotalObjectsAllocated() const {
return total_objects_allocated_;
}
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
// LargeObjectSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
- bool IsAllocSpace() const OVERRIDE {
+ bool IsAllocSpace() const override {
return true;
}
- AllocSpace* AsAllocSpace() OVERRIDE {
+ AllocSpace* AsAllocSpace() override {
return this;
}
collector::ObjectBytePair Sweep(bool swap_bitmaps);
- virtual bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
// Current address at which the space begins, which may vary as the space is filled.
@@ -96,7 +96,7 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return Begin() <= byte_obj && byte_obj < End();
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if the large object is a zygote large object. Potentially slow.
@@ -140,22 +140,22 @@ class LargeObjectMapSpace : public LargeObjectSpace {
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
REQUIRES(!lock_);
size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
- void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
- std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+ std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
protected:
struct LargeObject {
- MemMap* mem_map;
+ MemMap mem_map;
bool is_zygote;
};
explicit LargeObjectMapSpace(const std::string& name);
virtual ~LargeObjectMapSpace() {}
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
// Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -164,25 +164,25 @@ class LargeObjectMapSpace : public LargeObjectSpace {
};
// A continuous large object space with a free-list to handle holes.
-class FreeListSpace FINAL : public LargeObjectSpace {
+class FreeListSpace final : public LargeObjectSpace {
public:
static constexpr size_t kAlignment = kPageSize;
virtual ~FreeListSpace();
static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
- size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
- void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+ override REQUIRES(!lock_);
+ size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
+ void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
void Dump(std::ostream& os) const REQUIRES(!lock_);
- std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+ std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
protected:
- FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
+ FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
size_t GetSlotIndexForAddress(uintptr_t address) const {
DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
@@ -198,8 +198,8 @@ class FreeListSpace FINAL : public LargeObjectSpace {
}
// Removes header from the free blocks set by finding the corresponding iterator and erasing it.
void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
- bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+ bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
class SortByPrevFree {
public:
@@ -210,9 +210,9 @@ class FreeListSpace FINAL : public LargeObjectSpace {
// There is no footer for any allocations at the end of the space, so we keep track of how much
// free space there is at the end manually.
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
// Side table for allocation info, one per page.
- std::unique_ptr<MemMap> allocation_info_map_;
+ MemMap allocation_info_map_;
AllocationInfo* allocation_info_;
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 6936fdc6d4..445560ad8d 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -40,19 +40,26 @@ using android::base::StringPrintf;
size_t MallocSpace::bitmap_index_ = 0;
-MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
- bool create_bitmaps, bool can_move_objects, size_t starting_size,
+MallocSpace::MallocSpace(const std::string& name,
+ MemMap&& mem_map,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool create_bitmaps,
+ bool can_move_objects,
+ size_t starting_size,
size_t initial_size)
- : ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect),
+ : ContinuousMemMapAllocSpace(
+ name, std::move(mem_map), begin, end, limit, kGcRetentionPolicyAlwaysCollect),
recent_free_pos_(0), lock_("allocation space lock", kAllocSpaceLock),
growth_limit_(growth_limit), can_move_objects_(can_move_objects),
starting_size_(starting_size), initial_size_(initial_size) {
if (create_bitmaps) {
size_t bitmap_index = bitmap_index_++;
static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
- CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->Begin()), kGcCardSize);
- CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->End()), kGcCardSize);
+ CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map_.Begin()), kGcCardSize);
+ CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map_.End()), kGcCardSize);
live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), NonGrowthLimitCapacity()));
@@ -70,8 +77,12 @@ MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
}
}
-MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
- size_t* growth_limit, size_t* capacity, uint8_t* requested_begin) {
+MemMap MallocSpace::CreateMemMap(const std::string& name,
+ size_t starting_size,
+ size_t* initial_size,
+ size_t* growth_limit,
+ size_t* capacity,
+ uint8_t* requested_begin) {
// Sanity check arguments
if (starting_size > *initial_size) {
*initial_size = starting_size;
@@ -80,13 +91,13 @@ MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size,
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
<< PrettySize(*initial_size) << ") is larger than its capacity ("
<< PrettySize(*growth_limit) << ")";
- return nullptr;
+ return MemMap::Invalid();
}
if (*growth_limit > *capacity) {
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
<< PrettySize(*growth_limit) << ") is larger than the capacity ("
<< PrettySize(*capacity) << ")";
- return nullptr;
+ return MemMap::Invalid();
}
// Page align growth limit and capacity which will be used to manage mmapped storage
@@ -94,9 +105,13 @@ MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size,
*capacity = RoundUp(*capacity, kPageSize);
std::string error_msg;
- MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, *capacity,
- PROT_READ | PROT_WRITE, true, false, &error_msg);
- if (mem_map == nullptr) {
+ MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+ requested_begin,
+ *capacity,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ &error_msg);
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(*capacity) << ": " << error_msg;
}
@@ -194,18 +209,24 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
VLOG(heap) << "Capacity " << PrettySize(capacity);
// Remap the tail.
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(GetMemMap()->RemapAtEnd(End(), alloc_space_name,
- PROT_READ | PROT_WRITE, &error_msg));
- CHECK(mem_map.get() != nullptr) << error_msg;
- void* allocator = CreateAllocator(End(), starting_size_, initial_size_, capacity,
- low_memory_mode);
+ MemMap mem_map = GetMemMap()->RemapAtEnd(
+ End(), alloc_space_name, PROT_READ | PROT_WRITE, &error_msg);
+ CHECK(mem_map.IsValid()) << error_msg;
+ void* allocator =
+ CreateAllocator(End(), starting_size_, initial_size_, capacity, low_memory_mode);
// Protect memory beyond the initial size.
- uint8_t* end = mem_map->Begin() + starting_size_;
+ uint8_t* end = mem_map.Begin() + starting_size_;
if (capacity > initial_size_) {
CheckedCall(mprotect, alloc_space_name, end, capacity - initial_size_, PROT_NONE);
}
- *out_malloc_space = CreateInstance(mem_map.release(), alloc_space_name, allocator, End(), end,
- limit_, growth_limit, CanMoveObjects());
+ *out_malloc_space = CreateInstance(std::move(mem_map),
+ alloc_space_name,
+ allocator,
+ End(),
+ end,
+ limit_,
+ growth_limit,
+ CanMoveObjects());
SetLimit(End());
live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
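Taken together, the malloc_space.cc hunks above replace heap-allocated MemMap* (previously held in std::unique_ptr) with a moveable MemMap value: failure becomes MemMap::Invalid() instead of nullptr, callers test IsValid(), and ownership is handed on with std::move. A minimal sketch of the resulting pattern — the MemMap calls mirror those visible in this diff, while the wrapper function and its name are illustrative only:

    // Hypothetical factory following the new value-semantics convention.
    MemMap CreateSpaceMap(const std::string& name, size_t capacity, std::string* error_msg) {
      MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
                                            /* requested_begin */ nullptr,
                                            capacity,
                                            PROT_READ | PROT_WRITE,
                                            /* low_4gb */ true,
                                            error_msg);
      if (!mem_map.IsValid()) {
        return MemMap::Invalid();  // was: return nullptr;
      }
      return mem_map;  // moved out; no release()/delete needed
    }

    // Caller side:
    //   MemMap map = CreateSpaceMap(name, capacity, &error_msg);
    //   if (!map.IsValid()) { /* report error_msg */ }
    //   MallocSpace* space = CreateInstance(std::move(map), ...);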
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index c1f4841cb6..6bf2d71c7c 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -113,9 +113,14 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
void SetGrowthLimit(size_t growth_limit);
- virtual MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
- uint8_t* begin, uint8_t* end, uint8_t* limit,
- size_t growth_limit, bool can_move_objects) = 0;
+ virtual MallocSpace* CreateInstance(MemMap&& mem_map,
+ const std::string& name,
+ void* allocator,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects) = 0;
  // Splits this space into a zygote space and a new malloc space which receives the unused memory. When true,
// the low memory mode argument specifies that the heap wishes the created space to be more
@@ -128,7 +133,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return can_move_objects_;
}
@@ -137,12 +142,23 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
}
protected:
- MallocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end,
- uint8_t* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
- size_t starting_size, size_t initial_size);
-
- static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
- size_t* growth_limit, size_t* capacity, uint8_t* requested_begin);
+ MallocSpace(const std::string& name,
+ MemMap&& mem_map,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool create_bitmaps,
+ bool can_move_objects,
+ size_t starting_size,
+ size_t initial_size);
+
+ static MemMap CreateMemMap(const std::string& name,
+ size_t starting_size,
+ size_t* initial_size,
+ size_t* growth_limit,
+ size_t* capacity,
+ uint8_t* requested_begin);
// When true the low memory mode argument specifies that the heap wishes the created allocator to
// be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/memory_tool_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h
index c022171082..f1c1cb8ca2 100644
--- a/runtime/gc/space/memory_tool_malloc_space-inl.h
+++ b/runtime/gc/space/memory_tool_malloc_space-inl.h
@@ -267,8 +267,8 @@ MemoryToolMallocSpace<S,
kMemoryToolRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::MemoryToolMallocSpace(
- MemMap* mem_map, size_t initial_size, Params... params)
- : S(mem_map, initial_size, params...) {
+ MemMap&& mem_map, size_t initial_size, Params... params)
+ : S(std::move(mem_map), initial_size, params...) {
// Don't want to change the memory tool states of the mem map here as the allocator is already
// initialized at this point and that may interfere with what the allocator does internally. Note
// that the tail beyond the initial size is mprotected.
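The constructor change above threads a MemMap&& ahead of a variadic parameter pack. A minimal, self-contained sketch of that shape (an assumed simplification — the real template also carries red-zone and size-adjustment parameters):

    #include <cstddef>
    #include <utility>

    template <typename BaseSpace>
    class ToolWrappedSpace : public BaseSpace {
     public:
      // The map is moved into the base class; the remaining constructor
      // arguments are passed through by value, as in the diff above.
      template <typename... Params>
      ToolWrappedSpace(MemMap&& mem_map, size_t initial_size, Params... params)
          : BaseSpace(std::move(mem_map), initial_size, params...) {}
    };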
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index e53f009213..33bddfa4c8 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -29,31 +29,31 @@ template <typename BaseMallocSpaceType,
size_t kMemoryToolRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
-class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
+class MemoryToolMallocSpace final : public BaseMallocSpaceType {
public:
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE;
+ override;
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_);
+ override REQUIRES(Locks::mutator_lock_);
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES_SHARED(Locks::mutator_lock_);
- void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
+ void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) override {}
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override;
template <typename... Params>
- MemoryToolMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
+ MemoryToolMallocSpace(MemMap&& mem_map, size_t initial_size, Params... params);
virtual ~MemoryToolMallocSpace() {}
private:
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 436eb2c09b..e04851564d 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -355,6 +355,10 @@ inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
// We make 'top' all usable bytes, as the caller of this
// allocation may use all of 'usable_size' (see mirror::Array::Alloc).
first_reg->SetTop(first_reg->Begin() + allocated);
+ if (!kForEvac) {
+ // Evac doesn't count as newly allocated.
+ first_reg->SetNewlyAllocated();
+ }
for (size_t p = left + 1; p < right; ++p) {
DCHECK_LT(p, num_regions_);
DCHECK(regions_[p].IsFree());
@@ -364,6 +368,10 @@ inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
} else {
++num_non_free_regions_;
}
+ if (!kForEvac) {
+ // Evac doesn't count as newly allocated.
+ regions_[p].SetNewlyAllocated();
+ }
}
*bytes_allocated = allocated;
if (usable_size != nullptr) {
@@ -407,7 +415,7 @@ inline void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_alloc
--num_non_free_regions_;
}
}
- if (end_addr < Limit()) {
+ if (kIsDebugBuild && end_addr < Limit()) {
// If we aren't at the end of the space, check that the next region is not a large tail.
Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
DCHECK(!following_reg->IsLargeTail());
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 9c6a73cbf4..f74fa86467 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -31,8 +31,7 @@ namespace space {
static constexpr uint kEvacuateLivePercentThreshold = 75U;
// Whether we protect the unused and cleared regions.
-// Only protect for target builds to prevent flaky test failures (b/63131961).
-static constexpr bool kProtectClearedRegions = kIsTargetBuild;
+static constexpr bool kProtectClearedRegions = true;
// Whether we poison memory areas occupied by dead objects in unevacuated regions.
static constexpr bool kPoisonDeadObjectsInUnevacuatedRegions = true;
@@ -46,60 +45,64 @@ static constexpr uint32_t kPoisonDeadObject = 0xBADDB01D; // "BADDROID"
// Whether we check a region's live bytes count against the region bitmap.
static constexpr bool kCheckLiveBytesAgainstRegionBitmap = kIsDebugBuild;
-MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
- uint8_t* requested_begin) {
+MemMap RegionSpace::CreateMemMap(const std::string& name,
+ size_t capacity,
+ uint8_t* requested_begin) {
CHECK_ALIGNED(capacity, kRegionSize);
std::string error_msg;
  // Ask for an additional kRegionSize of capacity so that we can align the map by kRegionSize
  // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
- std::unique_ptr<MemMap> mem_map;
+ MemMap mem_map;
while (true) {
- mem_map.reset(MemMap::MapAnonymous(name.c_str(),
- requested_begin,
- capacity + kRegionSize,
- PROT_READ | PROT_WRITE,
- true,
- false,
- &error_msg));
- if (mem_map.get() != nullptr || requested_begin == nullptr) {
+ mem_map = MemMap::MapAnonymous(name.c_str(),
+ requested_begin,
+ capacity + kRegionSize,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ true,
+ &error_msg);
+ if (mem_map.IsValid() || requested_begin == nullptr) {
break;
}
    // Retry without a specified begin address.
requested_begin = nullptr;
}
- if (mem_map.get() == nullptr) {
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity) << " with message " << error_msg;
MemMap::DumpMaps(LOG_STREAM(ERROR));
- return nullptr;
+ return MemMap::Invalid();
}
- CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
- CHECK_EQ(mem_map->Begin(), mem_map->BaseBegin());
- CHECK_EQ(mem_map->Size(), mem_map->BaseSize());
- if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
+ CHECK_EQ(mem_map.Size(), capacity + kRegionSize);
+ CHECK_EQ(mem_map.Begin(), mem_map.BaseBegin());
+ CHECK_EQ(mem_map.Size(), mem_map.BaseSize());
+ if (IsAlignedParam(mem_map.Begin(), kRegionSize)) {
    // Got an aligned map. Since we requested a map that's kRegionSize larger, shrink by
    // kRegionSize at the end.
- mem_map->SetSize(capacity);
+ mem_map.SetSize(capacity);
} else {
    // Got an unaligned map. Align both ends.
- mem_map->AlignBy(kRegionSize);
+ mem_map.AlignBy(kRegionSize);
}
- CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
- CHECK_ALIGNED(mem_map->End(), kRegionSize);
- CHECK_EQ(mem_map->Size(), capacity);
- return mem_map.release();
+ CHECK_ALIGNED(mem_map.Begin(), kRegionSize);
+ CHECK_ALIGNED(mem_map.End(), kRegionSize);
+ CHECK_EQ(mem_map.Size(), capacity);
+ return mem_map;
}
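The retry loop and alignment logic above over-allocate by one region so that a kRegionSize-aligned window of exactly `capacity` bytes always exists inside the mapping; SetSize trims the tail when the base is already aligned, and AlignBy fixes both ends otherwise. A standalone sketch of the underlying arithmetic, with a hypothetical AlignUp helper (kRegionSize is 256 KiB in ART at the time of this change):

    #include <cstdint>

    constexpr std::uintptr_t kRegionSize = 256 * 1024;  // power of two

    constexpr std::uintptr_t AlignUp(std::uintptr_t addr, std::uintptr_t align) {
      return (addr + align - 1) & ~(align - 1);
    }

    // Given the base of a (capacity + kRegionSize) mapping, the aligned window
    // always fits: AlignUp(base) - base < kRegionSize, hence
    // AlignUp(base) + capacity <= base + capacity + kRegionSize.
    constexpr std::uintptr_t AlignedBegin(std::uintptr_t base) {
      return AlignUp(base, kRegionSize);
    }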
-RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
- return new RegionSpace(name, mem_map);
+RegionSpace* RegionSpace::Create(const std::string& name, MemMap&& mem_map) {
+ return new RegionSpace(name, std::move(mem_map));
}
-RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
- : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map)
+ : ContinuousMemMapAllocSpace(name,
+ std::move(mem_map),
+ mem_map.Begin(),
+ mem_map.End(),
+ mem_map.End(),
kGcRetentionPolicyAlwaysCollect),
region_lock_("Region lock", kRegionSpaceRegionLock),
time_(1U),
- num_regions_(mem_map->Size() / kRegionSize),
+ num_regions_(mem_map_.Size() / kRegionSize),
num_non_free_regions_(0U),
num_evac_regions_(0U),
max_peak_num_non_free_regions_(0U),
@@ -107,11 +110,11 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
current_region_(&full_region_),
evac_region_(nullptr),
cyclic_alloc_region_index_(0U) {
- CHECK_ALIGNED(mem_map->Size(), kRegionSize);
- CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
+ CHECK_ALIGNED(mem_map_.Size(), kRegionSize);
+ CHECK_ALIGNED(mem_map_.Begin(), kRegionSize);
DCHECK_GT(num_regions_, 0U);
regions_.reset(new Region[num_regions_]);
- uint8_t* region_addr = mem_map->Begin();
+ uint8_t* region_addr = mem_map_.Begin();
for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
regions_[i].Init(i, region_addr, region_addr + kRegionSize);
}
@@ -172,15 +175,54 @@ size_t RegionSpace::ToSpaceSize() {
return num_regions * kRegionSize;
}
-inline bool RegionSpace::Region::ShouldBeEvacuated() {
+inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) {
+ // Evacuation mode `kEvacModeNewlyAllocated` is only used during sticky-bit CC collections.
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection || (evac_mode != kEvacModeNewlyAllocated));
DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
// The region should be evacuated if:
+ // - the evacuation is forced (`evac_mode == kEvacModeForceAll`); or
// - the region was allocated after the start of the previous GC (newly allocated region); or
// - the live ratio is below threshold (`kEvacuateLivePercentThreshold`).
- bool result;
+ if (UNLIKELY(evac_mode == kEvacModeForceAll)) {
+ return true;
+ }
+ bool result = false;
if (is_newly_allocated_) {
- result = true;
- } else {
+ // Invariant: newly allocated regions have an undefined live bytes count.
+ DCHECK_EQ(live_bytes_, static_cast<size_t>(-1));
+ if (IsAllocated()) {
+ // We always evacuate newly-allocated non-large regions as we
+ // believe they contain many dead objects (a very simple form of
+ // the generational hypothesis, even before the Sticky-Bit CC
+ // approach).
+ //
+ // TODO: Verify that assertion by collecting statistics on the
+ // number/proportion of live objects in newly allocated regions
+ // in RegionSpace::ClearFromSpace.
+ //
+ // Note that a side effect of evacuating a newly-allocated
+ // non-large region is that the "newly allocated" status will
+ // later be removed, as its live objects will be copied to an
+ // evacuation region, which won't be marked as "newly
+ // allocated" (see RegionSpace::AllocateRegion).
+ result = true;
+ } else {
+ DCHECK(IsLarge());
+ // We never want to evacuate a large region (and the associated
+ // tail regions), except if:
+ // - we are forced to do so (see the `kEvacModeForceAll` case
+ // above); or
+ // - we know that the (sole) object contained in this region is
+ // dead (see the corresponding logic below, in the
+ // `kEvacModeLivePercentNewlyAllocated` case).
+ // For a newly allocated region (i.e. allocated since the
+ // previous GC started), we don't have any liveness information
+ // (the live bytes count is -1 -- also note this region has been
+ // a to-space one between the time of its allocation and now),
+ // so we prefer not to evacuate it.
+ result = false;
+ }
+ } else if (evac_mode == kEvacModeLivePercentNewlyAllocated) {
bool is_live_percent_valid = (live_bytes_ != static_cast<size_t>(-1));
if (is_live_percent_valid) {
DCHECK(IsInToSpace());
@@ -205,9 +247,48 @@ inline bool RegionSpace::Region::ShouldBeEvacuated() {
return result;
}
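For reference, the decision implemented above condenses to a small table. A sketch only (names hypothetical; SIZE_MAX stands for the static_cast<size_t>(-1) "undefined" live bytes count, and 75 mirrors kEvacuateLivePercentThreshold from earlier in this file):

    #include <cstddef>
    #include <cstdint>

    enum EvacMode {
      kEvacModeNewlyAllocated,
      kEvacModeLivePercentNewlyAllocated,
      kEvacModeForceAll,
    };

    bool ShouldEvacuate(EvacMode mode, bool newly_allocated, bool is_large,
                        size_t live_bytes, size_t allocated_bytes) {
      if (mode == kEvacModeForceAll) {
        return true;  // forced evacuation wins unconditionally
      }
      if (newly_allocated) {
        // Non-large newly allocated regions are presumed mostly dead
        // (generational hypothesis); large ones have no liveness info
        // yet, so they stay in place.
        return !is_large;
      }
      if (mode == kEvacModeLivePercentNewlyAllocated && live_bytes != SIZE_MAX) {
        return live_bytes * 100U < 75U * allocated_bytes;  // live % below threshold
      }
      return false;
    }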
+void RegionSpace::ZeroLiveBytesForLargeObject(mirror::Object* obj) {
+ // This method is only used when Generational CC collection is enabled.
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+
+ // This code uses a logic similar to the one used in RegionSpace::FreeLarge
+ // to traverse the regions supporting `obj`.
+ // TODO: Refactor.
+ DCHECK(IsLargeObject(obj));
+ DCHECK_ALIGNED(obj, kRegionSize);
+ size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
+ DCHECK_GT(obj_size, space::RegionSpace::kRegionSize);
+ // Size of the memory area allocated for `obj`.
+ size_t obj_alloc_size = RoundUp(obj_size, space::RegionSpace::kRegionSize);
+ uint8_t* begin_addr = reinterpret_cast<uint8_t*>(obj);
+ uint8_t* end_addr = begin_addr + obj_alloc_size;
+ DCHECK_ALIGNED(end_addr, kRegionSize);
+
+ // Zero the live bytes of the large region and large tail regions containing the object.
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
+ Region* region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
+ if (addr == begin_addr) {
+ DCHECK(region->IsLarge());
+ } else {
+ DCHECK(region->IsLargeTail());
+ }
+ region->ZeroLiveBytes();
+ }
+ if (kIsDebugBuild && end_addr < Limit()) {
+ // If we aren't at the end of the space, check that the next region is not a large tail.
+ Region* following_region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
+ DCHECK(!following_region->IsLargeTail());
+ }
+}
+
// Determine which regions to evacuate and mark them as
// from-space. Mark the rest as unevacuated from-space.
-void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) {
+void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table,
+ EvacMode evac_mode,
+ bool clear_live_bytes) {
+ // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes);
++time_;
if (kUseTableLookupReadBarrier) {
DCHECK(rb_table->IsAllCleared());
@@ -233,12 +314,12 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
DCHECK((state == RegionState::kRegionStateAllocated ||
state == RegionState::kRegionStateLarge) &&
type == RegionType::kRegionTypeToSpace);
- bool should_evacuate = force_evacuate_all || r->ShouldBeEvacuated();
+ bool should_evacuate = r->ShouldBeEvacuated(evac_mode);
if (should_evacuate) {
r->SetAsFromSpace();
DCHECK(r->IsInFromSpace());
} else {
- r->SetAsUnevacFromSpace();
+ r->SetAsUnevacFromSpace(clear_live_bytes);
DCHECK(r->IsInUnevacFromSpace());
}
if (UNLIKELY(state == RegionState::kRegionStateLarge &&
@@ -254,7 +335,7 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
r->SetAsFromSpace();
DCHECK(r->IsInFromSpace());
} else {
- r->SetAsUnevacFromSpace();
+ r->SetAsUnevacFromSpace(clear_live_bytes);
DCHECK(r->IsInUnevacFromSpace());
}
--num_expected_large_tails;
@@ -266,6 +347,8 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
rb_table->Clear(r->Begin(), r->End());
}
}
+ // Invariant: There should be no newly-allocated region in the from-space.
+ DCHECK(!r->is_newly_allocated_);
}
DCHECK_EQ(num_expected_large_tails, 0U);
current_region_ = &full_region_;
@@ -371,8 +454,9 @@ void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
++regions_to_clear_bitmap;
}
- // Optimization: If the live bytes are *all* live in a region
- // then the live-bit information for these objects is superfluous:
+ // Optimization (for full CC only): If the live bytes are *all* live
+ // in a region then the live-bit information for these objects is
+ // superfluous:
// - We can determine that these objects are all live by using
// Region::AllAllocatedBytesAreLive (which just checks whether
// `LiveBytes() == static_cast<size_t>(Top() - Begin())`).
@@ -381,19 +465,44 @@ void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
// live bits (see RegionSpace::WalkInternal).
// Therefore, we can clear the bits for these objects in the
// (live) region space bitmap (and release the corresponding pages).
- GetLiveBitmap()->ClearRange(
- reinterpret_cast<mirror::Object*>(r->Begin()),
- reinterpret_cast<mirror::Object*>(r->Begin() + regions_to_clear_bitmap * kRegionSize));
+ //
+ // This optimization is incompatible with Generational CC, because:
+ // - minor (young-generation) collections need to know which objects
+ // were marked during the previous GC cycle, meaning all mark bitmaps
+ // (this includes the region space bitmap) need to be preserved
+ // between a (minor or major) collection N and a following minor
+ // collection N+1;
+ // - at this stage (in the current GC cycle), we cannot determine
+ // whether the next collection will be a minor or a major one;
+ // This means that we need to be conservative and always preserve the
+ // region space bitmap when using Generational CC.
+ // Note that major collections do not require the previous mark bitmaps
+ // to be preserved, and as a matter of fact they do clear the region space
+ // bitmap. But they cannot do so before we know the next GC cycle will
+ // be a major one, so this operation happens at the beginning of such a
+ // major collection, before marking starts.
+ if (!kEnableGenerationalConcurrentCopyingCollection) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(r->Begin()),
+ reinterpret_cast<mirror::Object*>(r->Begin() + regions_to_clear_bitmap * kRegionSize));
+ }
// Skip over extra regions for which we cleared the bitmaps: we shall not clear them,
// as they are unevac regions that are live.
// Subtract one for the for-loop.
i += regions_to_clear_bitmap - 1;
} else {
- // Only some allocated bytes are live in this unevac region.
- // This should only happen for an allocated non-large region.
- DCHECK(r->IsAllocated()) << r->State();
- if (kPoisonDeadObjectsInUnevacuatedRegions) {
- PoisonDeadObjectsInUnevacuatedRegion(r);
+ // TODO: Explain why we do not poison dead objects in region
+ // `r` when it has an undefined live bytes count (i.e. when
+ // `r->LiveBytes() == static_cast<size_t>(-1)`) with
+ // Generational CC.
+ if (!kEnableGenerationalConcurrentCopyingCollection ||
+ (r->LiveBytes() != static_cast<size_t>(-1))) {
+ // Only some allocated bytes are live in this unevac region.
+ // This should only happen for an allocated non-large region.
+ DCHECK(r->IsAllocated()) << r->State();
+ if (kPoisonDeadObjectsInUnevacuatedRegions) {
+ PoisonDeadObjectsInUnevacuatedRegion(r);
+ }
}
}
}
@@ -746,6 +855,10 @@ RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
Region* r = &regions_[region_index];
if (r->IsFree()) {
r->Unfree(this, time_);
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ // TODO: Add an explanation for this assertion.
+ DCHECK(!for_evac || !r->is_newly_allocated_);
+ }
if (for_evac) {
++num_evac_regions_;
// Evac doesn't count as newly allocated.
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index ef2e1377b9..0bf4f38a4b 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -39,19 +39,25 @@ namespace space {
static constexpr bool kCyclicRegionAllocation = true;
// A space that consists of equal-sized regions.
-class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
+class RegionSpace final : public ContinuousMemMapAllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
- SpaceType GetType() const OVERRIDE {
+ enum EvacMode {
+ kEvacModeNewlyAllocated,
+ kEvacModeLivePercentNewlyAllocated,
+ kEvacModeForceAll,
+ };
+
+ SpaceType GetType() const override {
return kSpaceTypeRegionSpace;
}
// Create a region space mem map with the requested sizes. The requested base address is not
// guaranteed to be granted; if it is required, the caller should call Begin on the returned
// space to confirm the request was granted.
- static MemMap* CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
- static RegionSpace* Create(const std::string& name, MemMap* mem_map);
+ static MemMap CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
+ static RegionSpace* Create(const std::string& name, MemMap&& mem_map);
// Allocate `num_bytes`, returns null if the space is full.
mirror::Object* Alloc(Thread* self,
@@ -59,14 +65,14 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
/* out */ size_t* bytes_allocated,
/* out */ size_t* usable_size,
/* out */ size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!region_lock_);
+ override REQUIRES(!region_lock_);
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self,
size_t num_bytes,
/* out */ size_t* bytes_allocated,
/* out */ size_t* usable_size,
/* out */ size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+ override REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
// The main allocation routine.
template<bool kForEvac>
ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes,
@@ -84,29 +90,29 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
- size_t Free(Thread*, mirror::Object*) OVERRIDE {
+ size_t Free(Thread*, mirror::Object*) override {
UNIMPLEMENTED(FATAL);
return 0;
}
- size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+ size_t FreeList(Thread*, size_t, mirror::Object**) override {
UNIMPLEMENTED(FATAL);
return 0;
}
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return mark_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return mark_bitmap_.get();
}
- void Clear() OVERRIDE REQUIRES(!region_lock_);
+ void Clear() override REQUIRES(!region_lock_);
// Remove read and write memory protection from the whole region space,
// i.e. make memory pages backing the region area not readable and not
@@ -182,7 +188,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return num_regions_;
}
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return true;
}
@@ -191,7 +197,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return byte_obj >= Begin() && byte_obj < Limit();
}
- RegionSpace* AsRegionSpace() OVERRIDE {
+ RegionSpace* AsRegionSpace() override {
return this;
}
@@ -206,10 +212,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
WalkInternal<true /* kToSpaceOnly */>(visitor);
}
- accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return nullptr;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
// Object alignment within the space.
@@ -241,6 +247,14 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return false;
}
+ bool IsLargeObject(mirror::Object* ref) {
+ if (HasAddress(ref)) {
+ Region* r = RefToRegionUnlocked(ref);
+ return r->IsLarge();
+ }
+ return false;
+ }
+
bool IsInToSpace(mirror::Object* ref) {
if (HasAddress(ref)) {
Region* r = RefToRegionUnlocked(ref);
@@ -266,9 +280,15 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return r->Type();
}
+ // Zero live bytes for a large object, used by young gen CC for marking newly allocated large
+ // objects.
+ void ZeroLiveBytesForLargeObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Determine which regions to evacuate and tag them as
// from-space. Tag the rest as unevacuated from-space.
- void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
+ void SetFromSpace(accounting::ReadBarrierTable* rb_table,
+ EvacMode evac_mode,
+ bool clear_live_bytes)
REQUIRES(!region_lock_);
size_t FromSpaceSize() REQUIRES(!region_lock_);
@@ -301,7 +321,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
}
private:
- RegionSpace(const std::string& name, MemMap* mem_map);
+ RegionSpace(const std::string& name, MemMap&& mem_map);
template<bool kToSpaceOnly, typename Visitor>
ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
@@ -397,6 +417,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return is_large;
}
+ void ZeroLiveBytes() {
+ live_bytes_ = 0;
+ }
+
// Large-tail allocated.
bool IsLargeTail() const {
bool is_large_tail = (state_ == RegionState::kRegionStateLargeTail);
@@ -436,6 +460,18 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
void SetAsFromSpace() {
DCHECK(!IsFree() && IsInToSpace());
type_ = RegionType::kRegionTypeFromSpace;
+ if (IsNewlyAllocated()) {
+ // Clear the "newly allocated" status here, as we do not want the
+ // GC to see it when encountering references in the from-space.
+ //
+ // Invariant: There should be no newly-allocated region in the
+ // from-space (when the from-space exists, which is between the calls
+ // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
+ is_newly_allocated_ = false;
+ }
+ // Set live bytes to an invalid value, as we have made an
+ // evacuation decision (possibly based on the percentage of live
+ // bytes).
live_bytes_ = static_cast<size_t>(-1);
}
@@ -443,10 +479,32 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
// collection, RegionSpace::ClearFromSpace will preserve the space
// used by this region, and tag it as to-space (see
// Region::SetUnevacFromSpaceAsToSpace below).
- void SetAsUnevacFromSpace() {
+ void SetAsUnevacFromSpace(bool clear_live_bytes) {
+ // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes);
DCHECK(!IsFree() && IsInToSpace());
type_ = RegionType::kRegionTypeUnevacFromSpace;
- live_bytes_ = 0U;
+ if (IsNewlyAllocated()) {
+ // A newly allocated region set as unevac from-space must be
+ // a large or large tail region.
+ DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
+ // Always clear the live bytes of a newly allocated (large or
+ // large tail) region.
+ clear_live_bytes = true;
+ // Clear the "newly allocated" status here, as we do not want the
+ // GC to see it when encountering (and processing) references in the
+ // from-space.
+ //
+ // Invariant: There should be no newly-allocated region in the
+ // from-space (when the from-space exists, which is between the calls
+ // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
+ is_newly_allocated_ = false;
+ }
+ if (clear_live_bytes) {
+ // Reset the live bytes, as we have made a non-evacuation
+ // decision (possibly based on the percentage of live bytes).
+ live_bytes_ = 0;
+ }
}
// Set this region as to-space. Used by RegionSpace::ClearFromSpace.
@@ -457,7 +515,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
}
// Return whether this region should be evacuated. Used by RegionSpace::SetFromSpace.
- ALWAYS_INLINE bool ShouldBeEvacuated();
+ ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode);
void AddLiveBytes(size_t live_bytes) {
DCHECK(IsInUnevacFromSpace());
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index b0402e4b83..10ff1c15b1 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -44,48 +44,88 @@ static constexpr bool kVerifyFreedBytes = false;
// TODO: Fix
// template class MemoryToolMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
-RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
- art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
- uint8_t* limit, size_t growth_limit, bool can_move_objects,
- size_t starting_size, bool low_memory_mode)
- : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
+RosAllocSpace::RosAllocSpace(MemMap&& mem_map,
+ size_t initial_size,
+ const std::string& name,
+ art::gc::allocator::RosAlloc* rosalloc,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects,
+ size_t starting_size,
+ bool low_memory_mode)
+ : MallocSpace(name,
+ std::move(mem_map),
+ begin,
+ end,
+ limit,
+ growth_limit,
+ true,
+ can_move_objects,
starting_size, initial_size),
rosalloc_(rosalloc), low_memory_mode_(low_memory_mode) {
CHECK(rosalloc != nullptr);
}
-RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
- size_t starting_size, size_t initial_size,
- size_t growth_limit, size_t capacity,
- bool low_memory_mode, bool can_move_objects) {
- DCHECK(mem_map != nullptr);
+RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap&& mem_map,
+ const std::string& name,
+ size_t starting_size,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool low_memory_mode,
+ bool can_move_objects) {
+ DCHECK(mem_map.IsValid());
bool running_on_memory_tool = Runtime::Current()->IsRunningOnMemoryTool();
- allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
- capacity, low_memory_mode, running_on_memory_tool);
+ allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map.Begin(),
+ starting_size,
+ initial_size,
+ capacity,
+ low_memory_mode,
+ running_on_memory_tool);
if (rosalloc == nullptr) {
LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
return nullptr;
}
  // Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary.
- uint8_t* end = mem_map->Begin() + starting_size;
+ uint8_t* end = mem_map.Begin() + starting_size;
if (capacity - starting_size > 0) {
CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
}
// Everything is set so record in immutable structure and leave
- uint8_t* begin = mem_map->Begin();
+ uint8_t* begin = mem_map.Begin();
// TODO: Fix RosAllocSpace to support ASan. There is currently some issues with
// AllocationSize caused by redzones. b/12944686
if (running_on_memory_tool) {
return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
- mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
- can_move_objects, starting_size, low_memory_mode);
+ std::move(mem_map),
+ initial_size,
+ name,
+ rosalloc,
+ begin,
+ end,
+ begin + capacity,
+ growth_limit,
+ can_move_objects,
+ starting_size,
+ low_memory_mode);
} else {
- return new RosAllocSpace(mem_map, initial_size, name, rosalloc, begin, end, begin + capacity,
- growth_limit, can_move_objects, starting_size, low_memory_mode);
+ return new RosAllocSpace(std::move(mem_map),
+ initial_size,
+ name,
+ rosalloc,
+ begin,
+ end,
+ begin + capacity,
+ growth_limit,
+ can_move_objects,
+ starting_size,
+ low_memory_mode);
}
}
@@ -111,16 +151,21 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_siz
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
size_t starting_size = Heap::kDefaultStartingSize;
- MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
- requested_begin);
- if (mem_map == nullptr) {
+ MemMap mem_map =
+ CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+ if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
return nullptr;
}
- RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
- growth_limit, capacity, low_memory_mode,
+ RosAllocSpace* space = CreateFromMemMap(std::move(mem_map),
+ name,
+ starting_size,
+ initial_size,
+ growth_limit,
+ capacity,
+ low_memory_mode,
can_move_objects);
// We start out with only the initial size possibly containing objects.
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -175,18 +220,39 @@ mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
return result;
}
-MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
- void* allocator, uint8_t* begin, uint8_t* end,
- uint8_t* limit, size_t growth_limit,
+MallocSpace* RosAllocSpace::CreateInstance(MemMap&& mem_map,
+ const std::string& name,
+ void* allocator,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
bool can_move_objects) {
if (Runtime::Current()->IsRunningOnMemoryTool()) {
return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
- mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
- limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
+ std::move(mem_map),
+ initial_size_,
+ name,
+ reinterpret_cast<allocator::RosAlloc*>(allocator),
+ begin,
+ end,
+ limit,
+ growth_limit,
+ can_move_objects,
+ starting_size_,
+ low_memory_mode_);
} else {
- return new RosAllocSpace(mem_map, initial_size_, name,
- reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end, limit,
- growth_limit, can_move_objects, starting_size_, low_memory_mode_);
+ return new RosAllocSpace(std::move(mem_map),
+ initial_size_,
+ name,
+ reinterpret_cast<allocator::RosAlloc*>(allocator),
+ begin,
+ end,
+ limit,
+ growth_limit,
+ can_move_objects,
+ starting_size_,
+ low_memory_mode_);
}
}
@@ -364,8 +430,11 @@ void RosAllocSpace::Clear() {
mark_bitmap_->Clear();
SetEnd(begin_ + starting_size_);
delete rosalloc_;
- rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_,
- NonGrowthLimitCapacity(), low_memory_mode_,
+ rosalloc_ = CreateRosAlloc(mem_map_.Begin(),
+ starting_size_,
+ initial_size_,
+ NonGrowthLimitCapacity(),
+ low_memory_mode_,
Runtime::Current()->IsRunningOnMemoryTool());
SetFootprintLimit(footprint_limit);
}
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 4c17233360..5162a064d1 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -41,31 +41,35 @@ class RosAllocSpace : public MallocSpace {
static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
bool can_move_objects);
- static RosAllocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
- size_t starting_size, size_t initial_size,
- size_t growth_limit, size_t capacity,
- bool low_memory_mode, bool can_move_objects);
+ static RosAllocSpace* CreateFromMemMap(MemMap&& mem_map,
+ const std::string& name,
+ size_t starting_size,
+ size_t initial_size,
+ size_t growth_limit,
+ size_t capacity,
+ bool low_memory_mode,
+ bool can_move_objects);
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(!lock_);
+ override REQUIRES(!lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
- OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ override REQUIRES(Locks::mutator_lock_) {
return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
return AllocationSizeNonvirtual<true>(obj, usable_size);
}
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -89,7 +93,7 @@ class RosAllocSpace : public MallocSpace {
// run without allocating a new run.
ALWAYS_INLINE mirror::Object* AllocThreadLocal(Thread* self, size_t num_bytes,
size_t* bytes_allocated);
- size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
return MaxBytesBulkAllocatedForNonvirtual(num_bytes);
}
ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);
@@ -103,20 +107,25 @@ class RosAllocSpace : public MallocSpace {
return rosalloc_;
}
- size_t Trim() OVERRIDE;
- void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
- size_t GetFootprint() OVERRIDE;
- size_t GetFootprintLimit() OVERRIDE;
- void SetFootprintLimit(size_t limit) OVERRIDE;
+ size_t Trim() override;
+ void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
+ size_t GetFootprint() override;
+ size_t GetFootprintLimit() override;
+ void SetFootprintLimit(size_t limit) override;
- void Clear() OVERRIDE;
+ void Clear() override;
- MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
- bool can_move_objects) OVERRIDE;
+ MallocSpace* CreateInstance(MemMap&& mem_map,
+ const std::string& name,
+ void* allocator,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects) override;
- uint64_t GetBytesAllocated() OVERRIDE;
- uint64_t GetObjectsAllocated() OVERRIDE;
+ uint64_t GetBytesAllocated() override;
+ uint64_t GetObjectsAllocated() override;
size_t RevokeThreadLocalBuffers(Thread* thread);
size_t RevokeAllThreadLocalBuffers();
@@ -126,11 +135,11 @@ class RosAllocSpace : public MallocSpace {
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
- bool IsRosAllocSpace() const OVERRIDE {
+ bool IsRosAllocSpace() const override {
return true;
}
- RosAllocSpace* AsRosAllocSpace() OVERRIDE {
+ RosAllocSpace* AsRosAllocSpace() override {
return this;
}
@@ -140,16 +149,23 @@ class RosAllocSpace : public MallocSpace {
virtual ~RosAllocSpace();
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE {
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override {
rosalloc_->LogFragmentationAllocFailure(os, failed_alloc_bytes);
}
void DumpStats(std::ostream& os);
protected:
- RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
- allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end, uint8_t* limit,
- size_t growth_limit, bool can_move_objects, size_t starting_size,
+ RosAllocSpace(MemMap&& mem_map,
+ size_t initial_size,
+ const std::string& name,
+ allocator::RosAlloc* rosalloc,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ size_t growth_limit,
+ bool can_move_objects,
+ size_t starting_size,
bool low_memory_mode);
private:
@@ -158,7 +174,7 @@ class RosAllocSpace : public MallocSpace {
size_t* usable_size, size_t* bytes_tl_bulk_allocated);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
- size_t maximum_size, bool low_memory_mode) OVERRIDE {
+ size_t maximum_size, bool low_memory_mode) override {
return CreateRosAlloc(
base, morecore_start, initial_size, maximum_size, low_memory_mode, kRunningOnMemoryTool);
}
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 4f43d9f5c5..545e3d83a4 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -352,7 +352,7 @@ class DiscontinuousSpace : public Space {
return mark_bitmap_.get();
}
- virtual bool IsDiscontinuousSpace() const OVERRIDE {
+ bool IsDiscontinuousSpace() const override {
return true;
}
@@ -377,30 +377,30 @@ class MemMapSpace : public ContinuousSpace {
}
MemMap* GetMemMap() {
- return mem_map_.get();
+ return &mem_map_;
}
const MemMap* GetMemMap() const {
- return mem_map_.get();
+ return &mem_map_;
}
- MemMap* ReleaseMemMap() {
- return mem_map_.release();
+ MemMap ReleaseMemMap() {
+ return std::move(mem_map_);
}
protected:
MemMapSpace(const std::string& name,
- MemMap* mem_map,
+ MemMap&& mem_map,
uint8_t* begin,
uint8_t* end,
uint8_t* limit,
GcRetentionPolicy gc_retention_policy)
: ContinuousSpace(name, gc_retention_policy, begin, end, limit),
- mem_map_(mem_map) {
+ mem_map_(std::move(mem_map)) {
}
// Underlying storage of the space
- std::unique_ptr<MemMap> mem_map_;
+ MemMap mem_map_;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(MemMapSpace);
@@ -409,14 +409,14 @@ class MemMapSpace : public ContinuousSpace {
// Used by the heap compaction interface to enable copying from one type of alloc space to another.
class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
public:
- bool IsAllocSpace() const OVERRIDE {
+ bool IsAllocSpace() const override {
return true;
}
- AllocSpace* AsAllocSpace() OVERRIDE {
+ AllocSpace* AsAllocSpace() override {
return this;
}
- bool IsContinuousMemMapAllocSpace() const OVERRIDE {
+ bool IsContinuousMemMapAllocSpace() const override {
return true;
}
ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
@@ -435,11 +435,11 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
// Clear the space back to an empty space.
virtual void Clear() = 0;
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
return mark_bitmap_.get();
}
@@ -451,9 +451,13 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;
- ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin,
- uint8_t* end, uint8_t* limit, GcRetentionPolicy gc_retention_policy)
- : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
+ ContinuousMemMapAllocSpace(const std::string& name,
+ MemMap&& mem_map,
+ uint8_t* begin,
+ uint8_t* end,
+ uint8_t* limit,
+ GcRetentionPolicy gc_retention_policy)
+ : MemMapSpace(name, std::move(mem_map), begin, end, limit, gc_retention_policy) {
}
private:
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 8c73ef9116..ed85b061ed 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -41,7 +41,8 @@ class CountObjectsAllocated {
size_t* const objects_allocated_;
};
-ZygoteSpace* ZygoteSpace::Create(const std::string& name, MemMap* mem_map,
+ZygoteSpace* ZygoteSpace::Create(const std::string& name,
+ MemMap&& mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
accounting::ContinuousSpaceBitmap* mark_bitmap) {
DCHECK(live_bitmap != nullptr);
@@ -49,9 +50,9 @@ ZygoteSpace* ZygoteSpace::Create(const std::string& name, MemMap* mem_map,
size_t objects_allocated = 0;
CountObjectsAllocated visitor(&objects_allocated);
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(mem_map->Begin()),
- reinterpret_cast<uintptr_t>(mem_map->End()), visitor);
- ZygoteSpace* zygote_space = new ZygoteSpace(name, mem_map, objects_allocated);
+ live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(mem_map.Begin()),
+ reinterpret_cast<uintptr_t>(mem_map.End()), visitor);
+ ZygoteSpace* zygote_space = new ZygoteSpace(name, std::move(mem_map), objects_allocated);
CHECK(zygote_space->live_bitmap_.get() == nullptr);
CHECK(zygote_space->mark_bitmap_.get() == nullptr);
zygote_space->live_bitmap_.reset(live_bitmap);
@@ -64,8 +65,12 @@ void ZygoteSpace::Clear() {
UNREACHABLE();
}
-ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
- : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+ZygoteSpace::ZygoteSpace(const std::string& name, MemMap&& mem_map, size_t objects_allocated)
+ : ContinuousMemMapAllocSpace(name,
+ std::move(mem_map),
+ mem_map.Begin(),
+ mem_map.End(),
+ mem_map.End(),
kGcRetentionPolicyFullCollect),
objects_allocated_(objects_allocated) {
}
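Note that the initializer lists above (here and in RegionSpace) pass std::move(mem_map) and mem_map.Begin()/End() side by side. This is safe, though subtle: std::move is only a cast, the base-class parameter binds a reference, and the actual move into the mem_map_ member happens in the base constructor's initializer list, after every argument of the derived constructor has been evaluated. A minimal sketch of the ordering, with hypothetical types:

    #include <utility>

    struct Map {
      int* begin = nullptr;
      Map() = default;
      Map(Map&& other) : begin(other.begin) { other.begin = nullptr; }
    };

    struct Base {
      Map map_;
      int* cached_begin_;
      // `m` is just a reference here; the move happens in map_'s initializer.
      Base(Map&& m, int* begin) : map_(std::move(m)), cached_begin_(begin) {}
    };

    struct Derived : Base {
      // Both arguments are evaluated before Base's initializers run, so
      // m.begin is still valid when read here, in either evaluation order.
      explicit Derived(Map&& m) : Base(std::move(m), m.begin) {}
    };

This is also why the constructor bodies read sizes from the already-initialized member (mem_map_.Size() in RegionSpace) rather than from the moved-from parameter.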
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 6fe21d99a8..1f73577a3a 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -27,38 +27,39 @@ namespace gc {
namespace space {
// A zygote space is a space which you cannot allocate into or free from.
-class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
+class ZygoteSpace final : public ContinuousMemMapAllocSpace {
public:
// Returns the remaining storage in the out_map field.
- static ZygoteSpace* Create(const std::string& name, MemMap* mem_map,
+ static ZygoteSpace* Create(const std::string& name,
+ MemMap&& mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
accounting::ContinuousSpaceBitmap* mark_bitmap)
REQUIRES_SHARED(Locks::mutator_lock_);
void Dump(std::ostream& os) const;
- SpaceType GetType() const OVERRIDE {
+ SpaceType GetType() const override {
return kSpaceTypeZygoteSpace;
}
- ZygoteSpace* AsZygoteSpace() OVERRIDE {
+ ZygoteSpace* AsZygoteSpace() override {
return this;
}
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
- size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE;
+ size_t Free(Thread* self, mirror::Object* ptr) override;
- size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
// ZygoteSpaces don't have thread local state.
- size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) override {
return 0U;
}
- size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() override {
return 0U;
}
@@ -70,13 +71,13 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
return objects_allocated_.load(std::memory_order_seq_cst);
}
- void Clear() OVERRIDE;
+ void Clear() override;
- bool CanMoveObjects() const OVERRIDE {
+ bool CanMoveObjects() const override {
return false;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
@@ -85,7 +86,7 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
}
private:
- ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated);
+ ZygoteSpace(const std::string& name, MemMap&& mem_map, size_t objects_allocated);
static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
AtomicInteger objects_allocated_;
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 60105f4e4f..ef85b3942f 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -45,7 +45,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
}
virtual ~SystemWeakHolder() {}
- void Allow() OVERRIDE
+ void Allow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
CHECK(!kUseReadBarrier);
@@ -54,7 +54,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
new_weak_condition_.Broadcast(Thread::Current());
}
- void Disallow() OVERRIDE
+ void Disallow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
CHECK(!kUseReadBarrier);
@@ -62,7 +62,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
allow_new_system_weak_ = false;
}
- void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE
+ void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) override
REQUIRES(!allow_disallow_lock_) {
MutexLock mu(Thread::Current(), allow_disallow_lock_);
new_weak_condition_.Broadcast(Thread::Current());
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index 21f511702e..07725b9a56 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -26,6 +26,7 @@
#include "gc_root-inl.h"
#include "handle_scope-inl.h"
#include "heap.h"
+#include "mirror/object-inl.h"
#include "mirror/string.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
@@ -43,7 +44,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
disallow_count_(0),
sweep_count_(0) {}
- void Allow() OVERRIDE
+ void Allow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Allow();
@@ -51,7 +52,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
allow_count_++;
}
- void Disallow() OVERRIDE
+ void Disallow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Disallow();
@@ -59,7 +60,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
disallow_count_++;
}
- void Broadcast(bool broadcast_for_checkpoint) OVERRIDE
+ void Broadcast(bool broadcast_for_checkpoint) override
REQUIRES(!allow_disallow_lock_) {
SystemWeakHolder::Broadcast(broadcast_for_checkpoint);
@@ -69,7 +70,7 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
}
}
- void Sweep(IsMarkedVisitor* visitor) OVERRIDE
+ void Sweep(IsMarkedVisitor* visitor) override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
MutexLock mu(Thread::Current(), allow_disallow_lock_);
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 38581ce807..7cb678ba7a 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -33,7 +33,7 @@ class RecursiveTask : public HeapTask {
: HeapTask(NanoTime() + MsToNs(10)), task_processor_(task_processor), counter_(counter),
max_recursion_(max_recursion) {
}
- virtual void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
if (max_recursion_ > 0) {
task_processor_->AddTask(self,
new RecursiveTask(task_processor_, counter_, max_recursion_ - 1));
@@ -52,7 +52,7 @@ class WorkUntilDoneTask : public SelfDeletingTask {
WorkUntilDoneTask(TaskProcessor* task_processor, Atomic<bool>* done_running)
: task_processor_(task_processor), done_running_(done_running) {
}
- virtual void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
task_processor_->RunAllTasks(self);
done_running_->store(true, std::memory_order_seq_cst);
}
@@ -105,7 +105,7 @@ class TestOrderTask : public HeapTask {
TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
: HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
}
- virtual void Run(Thread* thread ATTRIBUTE_UNUSED) OVERRIDE {
+ void Run(Thread* thread ATTRIBUTE_UNUSED) override {
ASSERT_EQ(*counter_, expected_counter_);
++*counter_;
}
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index fb5db1147f..5d234eaac3 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -87,7 +87,7 @@ void Verification::LogHeapCorruption(ObjPtr<mirror::Object> holder,
bool fatal) const {
// Lowest priority logging first:
PrintFileToLog("/proc/self/maps", android::base::LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
// Buffer the output in the string stream since it is more important than the stack traces
// and we want it to have log priority. The stack traces are printed from Runtime::Abort
// which is called from LOG(FATAL) but before the abort message.
@@ -198,7 +198,7 @@ class Verification::CollectRootVisitor : public SingleRootVisitor {
CollectRootVisitor(ObjectSet* visited, WorkQueue* work) : visited_(visited), work_(work) {}
void VisitRoot(mirror::Object* obj, const RootInfo& info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
if (obj != nullptr && visited_->insert(obj).second) {
std::ostringstream oss;
oss << info.ToString() << " = " << obj << "(" << obj->PrettyTypeOf() << ")";
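
The LogHeapCorruption hunk also shows the call-site convention this series applies to bare bool literals: annotate them with the parameter name so readers need not consult the declaration. A self-contained sketch of the idiom (DumpMaps here is a hypothetical stand-in, not the MemMap API):

#include <iostream>

// Hypothetical helper, only to illustrate the /* param */ comment idiom.
void DumpMaps(std::ostream& os, bool terse) {
  os << (terse ? "terse map dump\n" : "full map dump\n");
}

int main() {
  DumpMaps(std::cout, /* terse */ true);  // parameter name visible at the call
}
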
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 986e28ec79..0bd43f95c0 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -133,7 +133,7 @@ class RootVisitor {
// critical.
class SingleRootVisitor : public RootVisitor {
private:
- void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(*roots[i], info);
@@ -141,7 +141,7 @@ class SingleRootVisitor : public RootVisitor {
}
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info) OVERRIDE
+ const RootInfo& info) override
REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(roots[i]->AsMirrorPtr(), info);
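
SingleRootVisitor exists so that subclasses implement a single per-root hook while the two batched VisitRoots overloads above do the fan-out. A hedged sketch of a subclass, assuming the declarations from gc_root.h are in scope (the counting visitor itself is hypothetical):

// Counts every non-null root the runtime reports to the visitor.
class CountingRootVisitor : public SingleRootVisitor {
 public:
  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (root != nullptr) {
      ++count_;
    }
  }
  size_t count_ = 0;
};
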
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index a9230e0bee..464c2b749f 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -80,9 +80,9 @@ DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_SIZE_MINUS_ONE), (static_c
DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))))
#define STRING_DEX_CACHE_ELEMENT_SIZE 8
DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE), (static_cast<int32_t>(sizeof(art::mirror::StringDexCachePair))))
-#define METHOD_DEX_CACHE_SIZE_MINUS_ONE 511
+#define METHOD_DEX_CACHE_SIZE_MINUS_ONE 1023
DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheMethodCacheSize - 1)))
-#define METHOD_DEX_CACHE_HASH_BITS 9
+#define METHOD_DEX_CACHE_HASH_BITS 10
DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheMethodCacheSize))))
#define CARD_TABLE_CARD_SHIFT 0xa
DEFINE_CHECK_EQ(static_cast<size_t>(CARD_TABLE_CARD_SHIFT), (static_cast<size_t>(art::gc::accounting::CardTable::kCardShift)))
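
The two regenerated method-cache constants above are tied together by construction: the cache size is a power of two, SIZE_MINUS_ONE is the index mask, and HASH_BITS is its log2, which is what art::LeastSignificantBit returns for a power of two. The invariant with the new values, as a standalone sketch:

#include <cstdint>

constexpr int32_t kDexCacheMethodCacheSize = 1024;  // power of two, per this hunk
constexpr int32_t kMethodCacheSizeMinusOne = kDexCacheMethodCacheSize - 1;

// For a power of two, log2 equals the index of the least significant set bit.
constexpr int32_t Log2(int32_t x) { return x == 1 ? 0 : 1 + Log2(x / 2); }

static_assert(kMethodCacheSizeMinusOne == 1023, "mask matches the generated value");
static_assert(Log2(kDexCacheMethodCacheSize) == 10, "hash bits match the generated value");

int main() { return 0; }
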
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 28a230291d..9eaf1ec71a 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -259,7 +259,7 @@ class PACKED(4) FixedSizeHandleScope : public HandleScope {
// Scoped handle storage of a fixed size that is stack allocated.
template<size_t kNumReferences>
-class PACKED(4) StackHandleScope FINAL : public FixedSizeHandleScope<kNumReferences> {
+class PACKED(4) StackHandleScope final : public FixedSizeHandleScope<kNumReferences> {
public:
explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
ALWAYS_INLINE ~StackHandleScope();
diff --git a/runtime/hidden_api_test.cc b/runtime/hidden_api_test.cc
index a41d28492d..4c7efe666f 100644
--- a/runtime/hidden_api_test.cc
+++ b/runtime/hidden_api_test.cc
@@ -27,7 +27,7 @@ using hiddenapi::GetActionFromAccessFlags;
class HiddenApiTest : public CommonRuntimeTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
// Do the normal setup.
CommonRuntimeTest::SetUp();
self_ = Thread::Current();
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 3f44928e3a..e8a47d1087 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -303,7 +303,7 @@ class EndianOutputBuffered : public EndianOutput {
}
virtual ~EndianOutputBuffered() {}
- void UpdateU4(size_t offset, uint32_t new_value) OVERRIDE {
+ void UpdateU4(size_t offset, uint32_t new_value) override {
DCHECK_LE(offset, length_ - 4);
buffer_[offset + 0] = static_cast<uint8_t>((new_value >> 24) & 0xFF);
buffer_[offset + 1] = static_cast<uint8_t>((new_value >> 16) & 0xFF);
@@ -312,12 +312,12 @@ class EndianOutputBuffered : public EndianOutput {
}
protected:
- void HandleU1List(const uint8_t* values, size_t count) OVERRIDE {
+ void HandleU1List(const uint8_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
buffer_.insert(buffer_.end(), values, values + count);
}
- void HandleU1AsU2List(const uint8_t* values, size_t count) OVERRIDE {
+ void HandleU1AsU2List(const uint8_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
    // 8-bit values are grouped in pairs to form a 16-bit block, like a Java char.
if (count & 1) {
@@ -330,7 +330,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleU2List(const uint16_t* values, size_t count) OVERRIDE {
+ void HandleU2List(const uint16_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
for (size_t i = 0; i < count; ++i) {
uint16_t value = *values;
@@ -340,7 +340,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleU4List(const uint32_t* values, size_t count) OVERRIDE {
+ void HandleU4List(const uint32_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
for (size_t i = 0; i < count; ++i) {
uint32_t value = *values;
@@ -352,7 +352,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleU8List(const uint64_t* values, size_t count) OVERRIDE {
+ void HandleU8List(const uint64_t* values, size_t count) override {
DCHECK_EQ(length_, buffer_.size());
for (size_t i = 0; i < count; ++i) {
uint64_t value = *values;
@@ -368,7 +368,7 @@ class EndianOutputBuffered : public EndianOutput {
}
}
- void HandleEndRecord() OVERRIDE {
+ void HandleEndRecord() override {
DCHECK_EQ(buffer_.size(), length_);
if (kIsDebugBuild && started_) {
uint32_t stored_length =
@@ -388,7 +388,7 @@ class EndianOutputBuffered : public EndianOutput {
std::vector<uint8_t> buffer_;
};
-class FileEndianOutput FINAL : public EndianOutputBuffered {
+class FileEndianOutput final : public EndianOutputBuffered {
public:
FileEndianOutput(File* fp, size_t reserved_size)
: EndianOutputBuffered(reserved_size), fp_(fp), errors_(false) {
@@ -402,7 +402,7 @@ class FileEndianOutput FINAL : public EndianOutputBuffered {
}
protected:
- void HandleFlush(const uint8_t* buffer, size_t length) OVERRIDE {
+ void HandleFlush(const uint8_t* buffer, size_t length) override {
if (!errors_) {
errors_ = !fp_->WriteFully(buffer, length);
}
@@ -413,14 +413,14 @@ class FileEndianOutput FINAL : public EndianOutputBuffered {
bool errors_;
};
-class VectorEndianOuputput FINAL : public EndianOutputBuffered {
+class VectorEndianOuputput final : public EndianOutputBuffered {
public:
VectorEndianOuputput(std::vector<uint8_t>& data, size_t reserved_size)
: EndianOutputBuffered(reserved_size), full_data_(data) {}
~VectorEndianOuputput() {}
protected:
- void HandleFlush(const uint8_t* buf, size_t length) OVERRIDE {
+ void HandleFlush(const uint8_t* buf, size_t length) override {
size_t old_size = full_data_.size();
full_data_.resize(old_size + length);
memcpy(full_data_.data() + old_size, buf, length);
@@ -604,7 +604,7 @@ class Hprof : public SingleRootVisitor {
}
void VisitRoot(mirror::Object* obj, const RootInfo& root_info)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ override REQUIRES_SHARED(Locks::mutator_lock_);
void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag,
uint32_t thread_serial);
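
UpdateU4 above pins down the buffered stream's byte order: values are stored big-endian, most significant byte first, independent of host endianness. The same shuffle as a free-standing helper (a sketch, not hprof's own API):

#include <cstdint>
#include <vector>

// Append a u4 in big-endian order, mirroring UpdateU4's byte layout.
void AppendU4(std::vector<uint8_t>& buffer, uint32_t value) {
  buffer.push_back(static_cast<uint8_t>((value >> 24) & 0xFF));
  buffer.push_back(static_cast<uint8_t>((value >> 16) & 0xFF));
  buffer.push_back(static_cast<uint8_t>((value >> 8) & 0xFF));
  buffer.push_back(static_cast<uint8_t>(value & 0xFF));
}

int main() {
  std::vector<uint8_t> buf;
  AppendU4(buf, 0x12345678u);  // buf = {0x12, 0x34, 0x56, 0x78}
  return buf.size() == 4 ? 0 : 1;
}
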
diff --git a/runtime/image.cc b/runtime/image.cc
index b7a872c821..028c515c91 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '4', '\0' }; // Half DexCache F&M arrays.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '3', '\0' }; // Image relocations.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
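
Both arrays are compared bytewise when an image is loaded, so any edit to the four version characters invalidates previously compiled images. A hedged sketch of such a check; the assumption that the version bytes directly follow the magic is mine, not taken from ImageHeader's real layout:

#include <cstdint>
#include <cstring>

bool MagicAndVersionMatch(const uint8_t* header_bytes,
                          const uint8_t expected_version[4]) {
  static const uint8_t kMagic[4] = { 'a', 'r', 't', '\n' };
  // Layout assumption: 4 magic bytes, then 4 version bytes.
  return std::memcmp(header_bytes, kMagic, 4) == 0 &&
         std::memcmp(header_bytes + 4, expected_version, 4) == 0;
}
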
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 950a54d61e..8ab4a9b47e 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -78,14 +78,18 @@ IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));
const size_t table_bytes = max_count * sizeof(IrtEntry);
- table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
- PROT_READ | PROT_WRITE, false, false, error_msg));
- if (table_mem_map_.get() == nullptr && error_msg->empty()) {
+ table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
+ /* addr */ nullptr,
+ table_bytes,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ error_msg);
+ if (!table_mem_map_.IsValid() && error_msg->empty()) {
*error_msg = "Unable to map memory for indirect ref table";
}
- if (table_mem_map_.get() != nullptr) {
- table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+ if (table_mem_map_.IsValid()) {
+ table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
} else {
table_ = nullptr;
}
@@ -125,7 +129,7 @@ void IndirectReferenceTable::ConstexprChecks() {
}
bool IndirectReferenceTable::IsValid() const {
- return table_mem_map_.get() != nullptr;
+ return table_mem_map_.IsValid();
}
// Holes:
@@ -217,20 +221,19 @@ bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
// Note: the above check also ensures that there is no overflow below.
const size_t table_bytes = new_size * sizeof(IrtEntry);
- std::unique_ptr<MemMap> new_map(MemMap::MapAnonymous("indirect ref table",
- nullptr,
- table_bytes,
- PROT_READ | PROT_WRITE,
- false,
- false,
- error_msg));
- if (new_map == nullptr) {
+ MemMap new_map = MemMap::MapAnonymous("indirect ref table",
+ /* addr */ nullptr,
+ table_bytes,
+ PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ false,
+ error_msg);
+ if (!new_map.IsValid()) {
return false;
}
- memcpy(new_map->Begin(), table_mem_map_->Begin(), table_mem_map_->Size());
+ memcpy(new_map.Begin(), table_mem_map_.Begin(), table_mem_map_.Size());
table_mem_map_ = std::move(new_map);
- table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+ table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
max_entries_ = new_size;
return true;
@@ -444,7 +447,7 @@ void IndirectReferenceTable::Trim() {
ScopedTrace trace(__PRETTY_FUNCTION__);
const size_t top_index = Capacity();
auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
- uint8_t* release_end = table_mem_map_->End();
+ uint8_t* release_end = table_mem_map_.End();
madvise(release_start, release_end - release_start, MADV_DONTNEED);
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index d2093f2818..8c63c0045f 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -27,6 +27,7 @@
#include "base/bit_utils.h"
#include "base/macros.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"
@@ -41,8 +42,6 @@ namespace mirror {
class Object;
} // namespace mirror
-class MemMap;
-
// Maintain a table of indirect references. Used for local/global JNI references.
//
// The table contains object references, where the strong (local/global) references are part of the
@@ -398,7 +397,7 @@ class IndirectReferenceTable {
IRTSegmentState segment_state_;
// Mem map where we store the indirect refs.
- std::unique_ptr<MemMap> table_mem_map_;
+ MemMap table_mem_map_;
// bottom of the stack. Do not directly access the object references
// in this as they are roots. Use Get() that has a read barrier.
IrtEntry* table_;
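
The two indirect_reference_table hunks show the shape of the MemMap migration running through this series: MapAnonymous returns a MemMap by value instead of a raw pointer, validity is queried with IsValid() rather than a null check, accessors lose the -> indirection, and ownership transfers with std::move. A condensed sketch of the new idiom, assuming base/mem_map.h is in scope (error handling trimmed):

void MapExample() {
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("example table",
                                    /* addr */ nullptr,
                                    kPageSize,
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb */ false,
                                    &error_msg);
  if (!map.IsValid()) {
    LOG(ERROR) << "mapping failed: " << error_msg;
    return;
  }
  uint8_t* base = map.Begin();    // valid for the lifetime of 'map'
  MemMap moved = std::move(map);  // ownership transfers; 'map' is now invalid
  static_cast<void>(base);
  static_cast<void>(moved);
}
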
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4196e19383..b42433cad3 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -85,7 +85,7 @@ class InstallStubsClassVisitor : public ClassVisitor {
explicit InstallStubsClassVisitor(Instrumentation* instrumentation)
: instrumentation_(instrumentation) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
instrumentation_->InstallStubsForClass(klass.Ptr());
return true; // we visit all classes.
}
@@ -264,7 +264,7 @@ void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
// existing instrumentation frames.
static void InstrumentationInstallStack(Thread* thread, void* arg)
REQUIRES_SHARED(Locks::mutator_lock_) {
- struct InstallStackVisitor FINAL : public StackVisitor {
+ struct InstallStackVisitor final : public StackVisitor {
InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
: StackVisitor(thread_in, context, kInstrumentationStackWalk),
instrumentation_stack_(thread_in->GetInstrumentationStack()),
@@ -273,7 +273,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
last_return_pc_(0) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m == nullptr) {
if (kVerboseInstrumentation) {
@@ -429,7 +429,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
REQUIRES(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- struct RestoreStackVisitor FINAL : public StackVisitor {
+ struct RestoreStackVisitor final : public StackVisitor {
RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
: StackVisitor(thread_in, nullptr, kInstrumentationStackWalk),
@@ -439,7 +439,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
instrumentation_stack_(thread_in->GetInstrumentationStack()),
frames_removed_(0) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (instrumentation_stack_->size() == 0) {
return false; // Stop.
}
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 8ac26afe9f..9146245895 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -36,7 +36,7 @@
namespace art {
namespace instrumentation {
-class TestInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
+class TestInstrumentationListener final : public instrumentation::InstrumentationListener {
public:
TestInstrumentationListener()
: received_method_enter_event(false),
@@ -59,7 +59,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_enter_event = true;
}
@@ -68,7 +68,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
Handle<mirror::Object> return_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_exit_object_event = true;
}
@@ -77,7 +77,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const JValue& return_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_exit_event = true;
}
@@ -85,7 +85,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_unwind_event = true;
}
@@ -93,7 +93,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t new_dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_dex_pc_moved_event = true;
}
@@ -102,7 +102,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_read_event = true;
}
@@ -112,7 +112,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED,
Handle<mirror::Object> field_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_written_object_event = true;
}
@@ -122,19 +122,19 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED,
const JValue& field_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_written_event = true;
}
void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_exception_thrown_event = true;
}
void ExceptionHandled(Thread* self ATTRIBUTE_UNUSED,
Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_exception_handled_event = true;
}
@@ -142,7 +142,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_branch_event = true;
}
@@ -151,12 +151,12 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
ArtMethod* caller ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtMethod* callee ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_invoke_virtual_or_interface_event = true;
}
void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED, const ShadowFrame& frame ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
received_watched_frame_pop = true;
}
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index b56c48d78c..8b4fe44c15 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -86,7 +86,7 @@ TEST_F(InternTableTest, CrossHash) {
class TestPredicate : public IsMarkedVisitor {
public:
- mirror::Object* IsMarked(mirror::Object* s) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ mirror::Object* IsMarked(mirror::Object* s) override REQUIRES_SHARED(Locks::mutator_lock_) {
bool erased = false;
for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
if (*it == s) {
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 69dae31b37..17b3cd45aa 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -116,10 +116,10 @@ UNARY_INTRINSIC(MterpLongNumberOfLeadingZeros, JAVASTYLE_CLZ, GetVRegLong, SetJ)
UNARY_INTRINSIC(MterpLongNumberOfTrailingZeros, JAVASTYLE_CTZ, GetVRegLong, SetJ);
// java.lang.Long.rotateRight(JI)J
-BINARY_JJ_INTRINSIC(MterpLongRotateRight, (Rot<int64_t, false>), SetJ);
+BINARY_JI_INTRINSIC(MterpLongRotateRight, (Rot<int64_t, false>), SetJ);
// java.lang.Long.rotateLeft(JI)J
-BINARY_JJ_INTRINSIC(MterpLongRotateLeft, (Rot<int64_t, true>), SetJ);
+BINARY_JI_INTRINSIC(MterpLongRotateLeft, (Rot<int64_t, true>), SetJ);
// java.lang.Long.signum(J)I
UNARY_INTRINSIC(MterpLongSignum, Signum, GetVRegLong, SetI);
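
The JJ-to-JI change matters because both intrinsics have descriptor (JI)J: a 64-bit value rotated by a 32-bit distance, so the second argument must be fetched as a single vreg rather than a wide pair. An illustrative rotate with that operand split (a generic sketch; ART's Rot<T, kLeft> template is its own implementation):

#include <cstdint>
#include <type_traits>

template <typename T, bool kLeft>
T Rotate(T value, int distance) {
  constexpr int kBits = sizeof(T) * 8;
  using U = typename std::make_unsigned<T>::type;
  const unsigned shift = static_cast<unsigned>(distance) & (kBits - 1);
  if (shift == 0) {
    return value;  // avoid an undefined shift by kBits below
  }
  const U v = static_cast<U>(value);
  return kLeft ? static_cast<T>((v << shift) | (v >> (kBits - shift)))
               : static_cast<T>((v >> shift) | (v << (kBits - shift)));
}

int main() {
  // rotateRight(1L, 1) moves the low bit into the sign bit.
  return Rotate<int64_t, false>(INT64_C(1), 1) == INT64_MIN ? 0 : 1;
}
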
diff --git a/runtime/interpreter/mterp/arm/instruction_end.S b/runtime/interpreter/mterp/arm/instruction_end.S
index 32c725c7d9..f90ebd0221 100644
--- a/runtime/interpreter/mterp/arm/instruction_end.S
+++ b/runtime/interpreter/mterp/arm/instruction_end.S
@@ -1,3 +1,5 @@
+ .type artMterpAsmInstructionEnd, #object
+ .hidden artMterpAsmInstructionEnd
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm/instruction_end_alt.S b/runtime/interpreter/mterp/arm/instruction_end_alt.S
index f90916fc02..0b66dbb947 100644
--- a/runtime/interpreter/mterp/arm/instruction_end_alt.S
+++ b/runtime/interpreter/mterp/arm/instruction_end_alt.S
@@ -1,3 +1,5 @@
+ .type artMterpAsmAltInstructionEnd, #object
+ .hidden artMterpAsmAltInstructionEnd
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm/instruction_end_sister.S b/runtime/interpreter/mterp/arm/instruction_end_sister.S
index c5f4886697..71c0300f6d 100644
--- a/runtime/interpreter/mterp/arm/instruction_end_sister.S
+++ b/runtime/interpreter/mterp/arm/instruction_end_sister.S
@@ -1,3 +1,5 @@
+ .type artMterpAsmSisterEnd, #object
+ .hidden artMterpAsmSisterEnd
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
diff --git a/runtime/interpreter/mterp/arm/instruction_start.S b/runtime/interpreter/mterp/arm/instruction_start.S
index 8874c20540..b7e9cf51e4 100644
--- a/runtime/interpreter/mterp/arm/instruction_start.S
+++ b/runtime/interpreter/mterp/arm/instruction_start.S
@@ -1,4 +1,6 @@
+ .type artMterpAsmInstructionStart, #object
+ .hidden artMterpAsmInstructionStart
.global artMterpAsmInstructionStart
artMterpAsmInstructionStart = .L_op_nop
.text
diff --git a/runtime/interpreter/mterp/arm/instruction_start_alt.S b/runtime/interpreter/mterp/arm/instruction_start_alt.S
index 0c9ffdb7d6..7a67ba064c 100644
--- a/runtime/interpreter/mterp/arm/instruction_start_alt.S
+++ b/runtime/interpreter/mterp/arm/instruction_start_alt.S
@@ -1,4 +1,6 @@
+ .type artMterpAsmAltInstructionStart, #object
+ .hidden artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionStart
artMterpAsmAltInstructionStart = .L_ALT_op_nop
.text
diff --git a/runtime/interpreter/mterp/arm/instruction_start_sister.S b/runtime/interpreter/mterp/arm/instruction_start_sister.S
index 2ec51f7261..0036061605 100644
--- a/runtime/interpreter/mterp/arm/instruction_start_sister.S
+++ b/runtime/interpreter/mterp/arm/instruction_start_sister.S
@@ -1,4 +1,6 @@
+ .type artMterpAsmSisterStart, #object
+ .hidden artMterpAsmSisterStart
.global artMterpAsmSisterStart
.text
.balign 4
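
The new directives in these marker files change how the labels behave at link time: .hidden keeps the symbols out of the dynamic symbol table, avoiding GOT indirection, and .type ..., #object marks them as data so the ARM toolchain does not set the Thumb bit in their addresses the way it can for function symbols; a stray low bit would corrupt any address arithmetic done against them. A hedged sketch of how begin/end markers like these are typically consumed from C++ (the declaration form is an assumption, not ART's exact one):

#include <cstddef>
#include <cstdint>

extern "C" const uint8_t artMterpAsmInstructionStart[];
extern "C" const uint8_t artMterpAsmInstructionEnd[];

// With both symbols resolved locally and free of the Thumb bit, the handler
// table size is a plain pointer difference.
size_t HandlerTableBytes() {
  return static_cast<size_t>(artMterpAsmInstructionEnd -
                             artMterpAsmInstructionStart);
}
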
diff --git a/runtime/interpreter/mterp/arm/op_iget.S b/runtime/interpreter/mterp/arm/op_iget.S
index c45880b1c4..1684a768df 100644
--- a/runtime/interpreter/mterp/arm/op_iget.S
+++ b/runtime/interpreter/mterp/arm/op_iget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32InstanceFromMterp"}
+%default { "is_object":"0", "helper":"MterpIGetU32"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/arm/op_iget_boolean.S b/runtime/interpreter/mterp/arm/op_iget_boolean.S
index 9da6c8add6..f23cb3aa97 100644
--- a/runtime/interpreter/mterp/arm/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/arm/op_iget_boolean.S
@@ -1 +1 @@
-%include "arm/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp" }
+%include "arm/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_byte.S b/runtime/interpreter/mterp/arm/op_iget_byte.S
index 3d1f52d359..9c4f37c8ac 100644
--- a/runtime/interpreter/mterp/arm/op_iget_byte.S
+++ b/runtime/interpreter/mterp/arm/op_iget_byte.S
@@ -1 +1 @@
-%include "arm/op_iget.S" { "helper":"artGetByteInstanceFromMterp" }
+%include "arm/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_char.S b/runtime/interpreter/mterp/arm/op_iget_char.S
index 6b7154d8b9..80c4227ed2 100644
--- a/runtime/interpreter/mterp/arm/op_iget_char.S
+++ b/runtime/interpreter/mterp/arm/op_iget_char.S
@@ -1 +1 @@
-%include "arm/op_iget.S" { "helper":"artGetCharInstanceFromMterp" }
+%include "arm/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_object.S b/runtime/interpreter/mterp/arm/op_iget_object.S
index a35b1c8976..e30b129efe 100644
--- a/runtime/interpreter/mterp/arm/op_iget_object.S
+++ b/runtime/interpreter/mterp/arm/op_iget_object.S
@@ -1 +1 @@
-%include "arm/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
+%include "arm/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_short.S b/runtime/interpreter/mterp/arm/op_iget_short.S
index 3254c07fd1..dd6bc9991c 100644
--- a/runtime/interpreter/mterp/arm/op_iget_short.S
+++ b/runtime/interpreter/mterp/arm/op_iget_short.S
@@ -1 +1 @@
-%include "arm/op_iget.S" { "helper":"artGetShortInstanceFromMterp" }
+%include "arm/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide.S b/runtime/interpreter/mterp/arm/op_iget_wide.S
index 30405bd94c..46e9ec869b 100644
--- a/runtime/interpreter/mterp/arm/op_iget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_iget_wide.S
@@ -9,7 +9,7 @@
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGet64InstanceFromMterp
+ bl MterpIGetU64
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
diff --git a/runtime/interpreter/mterp/arm/op_iput.S b/runtime/interpreter/mterp/arm/op_iput.S
index d224cd8a77..a16795da64 100644
--- a/runtime/interpreter/mterp/arm/op_iput.S
+++ b/runtime/interpreter/mterp/arm/op_iput.S
@@ -1,11 +1,11 @@
-%default { "is_object":"0", "handler":"artSet32InstanceFromMterp" }
+%default { "is_object":"0", "helper":"MterpIPutU32" }
/*
* General 32-bit instance field put.
*
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern $handler
+ .extern $helper
EXPORT_PC
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
@@ -14,7 +14,7 @@
GET_VREG r2, r2 @ r2<- fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
- bl $handler
+ bl $helper
cmp r0, #0
bne MterpPossibleException
ADVANCE 2 @ advance rPC
diff --git a/runtime/interpreter/mterp/arm/op_iput_boolean.S b/runtime/interpreter/mterp/arm/op_iput_boolean.S
index c9e8589de5..57edadddd7 100644
--- a/runtime/interpreter/mterp/arm/op_iput_boolean.S
+++ b/runtime/interpreter/mterp/arm/op_iput_boolean.S
@@ -1 +1 @@
-%include "arm/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
+%include "arm/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_byte.S b/runtime/interpreter/mterp/arm/op_iput_byte.S
index c9e8589de5..ab283b90fb 100644
--- a/runtime/interpreter/mterp/arm/op_iput_byte.S
+++ b/runtime/interpreter/mterp/arm/op_iput_byte.S
@@ -1 +1 @@
-%include "arm/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
+%include "arm/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_char.S b/runtime/interpreter/mterp/arm/op_iput_char.S
index 5046f6b6a3..0fe5d964cc 100644
--- a/runtime/interpreter/mterp/arm/op_iput_char.S
+++ b/runtime/interpreter/mterp/arm/op_iput_char.S
@@ -1 +1 @@
-%include "arm/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
+%include "arm/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_object.S b/runtime/interpreter/mterp/arm/op_iput_object.S
index d942e846f2..4f401eb4f0 100644
--- a/runtime/interpreter/mterp/arm/op_iput_object.S
+++ b/runtime/interpreter/mterp/arm/op_iput_object.S
@@ -3,7 +3,7 @@
mov r1, rPC
mov r2, rINST
mov r3, rSELF
- bl MterpIputObject
+ bl MterpIPutObj
cmp r0, #0
beq MterpException
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm/op_iput_short.S b/runtime/interpreter/mterp/arm/op_iput_short.S
index 5046f6b6a3..cc983630ff 100644
--- a/runtime/interpreter/mterp/arm/op_iput_short.S
+++ b/runtime/interpreter/mterp/arm/op_iput_short.S
@@ -1 +1 @@
-%include "arm/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
+%include "arm/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_wide.S b/runtime/interpreter/mterp/arm/op_iput_wide.S
index 3dda1877b5..6a414738d9 100644
--- a/runtime/interpreter/mterp/arm/op_iput_wide.S
+++ b/runtime/interpreter/mterp/arm/op_iput_wide.S
@@ -1,5 +1,5 @@
/* iput-wide vA, vB, field@CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
@@ -8,7 +8,7 @@
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
- bl artSet64InstanceFromMterp
+ bl MterpIPutU64
cmp r0, #0
bne MterpPossibleException
ADVANCE 2 @ advance rPC
diff --git a/runtime/interpreter/mterp/arm/op_sget.S b/runtime/interpreter/mterp/arm/op_sget.S
index 3c813efb31..575a8c0760 100644
--- a/runtime/interpreter/mterp/arm/op_sget.S
+++ b/runtime/interpreter/mterp/arm/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"MterpGet32Static" }
+%default { "is_object":"0", "helper":"MterpSGetU32" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm/op_sget_boolean.S b/runtime/interpreter/mterp/arm/op_sget_boolean.S
index eb06aa881c..df1a0246b5 100644
--- a/runtime/interpreter/mterp/arm/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/arm/op_sget_boolean.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"MterpGetBooleanStatic"}
+%include "arm/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_byte.S b/runtime/interpreter/mterp/arm/op_sget_byte.S
index 9f4c9046a2..8ad3ff0e65 100644
--- a/runtime/interpreter/mterp/arm/op_sget_byte.S
+++ b/runtime/interpreter/mterp/arm/op_sget_byte.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"MterpGetByteStatic"}
+%include "arm/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_char.S b/runtime/interpreter/mterp/arm/op_sget_char.S
index dd8c991264..523951490a 100644
--- a/runtime/interpreter/mterp/arm/op_sget_char.S
+++ b/runtime/interpreter/mterp/arm/op_sget_char.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"MterpGetCharStatic"}
+%include "arm/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_object.S b/runtime/interpreter/mterp/arm/op_sget_object.S
index e1d9eaee29..e61a5a7b21 100644
--- a/runtime/interpreter/mterp/arm/op_sget_object.S
+++ b/runtime/interpreter/mterp/arm/op_sget_object.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
+%include "arm/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_short.S b/runtime/interpreter/mterp/arm/op_sget_short.S
index c0d61c4d33..49493ebc68 100644
--- a/runtime/interpreter/mterp/arm/op_sget_short.S
+++ b/runtime/interpreter/mterp/arm/op_sget_short.S
@@ -1 +1 @@
-%include "arm/op_sget.S" {"helper":"MterpGetShortStatic"}
+%include "arm/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S
index aeee016294..5981ec4957 100644
--- a/runtime/interpreter/mterp/arm/op_sget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_sget_wide.S
@@ -4,12 +4,12 @@
*/
/* sget-wide vAA, field@BBBB */
- .extern MterpGet64Static
+ .extern MterpSGetU64
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl MterpGet64Static
+ bl MterpSGetU64
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r9, rINST, lsr #8 @ r9<- AA
VREG_INDEX_TO_ADDR lr, r9 @ lr<- &fp[AA]
diff --git a/runtime/interpreter/mterp/arm/op_sput.S b/runtime/interpreter/mterp/arm/op_sput.S
index 494df8aa5d..c4a8978cd1 100644
--- a/runtime/interpreter/mterp/arm/op_sput.S
+++ b/runtime/interpreter/mterp/arm/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"MterpSet32Static"}
+%default { "helper":"MterpSPutU32"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm/op_sput_boolean.S b/runtime/interpreter/mterp/arm/op_sput_boolean.S
index 47bed0a2ce..0c37623fb6 100644
--- a/runtime/interpreter/mterp/arm/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/arm/op_sput_boolean.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"MterpSetBooleanStatic"}
+%include "arm/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_byte.S b/runtime/interpreter/mterp/arm/op_sput_byte.S
index b4d22b4fd8..8d4e754229 100644
--- a/runtime/interpreter/mterp/arm/op_sput_byte.S
+++ b/runtime/interpreter/mterp/arm/op_sput_byte.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"MterpSetByteStatic"}
+%include "arm/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_char.S b/runtime/interpreter/mterp/arm/op_sput_char.S
index 58a957d1f6..442b56f7b1 100644
--- a/runtime/interpreter/mterp/arm/op_sput_char.S
+++ b/runtime/interpreter/mterp/arm/op_sput_char.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"MterpSetCharStatic"}
+%include "arm/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_object.S b/runtime/interpreter/mterp/arm/op_sput_object.S
index 6d3a9a7110..c58918fbbb 100644
--- a/runtime/interpreter/mterp/arm/op_sput_object.S
+++ b/runtime/interpreter/mterp/arm/op_sput_object.S
@@ -3,7 +3,7 @@
mov r1, rPC
mov r2, rINST
mov r3, rSELF
- bl MterpSputObject
+ bl MterpSPutObj
cmp r0, #0
beq MterpException
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm/op_sput_short.S b/runtime/interpreter/mterp/arm/op_sput_short.S
index 88c321127e..0eb533fe3c 100644
--- a/runtime/interpreter/mterp/arm/op_sput_short.S
+++ b/runtime/interpreter/mterp/arm/op_sput_short.S
@@ -1 +1 @@
-%include "arm/op_sput.S" {"helper":"MterpSetShortStatic"}
+%include "arm/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_wide.S b/runtime/interpreter/mterp/arm/op_sput_wide.S
index 1e8fcc9b75..0ed4017ce9 100644
--- a/runtime/interpreter/mterp/arm/op_sput_wide.S
+++ b/runtime/interpreter/mterp/arm/op_sput_wide.S
@@ -3,7 +3,7 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
mov r1, rINST, lsr #8 @ r1<- AA
@@ -11,7 +11,7 @@
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSet64Static
+ bl MterpSPutU64
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
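
Every helper rename in these mterp files follows one scheme: the prefix gives the access kind (IGet/IPut for instance fields, SGet/SPut for statics), then a type code, U or I for zero- versus sign-extended widths of 8/16/32/64 bits, or Obj for references; the template variable is also unified on "helper" where some op_iput files said "handler". A few of the mappings, tabulated from this diff for exposition (the table is illustrative, not ART code):

#include <cstdio>

struct Rename { const char* old_name; const char* new_name; };

constexpr Rename kFieldHelperRenames[] = {
    { "artGet32InstanceFromMterp",      "MterpIGetU32" },
    { "artGetBooleanInstanceFromMterp", "MterpIGetU8"  },
    { "artGetByteInstanceFromMterp",    "MterpIGetI8"  },
    { "artSet64InstanceFromMterp",      "MterpIPutU64" },
    { "MterpGet32Static",               "MterpSGetU32" },
    { "MterpSetShortStatic",            "MterpSPutI16" },
    { "MterpSputObject",                "MterpSPutObj" },
};

int main() {
  for (const Rename& r : kFieldHelperRenames) {
    std::printf("%-33s -> %s\n", r.old_name, r.new_name);
  }
}
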
diff --git a/runtime/interpreter/mterp/arm64/instruction_end.S b/runtime/interpreter/mterp/arm64/instruction_end.S
index 32c725c7d9..f90ebd0221 100644
--- a/runtime/interpreter/mterp/arm64/instruction_end.S
+++ b/runtime/interpreter/mterp/arm64/instruction_end.S
@@ -1,3 +1,5 @@
+ .type artMterpAsmInstructionEnd, #object
+ .hidden artMterpAsmInstructionEnd
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm64/instruction_end_alt.S b/runtime/interpreter/mterp/arm64/instruction_end_alt.S
index f90916fc02..0b66dbb947 100644
--- a/runtime/interpreter/mterp/arm64/instruction_end_alt.S
+++ b/runtime/interpreter/mterp/arm64/instruction_end_alt.S
@@ -1,3 +1,5 @@
+ .type artMterpAsmAltInstructionEnd, #object
+ .hidden artMterpAsmAltInstructionEnd
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm64/instruction_end_sister.S b/runtime/interpreter/mterp/arm64/instruction_end_sister.S
index c5f4886697..71c0300f6d 100644
--- a/runtime/interpreter/mterp/arm64/instruction_end_sister.S
+++ b/runtime/interpreter/mterp/arm64/instruction_end_sister.S
@@ -1,3 +1,5 @@
+ .type artMterpAsmSisterEnd, #object
+ .hidden artMterpAsmSisterEnd
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
diff --git a/runtime/interpreter/mterp/arm64/instruction_start.S b/runtime/interpreter/mterp/arm64/instruction_start.S
index 8874c20540..b7e9cf51e4 100644
--- a/runtime/interpreter/mterp/arm64/instruction_start.S
+++ b/runtime/interpreter/mterp/arm64/instruction_start.S
@@ -1,4 +1,6 @@
+ .type artMterpAsmInstructionStart, #object
+ .hidden artMterpAsmInstructionStart
.global artMterpAsmInstructionStart
artMterpAsmInstructionStart = .L_op_nop
.text
diff --git a/runtime/interpreter/mterp/arm64/instruction_start_alt.S b/runtime/interpreter/mterp/arm64/instruction_start_alt.S
index 0c9ffdb7d6..7a67ba064c 100644
--- a/runtime/interpreter/mterp/arm64/instruction_start_alt.S
+++ b/runtime/interpreter/mterp/arm64/instruction_start_alt.S
@@ -1,4 +1,6 @@
+ .type artMterpAsmAltInstructionStart, #object
+ .hidden artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionStart
artMterpAsmAltInstructionStart = .L_ALT_op_nop
.text
diff --git a/runtime/interpreter/mterp/arm64/instruction_start_sister.S b/runtime/interpreter/mterp/arm64/instruction_start_sister.S
index 2ec51f7261..0036061605 100644
--- a/runtime/interpreter/mterp/arm64/instruction_start_sister.S
+++ b/runtime/interpreter/mterp/arm64/instruction_start_sister.S
@@ -1,4 +1,6 @@
+ .type artMterpAsmSisterStart, #object
+ .hidden artMterpAsmSisterStart
.global artMterpAsmSisterStart
.text
.balign 4
diff --git a/runtime/interpreter/mterp/arm64/op_iget.S b/runtime/interpreter/mterp/arm64/op_iget.S
index d9feac7765..cb453ac524 100644
--- a/runtime/interpreter/mterp/arm64/op_iget.S
+++ b/runtime/interpreter/mterp/arm64/op_iget.S
@@ -1,4 +1,4 @@
-%default { "extend":"", "is_object":"0", "helper":"artGet32InstanceFromMterp"}
+%default { "extend":"", "is_object":"0", "helper":"MterpIGetU32"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/arm64/op_iget_boolean.S b/runtime/interpreter/mterp/arm64/op_iget_boolean.S
index f6ea4dd8b5..3b17144478 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_boolean.S
@@ -1 +1 @@
-%include "arm64/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp", "extend":"uxtb w0, w0" }
+%include "arm64/op_iget.S" { "helper":"MterpIGetU8", "extend":"uxtb w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_byte.S b/runtime/interpreter/mterp/arm64/op_iget_byte.S
index 497e2bf253..d5ef1d3c2b 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_byte.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_byte.S
@@ -1 +1 @@
-%include "arm64/op_iget.S" { "helper":"artGetByteInstanceFromMterp", "extend":"sxtb w0, w0" }
+%include "arm64/op_iget.S" { "helper":"MterpIGetI8", "extend":"sxtb w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_char.S b/runtime/interpreter/mterp/arm64/op_iget_char.S
index 4669859121..68e1435201 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_char.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_char.S
@@ -1 +1 @@
-%include "arm64/op_iget.S" { "helper":"artGetCharInstanceFromMterp", "extend":"uxth w0, w0" }
+%include "arm64/op_iget.S" { "helper":"MterpIGetU16", "extend":"uxth w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_object.S b/runtime/interpreter/mterp/arm64/op_iget_object.S
index eb7bdeaee3..40ddadd971 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_object.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_object.S
@@ -1 +1 @@
-%include "arm64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
+%include "arm64/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_short.S b/runtime/interpreter/mterp/arm64/op_iget_short.S
index 6f0a5055d7..714f4b9aef 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_short.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_short.S
@@ -1 +1 @@
-%include "arm64/op_iget.S" { "helper":"artGetShortInstanceFromMterp", "extend":"sxth w0, w0" }
+%include "arm64/op_iget.S" { "helper":"MterpIGetI16", "extend":"sxth w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_wide.S b/runtime/interpreter/mterp/arm64/op_iget_wide.S
index 02ef0a71fd..4fc735ce6b 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_wide.S
@@ -9,7 +9,7 @@
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGet64InstanceFromMterp
+ bl MterpIGetU64
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
ubfx w2, wINST, #8, #4 // w2<- A
PREFETCH_INST 2
diff --git a/runtime/interpreter/mterp/arm64/op_iput.S b/runtime/interpreter/mterp/arm64/op_iput.S
index a8c0e61a8f..5e21d5c6de 100644
--- a/runtime/interpreter/mterp/arm64/op_iput.S
+++ b/runtime/interpreter/mterp/arm64/op_iput.S
@@ -1,11 +1,11 @@
-%default { "is_object":"0", "handler":"artSet32InstanceFromMterp" }
+%default { "is_object":"0", "helper":"MterpIPutU32" }
/*
* General 32-bit instance field put.
*
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field//CCCC */
- .extern $handler
+ .extern $helper
EXPORT_PC
FETCH w0, 1 // w0<- field ref CCCC
lsr w1, wINST, #12 // w1<- B
@@ -14,7 +14,7 @@
GET_VREG w2, w2 // w2<- fp[A]
ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
PREFETCH_INST 2
- bl $handler
+ bl $helper
cbnz w0, MterpPossibleException
ADVANCE 2 // advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm64/op_iput_boolean.S b/runtime/interpreter/mterp/arm64/op_iput_boolean.S
index bbf53192c5..12a278ccba 100644
--- a/runtime/interpreter/mterp/arm64/op_iput_boolean.S
+++ b/runtime/interpreter/mterp/arm64/op_iput_boolean.S
@@ -1 +1 @@
-%include "arm64/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
+%include "arm64/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_byte.S b/runtime/interpreter/mterp/arm64/op_iput_byte.S
index bbf53192c5..82b99e9765 100644
--- a/runtime/interpreter/mterp/arm64/op_iput_byte.S
+++ b/runtime/interpreter/mterp/arm64/op_iput_byte.S
@@ -1 +1 @@
-%include "arm64/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
+%include "arm64/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_char.S b/runtime/interpreter/mterp/arm64/op_iput_char.S
index 150d8794ca..427d92d9c0 100644
--- a/runtime/interpreter/mterp/arm64/op_iput_char.S
+++ b/runtime/interpreter/mterp/arm64/op_iput_char.S
@@ -1 +1 @@
-%include "arm64/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
+%include "arm64/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_object.S b/runtime/interpreter/mterp/arm64/op_iput_object.S
index 37a649be6b..0c0441a3f0 100644
--- a/runtime/interpreter/mterp/arm64/op_iput_object.S
+++ b/runtime/interpreter/mterp/arm64/op_iput_object.S
@@ -3,7 +3,7 @@
mov x1, xPC
mov w2, wINST
mov x3, xSELF
- bl MterpIputObject
+ bl MterpIPutObj
cbz w0, MterpException
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
GET_INST_OPCODE ip // extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm64/op_iput_short.S b/runtime/interpreter/mterp/arm64/op_iput_short.S
index 150d8794ca..67f1ace8bf 100644
--- a/runtime/interpreter/mterp/arm64/op_iput_short.S
+++ b/runtime/interpreter/mterp/arm64/op_iput_short.S
@@ -1 +1 @@
-%include "arm64/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
+%include "arm64/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide.S b/runtime/interpreter/mterp/arm64/op_iput_wide.S
index e1ab1271f5..be6aeb0e3d 100644
--- a/runtime/interpreter/mterp/arm64/op_iput_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_iput_wide.S
@@ -1,5 +1,5 @@
/* iput-wide vA, vB, field//CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC
FETCH w0, 1 // w0<- field ref CCCC
lsr w1, wINST, #12 // w1<- B
@@ -8,7 +8,7 @@
VREG_INDEX_TO_ADDR x2, x2 // w2<- &fp[A]
ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
PREFETCH_INST 2
- bl artSet64InstanceFromMterp
+ bl MterpIPutU64
cbnz w0, MterpPossibleException
ADVANCE 2 // advance rPC
GET_INST_OPCODE ip // extract opcode from wINST
diff --git a/runtime/interpreter/mterp/arm64/op_sget.S b/runtime/interpreter/mterp/arm64/op_sget.S
index 84e71ac15e..00b07fa484 100644
--- a/runtime/interpreter/mterp/arm64/op_sget.S
+++ b/runtime/interpreter/mterp/arm64/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"MterpGet32Static", "extend":"" }
+%default { "is_object":"0", "helper":"MterpSGetU32", "extend":"" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm64/op_sget_boolean.S b/runtime/interpreter/mterp/arm64/op_sget_boolean.S
index 868f41cb7f..73f3a107f4 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_boolean.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"MterpGetBooleanStatic", "extend":"uxtb w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpSGetU8", "extend":"uxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_byte.S b/runtime/interpreter/mterp/arm64/op_sget_byte.S
index e135aa737a..38c0da614f 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_byte.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_byte.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"MterpGetByteStatic", "extend":"sxtb w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpSGetI8", "extend":"sxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_char.S b/runtime/interpreter/mterp/arm64/op_sget_char.S
index 05d57ac20b..c0801bfa2b 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_char.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_char.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"MterpGetCharStatic", "extend":"uxth w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpSGetU16", "extend":"uxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_object.S b/runtime/interpreter/mterp/arm64/op_sget_object.S
index 1faaf6eb8e..69d6adb549 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_object.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_object.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
+%include "arm64/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_short.S b/runtime/interpreter/mterp/arm64/op_sget_short.S
index 5900231b06..81e043453e 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_short.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_short.S
@@ -1 +1 @@
-%include "arm64/op_sget.S" {"helper":"MterpGetShortStatic", "extend":"sxth w0, w0"}
+%include "arm64/op_sget.S" {"helper":"MterpSGetI16", "extend":"sxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_wide.S b/runtime/interpreter/mterp/arm64/op_sget_wide.S
index 92f3f7dd66..546ab9482d 100644
--- a/runtime/interpreter/mterp/arm64/op_sget_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_sget_wide.S
@@ -4,12 +4,12 @@
*/
/* sget-wide vAA, field//BBBB */
- .extern MterpGet64StaticFromCode
+ .extern MterpSGetU64
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl MterpGet64Static
+ bl MterpSGetU64
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w4, wINST, #8 // w4<- AA
cbnz x3, MterpException // bail out
diff --git a/runtime/interpreter/mterp/arm64/op_sput.S b/runtime/interpreter/mterp/arm64/op_sput.S
index e322af0e76..7a0dc30c52 100644
--- a/runtime/interpreter/mterp/arm64/op_sput.S
+++ b/runtime/interpreter/mterp/arm64/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"MterpSet32Static"}
+%default { "helper":"MterpSPutU32"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/arm64/op_sput_boolean.S b/runtime/interpreter/mterp/arm64/op_sput_boolean.S
index 9928f31c98..3d0c7c0262 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_boolean.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"MterpSetBooleanStatic"}
+%include "arm64/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_byte.S b/runtime/interpreter/mterp/arm64/op_sput_byte.S
index 16d6ba96e0..489cf92149 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_byte.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_byte.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"MterpSetByteStatic"}
+%include "arm64/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_char.S b/runtime/interpreter/mterp/arm64/op_sput_char.S
index ab5e8152b9..f79d311c17 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_char.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_char.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"MterpSetCharStatic"}
+%include "arm64/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_object.S b/runtime/interpreter/mterp/arm64/op_sput_object.S
index c176da273f..a64965614b 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_object.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_object.S
@@ -3,7 +3,7 @@
mov x1, xPC
mov x2, xINST
mov x3, xSELF
- bl MterpSputObject
+ bl MterpSPutObj
cbz w0, MterpException
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
GET_INST_OPCODE ip // extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm64/op_sput_short.S b/runtime/interpreter/mterp/arm64/op_sput_short.S
index b54f88ad48..06482cd7a0 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_short.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_short.S
@@ -1 +1 @@
-%include "arm64/op_sput.S" {"helper":"MterpSetShortStatic"}
+%include "arm64/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_wide.S b/runtime/interpreter/mterp/arm64/op_sput_wide.S
index 4aeb8ff316..58b3c42333 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_wide.S
@@ -3,7 +3,7 @@
*
*/
/* sput-wide vAA, field//BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
lsr w1, wINST, #8 // w1<- AA
@@ -11,7 +11,7 @@
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSet64Static
+ bl MterpSPutU64
cbnz w0, MterpException // 0 on success, -1 on failure
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from wINST
diff --git a/runtime/interpreter/mterp/mips/op_iget.S b/runtime/interpreter/mterp/mips/op_iget.S
index 0785ac5e32..33717de640 100644
--- a/runtime/interpreter/mterp/mips/op_iget.S
+++ b/runtime/interpreter/mterp/mips/op_iget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32InstanceFromMterp"}
+%default { "is_object":"0", "helper":"MterpIGetU32"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/mips/op_iget_boolean.S b/runtime/interpreter/mterp/mips/op_iget_boolean.S
index a932c37a82..f2ef68d8b2 100644
--- a/runtime/interpreter/mterp/mips/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/mips/op_iget_boolean.S
@@ -1 +1 @@
-%include "mips/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp" }
+%include "mips/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_byte.S b/runtime/interpreter/mterp/mips/op_iget_byte.S
index e498a8c774..0c8fb7cf2c 100644
--- a/runtime/interpreter/mterp/mips/op_iget_byte.S
+++ b/runtime/interpreter/mterp/mips/op_iget_byte.S
@@ -1 +1 @@
-%include "mips/op_iget.S" { "helper":"artGetByteInstanceFromMterp" }
+%include "mips/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_char.S b/runtime/interpreter/mterp/mips/op_iget_char.S
index efd5b99372..69d04c4fa8 100644
--- a/runtime/interpreter/mterp/mips/op_iget_char.S
+++ b/runtime/interpreter/mterp/mips/op_iget_char.S
@@ -1 +1 @@
-%include "mips/op_iget.S" { "helper":"artGetCharInstanceFromMterp" }
+%include "mips/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_object.S b/runtime/interpreter/mterp/mips/op_iget_object.S
index 8fa96521f6..bea330a14b 100644
--- a/runtime/interpreter/mterp/mips/op_iget_object.S
+++ b/runtime/interpreter/mterp/mips/op_iget_object.S
@@ -1 +1 @@
-%include "mips/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
+%include "mips/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_short.S b/runtime/interpreter/mterp/mips/op_iget_short.S
index efc5de4890..357c7918e4 100644
--- a/runtime/interpreter/mterp/mips/op_iget_short.S
+++ b/runtime/interpreter/mterp/mips/op_iget_short.S
@@ -1 +1 @@
-%include "mips/op_iget.S" { "helper":"artGetShortInstanceFromMterp" }
+%include "mips/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide.S b/runtime/interpreter/mterp/mips/op_iget_wide.S
index e1d83a48f5..858a8898f4 100644
--- a/runtime/interpreter/mterp/mips/op_iget_wide.S
+++ b/runtime/interpreter/mterp/mips/op_iget_wide.S
@@ -10,7 +10,7 @@
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGet64InstanceFromMterp)
+ JAL(MterpIGetU64)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
diff --git a/runtime/interpreter/mterp/mips/op_iput.S b/runtime/interpreter/mterp/mips/op_iput.S
index 9133d60ed8..4dd4075e92 100644
--- a/runtime/interpreter/mterp/mips/op_iput.S
+++ b/runtime/interpreter/mterp/mips/op_iput.S
@@ -1,11 +1,11 @@
-%default { "handler":"artSet32InstanceFromMterp" }
+%default { "helper":"MterpIPutU32" }
/*
* General 32-bit instance field put.
*
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern $handler
+ .extern $helper
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -14,7 +14,7 @@
GET_VREG(a2, a2) # a2 <- fp[A]
lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST(2) # load rINST
- JAL($handler)
+ JAL($helper)
bnez v0, MterpPossibleException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips/op_iput_boolean.S b/runtime/interpreter/mterp/mips/op_iput_boolean.S
index da28c978a4..55ac4cedaa 100644
--- a/runtime/interpreter/mterp/mips/op_iput_boolean.S
+++ b/runtime/interpreter/mterp/mips/op_iput_boolean.S
@@ -1 +1 @@
-%include "mips/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
+%include "mips/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_byte.S b/runtime/interpreter/mterp/mips/op_iput_byte.S
index da28c978a4..61e489be11 100644
--- a/runtime/interpreter/mterp/mips/op_iput_byte.S
+++ b/runtime/interpreter/mterp/mips/op_iput_byte.S
@@ -1 +1 @@
-%include "mips/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
+%include "mips/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_char.S b/runtime/interpreter/mterp/mips/op_iput_char.S
index 389b0bf19b..2caad1e0a6 100644
--- a/runtime/interpreter/mterp/mips/op_iput_char.S
+++ b/runtime/interpreter/mterp/mips/op_iput_char.S
@@ -1 +1 @@
-%include "mips/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
+%include "mips/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_object.S b/runtime/interpreter/mterp/mips/op_iput_object.S
index cfa56ec657..c96a4d4ec7 100644
--- a/runtime/interpreter/mterp/mips/op_iput_object.S
+++ b/runtime/interpreter/mterp/mips/op_iput_object.S
@@ -9,7 +9,7 @@
move a1, rPC
move a2, rINST
move a3, rSELF
- JAL(MterpIputObject)
+ JAL(MterpIPutObj)
beqz v0, MterpException
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips/op_iput_short.S b/runtime/interpreter/mterp/mips/op_iput_short.S
index 389b0bf19b..414a15bd70 100644
--- a/runtime/interpreter/mterp/mips/op_iput_short.S
+++ b/runtime/interpreter/mterp/mips/op_iput_short.S
@@ -1 +1 @@
-%include "mips/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
+%include "mips/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide.S b/runtime/interpreter/mterp/mips/op_iput_wide.S
index bc3d758c5e..dccb6b71b1 100644
--- a/runtime/interpreter/mterp/mips/op_iput_wide.S
+++ b/runtime/interpreter/mterp/mips/op_iput_wide.S
@@ -1,5 +1,5 @@
/* iput-wide vA, vB, field@CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -8,7 +8,7 @@
EAS2(a2, rFP, a2) # a2 <- &fp[A]
lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST(2) # load rINST
- JAL(artSet64InstanceFromMterp)
+ JAL(MterpIPutU64)
bnez v0, MterpPossibleException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips/op_sget.S b/runtime/interpreter/mterp/mips/op_sget.S
index 635df8aa1f..8750a17a41 100644
--- a/runtime/interpreter/mterp/mips/op_sget.S
+++ b/runtime/interpreter/mterp/mips/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"MterpGet32Static" }
+%default { "is_object":"0", "helper":"MterpSGetU32" }
/*
* General SGET handler.
*
diff --git a/runtime/interpreter/mterp/mips/op_sget_boolean.S b/runtime/interpreter/mterp/mips/op_sget_boolean.S
index 7829970d84..7a7012e81f 100644
--- a/runtime/interpreter/mterp/mips/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/mips/op_sget_boolean.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"MterpGetBooleanStatic"}
+%include "mips/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_byte.S b/runtime/interpreter/mterp/mips/op_sget_byte.S
index ee0834201b..a2f1dbf606 100644
--- a/runtime/interpreter/mterp/mips/op_sget_byte.S
+++ b/runtime/interpreter/mterp/mips/op_sget_byte.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"MterpGetByteStatic"}
+%include "mips/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_char.S b/runtime/interpreter/mterp/mips/op_sget_char.S
index d8b477a7bc..07d40416a3 100644
--- a/runtime/interpreter/mterp/mips/op_sget_char.S
+++ b/runtime/interpreter/mterp/mips/op_sget_char.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"MterpGetCharStatic"}
+%include "mips/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_object.S b/runtime/interpreter/mterp/mips/op_sget_object.S
index 2dc00c386c..0a3c9eef88 100644
--- a/runtime/interpreter/mterp/mips/op_sget_object.S
+++ b/runtime/interpreter/mterp/mips/op_sget_object.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
+%include "mips/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_short.S b/runtime/interpreter/mterp/mips/op_sget_short.S
index ab55d93060..29604430f2 100644
--- a/runtime/interpreter/mterp/mips/op_sget_short.S
+++ b/runtime/interpreter/mterp/mips/op_sget_short.S
@@ -1 +1 @@
-%include "mips/op_sget.S" {"helper":"MterpGetShortStatic"}
+%include "mips/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_wide.S b/runtime/interpreter/mterp/mips/op_sget_wide.S
index ec4295ad03..76f78cb35d 100644
--- a/runtime/interpreter/mterp/mips/op_sget_wide.S
+++ b/runtime/interpreter/mterp/mips/op_sget_wide.S
@@ -2,12 +2,12 @@
* 64-bit SGET handler.
*/
/* sget-wide vAA, field@BBBB */
- .extern MterpGet64Static
+ .extern MterpSGetU64
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(MterpGet64Static)
+ JAL(MterpSGetU64)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
bnez a3, MterpException
GET_OPA(a1) # a1 <- AA
diff --git a/runtime/interpreter/mterp/mips/op_sput.S b/runtime/interpreter/mterp/mips/op_sput.S
index 37f8687aaa..547de3964a 100644
--- a/runtime/interpreter/mterp/mips/op_sput.S
+++ b/runtime/interpreter/mterp/mips/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"MterpSet32Static"}
+%default { "helper":"MterpSPutU32"}
/*
* General SPUT handler.
*
diff --git a/runtime/interpreter/mterp/mips/op_sput_boolean.S b/runtime/interpreter/mterp/mips/op_sput_boolean.S
index 6426cd40eb..0137430acc 100644
--- a/runtime/interpreter/mterp/mips/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/mips/op_sput_boolean.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"MterpSetBooleanStatic"}
+%include "mips/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_byte.S b/runtime/interpreter/mterp/mips/op_sput_byte.S
index c68d18f2f7..5ae4256a98 100644
--- a/runtime/interpreter/mterp/mips/op_sput_byte.S
+++ b/runtime/interpreter/mterp/mips/op_sput_byte.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"MterpSetByteStatic"}
+%include "mips/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_char.S b/runtime/interpreter/mterp/mips/op_sput_char.S
index 9b8983e4c6..83787a7753 100644
--- a/runtime/interpreter/mterp/mips/op_sput_char.S
+++ b/runtime/interpreter/mterp/mips/op_sput_char.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"MterpSetCharStatic"}
+%include "mips/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_object.S b/runtime/interpreter/mterp/mips/op_sput_object.S
index 4f9034ec0e..55c88a6816 100644
--- a/runtime/interpreter/mterp/mips/op_sput_object.S
+++ b/runtime/interpreter/mterp/mips/op_sput_object.S
@@ -9,7 +9,7 @@
move a1, rPC
move a2, rINST
move a3, rSELF
- JAL(MterpSputObject)
+ JAL(MterpSPutObj)
beqz v0, MterpException
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips/op_sput_short.S b/runtime/interpreter/mterp/mips/op_sput_short.S
index 5a57ed9922..df99b4414d 100644
--- a/runtime/interpreter/mterp/mips/op_sput_short.S
+++ b/runtime/interpreter/mterp/mips/op_sput_short.S
@@ -1 +1 @@
-%include "mips/op_sput.S" {"helper":"MterpSetShortStatic"}
+%include "mips/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_wide.S b/runtime/interpreter/mterp/mips/op_sput_wide.S
index c090007968..cfaaaee6f3 100644
--- a/runtime/interpreter/mterp/mips/op_sput_wide.S
+++ b/runtime/interpreter/mterp/mips/op_sput_wide.S
@@ -2,7 +2,7 @@
* 64-bit SPUT handler.
*/
/* sput-wide vAA, field@BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
GET_OPA(a1) # a1 <- AA
@@ -10,7 +10,7 @@
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(MterpSet64Static)
+ JAL(MterpSPutU64)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
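
Note: the renames above follow one scheme across all ports: the helper name encodes the access kind (IGet/IPut for instance fields, SGet/SPut for statics) plus the storage type, with U/I marking unsigned/signed widths (U8 = boolean, I8 = byte, U16 = char, I16 = short, U32/U64 = 32/64-bit, Obj = reference). As a declaration-level sketch only — ART types are forward-declared here instead of pulling in the real headers — two of the new entry points, matching the signatures visible in the mterp.cc hunks further down:

#include <cstddef>
#include <cstdint>

namespace art {
class ArtMethod;
class Thread;
namespace mirror { class Object; }

// Instance get, unsigned 8-bit (boolean); was artGetBooleanInstanceFromMterp.
extern "C" size_t MterpIGetU8(uint32_t field_idx, mirror::Object* obj,
                              ArtMethod* referrer, Thread* self);
// Static put, signed 16-bit (short); was MterpSetShortStatic.
extern "C" int MterpSPutI16(uint32_t field_idx, int16_t new_value,
                            ArtMethod* referrer, Thread* self);
}  // namespace art
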
diff --git a/runtime/interpreter/mterp/mips64/op_iget.S b/runtime/interpreter/mterp/mips64/op_iget.S
index 4158603e4a..a8ce94c3ba 100644
--- a/runtime/interpreter/mterp/mips64/op_iget.S
+++ b/runtime/interpreter/mterp/mips64/op_iget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32InstanceFromMterp"}
+%default { "is_object":"0", "helper":"MterpIGetU32"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean.S b/runtime/interpreter/mterp/mips64/op_iget_boolean.S
index e64b7982f8..dc2a42ad76 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_boolean.S
@@ -1 +1 @@
-%include "mips64/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp" }
+%include "mips64/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte.S b/runtime/interpreter/mterp/mips64/op_iget_byte.S
index fefe53f308..c5bf6506e6 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_byte.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_byte.S
@@ -1 +1 @@
-%include "mips64/op_iget.S" { "helper":"artGetByteInstanceFromMterp" }
+%include "mips64/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_char.S b/runtime/interpreter/mterp/mips64/op_iget_char.S
index 9caf40ecff..3bf0c5aab9 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_char.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_char.S
@@ -1 +1 @@
-%include "mips64/op_iget.S" { "helper":"artGetCharInstanceFromMterp" }
+%include "mips64/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_object.S b/runtime/interpreter/mterp/mips64/op_iget_object.S
index ce3421a94f..23fa187192 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_object.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_object.S
@@ -1 +1 @@
-%include "mips64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
+%include "mips64/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_short.S b/runtime/interpreter/mterp/mips64/op_iget_short.S
index e2d122d2af..a9927fc982 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_short.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_short.S
@@ -1 +1 @@
-%include "mips64/op_iget.S" { "helper":"artGetShortInstanceFromMterp" }
+%include "mips64/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide.S b/runtime/interpreter/mterp/mips64/op_iget_wide.S
index ca793e0a27..08bf544265 100644
--- a/runtime/interpreter/mterp/mips64/op_iget_wide.S
+++ b/runtime/interpreter/mterp/mips64/op_iget_wide.S
@@ -3,14 +3,14 @@
*
* for: iget-wide
*/
- .extern artGet64InstanceFromMterp
+ .extern MterpIGetU64
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGet64InstanceFromMterp
+ jal MterpIGetU64
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
diff --git a/runtime/interpreter/mterp/mips64/op_iput.S b/runtime/interpreter/mterp/mips64/op_iput.S
index a906a0fc82..9a789e612d 100644
--- a/runtime/interpreter/mterp/mips64/op_iput.S
+++ b/runtime/interpreter/mterp/mips64/op_iput.S
@@ -1,4 +1,4 @@
-%default { "helper":"artSet32InstanceFromMterp" }
+%default { "helper":"MterpIPutU32" }
/*
* General 32-bit instance field put.
*
diff --git a/runtime/interpreter/mterp/mips64/op_iput_boolean.S b/runtime/interpreter/mterp/mips64/op_iput_boolean.S
index 3034fa59d5..8e1d083759 100644
--- a/runtime/interpreter/mterp/mips64/op_iput_boolean.S
+++ b/runtime/interpreter/mterp/mips64/op_iput_boolean.S
@@ -1 +1 @@
-%include "mips64/op_iput.S" { "helper":"artSet8InstanceFromMterp" }
+%include "mips64/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_byte.S b/runtime/interpreter/mterp/mips64/op_iput_byte.S
index 3034fa59d5..ce3b614b0c 100644
--- a/runtime/interpreter/mterp/mips64/op_iput_byte.S
+++ b/runtime/interpreter/mterp/mips64/op_iput_byte.S
@@ -1 +1 @@
-%include "mips64/op_iput.S" { "helper":"artSet8InstanceFromMterp" }
+%include "mips64/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_char.S b/runtime/interpreter/mterp/mips64/op_iput_char.S
index 4c2fa28776..1d587fad6b 100644
--- a/runtime/interpreter/mterp/mips64/op_iput_char.S
+++ b/runtime/interpreter/mterp/mips64/op_iput_char.S
@@ -1 +1 @@
-%include "mips64/op_iput.S" { "helper":"artSet16InstanceFromMterp" }
+%include "mips64/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_object.S b/runtime/interpreter/mterp/mips64/op_iput_object.S
index 9a42f54669..dd1938ec63 100644
--- a/runtime/interpreter/mterp/mips64/op_iput_object.S
+++ b/runtime/interpreter/mterp/mips64/op_iput_object.S
@@ -1,10 +1,10 @@
- .extern MterpIputObject
+ .extern MterpIPutObj
EXPORT_PC
daddu a0, rFP, OFF_FP_SHADOWFRAME
move a1, rPC
move a2, rINST
move a3, rSELF
- jal MterpIputObject
+ jal MterpIPutObj
beqzc v0, MterpException
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
GET_INST_OPCODE v0 # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips64/op_iput_short.S b/runtime/interpreter/mterp/mips64/op_iput_short.S
index 4c2fa28776..dd68bbeaaa 100644
--- a/runtime/interpreter/mterp/mips64/op_iput_short.S
+++ b/runtime/interpreter/mterp/mips64/op_iput_short.S
@@ -1 +1 @@
-%include "mips64/op_iput.S" { "helper":"artSet16InstanceFromMterp" }
+%include "mips64/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_wide.S b/runtime/interpreter/mterp/mips64/op_iput_wide.S
index 9b790f812a..62726908fb 100644
--- a/runtime/interpreter/mterp/mips64/op_iput_wide.S
+++ b/runtime/interpreter/mterp/mips64/op_iput_wide.S
@@ -1,5 +1,5 @@
/* iput-wide vA, vB, field//CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
@@ -8,7 +8,7 @@
dlsa a2, a2, rFP, 2 # a2 <- &fp[A]
ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST 2
- jal artSet64InstanceFromMterp
+ jal MterpIPutU64
bnez v0, MterpPossibleException # bail out
ADVANCE 2
GET_INST_OPCODE v0 # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips64/op_sget.S b/runtime/interpreter/mterp/mips64/op_sget.S
index 71046dba1a..b7b0382b1c 100644
--- a/runtime/interpreter/mterp/mips64/op_sget.S
+++ b/runtime/interpreter/mterp/mips64/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"MterpGet32Static", "extend":"" }
+%default { "is_object":"0", "helper":"MterpSGetU32", "extend":"" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/mips64/op_sget_boolean.S b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
index ec1ce9eb14..fe2deb1479 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"MterpGetBooleanStatic", "extend":"and v0, v0, 0xff"}
+%include "mips64/op_sget.S" {"helper":"MterpSGetU8", "extend":"and v0, v0, 0xff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_byte.S b/runtime/interpreter/mterp/mips64/op_sget_byte.S
index 6a802f63ea..a7e2bef4bc 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_byte.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_byte.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"MterpGetByteStatic", "extend":"seb v0, v0"}
+%include "mips64/op_sget.S" {"helper":"MterpSGetI8", "extend":"seb v0, v0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_char.S b/runtime/interpreter/mterp/mips64/op_sget_char.S
index 483d085719..ed86f32588 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_char.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_char.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"MterpGetCharStatic", "extend":"and v0, v0, 0xffff"}
+%include "mips64/op_sget.S" {"helper":"MterpSGetU16", "extend":"and v0, v0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_object.S b/runtime/interpreter/mterp/mips64/op_sget_object.S
index 2250696a97..3b260e6ee2 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_object.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_object.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
+%include "mips64/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_short.S b/runtime/interpreter/mterp/mips64/op_sget_short.S
index b257bbbba1..f708a201ce 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_short.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_short.S
@@ -1 +1 @@
-%include "mips64/op_sget.S" {"helper":"MterpGetShortStatic", "extend":"seh v0, v0"}
+%include "mips64/op_sget.S" {"helper":"MterpSGetI16", "extend":"seh v0, v0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_wide.S b/runtime/interpreter/mterp/mips64/op_sget_wide.S
index ace64f8e80..7c31252aed 100644
--- a/runtime/interpreter/mterp/mips64/op_sget_wide.S
+++ b/runtime/interpreter/mterp/mips64/op_sget_wide.S
@@ -3,12 +3,12 @@
*
*/
/* sget-wide vAA, field//BBBB */
- .extern MterpGet64Static
+ .extern MterpSGetU64
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal MterpGet64Static
+ jal MterpSGetU64
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a4, rINST, 8 # a4 <- AA
bnez a3, MterpException # bail out
diff --git a/runtime/interpreter/mterp/mips64/op_sput.S b/runtime/interpreter/mterp/mips64/op_sput.S
index 466f3339c2..28b8c3ea26 100644
--- a/runtime/interpreter/mterp/mips64/op_sput.S
+++ b/runtime/interpreter/mterp/mips64/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"MterpSet32Static" }
+%default { "helper":"MterpSPutU32" }
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/mips64/op_sput_boolean.S b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
index eba58f7fa1..2e769d5e1a 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"MterpSetBooleanStatic"}
+%include "mips64/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_byte.S b/runtime/interpreter/mterp/mips64/op_sput_byte.S
index 80a26c0161..0b04b590ee 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_byte.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_byte.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"MterpSetByteStatic"}
+%include "mips64/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_char.S b/runtime/interpreter/mterp/mips64/op_sput_char.S
index c0d5bf3bba..4a80375d65 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_char.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_char.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"MterpSetCharStatic"}
+%include "mips64/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_object.S b/runtime/interpreter/mterp/mips64/op_sput_object.S
index ef4c685116..ff43967666 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_object.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_object.S
@@ -1,10 +1,10 @@
- .extern MterpSputObject
+ .extern MterpSPutObj
EXPORT_PC
daddu a0, rFP, OFF_FP_SHADOWFRAME
move a1, rPC
move a2, rINST
move a3, rSELF
- jal MterpSputObject
+ jal MterpSPutObj
beqzc v0, MterpException
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
GET_INST_OPCODE v0 # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips64/op_sput_short.S b/runtime/interpreter/mterp/mips64/op_sput_short.S
index b001832bc4..c00043b6b7 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_short.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_short.S
@@ -1 +1 @@
-%include "mips64/op_sput.S" {"helper":"MterpSetShortStatic"}
+%include "mips64/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_wide.S b/runtime/interpreter/mterp/mips64/op_sput_wide.S
index aa3d5b4157..bfb6983bb4 100644
--- a/runtime/interpreter/mterp/mips64/op_sput_wide.S
+++ b/runtime/interpreter/mterp/mips64/op_sput_wide.S
@@ -3,7 +3,7 @@
*
*/
/* sput-wide vAA, field//BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a1, rINST, 8 # a1 <- AA
@@ -11,7 +11,7 @@
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSet64Static
+ jal MterpSPutU64
bnezc v0, MterpException # 0 on success, -1 on failure
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
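
Note: in the mterp.cc diff that follows, the getter macro gains a leading Suffix parameter so the exported symbol is built by token pasting (MterpIGet ## Suffix), while the old Kind parameter keeps selecting the typed ArtField accessor. A self-contained toy of the same preprocessor pattern (the ToyIGet* names are invented for illustration):

#include <cstdint>
#include <cstdio>

// Toy analogue of ART_GET_FIELD_FROM_MTERP(Suffix, Kind, PrimType, RetType, Ptr):
// the first argument is pasted into the exported name, the rest shape the body.
#define TOY_GET(Suffix, RetType, value) \
  extern "C" RetType ToyIGet##Suffix() { return (value); }

TOY_GET(U8, uint8_t, 0xffu)   // defines extern "C" uint8_t ToyIGetU8()
TOY_GET(I16, int16_t, -2)     // defines extern "C" int16_t ToyIGetI16()
#undef TOY_GET

int main() {
  std::printf("%u %d\n", unsigned{ToyIGetU8()}, int{ToyIGetI16()});  // 255 -2
  return 0;
}
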
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index abbc50936d..7b37c9aaef 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -490,7 +490,7 @@ extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint
return true;
}
-extern "C" size_t MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+extern "C" size_t MterpSPutObj(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
uint32_t inst_data, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -498,7 +498,7 @@ extern "C" size_t MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_pt
(self, *shadow_frame, inst, inst_data);
}
-extern "C" size_t MterpIputObject(ShadowFrame* shadow_frame,
+extern "C" size_t MterpIPutObj(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint32_t inst_data,
Thread* self)
@@ -734,11 +734,11 @@ ALWAYS_INLINE RetType artGetInstanceFromMterpFast(uint32_t field_idx,
return artGetInstanceFromMterp<PrimType, RetType, Getter, kType>(field_idx, obj, referrer, self);
}
-#define ART_GET_FIELD_FROM_MTERP(Kind, PrimType, RetType, Ptr) \
-extern "C" RetType artGet ## Kind ## InstanceFromMterp(uint32_t field_idx, \
- mirror::Object* obj, \
- ArtMethod* referrer, \
- Thread* self) \
+#define ART_GET_FIELD_FROM_MTERP(Suffix, Kind, PrimType, RetType, Ptr) \
+extern "C" RetType MterpIGet ## Suffix(uint32_t field_idx, \
+ mirror::Object* obj, \
+ ArtMethod* referrer, \
+ Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
struct Getter { /* Specialize the field load depending on the field type */ \
static RetType Get(mirror::Object* o, ArtField* f) REQUIRES_SHARED(Locks::mutator_lock_) { \
@@ -748,62 +748,74 @@ extern "C" RetType artGet ## Kind ## InstanceFromMterp(uint32_t field_idx,
return artGetInstanceFromMterpFast<PrimType, RetType, Getter>(field_idx, obj, referrer, self); \
} \
-ART_GET_FIELD_FROM_MTERP(Byte, int8_t, ssize_t, )
-ART_GET_FIELD_FROM_MTERP(Boolean, uint8_t, size_t, )
-ART_GET_FIELD_FROM_MTERP(Short, int16_t, ssize_t, )
-ART_GET_FIELD_FROM_MTERP(Char, uint16_t, size_t, )
-ART_GET_FIELD_FROM_MTERP(32, uint32_t, size_t, )
-ART_GET_FIELD_FROM_MTERP(64, uint64_t, uint64_t, )
-ART_GET_FIELD_FROM_MTERP(Obj, mirror::HeapReference<mirror::Object>, mirror::Object*, .Ptr())
+ART_GET_FIELD_FROM_MTERP(I8, Byte, int8_t, ssize_t, )
+ART_GET_FIELD_FROM_MTERP(U8, Boolean, uint8_t, size_t, )
+ART_GET_FIELD_FROM_MTERP(I16, Short, int16_t, ssize_t, )
+ART_GET_FIELD_FROM_MTERP(U16, Char, uint16_t, size_t, )
+ART_GET_FIELD_FROM_MTERP(U32, 32, uint32_t, size_t, )
+ART_GET_FIELD_FROM_MTERP(U64, 64, uint64_t, uint64_t, )
+ART_GET_FIELD_FROM_MTERP(Obj, Obj, mirror::HeapReference<mirror::Object>, mirror::Object*, .Ptr())
#undef ART_GET_FIELD_FROM_MTERP
-extern "C" ssize_t artSet8InstanceFromMterp(uint32_t field_idx,
- mirror::Object* obj,
- uint8_t new_value,
- ArtMethod* referrer)
+extern "C" ssize_t MterpIPutU8(uint32_t field_idx,
+ mirror::Object* obj,
+ uint8_t new_value,
+ ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (LIKELY(field != nullptr && obj != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- if (type == Primitive::kPrimBoolean) {
- field->SetBoolean<false>(obj, new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimByte, type);
- field->SetByte<false>(obj, new_value);
- }
+ field->SetBoolean<false>(obj, new_value);
return 0; // success
}
return -1; // failure
}
-extern "C" ssize_t artSet16InstanceFromMterp(uint32_t field_idx,
- mirror::Object* obj,
- uint16_t new_value,
- ArtMethod* referrer)
+extern "C" ssize_t MterpIPutI8(uint32_t field_idx,
+ mirror::Object* obj,
+ uint8_t new_value,
+ ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
- sizeof(int16_t));
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (LIKELY(field != nullptr && obj != nullptr)) {
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- if (type == Primitive::kPrimChar) {
- field->SetChar<false>(obj, new_value);
- } else {
- DCHECK_EQ(Primitive::kPrimShort, type);
- field->SetShort<false>(obj, new_value);
- }
+ field->SetByte<false>(obj, new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" ssize_t MterpIPutU16(uint32_t field_idx,
+ mirror::Object* obj,
+ uint16_t new_value,
+ ArtMethod* referrer)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ field->SetChar<false>(obj, new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" ssize_t MterpIPutI16(uint32_t field_idx,
+ mirror::Object* obj,
+ uint16_t new_value,
+ ArtMethod* referrer)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ field->SetShort<false>(obj, new_value);
return 0; // success
}
return -1; // failure
}
-extern "C" ssize_t artSet32InstanceFromMterp(uint32_t field_idx,
- mirror::Object* obj,
- uint32_t new_value,
- ArtMethod* referrer)
+extern "C" ssize_t MterpIPutU32(uint32_t field_idx,
+ mirror::Object* obj,
+ uint32_t new_value,
+ ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
- sizeof(int32_t));
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (LIKELY(field != nullptr && obj != nullptr)) {
field->Set32<false>(obj, new_value);
return 0; // success
@@ -811,13 +823,12 @@ extern "C" ssize_t artSet32InstanceFromMterp(uint32_t field_idx,
return -1; // failure
}
-extern "C" ssize_t artSet64InstanceFromMterp(uint32_t field_idx,
- mirror::Object* obj,
- uint64_t* new_value,
- ArtMethod* referrer)
+extern "C" ssize_t MterpIPutU64(uint32_t field_idx,
+ mirror::Object* obj,
+ uint64_t* new_value,
+ ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
- sizeof(int64_t));
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (LIKELY(field != nullptr && obj != nullptr)) {
field->Set64<false>(obj, *new_value);
return 0; // success
@@ -830,8 +841,7 @@ extern "C" ssize_t artSetObjInstanceFromMterp(uint32_t field_idx,
mirror::Object* new_value,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
- sizeof(mirror::HeapReference<mirror::Object>));
+ ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (LIKELY(field != nullptr && obj != nullptr)) {
field->SetObj<false>(obj, new_value);
return 0; // success
@@ -858,9 +868,9 @@ ALWAYS_INLINE return_type MterpGetStatic(uint32_t field_idx,
return res;
}
-extern "C" int32_t MterpGetBooleanStatic(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int32_t MterpSGetU8(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpGetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
referrer,
@@ -868,9 +878,9 @@ extern "C" int32_t MterpGetBooleanStatic(uint32_t field_idx,
&ArtField::GetBoolean);
}
-extern "C" int32_t MterpGetByteStatic(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int32_t MterpSGetI8(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpGetStatic<int8_t, Primitive::kPrimByte>(field_idx,
referrer,
@@ -878,9 +888,9 @@ extern "C" int32_t MterpGetByteStatic(uint32_t field_idx,
&ArtField::GetByte);
}
-extern "C" uint32_t MterpGetCharStatic(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
+extern "C" uint32_t MterpSGetU16(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpGetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
referrer,
@@ -888,9 +898,9 @@ extern "C" uint32_t MterpGetCharStatic(uint32_t field_idx,
&ArtField::GetChar);
}
-extern "C" int32_t MterpGetShortStatic(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int32_t MterpSGetI16(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpGetStatic<int16_t, Primitive::kPrimShort>(field_idx,
referrer,
@@ -898,9 +908,9 @@ extern "C" int32_t MterpGetShortStatic(uint32_t field_idx,
&ArtField::GetShort);
}
-extern "C" mirror::Object* MterpGetObjStatic(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
+extern "C" mirror::Object* MterpSGetObj(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpGetStatic<ObjPtr<mirror::Object>, Primitive::kPrimNot>(field_idx,
referrer,
@@ -908,9 +918,9 @@ extern "C" mirror::Object* MterpGetObjStatic(uint32_t field_idx,
&ArtField::GetObject).Ptr();
}
-extern "C" int32_t MterpGet32Static(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int32_t MterpSGetU32(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpGetStatic<int32_t, Primitive::kPrimInt>(field_idx,
referrer,
@@ -918,7 +928,7 @@ extern "C" int32_t MterpGet32Static(uint32_t field_idx,
&ArtField::GetInt);
}
-extern "C" int64_t MterpGet64Static(uint32_t field_idx, ArtMethod* referrer, Thread* self)
+extern "C" int64_t MterpSGetU64(uint32_t field_idx, ArtMethod* referrer, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpGetStatic<int64_t, Primitive::kPrimLong>(field_idx,
referrer,
@@ -946,10 +956,10 @@ int MterpSetStatic(uint32_t field_idx,
return res;
}
-extern "C" int MterpSetBooleanStatic(uint32_t field_idx,
- uint8_t new_value,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int MterpSPutU8(uint32_t field_idx,
+ uint8_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpSetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
new_value,
@@ -958,10 +968,10 @@ extern "C" int MterpSetBooleanStatic(uint32_t field_idx,
&ArtField::SetBoolean<false>);
}
-extern "C" int MterpSetByteStatic(uint32_t field_idx,
- int8_t new_value,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int MterpSPutI8(uint32_t field_idx,
+ int8_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpSetStatic<int8_t, Primitive::kPrimByte>(field_idx,
new_value,
@@ -970,10 +980,10 @@ extern "C" int MterpSetByteStatic(uint32_t field_idx,
&ArtField::SetByte<false>);
}
-extern "C" int MterpSetCharStatic(uint32_t field_idx,
- uint16_t new_value,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int MterpSPutU16(uint32_t field_idx,
+ uint16_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpSetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
new_value,
@@ -982,10 +992,10 @@ extern "C" int MterpSetCharStatic(uint32_t field_idx,
&ArtField::SetChar<false>);
}
-extern "C" int MterpSetShortStatic(uint32_t field_idx,
- int16_t new_value,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int MterpSPutI16(uint32_t field_idx,
+ int16_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpSetStatic<int16_t, Primitive::kPrimShort>(field_idx,
new_value,
@@ -994,10 +1004,10 @@ extern "C" int MterpSetShortStatic(uint32_t field_idx,
&ArtField::SetShort<false>);
}
-extern "C" int MterpSet32Static(uint32_t field_idx,
- int32_t new_value,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int MterpSPutU32(uint32_t field_idx,
+ int32_t new_value,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpSetStatic<int32_t, Primitive::kPrimInt>(field_idx,
new_value,
@@ -1006,10 +1016,10 @@ extern "C" int MterpSet32Static(uint32_t field_idx,
&ArtField::SetInt<false>);
}
-extern "C" int MterpSet64Static(uint32_t field_idx,
- int64_t* new_value,
- ArtMethod* referrer,
- Thread* self)
+extern "C" int MterpSPutU64(uint32_t field_idx,
+ int64_t* new_value,
+ ArtMethod* referrer,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
return MterpSetStatic<int64_t, Primitive::kPrimLong>(field_idx,
*new_value,
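
Note: besides the renames, the MterpIPut* bodies above change how the field is located. Instead of FindFieldFast (which carried a size argument and, for the shared 8- and 16-bit helpers, a runtime type dispatch between boolean/byte and char/short), each helper now only consults the referrer's dex cache via GetResolvedField and gives up otherwise; the type is fixed by the symbol name. A self-contained toy of the resulting contract (all Toy* names invented): 0 means the write happened, -1 sends the assembly stub to its MterpPossibleException path.

#include <cstdint>
#include <cstdio>

// Toy model of the new fast path: resolve only through the dex cache,
// never resolve on the spot; any miss is reported to the caller.
struct ToyField { uint8_t storage = 0; void SetBoolean(uint8_t v) { storage = v; } };
struct ToyDexCache {
  ToyField* resolved = nullptr;  // assumption: filled in by a prior slow path
  ToyField* GetResolvedField(uint32_t /*field_idx*/) { return resolved; }
};

int ToyIPutU8(ToyDexCache& cache, uint32_t field_idx, void* obj, uint8_t v) {
  ToyField* field = cache.GetResolvedField(field_idx);
  if (field != nullptr && obj != nullptr) {
    field->SetBoolean(v);  // type is fixed by the symbol, no runtime dispatch
    return 0;              // success
  }
  return -1;               // unresolved field or null object: caller bails out
}

int main() {
  ToyDexCache cache;
  int dummy_obj = 0;
  std::printf("%d\n", ToyIPutU8(cache, 7, &dummy_obj, 1));  // -1: not yet resolved
  ToyField f;
  cache.resolved = &f;
  std::printf("%d\n", ToyIPutU8(cache, 7, &dummy_obj, 1));  // 0: write performed
  return 0;
}
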
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index fd5d647624..394a84924c 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -398,6 +398,8 @@ ENTRY ExecuteMterpImpl
/* File: arm/instruction_start.S */
+ .type artMterpAsmInstructionStart, #object
+ .hidden artMterpAsmInstructionStart
.global artMterpAsmInstructionStart
artMterpAsmInstructionStart = .L_op_nop
.text
@@ -2255,7 +2257,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGet32InstanceFromMterp
+ bl MterpIGetU32
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2285,7 +2287,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGet64InstanceFromMterp
+ bl MterpIGetU64
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2314,7 +2316,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGetObjInstanceFromMterp
+ bl MterpIGetObj
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2346,7 +2348,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGetBooleanInstanceFromMterp
+ bl MterpIGetU8
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2378,7 +2380,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGetByteInstanceFromMterp
+ bl MterpIGetI8
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2410,7 +2412,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGetCharInstanceFromMterp
+ bl MterpIGetU16
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2442,7 +2444,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
- bl artGetShortInstanceFromMterp
+ bl MterpIGetI16
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
@@ -2468,7 +2470,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet32InstanceFromMterp
+ .extern MterpIPutU32
EXPORT_PC
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
@@ -2477,7 +2479,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r2, r2 @ r2<- fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
- bl artSet32InstanceFromMterp
+ bl MterpIPutU32
cmp r0, #0
bne MterpPossibleException
ADVANCE 2 @ advance rPC
@@ -2489,7 +2491,7 @@ artMterpAsmInstructionStart = .L_op_nop
.L_op_iput_wide: /* 0x5a */
/* File: arm/op_iput_wide.S */
/* iput-wide vA, vB, field@CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
@@ -2498,7 +2500,7 @@ artMterpAsmInstructionStart = .L_op_nop
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
- bl artSet64InstanceFromMterp
+ bl MterpIPutU64
cmp r0, #0
bne MterpPossibleException
ADVANCE 2 @ advance rPC
@@ -2514,7 +2516,7 @@ artMterpAsmInstructionStart = .L_op_nop
mov r1, rPC
mov r2, rINST
mov r3, rSELF
- bl MterpIputObject
+ bl MterpIPutObj
cmp r0, #0
beq MterpException
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
@@ -2532,7 +2534,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutU8
EXPORT_PC
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
@@ -2541,7 +2543,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r2, r2 @ r2<- fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
- bl artSet8InstanceFromMterp
+ bl MterpIPutU8
cmp r0, #0
bne MterpPossibleException
ADVANCE 2 @ advance rPC
@@ -2560,7 +2562,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutI8
EXPORT_PC
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
@@ -2569,7 +2571,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r2, r2 @ r2<- fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
- bl artSet8InstanceFromMterp
+ bl MterpIPutI8
cmp r0, #0
bne MterpPossibleException
ADVANCE 2 @ advance rPC
@@ -2588,7 +2590,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutU16
EXPORT_PC
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
@@ -2597,7 +2599,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r2, r2 @ r2<- fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
- bl artSet16InstanceFromMterp
+ bl MterpIPutU16
cmp r0, #0
bne MterpPossibleException
ADVANCE 2 @ advance rPC
@@ -2616,7 +2618,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutI16
EXPORT_PC
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
@@ -2625,7 +2627,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG r2, r2 @ r2<- fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
- bl artSet16InstanceFromMterp
+ bl MterpIPutI16
cmp r0, #0
bne MterpPossibleException
ADVANCE 2 @ advance rPC
@@ -2644,12 +2646,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern MterpGet32Static
+ .extern MterpSGetU32
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl MterpGet32Static
+ bl MterpSGetU32
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2674,12 +2676,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* sget-wide vAA, field@BBBB */
- .extern MterpGet64Static
+ .extern MterpSGetU64
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl MterpGet64Static
+ bl MterpSGetU64
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r9, rINST, lsr #8 @ r9<- AA
VREG_INDEX_TO_ADDR lr, r9 @ lr<- &fp[AA]
@@ -2703,12 +2705,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern MterpGetObjStatic
+ .extern MterpSGetObj
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl MterpGetObjStatic
+ bl MterpSGetObj
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2736,12 +2738,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern MterpGetBooleanStatic
+ .extern MterpSGetU8
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl MterpGetBooleanStatic
+ bl MterpSGetU8
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2769,12 +2771,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern MterpGetByteStatic
+ .extern MterpSGetI8
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl MterpGetByteStatic
+ bl MterpSGetI8
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2802,12 +2804,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern MterpGetCharStatic
+ .extern MterpSGetU16
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl MterpGetCharStatic
+ bl MterpSGetU16
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2835,12 +2837,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field@BBBB */
- .extern MterpGetShortStatic
+ .extern MterpSGetI16
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
- bl MterpGetShortStatic
+ bl MterpSGetI16
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r2, rINST, lsr #8 @ r2<- AA
PREFETCH_INST 2
@@ -2873,7 +2875,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSet32Static
+ bl MterpSPutU32
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2889,7 +2891,7 @@ artMterpAsmInstructionStart = .L_op_nop
*
*/
/* sput-wide vAA, field@BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC
FETCH r0, 1 @ r0<- field ref BBBB
mov r1, rINST, lsr #8 @ r1<- AA
@@ -2897,7 +2899,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSet64Static
+ bl MterpSPutU64
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2913,7 +2915,7 @@ artMterpAsmInstructionStart = .L_op_nop
mov r1, rPC
mov r2, rINST
mov r3, rSELF
- bl MterpSputObject
+ bl MterpSPutObj
cmp r0, #0
beq MterpException
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
@@ -2938,7 +2940,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSetBooleanStatic
+ bl MterpSPutU8
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2964,7 +2966,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSetByteStatic
+ bl MterpSPutI8
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -2990,7 +2992,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSetCharStatic
+ bl MterpSPutU16
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -3016,7 +3018,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr r2, [rFP, #OFF_FP_METHOD]
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSetShortStatic
+ bl MterpSPutI16
cmp r0, #0 @ 0 on success, -1 on failure
bne MterpException
ADVANCE 2 @ Past exception point - now advance rPC
@@ -7513,6 +7515,8 @@ constvalop_long_to_double:
.balign 128
/* File: arm/instruction_end.S */
+ .type artMterpAsmInstructionEnd, #object
+ .hidden artMterpAsmInstructionEnd
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
@@ -7524,6 +7528,8 @@ artMterpAsmInstructionEnd:
*/
/* File: arm/instruction_start_sister.S */
+ .type artMterpAsmSisterStart, #object
+ .hidden artMterpAsmSisterStart
.global artMterpAsmSisterStart
.text
.balign 4
@@ -7593,11 +7599,15 @@ d2l_maybeNaN:
bx lr @ return 0 for NaN
/* File: arm/instruction_end_sister.S */
+ .type artMterpAsmSisterEnd, #object
+ .hidden artMterpAsmSisterEnd
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
/* File: arm/instruction_start_alt.S */
+ .type artMterpAsmAltInstructionStart, #object
+ .hidden artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionStart
artMterpAsmAltInstructionStart = .L_ALT_op_nop
.text
@@ -12213,6 +12223,8 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
.balign 128
/* File: arm/instruction_end_alt.S */
+ .type artMterpAsmAltInstructionEnd, #object
+ .hidden artMterpAsmAltInstructionEnd
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
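
Note: the generated out/ files also gain .type <sym>, #object and .hidden <sym> directives on the artMterpAsm* boundary labels: the labels are typed as data objects rather than code, and hidden ELF visibility keeps them out of the shared library's exported dynamic symbols, so the handler-table boundaries can only be referenced from within the library itself. A rough C++ equivalent of the visibility part, using the GCC/Clang attribute (the symbol name here is illustrative):

#include <cstdint>

// Hidden visibility: referencable inside this shared object, not exported
// from it — roughly what ".hidden artMterpAsmInstructionStart" achieves
// for the assembly label.
__attribute__((visibility("hidden")))
extern "C" const uint8_t kToyInstructionStart[1] = {0};
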
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index 213f7ff842..5f4aa4f256 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -429,6 +429,8 @@ ENTRY ExecuteMterpImpl
/* File: arm64/instruction_start.S */
+ .type artMterpAsmInstructionStart, #object
+ .hidden artMterpAsmInstructionStart
.global artMterpAsmInstructionStart
artMterpAsmInstructionStart = .L_op_nop
.text
@@ -2192,7 +2194,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGet32InstanceFromMterp
+ bl MterpIGetU32
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
ubfx w2, wINST, #8, #4 // w2<- A
@@ -2222,7 +2224,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGet64InstanceFromMterp
+ bl MterpIGetU64
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
ubfx w2, wINST, #8, #4 // w2<- A
PREFETCH_INST 2
@@ -2249,7 +2251,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGetObjInstanceFromMterp
+ bl MterpIGetObj
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
ubfx w2, wINST, #8, #4 // w2<- A
@@ -2281,7 +2283,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGetBooleanInstanceFromMterp
+ bl MterpIGetU8
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
uxtb w0, w0
ubfx w2, wINST, #8, #4 // w2<- A
@@ -2313,7 +2315,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGetByteInstanceFromMterp
+ bl MterpIGetI8
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
sxtb w0, w0
ubfx w2, wINST, #8, #4 // w2<- A
@@ -2345,7 +2347,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGetCharInstanceFromMterp
+ bl MterpIGetU16
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
uxth w0, w0
ubfx w2, wINST, #8, #4 // w2<- A
@@ -2377,7 +2379,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
mov x3, xSELF // w3<- self
- bl artGetShortInstanceFromMterp
+ bl MterpIGetI16
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
sxth w0, w0
ubfx w2, wINST, #8, #4 // w2<- A
@@ -2403,7 +2405,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field//CCCC */
- .extern artSet32InstanceFromMterp
+ .extern MterpIPutU32
EXPORT_PC
FETCH w0, 1 // w0<- field ref CCCC
lsr w1, wINST, #12 // w1<- B
@@ -2412,7 +2414,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w2, w2 // w2<- fp[A]
ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
PREFETCH_INST 2
- bl artSet32InstanceFromMterp
+ bl MterpIPutU32
cbnz w0, MterpPossibleException
ADVANCE 2 // advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2423,7 +2425,7 @@ artMterpAsmInstructionStart = .L_op_nop
.L_op_iput_wide: /* 0x5a */
/* File: arm64/op_iput_wide.S */
/* iput-wide vA, vB, field//CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC
FETCH w0, 1 // w0<- field ref CCCC
lsr w1, wINST, #12 // w1<- B
@@ -2432,7 +2434,7 @@ artMterpAsmInstructionStart = .L_op_nop
VREG_INDEX_TO_ADDR x2, x2 // w2<- &fp[A]
ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
PREFETCH_INST 2
- bl artSet64InstanceFromMterp
+ bl MterpIPutU64
cbnz w0, MterpPossibleException
ADVANCE 2 // advance rPC
GET_INST_OPCODE ip // extract opcode from wINST
@@ -2447,7 +2449,7 @@ artMterpAsmInstructionStart = .L_op_nop
mov x1, xPC
mov w2, wINST
mov x3, xSELF
- bl MterpIputObject
+ bl MterpIPutObj
cbz w0, MterpException
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2464,7 +2466,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field//CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutU8
EXPORT_PC
FETCH w0, 1 // w0<- field ref CCCC
lsr w1, wINST, #12 // w1<- B
@@ -2473,7 +2475,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w2, w2 // w2<- fp[A]
ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
PREFETCH_INST 2
- bl artSet8InstanceFromMterp
+ bl MterpIPutU8
cbnz w0, MterpPossibleException
ADVANCE 2 // advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2491,7 +2493,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field//CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutI8
EXPORT_PC
FETCH w0, 1 // w0<- field ref CCCC
lsr w1, wINST, #12 // w1<- B
@@ -2500,7 +2502,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w2, w2 // w2<- fp[A]
ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
PREFETCH_INST 2
- bl artSet8InstanceFromMterp
+ bl MterpIPutI8
cbnz w0, MterpPossibleException
ADVANCE 2 // advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2518,7 +2520,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field//CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutU16
EXPORT_PC
FETCH w0, 1 // w0<- field ref CCCC
lsr w1, wINST, #12 // w1<- B
@@ -2527,7 +2529,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w2, w2 // w2<- fp[A]
ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
PREFETCH_INST 2
- bl artSet16InstanceFromMterp
+ bl MterpIPutU16
cbnz w0, MterpPossibleException
ADVANCE 2 // advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2545,7 +2547,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field//CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutI16
EXPORT_PC
FETCH w0, 1 // w0<- field ref CCCC
lsr w1, wINST, #12 // w1<- B
@@ -2554,7 +2556,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG w2, w2 // w2<- fp[A]
ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
PREFETCH_INST 2
- bl artSet16InstanceFromMterp
+ bl MterpIPutI16
cbnz w0, MterpPossibleException
ADVANCE 2 // advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2572,12 +2574,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern MterpGet32Static
+ .extern MterpSGetU32
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl MterpGet32Static
+ bl MterpSGetU32
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
@@ -2607,7 +2609,7 @@ artMterpAsmInstructionStart = .L_op_nop
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl MterpGet64Static
+ bl MterpSGetU64
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w4, wINST, #8 // w4<- AA
cbnz x3, MterpException // bail out
@@ -2628,12 +2630,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern MterpGetObjStatic
+ .extern MterpSGetObj
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl MterpGetObjStatic
+ bl MterpSGetObj
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
@@ -2661,12 +2663,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern MterpGetBooleanStatic
+ .extern MterpSGetU8
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl MterpGetBooleanStatic
+ bl MterpSGetU8
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
uxtb w0, w0
@@ -2694,12 +2696,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern MterpGetByteStatic
+ .extern MterpSGetI8
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl MterpGetByteStatic
+ bl MterpSGetI8
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
sxtb w0, w0
@@ -2727,12 +2729,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern MterpGetCharStatic
+ .extern MterpSGetU16
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl MterpGetCharStatic
+ bl MterpSGetU16
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
uxth w0, w0
@@ -2760,12 +2762,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, field//BBBB */
- .extern MterpGetShortStatic
+ .extern MterpSGetI16
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
- bl MterpGetShortStatic
+ bl MterpSGetI16
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- AA
sxth w0, w0
@@ -2798,7 +2800,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSet32Static
+ bl MterpSPutU32
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2813,7 +2815,7 @@ artMterpAsmInstructionStart = .L_op_nop
*
*/
/* sput-wide vAA, field//BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC
FETCH w0, 1 // w0<- field ref BBBB
lsr w1, wINST, #8 // w1<- AA
@@ -2821,7 +2823,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSet64Static
+ bl MterpSPutU64
cbnz w0, MterpException // 0 on success, -1 on failure
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from wINST
@@ -2836,7 +2838,7 @@ artMterpAsmInstructionStart = .L_op_nop
mov x1, xPC
mov x2, xINST
mov x3, xSELF
- bl MterpSputObject
+ bl MterpSPutObj
cbz w0, MterpException
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2860,7 +2862,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSetBooleanStatic
+ bl MterpSPutU8
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2885,7 +2887,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSetByteStatic
+ bl MterpSPutI8
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2910,7 +2912,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSetCharStatic
+ bl MterpSPutU16
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -2935,7 +2937,7 @@ artMterpAsmInstructionStart = .L_op_nop
ldr x2, [xFP, #OFF_FP_METHOD]
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSetShortStatic
+ bl MterpSPutI16
cbnz w0, MterpException // 0 on success
ADVANCE 2 // Past exception point - now advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
@@ -7078,6 +7080,8 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
/* File: arm64/instruction_end.S */
+ .type artMterpAsmInstructionEnd, #object
+ .hidden artMterpAsmInstructionEnd
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
@@ -7089,6 +7093,8 @@ artMterpAsmInstructionEnd:
*/
/* File: arm64/instruction_start_sister.S */
+ .type artMterpAsmSisterStart, #object
+ .hidden artMterpAsmSisterStart
.global artMterpAsmSisterStart
.text
.balign 4
@@ -7096,6 +7102,8 @@ artMterpAsmSisterStart:
/* File: arm64/instruction_end_sister.S */
+ .type artMterpAsmSisterEnd, #object
+ .hidden artMterpAsmSisterEnd
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
@@ -7409,6 +7417,8 @@ MterpProfileActive:
/* File: arm64/instruction_start_alt.S */
+ .type artMterpAsmAltInstructionStart, #object
+ .hidden artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionStart
artMterpAsmAltInstructionStart = .L_ALT_op_nop
.text
@@ -11768,6 +11778,8 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
.balign 128
/* File: arm64/instruction_end_alt.S */
+ .type artMterpAsmAltInstructionEnd, #object
+ .hidden artMterpAsmAltInstructionEnd
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
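
The arm64 hunks above apply one mechanical rename: the Mterp field-access helpers drop Java-type spellings (MterpGet32Static, MterpGetBooleanStatic, MterpSetCharStatic, ...) in favor of names encoding access kind, signedness, and width (MterpSGetU32, MterpSGetU8, MterpSPutU16, ...). The U/I letter matches the widening each handler already performs on the result: boolean and char values are zero-extended (the uxtb/uxth above), byte and short values are sign-extended (sxtb/sxth). A minimal C++ sketch of the getter family, assuming the argument order implied by the register setup (field index in w0, referring method in x1, self in x2) rather than quoting the verbatim ART prototypes:

    #include <cstdint>
    class ArtMethod;  // ART's method descriptor; declaration only, for the sketch
    class Thread;     // ART's thread handle; declaration only, for the sketch

    // Static-field getters named <kind><signedness><width>; the return type's
    // width and signedness match what the assembly widens into the dest vreg.
    extern "C" uint32_t MterpSGetU32(uint32_t field_idx, ArtMethod* referrer, Thread* self);
    extern "C" uint64_t MterpSGetU64(uint32_t field_idx, ArtMethod* referrer, Thread* self);
    extern "C" uint8_t  MterpSGetU8 (uint32_t field_idx, ArtMethod* referrer, Thread* self); // sget-boolean
    extern "C" int8_t   MterpSGetI8 (uint32_t field_idx, ArtMethod* referrer, Thread* self); // sget-byte
    extern "C" uint16_t MterpSGetU16(uint32_t field_idx, ArtMethod* referrer, Thread* self); // sget-char
    extern "C" int16_t  MterpSGetI16(uint32_t field_idx, ArtMethod* referrer, Thread* self); // sget-short
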
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index c749057ee6..fb7d52eed4 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -2677,7 +2677,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGet32InstanceFromMterp)
+ JAL(MterpIGetU32)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2706,7 +2706,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGet64InstanceFromMterp)
+ JAL(MterpIGetU64)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2732,7 +2732,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGetObjInstanceFromMterp)
+ JAL(MterpIGetObj)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2763,7 +2763,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGetBooleanInstanceFromMterp)
+ JAL(MterpIGetU8)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2794,7 +2794,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGetByteInstanceFromMterp)
+ JAL(MterpIGetI8)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2825,7 +2825,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGetCharInstanceFromMterp)
+ JAL(MterpIGetU16)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2856,7 +2856,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- JAL(artGetShortInstanceFromMterp)
+ JAL(MterpIGetI16)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
@@ -2880,7 +2880,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet32InstanceFromMterp
+ .extern MterpIPutU32
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -2889,7 +2889,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a2, a2) # a2 <- fp[A]
lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST(2) # load rINST
- JAL(artSet32InstanceFromMterp)
+ JAL(MterpIPutU32)
bnez v0, MterpPossibleException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -2900,7 +2900,7 @@ artMterpAsmInstructionStart = .L_op_nop
.L_op_iput_wide: /* 0x5a */
/* File: mips/op_iput_wide.S */
/* iput-wide vA, vB, field@CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -2909,7 +2909,7 @@ artMterpAsmInstructionStart = .L_op_nop
EAS2(a2, rFP, a2) # a2 <- &fp[A]
lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST(2) # load rINST
- JAL(artSet64InstanceFromMterp)
+ JAL(MterpIPutU64)
bnez v0, MterpPossibleException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -2930,7 +2930,7 @@ artMterpAsmInstructionStart = .L_op_nop
move a1, rPC
move a2, rINST
move a3, rSELF
- JAL(MterpIputObject)
+ JAL(MterpIPutObj)
beqz v0, MterpException
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -2947,7 +2947,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutU8
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -2956,7 +2956,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a2, a2) # a2 <- fp[A]
lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST(2) # load rINST
- JAL(artSet8InstanceFromMterp)
+ JAL(MterpIPutU8)
bnez v0, MterpPossibleException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -2974,7 +2974,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutI8
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -2983,7 +2983,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a2, a2) # a2 <- fp[A]
lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST(2) # load rINST
- JAL(artSet8InstanceFromMterp)
+ JAL(MterpIPutI8)
bnez v0, MterpPossibleException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3001,7 +3001,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutU16
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -3010,7 +3010,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a2, a2) # a2 <- fp[A]
lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST(2) # load rINST
- JAL(artSet16InstanceFromMterp)
+ JAL(MterpIPutU16)
bnez v0, MterpPossibleException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3028,7 +3028,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutI16
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -3037,7 +3037,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG(a2, a2) # a2 <- fp[A]
lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST(2) # load rINST
- JAL(artSet16InstanceFromMterp)
+ JAL(MterpIPutI16)
bnez v0, MterpPossibleException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3054,12 +3054,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGet32Static
+ .extern MterpSGetU32
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(MterpGet32Static)
+ JAL(MterpSGetU32)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3080,12 +3080,12 @@ artMterpAsmInstructionStart = .L_op_nop
* 64-bit SGET handler.
*/
/* sget-wide vAA, field@BBBB */
- .extern MterpGet64Static
+ .extern MterpSGetU64
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(MterpGet64Static)
+ JAL(MterpSGetU64)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
bnez a3, MterpException
GET_OPA(a1) # a1 <- AA
@@ -3104,12 +3104,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGetObjStatic
+ .extern MterpSGetObj
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(MterpGetObjStatic)
+ JAL(MterpSGetObj)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3134,12 +3134,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGetBooleanStatic
+ .extern MterpSGetU8
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(MterpGetBooleanStatic)
+ JAL(MterpSGetU8)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3164,12 +3164,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGetByteStatic
+ .extern MterpSGetI8
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(MterpGetByteStatic)
+ JAL(MterpSGetI8)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3194,12 +3194,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGetCharStatic
+ .extern MterpSGetU16
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(MterpGetCharStatic)
+ JAL(MterpSGetU16)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3224,12 +3224,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGetShortStatic
+ .extern MterpSGetI16
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
lw a1, OFF_FP_METHOD(rFP) # a1 <- method
move a2, rSELF # a2 <- self
- JAL(MterpGetShortStatic)
+ JAL(MterpSGetI16)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
@@ -3260,7 +3260,7 @@ artMterpAsmInstructionStart = .L_op_nop
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(MterpSet32Static)
+ JAL(MterpSPutU32)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3274,7 +3274,7 @@ artMterpAsmInstructionStart = .L_op_nop
* 64-bit SPUT handler.
*/
/* sput-wide vAA, field@BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
GET_OPA(a1) # a1 <- AA
@@ -3282,7 +3282,7 @@ artMterpAsmInstructionStart = .L_op_nop
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(MterpSet64Static)
+ JAL(MterpSPutU64)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3303,7 +3303,7 @@ artMterpAsmInstructionStart = .L_op_nop
move a1, rPC
move a2, rINST
move a3, rSELF
- JAL(MterpSputObject)
+ JAL(MterpSPutObj)
beqz v0, MterpException
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3327,7 +3327,7 @@ artMterpAsmInstructionStart = .L_op_nop
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(MterpSetBooleanStatic)
+ JAL(MterpSPutU8)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3352,7 +3352,7 @@ artMterpAsmInstructionStart = .L_op_nop
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(MterpSetByteStatic)
+ JAL(MterpSPutI8)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3377,7 +3377,7 @@ artMterpAsmInstructionStart = .L_op_nop
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(MterpSetCharStatic)
+ JAL(MterpSPutU16)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -3402,7 +3402,7 @@ artMterpAsmInstructionStart = .L_op_nop
lw a2, OFF_FP_METHOD(rFP) # a2 <- method
move a3, rSELF # a3 <- self
PREFETCH_INST(2) # load rINST
- JAL(MterpSetShortStatic)
+ JAL(MterpSPutI16)
bnez v0, MterpException # bail out
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
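
The mips hunks make the helpers' status convention easy to read off: every put handler tests the call result (bnez v0 here, cbnz w0 on arm64, testb %al, %al on x86) and diverts to MterpException on non-zero, matching the "0 on success" comments. A sketch of the setter shape under that assumption (illustrative signature, not the verbatim ART prototype):

    #include <cstdint>
    class ArtMethod;
    class Thread;

    // Returns 0 on success; non-zero means an exception is now pending on
    // `self` and the interpreter must take its exception path instead of
    // advancing past the instruction.
    extern "C" int MterpSPutU32(uint32_t field_idx, uint32_t new_value,
                                ArtMethod* referrer, Thread* self);
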
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index f9b270b01e..65616919c9 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -2246,14 +2246,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGet32InstanceFromMterp
+ .extern MterpIGetU32
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGet32InstanceFromMterp
+ jal MterpIGetU32
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2276,14 +2276,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget-wide
*/
- .extern artGet64InstanceFromMterp
+ .extern MterpIGetU64
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGet64InstanceFromMterp
+ jal MterpIGetU64
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2303,14 +2303,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGetObjInstanceFromMterp
+ .extern MterpIGetObj
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGetObjInstanceFromMterp
+ jal MterpIGetObj
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2335,14 +2335,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGetBooleanInstanceFromMterp
+ .extern MterpIGetU8
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGetBooleanInstanceFromMterp
+ jal MterpIGetU8
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2367,14 +2367,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGetByteInstanceFromMterp
+ .extern MterpIGetI8
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGetByteInstanceFromMterp
+ jal MterpIGetI8
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2399,14 +2399,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGetCharInstanceFromMterp
+ .extern MterpIGetU16
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGetCharInstanceFromMterp
+ jal MterpIGetU16
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2431,14 +2431,14 @@ artMterpAsmInstructionStart = .L_op_nop
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
- .extern artGetShortInstanceFromMterp
+ .extern MterpIGetI16
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
- jal artGetShortInstanceFromMterp
+ jal MterpIGetI16
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
ext a2, rINST, 8, 4 # a2 <- A
PREFETCH_INST 2
@@ -2463,7 +2463,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field//CCCC */
- .extern artSet32InstanceFromMterp
+ .extern MterpIPutU32
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
@@ -2472,7 +2472,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG a2, a2 # a2 <- fp[A]
ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST 2
- jal artSet32InstanceFromMterp
+ jal MterpIPutU32
bnez v0, MterpPossibleException # bail out
ADVANCE 2
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2483,7 +2483,7 @@ artMterpAsmInstructionStart = .L_op_nop
.L_op_iput_wide: /* 0x5a */
/* File: mips64/op_iput_wide.S */
/* iput-wide vA, vB, field//CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
@@ -2492,7 +2492,7 @@ artMterpAsmInstructionStart = .L_op_nop
dlsa a2, a2, rFP, 2 # a2 <- &fp[A]
ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST 2
- jal artSet64InstanceFromMterp
+ jal MterpIPutU64
bnez v0, MterpPossibleException # bail out
ADVANCE 2
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2502,13 +2502,13 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
.L_op_iput_object: /* 0x5b */
/* File: mips64/op_iput_object.S */
- .extern MterpIputObject
+ .extern MterpIPutObj
EXPORT_PC
daddu a0, rFP, OFF_FP_SHADOWFRAME
move a1, rPC
move a2, rINST
move a3, rSELF
- jal MterpIputObject
+ jal MterpIPutObj
beqzc v0, MterpException
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2525,7 +2525,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field//CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutU8
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
@@ -2534,7 +2534,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG a2, a2 # a2 <- fp[A]
ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST 2
- jal artSet8InstanceFromMterp
+ jal MterpIPutU8
bnez v0, MterpPossibleException # bail out
ADVANCE 2
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2552,7 +2552,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field//CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutI8
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
@@ -2561,7 +2561,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG a2, a2 # a2 <- fp[A]
ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST 2
- jal artSet8InstanceFromMterp
+ jal MterpIPutI8
bnez v0, MterpPossibleException # bail out
ADVANCE 2
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2579,7 +2579,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field//CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutU16
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
@@ -2588,7 +2588,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG a2, a2 # a2 <- fp[A]
ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST 2
- jal artSet16InstanceFromMterp
+ jal MterpIPutU16
bnez v0, MterpPossibleException # bail out
ADVANCE 2
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2606,7 +2606,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field//CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutI16
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref CCCC
srl a1, rINST, 12 # a1 <- B
@@ -2615,7 +2615,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_VREG a2, a2 # a2 <- fp[A]
ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
PREFETCH_INST 2
- jal artSet16InstanceFromMterp
+ jal MterpIPutI16
bnez v0, MterpPossibleException # bail out
ADVANCE 2
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2632,12 +2632,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern MterpGet32Static
+ .extern MterpSGetU32
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal MterpGet32Static
+ jal MterpSGetU32
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
@@ -2661,12 +2661,12 @@ artMterpAsmInstructionStart = .L_op_nop
*
*/
/* sget-wide vAA, field//BBBB */
- .extern MterpGet64Static
+ .extern MterpSGetU64
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal MterpGet64Static
+ jal MterpSGetU64
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a4, rINST, 8 # a4 <- AA
bnez a3, MterpException # bail out
@@ -2686,12 +2686,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern MterpGetObjStatic
+ .extern MterpSGetObj
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal MterpGetObjStatic
+ jal MterpSGetObj
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
@@ -2718,12 +2718,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern MterpGetBooleanStatic
+ .extern MterpSGetU8
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal MterpGetBooleanStatic
+ jal MterpSGetU8
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
and v0, v0, 0xff
@@ -2750,12 +2750,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern MterpGetByteStatic
+ .extern MterpSGetI8
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal MterpGetByteStatic
+ jal MterpSGetI8
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
seb v0, v0
@@ -2782,12 +2782,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern MterpGetCharStatic
+ .extern MterpSGetU16
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal MterpGetCharStatic
+ jal MterpSGetU16
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
and v0, v0, 0xffff
@@ -2814,12 +2814,12 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field//BBBB */
- .extern MterpGetShortStatic
+ .extern MterpSGetI16
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
ld a1, OFF_FP_METHOD(rFP)
move a2, rSELF
- jal MterpGetShortStatic
+ jal MterpSGetI16
ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
srl a2, rINST, 8 # a2 <- AA
seh v0, v0
@@ -2845,7 +2845,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern MterpSet32Static
+ .extern MterpSPutU32
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2853,7 +2853,7 @@ artMterpAsmInstructionStart = .L_op_nop
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSet32Static
+ jal MterpSPutU32
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2868,7 +2868,7 @@ artMterpAsmInstructionStart = .L_op_nop
*
*/
/* sput-wide vAA, field//BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a1, rINST, 8 # a1 <- AA
@@ -2876,7 +2876,7 @@ artMterpAsmInstructionStart = .L_op_nop
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSet64Static
+ jal MterpSPutU64
bnezc v0, MterpException # 0 on success, -1 on failure
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2886,13 +2886,13 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
.L_op_sput_object: /* 0x69 */
/* File: mips64/op_sput_object.S */
- .extern MterpSputObject
+ .extern MterpSPutObj
EXPORT_PC
daddu a0, rFP, OFF_FP_SHADOWFRAME
move a1, rPC
move a2, rINST
move a3, rSELF
- jal MterpSputObject
+ jal MterpSPutObj
beqzc v0, MterpException
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2909,7 +2909,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern MterpSetBooleanStatic
+ .extern MterpSPutU8
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2917,7 +2917,7 @@ artMterpAsmInstructionStart = .L_op_nop
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSetBooleanStatic
+ jal MterpSPutU8
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2935,7 +2935,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern MterpSetByteStatic
+ .extern MterpSPutI8
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2943,7 +2943,7 @@ artMterpAsmInstructionStart = .L_op_nop
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSetByteStatic
+ jal MterpSPutI8
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2961,7 +2961,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern MterpSetCharStatic
+ .extern MterpSPutU16
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2969,7 +2969,7 @@ artMterpAsmInstructionStart = .L_op_nop
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSetCharStatic
+ jal MterpSPutU16
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
@@ -2987,7 +2987,7 @@ artMterpAsmInstructionStart = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field//BBBB */
- .extern MterpSetShortStatic
+ .extern MterpSPutI16
EXPORT_PC
lhu a0, 2(rPC) # a0 <- field ref BBBB
srl a3, rINST, 8 # a3 <- AA
@@ -2995,7 +2995,7 @@ artMterpAsmInstructionStart = .L_op_nop
ld a2, OFF_FP_METHOD(rFP)
move a3, rSELF
PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSetShortStatic
+ jal MterpSPutI16
bnezc v0, MterpException # 0 on success
ADVANCE 2 # Past exception point - now advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
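
Note the put-side split visible in the mips64 hunks: iput-boolean and iput-byte formerly shared artSet8InstanceFromMterp (and iput-char/iput-short shared artSet16InstanceFromMterp), but now call distinct MterpIPutU8/MterpIPutI8 and MterpIPutU16/MterpIPutI16. A narrow store ignores the upper bits either way; the split keeps the put names symmetric with the get side, where signedness is observable in the widened result. A small C++ illustration of that observable difference (hypothetical helper names, for exposition only):

    #include <cstdint>

    // Models the result widening after an 8-bit field load.
    static inline uint32_t WidenBooleanU8(uint8_t raw) {
      return raw;                                               // zero-extend, as uxtb / and 0xff
    }
    static inline uint32_t WidenByteI8(int8_t raw) {
      return static_cast<uint32_t>(static_cast<int32_t>(raw));  // sign-extend, as sxtb / seb
    }
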
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index ad74b29871..3f709199b9 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -103,6 +103,7 @@ unspecified registers or condition codes.
#if defined(__APPLE__)
#define MACRO_LITERAL(value) $(value)
#define FUNCTION_TYPE(name)
+ #define OBJECT_TYPE(name)
#define SIZE(start,end)
// Mac OS' symbols have an _ prefix.
#define SYMBOL(name) _ ## name
@@ -110,6 +111,7 @@ unspecified registers or condition codes.
#else
#define MACRO_LITERAL(value) $value
#define FUNCTION_TYPE(name) .type name, @function
+ #define OBJECT_TYPE(name) .type name, @object
#define SIZE(start,end) .size start, .-end
#define SYMBOL(name) name
#define ASM_HIDDEN .hidden
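
The new OBJECT_TYPE macro parallels the existing FUNCTION_TYPE one: on ELF targets it expands to `.type name, @object`, typing the artMterpAsm*Start/*End boundary symbols as data rather than code, and ASM_HIDDEN gives them hidden visibility so references to them stay module-local. Under __APPLE__ both expand to nothing, since Mach-O assemblers lack equivalent directives; that is why the x86 fragments below go through the macros, while the ELF-only arm64 file earlier writes `.type ..., #object` and `.hidden` out literally.
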
@@ -407,6 +409,8 @@ SYMBOL(ExecuteMterpImpl):
/* File: x86/instruction_start.S */
+ OBJECT_TYPE(artMterpAsmInstructionStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
.global SYMBOL(artMterpAsmInstructionStart)
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.text
@@ -2132,7 +2136,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGet32InstanceFromMterp)
+ call SYMBOL(MterpIGetU32)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2165,7 +2169,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGet64InstanceFromMterp)
+ call SYMBOL(MterpIGetU64)
mov rSELF, %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
@@ -2196,7 +2200,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGetObjInstanceFromMterp)
+ call SYMBOL(MterpIGetObj)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2231,7 +2235,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGetBooleanInstanceFromMterp)
+ call SYMBOL(MterpIGetU8)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2266,7 +2270,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGetByteInstanceFromMterp)
+ call SYMBOL(MterpIGetI8)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2301,7 +2305,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGetCharInstanceFromMterp)
+ call SYMBOL(MterpIGetU16)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2336,7 +2340,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGetShortInstanceFromMterp)
+ call SYMBOL(MterpIGetI16)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2360,7 +2364,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet32InstanceFromMterp
+ .extern MterpIPutU32
EXPORT_PC
movzwl 2(rPC), %eax # eax<- 0000CCCC
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2373,7 +2377,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(artSet32InstanceFromMterp)
+ call SYMBOL(MterpIPutU32)
testb %al, %al
jnz MterpPossibleException
RESTORE_IBASE
@@ -2384,7 +2388,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.L_op_iput_wide: /* 0x5a */
/* File: x86/op_iput_wide.S */
/* iput-wide vA, vB, field@CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC
movzwl 2(rPC), %eax # eax <- 0000CCCC
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2397,7 +2401,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # &fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(artSet64InstanceFromMterp)
+ call SYMBOL(MterpIPutU64)
testb %al, %al
jnz MterpPossibleException
RESTORE_IBASE
@@ -2415,7 +2419,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl rINST, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpIputObject)
+ call SYMBOL(MterpIPutObj)
testb %al, %al
jz MterpException
RESTORE_IBASE
@@ -2432,7 +2436,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutU8
EXPORT_PC
movzwl 2(rPC), %eax # eax<- 0000CCCC
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2445,7 +2449,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(artSet8InstanceFromMterp)
+ call SYMBOL(MterpIPutU8)
testb %al, %al
jnz MterpPossibleException
RESTORE_IBASE
@@ -2463,7 +2467,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutI8
EXPORT_PC
movzwl 2(rPC), %eax # eax<- 0000CCCC
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2476,7 +2480,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(artSet8InstanceFromMterp)
+ call SYMBOL(MterpIPutI8)
testb %al, %al
jnz MterpPossibleException
RESTORE_IBASE
@@ -2494,7 +2498,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutU16
EXPORT_PC
movzwl 2(rPC), %eax # eax<- 0000CCCC
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2507,7 +2511,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(artSet16InstanceFromMterp)
+ call SYMBOL(MterpIPutU16)
testb %al, %al
jnz MterpPossibleException
RESTORE_IBASE
@@ -2525,7 +2529,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutI16
EXPORT_PC
movzwl 2(rPC), %eax # eax<- 0000CCCC
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -2538,7 +2542,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(artSet16InstanceFromMterp)
+ call SYMBOL(MterpIPutI16)
testb %al, %al
jnz MterpPossibleException
RESTORE_IBASE
@@ -2555,7 +2559,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGet32Static
+ .extern MterpSGetU32
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2563,7 +2567,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpGet32Static)
+ call SYMBOL(MterpSGetU32)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2584,7 +2588,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
*
*/
/* sget-wide vAA, field@BBBB */
- .extern MterpGet64Static
+ .extern MterpSGetU64
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2592,7 +2596,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpGet64Static)
+ call SYMBOL(MterpSGetU64)
movl rSELF, %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
@@ -2612,7 +2616,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGetObjStatic
+ .extern MterpSGetObj
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2620,7 +2624,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpGetObjStatic)
+ call SYMBOL(MterpSGetObj)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2644,7 +2648,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGetBooleanStatic
+ .extern MterpSGetU8
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2652,7 +2656,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpGetBooleanStatic)
+ call SYMBOL(MterpSGetU8)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2676,7 +2680,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGetByteStatic
+ .extern MterpSGetI8
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2684,7 +2688,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpGetByteStatic)
+ call SYMBOL(MterpSGetI8)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2708,7 +2712,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGetCharStatic
+ .extern MterpSGetU16
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2716,7 +2720,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpGetCharStatic)
+ call SYMBOL(MterpSGetU16)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2740,7 +2744,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
/* op vAA, field@BBBB */
- .extern MterpGetShortStatic
+ .extern MterpSGetI16
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2748,7 +2752,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpGetShortStatic)
+ call SYMBOL(MterpSGetI16)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
@@ -2771,7 +2775,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern MterpSet32Static
+ .extern MterpSPutU32
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2781,7 +2785,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSet32Static)
+ call SYMBOL(MterpSPutU32)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2796,7 +2800,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
*
*/
/* sput-wide vAA, field@BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2806,7 +2810,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSet64Static)
+ call SYMBOL(MterpSPutU64)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2824,7 +2828,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl rINST, OUT_ARG2(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpSputObject)
+ call SYMBOL(MterpSPutObj)
testb %al, %al
jz MterpException
RESTORE_IBASE
@@ -2841,7 +2845,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern MterpSetBooleanStatic
+ .extern MterpSPutU8
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2851,7 +2855,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSetBooleanStatic)
+ call SYMBOL(MterpSPutU8)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2869,7 +2873,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern MterpSetByteStatic
+ .extern MterpSPutI8
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2879,7 +2883,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSetByteStatic)
+ call SYMBOL(MterpSPutI8)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2897,7 +2901,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern MterpSetCharStatic
+ .extern MterpSPutU16
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2907,7 +2911,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSetCharStatic)
+ call SYMBOL(MterpSPutU16)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -2925,7 +2929,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern MterpSetShortStatic
+ .extern MterpSPutI16
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -2935,7 +2939,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSetShortStatic)
+ call SYMBOL(MterpSPutI16)
testb %al, %al
jnz MterpException
RESTORE_IBASE
@@ -6473,6 +6477,8 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.balign 128
/* File: x86/instruction_end.S */
+ OBJECT_TYPE(artMterpAsmInstructionEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
.global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):
@@ -6484,6 +6490,8 @@ SYMBOL(artMterpAsmInstructionEnd):
*/
/* File: x86/instruction_start_sister.S */
+ OBJECT_TYPE(artMterpAsmSisterStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
.global SYMBOL(artMterpAsmSisterStart)
.text
.balign 4
@@ -6491,11 +6499,15 @@ SYMBOL(artMterpAsmSisterStart):
/* File: x86/instruction_end_sister.S */
+ OBJECT_TYPE(artMterpAsmSisterEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
.global SYMBOL(artMterpAsmSisterEnd)
SYMBOL(artMterpAsmSisterEnd):
/* File: x86/instruction_start_alt.S */
+ OBJECT_TYPE(artMterpAsmAltInstructionStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
.global SYMBOL(artMterpAsmAltInstructionStart)
.text
SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
@@ -12647,6 +12659,8 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
.balign 128
/* File: x86/instruction_end_alt.S */
+ OBJECT_TYPE(artMterpAsmAltInstructionEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
.global SYMBOL(artMterpAsmAltInstructionEnd)
SYMBOL(artMterpAsmAltInstructionEnd):
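
Across the ports the sget handlers share the same shape around the renamed call; the 32-bit x86 file stages arguments into OUT_ARGn(%esp) stack slots where the 64-bit ports use argument registers. A compilable pseudo-C++ sketch of that shared flow (the Thread and vreg stand-ins are invented for illustration; the real logic is the generated assembly above):

    #include <cstdint>

    struct Thread { const void* exception = nullptr; };  // stands in for THREAD_EXCEPTION_OFFSET
    class ArtMethod;
    extern "C" uint32_t MterpSGetU32(uint32_t, ArtMethod*, Thread*);  // the renamed helper

    static uint32_t vregs[16];  // stands in for the fp[] virtual-register array

    // Mirrors the hunks above: call the helper, test the thread-local
    // exception slot, bail on non-null, otherwise write vAA and continue.
    bool HandleSgetU32(uint32_t field_ref, ArtMethod* referrer, Thread* self, uint32_t vAA) {
      uint32_t value = MterpSGetU32(field_ref, referrer, self);
      if (self->exception != nullptr) {
        return false;                    // jnz MterpException
      }
      vregs[vAA] = value;                // SET_VREG
      return true;                       // then ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
    }
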
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index 56d68e6caa..89d56372af 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -99,6 +99,7 @@ unspecified registers or condition codes.
#if defined(__APPLE__)
#define MACRO_LITERAL(value) $(value)
#define FUNCTION_TYPE(name)
+ #define OBJECT_TYPE(name)
#define SIZE(start,end)
// Mac OS' symbols have an _ prefix.
#define SYMBOL(name) _ ## name
@@ -106,6 +107,7 @@ unspecified registers or condition codes.
#else
#define MACRO_LITERAL(value) $value
#define FUNCTION_TYPE(name) .type name, @function
+ #define OBJECT_TYPE(name) .type name, @object
#define SIZE(start,end) .size start, .-end
#define SYMBOL(name) name
#define ASM_HIDDEN .hidden
@@ -389,6 +391,8 @@ SYMBOL(ExecuteMterpImpl):
/* File: x86_64/instruction_start.S */
+ OBJECT_TYPE(artMterpAsmInstructionStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
.global SYMBOL(artMterpAsmInstructionStart)
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.text
@@ -2075,7 +2079,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGet32InstanceFromMterp)
+ call SYMBOL(MterpIGetU32)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2108,7 +2112,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGet64InstanceFromMterp)
+ call SYMBOL(MterpIGetU64)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2142,7 +2146,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGetObjInstanceFromMterp)
+ call SYMBOL(MterpIGetObj)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2176,7 +2180,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGetBooleanInstanceFromMterp)
+ call SYMBOL(MterpIGetU8)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2210,7 +2214,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGetByteInstanceFromMterp)
+ call SYMBOL(MterpIGetI8)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2244,7 +2248,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGetCharInstanceFromMterp)
+ call SYMBOL(MterpIGetU16)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2278,7 +2282,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
- call SYMBOL(artGetShortInstanceFromMterp)
+ call SYMBOL(MterpIGetI16)
movq rSELF, %rcx
cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
@@ -2305,7 +2309,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet32InstanceFromMterp
+ .extern MterpIPutU32
EXPORT_PC
movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
movzbq rINSTbl, %rcx # rcx<- BA
@@ -2314,7 +2318,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
andb $0xf, rINSTbl # rINST<- A
GET_VREG OUT_32_ARG2, rINSTq # fp[A]
movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(artSet32InstanceFromMterp)
+ call SYMBOL(MterpIPutU32)
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2324,7 +2328,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.L_op_iput_wide: /* 0x5a */
/* File: x86_64/op_iput_wide.S */
/* iput-wide vA, vB, field@CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movzbq rINSTbl, %rcx # rcx <- BA
@@ -2333,7 +2337,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
andb $0xf, rINSTbl # rINST <- A
leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[A]
movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(artSet64InstanceFromMterp)
+ call SYMBOL(MterpIPutU64)
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2348,7 +2352,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
REFRESH_INST 91
movl rINST, OUT_32_ARG2
movq rSELF, OUT_ARG3
- call SYMBOL(MterpIputObject)
+ call SYMBOL(MterpIPutObj)
testb %al, %al
jz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2364,7 +2368,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutU8
EXPORT_PC
movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
movzbq rINSTbl, %rcx # rcx<- BA
@@ -2373,7 +2377,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
andb $0xf, rINSTbl # rINST<- A
GET_VREG OUT_32_ARG2, rINSTq # fp[A]
movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(artSet8InstanceFromMterp)
+ call SYMBOL(MterpIPutU8)
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2390,7 +2394,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet8InstanceFromMterp
+ .extern MterpIPutI8
EXPORT_PC
movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
movzbq rINSTbl, %rcx # rcx<- BA
@@ -2399,7 +2403,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
andb $0xf, rINSTbl # rINST<- A
GET_VREG OUT_32_ARG2, rINSTq # fp[A]
movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(artSet8InstanceFromMterp)
+ call SYMBOL(MterpIPutI8)
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2416,7 +2420,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutU16
EXPORT_PC
movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
movzbq rINSTbl, %rcx # rcx<- BA
@@ -2425,7 +2429,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
andb $0xf, rINSTbl # rINST<- A
GET_VREG OUT_32_ARG2, rINSTq # fp[A]
movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(artSet16InstanceFromMterp)
+ call SYMBOL(MterpIPutU16)
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2442,7 +2446,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern artSet16InstanceFromMterp
+ .extern MterpIPutI16
EXPORT_PC
movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
movzbq rINSTbl, %rcx # rcx<- BA
@@ -2451,7 +2455,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
andb $0xf, rINSTbl # rINST<- A
GET_VREG OUT_32_ARG2, rINSTq # fp[A]
movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(artSet16InstanceFromMterp)
+ call SYMBOL(MterpIPutI16)
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2467,12 +2471,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern MterpGet32Static
+ .extern MterpSGetU32
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpGet32Static)
+ call SYMBOL(MterpSGetU32)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2498,12 +2502,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern MterpGet64Static
+ .extern MterpSGetU64
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpGet64Static)
+ call SYMBOL(MterpSGetU64)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2530,12 +2534,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern MterpGetObjStatic
+ .extern MterpSGetObj
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpGetObjStatic)
+ call SYMBOL(MterpSGetObj)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2562,12 +2566,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern MterpGetBooleanStatic
+ .extern MterpSGetU8
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpGetBooleanStatic)
+ call SYMBOL(MterpSGetU8)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2594,12 +2598,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern MterpGetByteStatic
+ .extern MterpSGetI8
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpGetByteStatic)
+ call SYMBOL(MterpSGetI8)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2626,12 +2630,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern MterpGetCharStatic
+ .extern MterpSGetU16
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpGetCharStatic)
+ call SYMBOL(MterpSGetU16)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2658,12 +2662,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
*/
/* op vAA, field@BBBB */
- .extern MterpGetShortStatic
+ .extern MterpSGetI16
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpGetShortStatic)
+ call SYMBOL(MterpSGetI16)
movq rSELF, %rcx
cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException
@@ -2689,13 +2693,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern MterpSet32Static
+ .extern MterpSPutU32
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSet32Static)
+ call SYMBOL(MterpSPutU32)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2709,13 +2713,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
*
*/
/* sput-wide vAA, field@BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
leaq VREG_ADDRESS(rINSTq), OUT_ARG1 # &fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSet64Static)
+ call SYMBOL(MterpSPutU64)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2730,7 +2734,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
REFRESH_INST 105
movq rINSTq, OUT_ARG2
movq rSELF, OUT_ARG3
- call SYMBOL(MterpSputObject)
+ call SYMBOL(MterpSPutObj)
testb %al, %al
jz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2746,13 +2750,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern MterpSetBooleanStatic
+ .extern MterpSPutU8
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSetBooleanStatic)
+ call SYMBOL(MterpSPutU8)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2769,13 +2773,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern MterpSetByteStatic
+ .extern MterpSPutI8
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSetByteStatic)
+ call SYMBOL(MterpSPutI8)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2792,13 +2796,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern MterpSetCharStatic
+ .extern MterpSPutU16
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSetCharStatic)
+ call SYMBOL(MterpSPutU16)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2815,13 +2819,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
/* op vAA, field@BBBB */
- .extern MterpSetShortStatic
+ .extern MterpSPutI16
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSetShortStatic)
+ call SYMBOL(MterpSPutI16)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
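
The renames in the sget/sput hunks above replace the old Java-type-based helper names (MterpGetBooleanStatic, MterpSetShortStatic, and so on) with a uniform width-and-signedness scheme: U8 for boolean, I8 for byte, U16 for char, I16 for short, U32/U64 for 32- and 64-bit values, and Obj for references, with SGet/SPut (static) mirroring IGet/IPut (instance). A minimal sketch of the resulting surface, reconstructed from the call sites above; the parameter types are assumptions, not the verbatim ART prototypes:

    #include <cstdint>
    class Thread;
    class ArtMethod;

    // Hedged sketch: naming scheme only; argument order reconstructed from
    // the x86-64 call sites (field ref, referrer method, current thread).
    extern "C" uint32_t MterpSGetU8 (uint32_t field_idx, ArtMethod* referrer, Thread* self);  // was MterpGetBooleanStatic
    extern "C" uint32_t MterpSGetI8 (uint32_t field_idx, ArtMethod* referrer, Thread* self);  // was MterpGetByteStatic
    extern "C" uint32_t MterpSGetU16(uint32_t field_idx, ArtMethod* referrer, Thread* self);  // was MterpGetCharStatic
    extern "C" uint32_t MterpSGetI16(uint32_t field_idx, ArtMethod* referrer, Thread* self);  // was MterpGetShortStatic
    extern "C" bool MterpSPutU32(uint32_t field_idx, uint32_t value,
                                 ArtMethod* referrer, Thread* self);                          // was MterpSet32Static
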
@@ -6220,6 +6224,8 @@ movswl %ax, %eax
.balign 128
/* File: x86_64/instruction_end.S */
+ OBJECT_TYPE(artMterpAsmInstructionEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
.global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):
@@ -6231,6 +6237,8 @@ SYMBOL(artMterpAsmInstructionEnd):
*/
/* File: x86_64/instruction_start_sister.S */
+ OBJECT_TYPE(artMterpAsmSisterStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
.global SYMBOL(artMterpAsmSisterStart)
.text
.balign 4
@@ -6238,11 +6246,15 @@ SYMBOL(artMterpAsmSisterStart):
/* File: x86_64/instruction_end_sister.S */
+ OBJECT_TYPE(artMterpAsmSisterEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
.global SYMBOL(artMterpAsmSisterEnd)
SYMBOL(artMterpAsmSisterEnd):
/* File: x86_64/instruction_start_alt.S */
+ OBJECT_TYPE(artMterpAsmAltInstructionStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
.global SYMBOL(artMterpAsmAltInstructionStart)
.text
SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
@@ -11882,6 +11894,8 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
.balign 128
/* File: x86_64/instruction_end_alt.S */
+ OBJECT_TYPE(artMterpAsmAltInstructionEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
.global SYMBOL(artMterpAsmAltInstructionEnd)
SYMBOL(artMterpAsmAltInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86/header.S b/runtime/interpreter/mterp/x86/header.S
index 6f31228005..a79db27abf 100644
--- a/runtime/interpreter/mterp/x86/header.S
+++ b/runtime/interpreter/mterp/x86/header.S
@@ -96,6 +96,7 @@ unspecified registers or condition codes.
#if defined(__APPLE__)
#define MACRO_LITERAL(value) $$(value)
#define FUNCTION_TYPE(name)
+ #define OBJECT_TYPE(name)
#define SIZE(start,end)
// Mac OS' symbols have an _ prefix.
#define SYMBOL(name) _ ## name
@@ -103,6 +104,7 @@ unspecified registers or condition codes.
#else
#define MACRO_LITERAL(value) $$value
#define FUNCTION_TYPE(name) .type name, @function
+ #define OBJECT_TYPE(name) .type name, @object
#define SIZE(start,end) .size start, .-end
#define SYMBOL(name) name
#define ASM_HIDDEN .hidden
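
The OBJECT_TYPE macro added here mirrors the existing FUNCTION_TYPE: on ELF targets it emits a .type directive marking a symbol as data rather than code, and it expands to nothing on Mach-O, which has no equivalent directive. Together with ASM_HIDDEN it turns the mterp boundary labels into typed, non-exported symbols. A recap of the two branches in C-preprocessor form, as in the header itself:

    #if defined(__APPLE__)
    #define OBJECT_TYPE(name)                       // Mach-O: no .type directive
    #else
    #define OBJECT_TYPE(name) .type name, @object   // ELF: mark the symbol as data
    #endif
    // So on ELF, OBJECT_TYPE(artMterpAsmInstructionEnd) followed by
    // ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd) yields a typed symbol that
    // is kept out of the dynamic symbol table while staying globally linkable.
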
diff --git a/runtime/interpreter/mterp/x86/instruction_end.S b/runtime/interpreter/mterp/x86/instruction_end.S
index 3a02a212e6..94587f83b7 100644
--- a/runtime/interpreter/mterp/x86/instruction_end.S
+++ b/runtime/interpreter/mterp/x86/instruction_end.S
@@ -1,3 +1,5 @@
+ OBJECT_TYPE(artMterpAsmInstructionEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
.global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86/instruction_end_alt.S b/runtime/interpreter/mterp/x86/instruction_end_alt.S
index 33c2b8e2a0..7757bce9a7 100644
--- a/runtime/interpreter/mterp/x86/instruction_end_alt.S
+++ b/runtime/interpreter/mterp/x86/instruction_end_alt.S
@@ -1,3 +1,5 @@
+ OBJECT_TYPE(artMterpAsmAltInstructionEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
.global SYMBOL(artMterpAsmAltInstructionEnd)
SYMBOL(artMterpAsmAltInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86/instruction_end_sister.S b/runtime/interpreter/mterp/x86/instruction_end_sister.S
index ea14b11ede..8eb79accdf 100644
--- a/runtime/interpreter/mterp/x86/instruction_end_sister.S
+++ b/runtime/interpreter/mterp/x86/instruction_end_sister.S
@@ -1,3 +1,5 @@
+ OBJECT_TYPE(artMterpAsmSisterEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
.global SYMBOL(artMterpAsmSisterEnd)
SYMBOL(artMterpAsmSisterEnd):
diff --git a/runtime/interpreter/mterp/x86/instruction_start.S b/runtime/interpreter/mterp/x86/instruction_start.S
index ca711de00c..5d29a81993 100644
--- a/runtime/interpreter/mterp/x86/instruction_start.S
+++ b/runtime/interpreter/mterp/x86/instruction_start.S
@@ -1,4 +1,6 @@
+ OBJECT_TYPE(artMterpAsmInstructionStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
.global SYMBOL(artMterpAsmInstructionStart)
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.text
diff --git a/runtime/interpreter/mterp/x86/instruction_start_alt.S b/runtime/interpreter/mterp/x86/instruction_start_alt.S
index 9272a6a7b0..8dcf5bfaf9 100644
--- a/runtime/interpreter/mterp/x86/instruction_start_alt.S
+++ b/runtime/interpreter/mterp/x86/instruction_start_alt.S
@@ -1,4 +1,6 @@
+ OBJECT_TYPE(artMterpAsmAltInstructionStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
.global SYMBOL(artMterpAsmAltInstructionStart)
.text
SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
diff --git a/runtime/interpreter/mterp/x86/instruction_start_sister.S b/runtime/interpreter/mterp/x86/instruction_start_sister.S
index b9ac994d32..796e98b09a 100644
--- a/runtime/interpreter/mterp/x86/instruction_start_sister.S
+++ b/runtime/interpreter/mterp/x86/instruction_start_sister.S
@@ -1,4 +1,6 @@
+ OBJECT_TYPE(artMterpAsmSisterStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
.global SYMBOL(artMterpAsmSisterStart)
.text
.balign 4
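
These paired start/end labels let the runtime treat the generated handler blocks as plain data and size them by pointer subtraction, which is also why typing them as objects (above) is the right call. Each opcode handler is padded to a fixed 128-byte slot by the .balign 128 directives seen earlier. A hedged C++ sketch of the consumption pattern; the symbol names are the real ones declared here, the function around them is illustrative:

    #include <cstddef>
    #include <cstdint>

    extern "C" uint8_t artMterpAsmInstructionStart[];
    extern "C" uint8_t artMterpAsmInstructionEnd[];

    // 256 Dalvik opcodes, one fixed-size slot each (see ".balign 128" above).
    constexpr size_t kNumPackedOpcodes = 256;

    inline size_t HandlerSlotSize() {
      return static_cast<size_t>(artMterpAsmInstructionEnd -
                                 artMterpAsmInstructionStart) / kNumPackedOpcodes;
    }
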
diff --git a/runtime/interpreter/mterp/x86/op_iget.S b/runtime/interpreter/mterp/x86/op_iget.S
index 219463b646..0af1becb24 100644
--- a/runtime/interpreter/mterp/x86/op_iget.S
+++ b/runtime/interpreter/mterp/x86/op_iget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32InstanceFromMterp"}
+%default { "is_object":"0", "helper":"MterpIGetU32"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/x86/op_iget_boolean.S b/runtime/interpreter/mterp/x86/op_iget_boolean.S
index 4ab2afcd38..ddccc41cda 100644
--- a/runtime/interpreter/mterp/x86/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/x86/op_iget_boolean.S
@@ -1 +1 @@
-%include "x86/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp" }
+%include "x86/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_byte.S b/runtime/interpreter/mterp/x86/op_iget_byte.S
index bb282d4648..cd46d3de08 100644
--- a/runtime/interpreter/mterp/x86/op_iget_byte.S
+++ b/runtime/interpreter/mterp/x86/op_iget_byte.S
@@ -1 +1 @@
-%include "x86/op_iget.S" { "helper":"artGetByteInstanceFromMterp" }
+%include "x86/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_char.S b/runtime/interpreter/mterp/x86/op_iget_char.S
index a13203bb81..99697349ae 100644
--- a/runtime/interpreter/mterp/x86/op_iget_char.S
+++ b/runtime/interpreter/mterp/x86/op_iget_char.S
@@ -1 +1 @@
-%include "x86/op_iget.S" { "helper":"artGetCharInstanceFromMterp" }
+%include "x86/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_object.S b/runtime/interpreter/mterp/x86/op_iget_object.S
index 79d5e5fd94..3d421fcf7f 100644
--- a/runtime/interpreter/mterp/x86/op_iget_object.S
+++ b/runtime/interpreter/mterp/x86/op_iget_object.S
@@ -1 +1 @@
-%include "x86/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
+%include "x86/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_short.S b/runtime/interpreter/mterp/x86/op_iget_short.S
index 8fc18a570f..c7477f5db3 100644
--- a/runtime/interpreter/mterp/x86/op_iget_short.S
+++ b/runtime/interpreter/mterp/x86/op_iget_short.S
@@ -1 +1 @@
-%include "x86/op_iget.S" { "helper":"artGetShortInstanceFromMterp" }
+%include "x86/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_wide.S b/runtime/interpreter/mterp/x86/op_iget_wide.S
index b111b29587..da27df952b 100644
--- a/runtime/interpreter/mterp/x86/op_iget_wide.S
+++ b/runtime/interpreter/mterp/x86/op_iget_wide.S
@@ -14,7 +14,7 @@
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(artGet64InstanceFromMterp)
+ call SYMBOL(MterpIGetU64)
mov rSELF, %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
diff --git a/runtime/interpreter/mterp/x86/op_iput.S b/runtime/interpreter/mterp/x86/op_iput.S
index c847e2dc88..4c6603a572 100644
--- a/runtime/interpreter/mterp/x86/op_iput.S
+++ b/runtime/interpreter/mterp/x86/op_iput.S
@@ -1,11 +1,11 @@
-%default { "handler":"artSet32InstanceFromMterp" }
+%default { "helper":"MterpIPutU32" }
/*
* General 32-bit instance field put.
*
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern $handler
+ .extern $helper
EXPORT_PC
movzwl 2(rPC), %eax # eax<- 0000CCCC
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -18,7 +18,7 @@
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL($handler)
+ call SYMBOL($helper)
testb %al, %al
jnz MterpPossibleException
RESTORE_IBASE
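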
diff --git a/runtime/interpreter/mterp/x86/op_iput_boolean.S b/runtime/interpreter/mterp/x86/op_iput_boolean.S
index 11cab8880f..fdd530374e 100644
--- a/runtime/interpreter/mterp/x86/op_iput_boolean.S
+++ b/runtime/interpreter/mterp/x86/op_iput_boolean.S
@@ -1 +1 @@
-%include "x86/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
+%include "x86/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_byte.S b/runtime/interpreter/mterp/x86/op_iput_byte.S
index 11cab8880f..b81850c538 100644
--- a/runtime/interpreter/mterp/x86/op_iput_byte.S
+++ b/runtime/interpreter/mterp/x86/op_iput_byte.S
@@ -1 +1 @@
-%include "x86/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
+%include "x86/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_char.S b/runtime/interpreter/mterp/x86/op_iput_char.S
index abbf2bdc6e..dde385371e 100644
--- a/runtime/interpreter/mterp/x86/op_iput_char.S
+++ b/runtime/interpreter/mterp/x86/op_iput_char.S
@@ -1 +1 @@
-%include "x86/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
+%include "x86/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_object.S b/runtime/interpreter/mterp/x86/op_iput_object.S
index e0136970b0..56e026e40a 100644
--- a/runtime/interpreter/mterp/x86/op_iput_object.S
+++ b/runtime/interpreter/mterp/x86/op_iput_object.S
@@ -6,7 +6,7 @@
movl rINST, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpIputObject)
+ call SYMBOL(MterpIPutObj)
testb %al, %al
jz MterpException
RESTORE_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_iput_short.S b/runtime/interpreter/mterp/x86/op_iput_short.S
index abbf2bdc6e..130e875bb2 100644
--- a/runtime/interpreter/mterp/x86/op_iput_short.S
+++ b/runtime/interpreter/mterp/x86/op_iput_short.S
@@ -1 +1 @@
-%include "x86/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
+%include "x86/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_wide.S b/runtime/interpreter/mterp/x86/op_iput_wide.S
index 122eecf43f..ea22b919c3 100644
--- a/runtime/interpreter/mterp/x86/op_iput_wide.S
+++ b/runtime/interpreter/mterp/x86/op_iput_wide.S
@@ -1,5 +1,5 @@
/* iput-wide vA, vB, field@CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC
movzwl 2(rPC), %eax # eax <- 0000CCCC
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -12,7 +12,7 @@
movl %eax, OUT_ARG2(%esp) # &fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(artSet64InstanceFromMterp)
+ call SYMBOL(MterpIPutU64)
testb %al, %al
jnz MterpPossibleException
RESTORE_IBASE
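
Worth noting in the wide variants: the interpreter passes the address of the vreg pair (&fp[A]) rather than the value itself, since a Dalvik long or double spans two 32-bit registers. A hedged reconstruction of the renamed helper's shape from the call site above; the real ART prototype may differ in detail:

    #include <cstdint>
    class ArtMethod;
    namespace mirror { class Object; }

    // Args as loaded above: field ref CCCC, the object from vB, a pointer to
    // the 64-bit vreg pair at fp[A], and the referring method for resolution.
    extern "C" bool MterpIPutU64(uint32_t field_idx,
                                 mirror::Object* obj,
                                 uint64_t* value_addr,
                                 ArtMethod* referrer);
    // A non-zero result (testb %al, %al above) branches to MterpPossibleException.
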
diff --git a/runtime/interpreter/mterp/x86/op_sget.S b/runtime/interpreter/mterp/x86/op_sget.S
index 6e42d323e6..66c7b0bac5 100644
--- a/runtime/interpreter/mterp/x86/op_sget.S
+++ b/runtime/interpreter/mterp/x86/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"MterpGet32Static" }
+%default { "is_object":"0", "helper":"MterpSGetU32" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86/op_sget_boolean.S b/runtime/interpreter/mterp/x86/op_sget_boolean.S
index 5fa2bf0cfc..3936eeae45 100644
--- a/runtime/interpreter/mterp/x86/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/x86/op_sget_boolean.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"MterpGetBooleanStatic"}
+%include "x86/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_byte.S b/runtime/interpreter/mterp/x86/op_sget_byte.S
index ef812f118e..967586d944 100644
--- a/runtime/interpreter/mterp/x86/op_sget_byte.S
+++ b/runtime/interpreter/mterp/x86/op_sget_byte.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"MterpGetByteStatic"}
+%include "x86/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_char.S b/runtime/interpreter/mterp/x86/op_sget_char.S
index 3bc34ef338..b706f18638 100644
--- a/runtime/interpreter/mterp/x86/op_sget_char.S
+++ b/runtime/interpreter/mterp/x86/op_sget_char.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"MterpGetCharStatic"}
+%include "x86/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_object.S b/runtime/interpreter/mterp/x86/op_sget_object.S
index b829e75f30..eac8836534 100644
--- a/runtime/interpreter/mterp/x86/op_sget_object.S
+++ b/runtime/interpreter/mterp/x86/op_sget_object.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
+%include "x86/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_short.S b/runtime/interpreter/mterp/x86/op_sget_short.S
index 449cf6f918..ee058a6016 100644
--- a/runtime/interpreter/mterp/x86/op_sget_short.S
+++ b/runtime/interpreter/mterp/x86/op_sget_short.S
@@ -1 +1 @@
-%include "x86/op_sget.S" {"helper":"MterpGetShortStatic"}
+%include "x86/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_wide.S b/runtime/interpreter/mterp/x86/op_sget_wide.S
index a605bcf2e5..994cc3aebb 100644
--- a/runtime/interpreter/mterp/x86/op_sget_wide.S
+++ b/runtime/interpreter/mterp/x86/op_sget_wide.S
@@ -3,7 +3,7 @@
*
*/
/* sget-wide vAA, field@BBBB */
- .extern MterpGet64Static
+ .extern MterpSGetU64
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref CCCC
@@ -11,7 +11,7 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpGet64Static)
+ call SYMBOL(MterpSGetU64)
movl rSELF, %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
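
The get and put families also report failure differently, and the assembly shows it: a getter's return register carries the field value, so errors travel out-of-band through the thread's pending-exception slot (the cmpl $$0, THREAD_EXCEPTION_OFFSET check above), while a setter is free to return a status that is tested directly. A sketch of the two conventions, with the helper prototypes again reconstructed rather than quoted:

    #include <cstdint>
    class ArtMethod;
    class Thread { public: bool IsExceptionPending() const; };   // stand-in declaration

    extern "C" uint64_t MterpSGetU64(uint32_t field_idx, ArtMethod* referrer, Thread* self);
    extern "C" bool     MterpSPutU64(uint32_t field_idx, uint64_t* value_addr,
                                     ArtMethod* referrer, Thread* self);

    // Getter: the return value is the data, so failure is checked afterwards.
    bool SGetWide(uint32_t idx, ArtMethod* m, Thread* self, uint64_t* out) {
      *out = MterpSGetU64(idx, m, self);
      return !self->IsExceptionPending();   // cmpl $0, THREAD_EXCEPTION_OFFSET(...)
    }

    // Setter: the return value is the status, tested directly (testb %al, %al);
    // non-zero means an exception is pending.
    bool SPutWide(uint32_t idx, uint64_t* pair, ArtMethod* m, Thread* self) {
      return !MterpSPutU64(idx, pair, m, self);
    }
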
diff --git a/runtime/interpreter/mterp/x86/op_sput.S b/runtime/interpreter/mterp/x86/op_sput.S
index 99f6088982..e99e7a7239 100644
--- a/runtime/interpreter/mterp/x86/op_sput.S
+++ b/runtime/interpreter/mterp/x86/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"MterpSet32Static"}
+%default { "helper":"MterpSPutU32"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86/op_sput_boolean.S b/runtime/interpreter/mterp/x86/op_sput_boolean.S
index a7fffda1db..c6aa7c4cd1 100644
--- a/runtime/interpreter/mterp/x86/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/x86/op_sput_boolean.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"MterpSetBooleanStatic"}
+%include "x86/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_byte.S b/runtime/interpreter/mterp/x86/op_sput_byte.S
index 3a5ff9267d..fd504a8023 100644
--- a/runtime/interpreter/mterp/x86/op_sput_byte.S
+++ b/runtime/interpreter/mterp/x86/op_sput_byte.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"MterpSetByteStatic"}
+%include "x86/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_char.S b/runtime/interpreter/mterp/x86/op_sput_char.S
index 565cc2aa0f..b4d0997737 100644
--- a/runtime/interpreter/mterp/x86/op_sput_char.S
+++ b/runtime/interpreter/mterp/x86/op_sput_char.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"MterpSetCharStatic"}
+%include "x86/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_object.S b/runtime/interpreter/mterp/x86/op_sput_object.S
index 0db517723b..941b07201e 100644
--- a/runtime/interpreter/mterp/x86/op_sput_object.S
+++ b/runtime/interpreter/mterp/x86/op_sput_object.S
@@ -6,7 +6,7 @@
movl rINST, OUT_ARG2(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpSputObject)
+ call SYMBOL(MterpSPutObj)
testb %al, %al
jz MterpException
RESTORE_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_sput_short.S b/runtime/interpreter/mterp/x86/op_sput_short.S
index 85c344165e..eba01bdfd0 100644
--- a/runtime/interpreter/mterp/x86/op_sput_short.S
+++ b/runtime/interpreter/mterp/x86/op_sput_short.S
@@ -1 +1 @@
-%include "x86/op_sput.S" {"helper":"MterpSetShortStatic"}
+%include "x86/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_wide.S b/runtime/interpreter/mterp/x86/op_sput_wide.S
index 8cc7e28554..f58150706e 100644
--- a/runtime/interpreter/mterp/x86/op_sput_wide.S
+++ b/runtime/interpreter/mterp/x86/op_sput_wide.S
@@ -3,7 +3,7 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
@@ -13,7 +13,7 @@
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSet64Static)
+ call SYMBOL(MterpSPutU64)
testb %al, %al
jnz MterpException
RESTORE_IBASE
diff --git a/runtime/interpreter/mterp/x86_64/header.S b/runtime/interpreter/mterp/x86_64/header.S
index 4ebe95e987..0332ce272c 100644
--- a/runtime/interpreter/mterp/x86_64/header.S
+++ b/runtime/interpreter/mterp/x86_64/header.S
@@ -92,6 +92,7 @@ unspecified registers or condition codes.
#if defined(__APPLE__)
#define MACRO_LITERAL(value) $$(value)
#define FUNCTION_TYPE(name)
+ #define OBJECT_TYPE(name)
#define SIZE(start,end)
// Mac OS' symbols have an _ prefix.
#define SYMBOL(name) _ ## name
@@ -99,6 +100,7 @@ unspecified registers or condition codes.
#else
#define MACRO_LITERAL(value) $$value
#define FUNCTION_TYPE(name) .type name, @function
+ #define OBJECT_TYPE(name) .type name, @object
#define SIZE(start,end) .size start, .-end
#define SYMBOL(name) name
#define ASM_HIDDEN .hidden
diff --git a/runtime/interpreter/mterp/x86_64/instruction_end.S b/runtime/interpreter/mterp/x86_64/instruction_end.S
index 3a02a212e6..94587f83b7 100644
--- a/runtime/interpreter/mterp/x86_64/instruction_end.S
+++ b/runtime/interpreter/mterp/x86_64/instruction_end.S
@@ -1,3 +1,5 @@
+ OBJECT_TYPE(artMterpAsmInstructionEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
.global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86_64/instruction_end_alt.S b/runtime/interpreter/mterp/x86_64/instruction_end_alt.S
index 33c2b8e2a0..7757bce9a7 100644
--- a/runtime/interpreter/mterp/x86_64/instruction_end_alt.S
+++ b/runtime/interpreter/mterp/x86_64/instruction_end_alt.S
@@ -1,3 +1,5 @@
+ OBJECT_TYPE(artMterpAsmAltInstructionEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
.global SYMBOL(artMterpAsmAltInstructionEnd)
SYMBOL(artMterpAsmAltInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86_64/instruction_end_sister.S b/runtime/interpreter/mterp/x86_64/instruction_end_sister.S
index ea14b11ede..8eb79accdf 100644
--- a/runtime/interpreter/mterp/x86_64/instruction_end_sister.S
+++ b/runtime/interpreter/mterp/x86_64/instruction_end_sister.S
@@ -1,3 +1,5 @@
+ OBJECT_TYPE(artMterpAsmSisterEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
.global SYMBOL(artMterpAsmSisterEnd)
SYMBOL(artMterpAsmSisterEnd):
diff --git a/runtime/interpreter/mterp/x86_64/instruction_start.S b/runtime/interpreter/mterp/x86_64/instruction_start.S
index ca711de00c..5d29a81993 100644
--- a/runtime/interpreter/mterp/x86_64/instruction_start.S
+++ b/runtime/interpreter/mterp/x86_64/instruction_start.S
@@ -1,4 +1,6 @@
+ OBJECT_TYPE(artMterpAsmInstructionStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
.global SYMBOL(artMterpAsmInstructionStart)
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.text
diff --git a/runtime/interpreter/mterp/x86_64/instruction_start_alt.S b/runtime/interpreter/mterp/x86_64/instruction_start_alt.S
index 9272a6a7b0..8dcf5bfaf9 100644
--- a/runtime/interpreter/mterp/x86_64/instruction_start_alt.S
+++ b/runtime/interpreter/mterp/x86_64/instruction_start_alt.S
@@ -1,4 +1,6 @@
+ OBJECT_TYPE(artMterpAsmAltInstructionStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
.global SYMBOL(artMterpAsmAltInstructionStart)
.text
SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
diff --git a/runtime/interpreter/mterp/x86_64/instruction_start_sister.S b/runtime/interpreter/mterp/x86_64/instruction_start_sister.S
index b9ac994d32..796e98b09a 100644
--- a/runtime/interpreter/mterp/x86_64/instruction_start_sister.S
+++ b/runtime/interpreter/mterp/x86_64/instruction_start_sister.S
@@ -1,4 +1,6 @@
+ OBJECT_TYPE(artMterpAsmSisterStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
.global SYMBOL(artMterpAsmSisterStart)
.text
.balign 4
diff --git a/runtime/interpreter/mterp/x86_64/op_iget.S b/runtime/interpreter/mterp/x86_64/op_iget.S
index ffc14b5d22..5c6cab6acb 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"artGet32InstanceFromMterp", "wide":"0"}
+%default { "is_object":"0", "helper":"MterpIGetU32", "wide":"0"}
/*
* General instance field get.
*
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_boolean.S b/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
index 1379d53cfe..18e9264926 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "helper":"artGetBooleanInstanceFromMterp" }
+%include "x86_64/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_byte.S b/runtime/interpreter/mterp/x86_64/op_iget_byte.S
index 93047ec99c..bec0ad526c 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_byte.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_byte.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "helper":"artGetByteInstanceFromMterp" }
+%include "x86_64/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_char.S b/runtime/interpreter/mterp/x86_64/op_iget_char.S
index 239f0d0bd7..5e22b88129 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_char.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_char.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "helper":"artGetCharInstanceFromMterp" }
+%include "x86_64/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_object.S b/runtime/interpreter/mterp/x86_64/op_iget_object.S
index 2104d2c744..bcef1d2c25 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_object.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_object.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromMterp" }
+%include "x86_64/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_short.S b/runtime/interpreter/mterp/x86_64/op_iget_short.S
index 3525effe75..14c49f7711 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_short.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_short.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "helper":"artGetShortInstanceFromMterp" }
+%include "x86_64/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_wide.S b/runtime/interpreter/mterp/x86_64/op_iget_wide.S
index 706c44121e..d9d174400c 100644
--- a/runtime/interpreter/mterp/x86_64/op_iget_wide.S
+++ b/runtime/interpreter/mterp/x86_64/op_iget_wide.S
@@ -1 +1 @@
-%include "x86_64/op_iget.S" { "helper":"artGet64InstanceFromMterp", "wide":"1" }
+%include "x86_64/op_iget.S" { "helper":"MterpIGetU64", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput.S b/runtime/interpreter/mterp/x86_64/op_iput.S
index 6b7cb1cc84..12affdbe84 100644
--- a/runtime/interpreter/mterp/x86_64/op_iput.S
+++ b/runtime/interpreter/mterp/x86_64/op_iput.S
@@ -1,11 +1,11 @@
-%default { "handler":"artSet32InstanceFromMterp"}
+%default { "helper":"MterpIPutU32"}
/*
* General 32-bit instance field put.
*
* for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
*/
/* op vA, vB, field@CCCC */
- .extern $handler
+ .extern $helper
EXPORT_PC
movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
movzbq rINSTbl, %rcx # rcx<- BA
@@ -14,7 +14,7 @@
andb $$0xf, rINSTbl # rINST<- A
GET_VREG OUT_32_ARG2, rINSTq # fp[A]
movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL($handler)
+ call SYMBOL($helper)
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_boolean.S b/runtime/interpreter/mterp/x86_64/op_iput_boolean.S
index cb4b1cde45..06bbd704d7 100644
--- a/runtime/interpreter/mterp/x86_64/op_iput_boolean.S
+++ b/runtime/interpreter/mterp/x86_64/op_iput_boolean.S
@@ -1 +1 @@
-%include "x86_64/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
+%include "x86_64/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_byte.S b/runtime/interpreter/mterp/x86_64/op_iput_byte.S
index cb4b1cde45..53f9008eb5 100644
--- a/runtime/interpreter/mterp/x86_64/op_iput_byte.S
+++ b/runtime/interpreter/mterp/x86_64/op_iput_byte.S
@@ -1 +1 @@
-%include "x86_64/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
+%include "x86_64/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_char.S b/runtime/interpreter/mterp/x86_64/op_iput_char.S
index b4e147cf5e..4736f5e9d4 100644
--- a/runtime/interpreter/mterp/x86_64/op_iput_char.S
+++ b/runtime/interpreter/mterp/x86_64/op_iput_char.S
@@ -1 +1 @@
-%include "x86_64/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
+%include "x86_64/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_object.S b/runtime/interpreter/mterp/x86_64/op_iput_object.S
index 828712d8ba..22648cdde7 100644
--- a/runtime/interpreter/mterp/x86_64/op_iput_object.S
+++ b/runtime/interpreter/mterp/x86_64/op_iput_object.S
@@ -4,7 +4,7 @@
REFRESH_INST ${opnum}
movl rINST, OUT_32_ARG2
movq rSELF, OUT_ARG3
- call SYMBOL(MterpIputObject)
+ call SYMBOL(MterpIPutObj)
testb %al, %al
jz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_short.S b/runtime/interpreter/mterp/x86_64/op_iput_short.S
index b4e147cf5e..dca5735963 100644
--- a/runtime/interpreter/mterp/x86_64/op_iput_short.S
+++ b/runtime/interpreter/mterp/x86_64/op_iput_short.S
@@ -1 +1 @@
-%include "x86_64/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
+%include "x86_64/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_wide.S b/runtime/interpreter/mterp/x86_64/op_iput_wide.S
index e59717b846..4f8c47c1e6 100644
--- a/runtime/interpreter/mterp/x86_64/op_iput_wide.S
+++ b/runtime/interpreter/mterp/x86_64/op_iput_wide.S
@@ -1,5 +1,5 @@
/* iput-wide vA, vB, field@CCCC */
- .extern artSet64InstanceFromMterp
+ .extern MterpIPutU64
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref CCCC
movzbq rINSTbl, %rcx # rcx <- BA
@@ -8,7 +8,7 @@
andb $$0xf, rINSTbl # rINST <- A
leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[A]
movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(artSet64InstanceFromMterp)
+ call SYMBOL(MterpIPutU64)
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sget.S b/runtime/interpreter/mterp/x86_64/op_sget.S
index e996c77801..c15ac1e280 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget.S
@@ -1,4 +1,4 @@
-%default { "is_object":"0", "helper":"MterpGet32Static", "wide":"0" }
+%default { "is_object":"0", "helper":"MterpSGetU32", "wide":"0" }
/*
* General SGET handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
index ee772ad4e1..e5a4e41995 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"MterpGetBooleanStatic"}
+%include "x86_64/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_byte.S b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
index f65ea4951e..4602f7da53 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_byte.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"MterpGetByteStatic"}
+%include "x86_64/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_char.S b/runtime/interpreter/mterp/x86_64/op_sget_char.S
index 3972551bec..a094a542de 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_char.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_char.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"MterpGetCharStatic"}
+%include "x86_64/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_object.S b/runtime/interpreter/mterp/x86_64/op_sget_object.S
index a0bbfd8d35..94597b187c 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_object.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_object.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"}
+%include "x86_64/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_short.S b/runtime/interpreter/mterp/x86_64/op_sget_short.S
index df212dc5c9..dee5c247b9 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_short.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_short.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"MterpGetShortStatic"}
+%include "x86_64/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_wide.S b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
index 1e98e28a92..65ddb8a09c 100644
--- a/runtime/interpreter/mterp/x86_64/op_sget_wide.S
+++ b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
@@ -1 +1 @@
-%include "x86_64/op_sget.S" {"helper":"MterpGet64Static", "wide":"1"}
+%include "x86_64/op_sget.S" {"helper":"MterpSGetU64", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput.S b/runtime/interpreter/mterp/x86_64/op_sput.S
index 9705619900..9a33d521b9 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput.S
@@ -1,4 +1,4 @@
-%default { "helper":"MterpSet32Static"}
+%default { "helper":"MterpSPutU32"}
/*
* General SPUT handler wrapper.
*
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
index 8bf4a62328..ea9acbfdc7 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSetBooleanStatic"}
+%include "x86_64/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_byte.S b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
index 5bb26ebed5..62c9e205a1 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_byte.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSetByteStatic"}
+%include "x86_64/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_char.S b/runtime/interpreter/mterp/x86_64/op_sput_char.S
index 42b244e2bb..ab0196e027 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_char.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_char.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSetCharStatic"}
+%include "x86_64/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_object.S b/runtime/interpreter/mterp/x86_64/op_sput_object.S
index eb5a37673e..8a47074ec3 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_object.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_object.S
@@ -4,7 +4,7 @@
REFRESH_INST ${opnum}
movq rINSTq, OUT_ARG2
movq rSELF, OUT_ARG3
- call SYMBOL(MterpSputObject)
+ call SYMBOL(MterpSPutObj)
testb %al, %al
jz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_short.S b/runtime/interpreter/mterp/x86_64/op_sput_short.S
index 9670092aaf..f73a3fc69e 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_short.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_short.S
@@ -1 +1 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSetShortStatic"}
+%include "x86_64/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_wide.S b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
index a21bcb5dd5..464d1697ac 100644
--- a/runtime/interpreter/mterp/x86_64/op_sput_wide.S
+++ b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
@@ -3,13 +3,13 @@
*
*/
/* sput-wide vAA, field@BBBB */
- .extern MterpSet64Static
+ .extern MterpSPutU64
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # field ref BBBB
leaq VREG_ADDRESS(rINSTq), OUT_ARG1 # &fp[AA]
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSet64Static)
+ call SYMBOL(MterpSPutU64)
testb %al, %al
jnz MterpException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index f76b86c94f..0e4cf27e50 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -159,14 +159,14 @@ class ShadowFrame {
}
int64_t GetVRegLong(size_t i) const {
- DCHECK_LT(i, NumberOfVRegs());
+ DCHECK_LT(i + 1, NumberOfVRegs());
const uint32_t* vreg = &vregs_[i];
typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
return *reinterpret_cast<unaligned_int64*>(vreg);
}
double GetVRegDouble(size_t i) const {
- DCHECK_LT(i, NumberOfVRegs());
+ DCHECK_LT(i + 1, NumberOfVRegs());
const uint32_t* vreg = &vregs_[i];
typedef const double unaligned_double __attribute__ ((aligned (4)));
return *reinterpret_cast<unaligned_double*>(vreg);
@@ -220,7 +220,7 @@ class ShadowFrame {
}
void SetVRegLong(size_t i, int64_t val) {
- DCHECK_LT(i, NumberOfVRegs());
+ DCHECK_LT(i + 1, NumberOfVRegs());
uint32_t* vreg = &vregs_[i];
typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
*reinterpret_cast<unaligned_int64*>(vreg) = val;
@@ -233,7 +233,7 @@ class ShadowFrame {
}
void SetVRegDouble(size_t i, double val) {
- DCHECK_LT(i, NumberOfVRegs());
+ DCHECK_LT(i + 1, NumberOfVRegs());
uint32_t* vreg = &vregs_[i];
typedef double unaligned_double __attribute__ ((aligned (4)));
*reinterpret_cast<unaligned_double*>(vreg) = val;
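
The shadow-frame change is a straightforward bounds-check fix: a wide (long/double) value occupies vregs i and i+1, so DCHECK_LT(i, NumberOfVRegs()) would accept an index whose second half lies one slot past the array; the new check requires i + 1 to be in range as well. A self-contained sketch of the corrected read:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // With num_vregs == 4 the valid wide indices are 0..2; the old check
    // (i < 4) wrongly accepted i == 3, whose upper half is vregs[4].
    int64_t GetVRegLongSketch(const uint32_t* vregs, size_t num_vregs, size_t i) {
      assert(i + 1 < num_vregs);               // the fixed check
      int64_t v;
      std::memcpy(&v, &vregs[i], sizeof(v));   // alignment-safe 64-bit read of the pair
      return v;
    }
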
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 74aa787db7..d4b51af903 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -517,24 +517,23 @@ void UnstartedRuntime::UnstartedClassIsAnonymousClass(
result->SetZ(class_name == nullptr);
}
-static std::unique_ptr<MemMap> FindAndExtractEntry(const std::string& jar_file,
- const char* entry_name,
- size_t* size,
- std::string* error_msg) {
+static MemMap FindAndExtractEntry(const std::string& jar_file,
+ const char* entry_name,
+ size_t* size,
+ std::string* error_msg) {
CHECK(size != nullptr);
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(jar_file.c_str(), error_msg));
if (zip_archive == nullptr) {
- return nullptr;
+ return MemMap::Invalid();
}
std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(entry_name, error_msg));
if (zip_entry == nullptr) {
- return nullptr;
+ return MemMap::Invalid();
}
- std::unique_ptr<MemMap> tmp_map(
- zip_entry->ExtractToMemMap(jar_file.c_str(), entry_name, error_msg));
- if (tmp_map == nullptr) {
- return nullptr;
+ MemMap tmp_map = zip_entry->ExtractToMemMap(jar_file.c_str(), entry_name, error_msg);
+ if (!tmp_map.IsValid()) {
+ return MemMap::Invalid();
}
// OK, from here everything seems fine.
@@ -577,18 +576,18 @@ static void GetResourceAsStream(Thread* self,
return;
}
- std::unique_ptr<MemMap> mem_map;
+ MemMap mem_map;
size_t map_size;
std::string last_error_msg; // Only store the last message (we could concatenate).
for (const std::string& jar_file : split) {
mem_map = FindAndExtractEntry(jar_file, resource_cstr, &map_size, &last_error_msg);
- if (mem_map != nullptr) {
+ if (mem_map.IsValid()) {
break;
}
}
- if (mem_map == nullptr) {
+ if (!mem_map.IsValid()) {
// Didn't find it. There's a good chance this will be the same at runtime, but still
// conservatively abort the transaction here.
AbortTransactionOrFail(self,
@@ -607,9 +606,9 @@ static void GetResourceAsStream(Thread* self,
return;
}
// Copy in content.
- memcpy(h_array->GetData(), mem_map->Begin(), map_size);
+ memcpy(h_array->GetData(), mem_map.Begin(), map_size);
// Be proactive releasing memory.
- mem_map.reset();
+ mem_map.Reset();
// Create a ByteArrayInputStream.
Handle<mirror::Class> h_class(hs.NewHandle(
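
This hunk is one piece of a broader migration of MemMap from a heap-allocated handle (std::unique_ptr<MemMap>, with nullptr meaning "no mapping") to a move-only value type, where the empty state is spelled MemMap::Invalid() / !IsValid() and early release becomes Reset(). A minimal sketch of that value-type pattern, under the assumption that this is all the change relies on:

    #include <cstddef>
    #include <cstdint>

    class MemMapSketch {             // placeholder, not the real art::MemMap
     public:
      static MemMapSketch Invalid() { return MemMapSketch(); }
      bool IsValid() const { return begin_ != nullptr; }
      void Reset() { begin_ = nullptr; size_ = 0; }        // proactive release
      uint8_t* Begin() const { return begin_; }

      MemMapSketch(MemMapSketch&&) noexcept = default;     // movable,
      MemMapSketch& operator=(MemMapSketch&&) noexcept = default;
      MemMapSketch(const MemMapSketch&) = delete;          // never copied

     private:
      MemMapSketch() = default;
      uint8_t* begin_ = nullptr;
      size_t size_ = 0;
    };
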
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
index 25ac6e2a31..452a76b89a 100644
--- a/runtime/java_frame_root_info.h
+++ b/runtime/java_frame_root_info.h
@@ -27,12 +27,12 @@ namespace art {
class StackVisitor;
-class JavaFrameRootInfo FINAL : public RootInfo {
+class JavaFrameRootInfo final : public RootInfo {
public:
JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
: RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
}
- void Describe(std::ostream& os) const OVERRIDE
+ void Describe(std::ostream& os) const override
REQUIRES_SHARED(Locks::mutator_lock_);
size_t GetVReg() const {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ed449b5433..a6bc029828 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -399,7 +399,7 @@ void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
struct CollectClasses : public ClassVisitor {
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
classes_.push_back(klass.Ptr());
return true;
}
@@ -576,7 +576,7 @@ void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
memory_use_.AddValue(bytes);
}
-class JitCompileTask FINAL : public Task {
+class JitCompileTask final : public Task {
public:
enum TaskKind {
kAllocateProfile,
@@ -596,7 +596,7 @@ class JitCompileTask FINAL : public Task {
soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
ScopedObjectAccess soa(self);
if (kind_ == kCompile) {
Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
@@ -611,7 +611,7 @@ class JitCompileTask FINAL : public Task {
ProfileSaver::NotifyJitActivity();
}
- void Finalize() OVERRIDE {
+ void Finalize() override {
delete this;
}
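
The FINAL/OVERRIDE edits in this file (and in java_frame_root_info.h above) are mechanical: ART's uppercase macros were portability shims from before C++11 was a hard requirement, and they give way to the standard keywords with no behavioral change. For example, assuming the macros expanded directly to the keywords:

    class Thread;
    struct Task {
      virtual ~Task() = default;
      virtual void Run(Thread* self) = 0;
    };

    // Was: class JitCompileTask FINAL : public Task { void Run(Thread*) OVERRIDE; ... };
    class JitCompileTask final : public Task {   // no further subclassing allowed
     public:
      void Run(Thread* self) override;           // signature checked against Task::Run
    };
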
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b92affa26e..2b2898c195 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -168,11 +168,6 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
ScopedTrace trace(__PRETTY_FUNCTION__);
CHECK_GE(max_capacity, initial_capacity);
- // Generating debug information is for using the Linux perf tool on
- // host which does not work with ashmem.
- // Also, targets linux and fuchsia do not support ashmem.
- bool use_ashmem = !generate_debug_info && !kIsTargetLinux && !kIsTargetFuchsia;
-
// With 'perf', we want a 1-1 mapping between an address and a method.
// We aren't able to keep method pointers live during the instrumentation method entry trampoline
// so we will just disable jit-gc if we are doing that.
@@ -205,15 +200,16 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
// We could do PC-relative addressing to avoid this problem, but that
// would require reserving code and data area before submitting, which
// means more windows for the code memory to be RWX.
- std::unique_ptr<MemMap> data_map(MemMap::MapAnonymous(
- "data-code-cache", nullptr,
+ MemMap data_map = MemMap::MapAnonymous(
+ "data-code-cache",
+ /* addr */ nullptr,
max_capacity,
kProtData,
/* low_4gb */ true,
/* reuse */ false,
- &error_str,
- use_ashmem));
- if (data_map == nullptr) {
+ /* reservation */ nullptr,
+ &error_str);
+ if (!data_map.IsValid()) {
std::ostringstream oss;
oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
*error_msg = oss.str();
@@ -229,26 +225,23 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
size_t data_size = max_capacity / 2;
size_t code_size = max_capacity - data_size;
DCHECK_EQ(code_size + data_size, max_capacity);
- uint8_t* divider = data_map->Begin() + data_size;
-
- MemMap* code_map = data_map->RemapAtEnd(
- divider,
- "jit-code-cache",
- memmap_flags_prot_code | PROT_WRITE,
- &error_str, use_ashmem);
- if (code_map == nullptr) {
+ uint8_t* divider = data_map.Begin() + data_size;
+
+ MemMap code_map = data_map.RemapAtEnd(
+ divider, "jit-code-cache", memmap_flags_prot_code | PROT_WRITE, &error_str);
+ if (!code_map.IsValid()) {
std::ostringstream oss;
oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
*error_msg = oss.str();
return nullptr;
}
- DCHECK_EQ(code_map->Begin(), divider);
+ DCHECK_EQ(code_map.Begin(), divider);
data_size = initial_capacity / 2;
code_size = initial_capacity - data_size;
DCHECK_EQ(code_size + data_size, initial_capacity);
return new JitCodeCache(
- code_map,
- data_map.release(),
+ std::move(code_map),
+ std::move(data_map),
code_size,
data_size,
max_capacity,
@@ -256,8 +249,8 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
memmap_flags_prot_code);
}
-JitCodeCache::JitCodeCache(MemMap* code_map,
- MemMap* data_map,
+JitCodeCache::JitCodeCache(MemMap&& code_map,
+ MemMap&& data_map,
size_t initial_code_capacity,
size_t initial_data_capacity,
size_t max_capacity,
@@ -266,8 +259,8 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
: lock_("Jit code cache", kJitCodeCacheLock),
lock_cond_("Jit code cache condition variable", lock_),
collection_in_progress_(false),
- code_map_(code_map),
- data_map_(data_map),
+ code_map_(std::move(code_map)),
+ data_map_(std::move(data_map)),
max_capacity_(max_capacity),
current_capacity_(initial_code_capacity + initial_data_capacity),
code_end_(initial_code_capacity),
@@ -287,8 +280,8 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
memmap_flags_prot_code_(memmap_flags_prot_code) {
DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
- code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
- data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);
+ code_mspace_ = create_mspace_with_base(code_map_.Begin(), code_end_, false /*locked*/);
+ data_mspace_ = create_mspace_with_base(data_map_.Begin(), data_end_, false /*locked*/);
if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
PLOG(FATAL) << "create_mspace_with_base failed";
@@ -298,13 +291,13 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
CheckedCall(mprotect,
"mprotect jit code cache",
- code_map_->Begin(),
- code_map_->Size(),
+ code_map_.Begin(),
+ code_map_.Size(),
memmap_flags_prot_code_);
CheckedCall(mprotect,
"mprotect jit data cache",
- data_map_->Begin(),
- data_map_->Size(),
+ data_map_.Begin(),
+ data_map_.Size(),
kProtData);
VLOG(jit) << "Created jit code cache: initial data size="
@@ -316,7 +309,7 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
JitCodeCache::~JitCodeCache() {}
bool JitCodeCache::ContainsPc(const void* ptr) const {
- return code_map_->Begin() <= ptr && ptr < code_map_->End();
+ return code_map_.Begin() <= ptr && ptr < code_map_.End();
}
bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -387,8 +380,8 @@ class ScopedCodeCacheWrite : ScopedTrace {
CheckedCall(
mprotect,
"make code writable",
- code_cache_->code_map_->Begin(),
- code_cache_->code_map_->Size(),
+ code_cache_->code_map_.Begin(),
+ code_cache_->code_map_.Size(),
code_cache_->memmap_flags_prot_code_ | PROT_WRITE);
}
@@ -397,8 +390,8 @@ class ScopedCodeCacheWrite : ScopedTrace {
CheckedCall(
mprotect,
"make code protected",
- code_cache_->code_map_->Begin(),
- code_cache_->code_map_->Size(),
+ code_cache_->code_map_.Begin(),
+ code_cache_->code_map_.Size(),
code_cache_->memmap_flags_prot_code_);
}
@@ -608,17 +601,17 @@ void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
void JitCodeCache::FreeAllMethodHeaders(
const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
- {
- MutexLock mu(Thread::Current(), *Locks::cha_lock_);
- Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
- ->RemoveDependentsWithMethodHeaders(method_headers);
- }
-
// We need to remove entries in method_headers from CHA dependencies
// first since once we do FreeCode() below, the memory can be reused
// so it's possible for the same method_header to start representing
// different compiled code.
MutexLock mu(Thread::Current(), lock_);
+ {
+ MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
+ Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
+ ->RemoveDependentsWithMethodHeaders(method_headers);
+ }
+
ScopedCodeCacheWrite scc(this);
for (const OatQuickMethodHeader* method_header : method_headers) {
FreeCodeAndData(method_header->GetCode());
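
The reshuffle in FreeAllMethodHeaders is a lock-ordering fix: the CHA cleanup used to take cha_lock_ on its own before the code-cache lock_, while the new shape acquires lock_ first and nests cha_lock_ inside it. A single consistent order across all paths is what rules out ABBA deadlocks. A compilable sketch with stand-in locks:

    #include <mutex>

    std::mutex cache_lock;   // plays the role of JitCodeCache::lock_  (A)
    std::mutex cha_lock;     // plays the role of Locks::cha_lock_     (B)

    // If one path takes A then B while another takes B then A, two threads
    // can each hold one lock and wait forever on the other. After this
    // change every path orders them A before B, as the hunk above now does:
    void FreeAllSketch() {
      std::lock_guard<std::mutex> a(cache_lock);    // A first
      {
        std::lock_guard<std::mutex> b(cha_lock);    // B nested inside A
        // drop CHA dependencies on the headers being freed
      }
      // free code and data while still holding A
    }
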
@@ -742,6 +735,18 @@ static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
}
+void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
+ while (collection_in_progress_) {
+ lock_.Unlock(self);
+ {
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
+ }
+ lock_.Lock(self);
+ }
+}
+
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
ArtMethod* method,
uint8_t* stack_map,
@@ -755,6 +760,13 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
const ArenaSet<ArtMethod*>&
cha_single_implementation_list) {
DCHECK(!method->IsNative() || !osr);
+
+ if (!method->IsNative()) {
+ // We need to do this before grabbing the lock_ because it needs to be able to see the string
+ // InternTable. Native methods do not have roots.
+ DCheckRootsAreValid(roots);
+ }
+
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -763,44 +775,45 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
OatQuickMethodHeader* method_header = nullptr;
uint8_t* code_ptr = nullptr;
uint8_t* memory = nullptr;
+ MutexLock mu(self, lock_);
+ // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
+ // finish.
+ WaitForPotentialCollectionToCompleteRunnable(self);
{
- ScopedThreadSuspension sts(self, kSuspended);
- MutexLock mu(self, lock_);
- WaitForPotentialCollectionToComplete(self);
- {
- ScopedCodeCacheWrite scc(this);
- memory = AllocateCode(total_size);
- if (memory == nullptr) {
- return nullptr;
- }
- code_ptr = memory + header_size;
-
- std::copy(code, code + code_size, code_ptr);
- method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- new (method_header) OatQuickMethodHeader(
- (stack_map != nullptr) ? code_ptr - stack_map : 0u,
- code_size);
- // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
- // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
- // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
- // 6P) stop being supported or their kernels are fixed.
- //
- // For reference, this behavior is caused by this commit:
- // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
- FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
- reinterpret_cast<char*>(code_ptr + code_size));
- DCHECK(!Runtime::Current()->IsAotCompiler());
- if (has_should_deoptimize_flag) {
- method_header->SetHasShouldDeoptimizeFlag();
- }
+ ScopedCodeCacheWrite scc(this);
+ memory = AllocateCode(total_size);
+ if (memory == nullptr) {
+ return nullptr;
+ }
+ code_ptr = memory + header_size;
+
+ std::copy(code, code + code_size, code_ptr);
+ method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ new (method_header) OatQuickMethodHeader(
+ (stack_map != nullptr) ? code_ptr - stack_map : 0u,
+ code_size);
+ // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
+ // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
+ // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
+ // 6P) stop being supported or their kernels are fixed.
+ //
+ // For reference, this behavior is caused by this commit:
+ // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
+ FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
+ reinterpret_cast<char*>(code_ptr + code_size));
+ DCHECK(!Runtime::Current()->IsAotCompiler());
+ if (has_should_deoptimize_flag) {
+ method_header->SetHasShouldDeoptimizeFlag();
}
number_of_compilations_++;
}
// We need to update the entry point in the runnable state for the instrumentation.
{
- // Need cha_lock_ for checking all single-implementation flags and register
- // dependencies.
+ // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
+ // compiled code is considered invalidated by some class linking, but below we still make the
+ // compiled code valid for the method. Need cha_lock_ for checking all single-implementation
+ // flags and register dependencies.
MutexLock cha_mu(self, *Locks::cha_lock_);
bool single_impl_still_valid = true;
for (ArtMethod* single_impl : cha_single_implementation_list) {
@@ -826,16 +839,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
single_impl, method, method_header);
}
- if (!method->IsNative()) {
- // We need to do this before grabbing the lock_ because it needs to be able to see the string
- // InternTable. Native methods do not have roots.
- DCheckRootsAreValid(roots);
- }
-
- // The following needs to be guarded by cha_lock_ also. Otherwise it's
- // possible that the compiled code is considered invalidated by some class linking,
- // but below we still make the compiled code valid for the method.
- MutexLock mu(self, lock_);
if (UNLIKELY(method->IsNative())) {
auto it = jni_stubs_map_.find(JniStubKey(method));
DCHECK(it != jni_stubs_map_.end())
@@ -867,11 +870,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
method, method_header->GetEntryPoint());
}
}
- if (collection_in_progress_) {
- // We need to update the live bitmap if there is a GC to ensure it sees this new
- // code.
- GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
- }
VLOG(jit)
<< "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
<< ArtMethod::PrettyMethod(method) << "@" << method
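
For orientation in the rewritten block: AllocateCode reserves RoundUp(sizeof(OatQuickMethodHeader), alignment) bytes in front of the code, the code bytes are copied in after that gap, and placement-new writes the header immediately before the entry point, which is presumably how FromCodePointer recovers it later. A pointer-arithmetic sketch of that assumed layout; the struct and its fields are placeholders:

    #include <cstdint>

    struct HeaderSketch {        // placeholder for OatQuickMethodHeader
      uint32_t vmap_table_offset_;
      uint32_t code_size_;
    };

    // Assumed layout: [alignment padding][header][code ...]
    //                                            ^ code_ptr (the entry point)
    inline HeaderSketch* FromCodePointerSketch(uint8_t* code_ptr) {
      return reinterpret_cast<HeaderSketch*>(code_ptr) - 1;   // step back one header
    }
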
@@ -1085,14 +1083,14 @@ size_t JitCodeCache::ReserveData(Thread* self,
}
}
-class MarkCodeVisitor FINAL : public StackVisitor {
+class MarkCodeVisitor final : public StackVisitor {
public:
MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
code_cache_(code_cache_in),
bitmap_(code_cache_->GetLiveBitmap()) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
if (method_header == nullptr) {
return true;
@@ -1110,12 +1108,12 @@ class MarkCodeVisitor FINAL : public StackVisitor {
CodeCacheBitmap* const bitmap_;
};
-class MarkCodeClosure FINAL : public Closure {
+class MarkCodeClosure final : public Closure {
public:
MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
: code_cache_(code_cache), barrier_(barrier) {}
- void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK(thread == Thread::Current() || thread->IsSuspended());
MarkCodeVisitor visitor(thread, code_cache_);
@@ -1232,8 +1230,8 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
number_of_collections_++;
live_bitmap_.reset(CodeCacheBitmap::Create(
"code-cache-bitmap",
- reinterpret_cast<uintptr_t>(code_map_->Begin()),
- reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
+ reinterpret_cast<uintptr_t>(code_map_.Begin()),
+ reinterpret_cast<uintptr_t>(code_map_.Begin() + current_capacity_ / 2)));
collection_in_progress_ = true;
}
}
@@ -1605,12 +1603,12 @@ void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_S
if (code_mspace_ == mspace) {
size_t result = code_end_;
code_end_ += increment;
- return reinterpret_cast<void*>(result + code_map_->Begin());
+ return reinterpret_cast<void*>(result + code_map_.Begin());
} else {
DCHECK_EQ(data_mspace_, mspace);
size_t result = data_end_;
data_end_ += increment;
- return reinterpret_cast<void*>(result + data_map_->Begin());
+ return reinterpret_cast<void*>(result + data_map_.Begin());
}
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 29f9c9cf43..a4a0f8f4e8 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -28,6 +28,7 @@
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
#include "base/safe_map.h"
@@ -39,7 +40,6 @@ class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
-class MemMap;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
@@ -279,8 +279,8 @@ class JitCodeCache {
private:
// Take ownership of maps.
- JitCodeCache(MemMap* code_map,
- MemMap* data_map,
+ JitCodeCache(MemMap&& code_map,
+ MemMap&& data_map,
size_t initial_code_capacity,
size_t initial_data_capacity,
size_t max_capacity,
@@ -314,6 +314,12 @@ class JitCodeCache {
REQUIRES(lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
+ // The non-mutator lock version should be used if possible. This method will release then
+ // re-acquire the mutator lock.
+ void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
+ REQUIRES(lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
+
// If a collection is in progress, wait for it to finish. Return
// whether the thread actually waited.
bool WaitForPotentialCollectionToComplete(Thread* self)
@@ -390,9 +396,9 @@ class JitCodeCache {
// Whether there is a code cache collection in progress.
bool collection_in_progress_ GUARDED_BY(lock_);
// Mem map which holds code.
- std::unique_ptr<MemMap> code_map_;
+ MemMap code_map_;
// Mem map which holds data (stack maps and profiling info).
- std::unique_ptr<MemMap> data_map_;
+ MemMap data_map_;
// The opaque mspace for allocating code.
void* code_mspace_ GUARDED_BY(lock_);
// The opaque mspace for allocating data.
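The new constructor above takes MemMap&& and stores plain MemMap members, so ownership transfers by move rather than by handing over raw pointers. A minimal sketch of that pattern, with simplified stand-in types rather than the real JitCodeCache:

#include <utility>

struct Map { /* move-only mapping resource, as sketched earlier */ };

class Cache {
 public:
  // "Take ownership of maps": rvalue-reference parameters make callers
  // std::move their maps in, and the members own them from then on.
  Cache(Map&& code_map, Map&& data_map)
      : code_map_(std::move(code_map)),
        data_map_(std::move(data_map)) {}

 private:
  Map code_map_;
  Map data_map_;
};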
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 6ccda8b0bb..d9ef922390 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -255,7 +255,7 @@ class GetClassLoadersVisitor : public ClassLoaderVisitor {
class_loaders_(class_loaders) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
class_loaders_->push_back(hs_->NewHandle(class_loader));
}
diff --git a/runtime/jit/profiling_info_test.cc b/runtime/jit/profiling_info_test.cc
index 8424610cf8..f695c8fd9b 100644
--- a/runtime/jit/profiling_info_test.cc
+++ b/runtime/jit/profiling_info_test.cc
@@ -40,7 +40,7 @@ static constexpr size_t kMaxMethodIds = 65535;
class ProfileCompilationInfoTest : public CommonRuntimeTest {
public:
- void PostRuntimeCreate() OVERRIDE {
+ void PostRuntimeCreate() override {
allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
}
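A large share of this change is mechanical: the OVERRIDE and FINAL macros, kept since ART's pre-C++11 days as shims that simply expand to the keywords, are replaced by plain override and final. The semantics are unchanged, as this small equivalent shows:

// #define OVERRIDE override   // what the old macros expanded to
// #define FINAL final

struct Base {
  virtual ~Base() = default;
  virtual void Run() = 0;
};

struct Impl final : Base {    // was: struct Impl FINAL : Base
  void Run() override {}      // was: void Run() OVERRIDE {}
};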
diff --git a/runtime/jni/java_vm_ext_test.cc b/runtime/jni/java_vm_ext_test.cc
index 74e4a30905..4049c6e699 100644
--- a/runtime/jni/java_vm_ext_test.cc
+++ b/runtime/jni/java_vm_ext_test.cc
@@ -34,7 +34,7 @@ class JavaVmExtTest : public CommonRuntimeTest {
}
- virtual void TearDown() OVERRIDE {
+ void TearDown() override {
CommonRuntimeTest::TearDown();
}
@@ -137,7 +137,7 @@ TEST_F(JavaVmExtTest, DetachCurrentThread) {
class JavaVmExtStackTraceTest : public JavaVmExtTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
options->emplace_back("-XX:GlobalRefAllocStackTraceLimit=50000", nullptr);
}
};
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index a25049e681..3040b905ec 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -84,7 +84,7 @@ class JniInternalTest : public CommonCompilerTest {
}
}
- virtual void TearDown() OVERRIDE {
+ void TearDown() override {
CleanUpJniEnv();
CommonCompilerTest::TearDown();
}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index bc72517a06..51dc1a4627 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -32,12 +32,12 @@
#include "dex_cache.h"
#include "gc/heap-inl.h"
#include "iftable.h"
-#include "subtype_check.h"
#include "object-inl.h"
#include "object_array.h"
#include "read_barrier-inl.h"
#include "runtime.h"
#include "string.h"
+#include "subtype_check.h"
namespace art {
namespace mirror {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index c3e167c306..811ee515d3 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -63,7 +63,7 @@ using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
// C++ mirror of java.lang.Class
-class MANAGED Class FINAL : public Object {
+class MANAGED Class final : public Object {
public:
// A magic value for reference_instance_offsets_. Ignore the bits and walk the super chain when
// this is the value.
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 87f4f0ab7b..8401b66ee4 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -141,7 +141,7 @@ using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
// C++ mirror of java.lang.DexCache.
-class MANAGED DexCache FINAL : public Object {
+class MANAGED DexCache final : public Object {
public:
// Size of java.lang.DexCache.class.
static uint32_t ClassSize(PointerSize pointer_size);
@@ -157,12 +157,12 @@ class MANAGED DexCache FINAL : public Object {
"String dex cache size is not a power of 2.");
// Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
- static constexpr size_t kDexCacheFieldCacheSize = 512;
+ static constexpr size_t kDexCacheFieldCacheSize = 1024;
static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
"Field dex cache size is not a power of 2.");
// Size of method dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
- static constexpr size_t kDexCacheMethodCacheSize = 512;
+ static constexpr size_t kDexCacheMethodCacheSize = 1024;
static_assert(IsPowerOfTwo(kDexCacheMethodCacheSize),
"Method dex cache size is not a power of 2.");
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 7a70cae1ef..e9e7ca8688 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -34,7 +34,7 @@ class DexCacheTest : public CommonRuntimeTest {};
class DexCacheMethodHandlesTest : public DexCacheTest {
protected:
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonRuntimeTest::SetUpRuntimeOptions(options);
}
};
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index d72c7866c5..9e3c9af86d 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -23,7 +23,7 @@
namespace art {
namespace mirror {
-class MANAGED IfTable FINAL : public ObjectArray<Object> {
+class MANAGED IfTable final : public ObjectArray<Object> {
public:
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
diff --git a/runtime/mirror/proxy.h b/runtime/mirror/proxy.h
index db511d6425..7775de35d2 100644
--- a/runtime/mirror/proxy.h
+++ b/runtime/mirror/proxy.h
@@ -26,7 +26,7 @@ struct ProxyOffsets;
namespace mirror {
// C++ mirror of java.lang.reflect.Proxy.
-class MANAGED Proxy FINAL : public Object {
+class MANAGED Proxy final : public Object {
private:
HeapReference<Object> h_;
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 55a2ef0b49..37ac57587f 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -27,7 +27,7 @@ struct StackTraceElementOffsets;
namespace mirror {
// C++ mirror of java.lang.StackTraceElement
-class MANAGED StackTraceElement FINAL : public Object {
+class MANAGED StackTraceElement final : public Object {
public:
String* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 0e2fc903b5..d08717ca82 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -40,7 +40,7 @@ enum class StringCompressionFlag : uint32_t {
};
// C++ mirror of java.lang.String
-class MANAGED String FINAL : public Object {
+class MANAGED String final : public Object {
public:
// Size of java.lang.String.class.
static uint32_t ClassSize(PointerSize pointer_size);
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index 56c953b816..864e1eab73 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -353,7 +353,7 @@ inline void StoreResult(ObjPtr<Object> value, JValue* result)
//
template <typename T>
-class JValueByteSwapper FINAL {
+class JValueByteSwapper final {
public:
static void ByteSwap(JValue* value);
static void MaybeByteSwap(bool byte_swap, JValue* value) {
@@ -392,7 +392,7 @@ class AtomicGetAccessor : public Object::Accessor<T> {
public:
explicit AtomicGetAccessor(JValue* result) : result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
StoreResult(atom->load(MO), result_);
}
@@ -406,7 +406,7 @@ class AtomicSetAccessor : public Object::Accessor<T> {
public:
explicit AtomicSetAccessor(T new_value) : new_value_(new_value) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
atom->store(new_value_, MO);
}
@@ -431,7 +431,7 @@ class AtomicStrongCompareAndSetAccessor : public Object::Accessor<T> {
AtomicStrongCompareAndSetAccessor(T expected_value, T desired_value, JValue* result)
: expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
bool success = atom->compare_exchange_strong(expected_value_, desired_value_, MOS, MOF);
StoreResult(success ? JNI_TRUE : JNI_FALSE, result_);
@@ -453,7 +453,7 @@ class AtomicStrongCompareAndExchangeAccessor : public Object::Accessor<T> {
AtomicStrongCompareAndExchangeAccessor(T expected_value, T desired_value, JValue* result)
: expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
atom->compare_exchange_strong(expected_value_, desired_value_, MOS, MOF);
StoreResult(expected_value_, result_);
@@ -475,7 +475,7 @@ class AtomicWeakCompareAndSetAccessor : public Object::Accessor<T> {
AtomicWeakCompareAndSetAccessor(T expected_value, T desired_value, JValue* result)
: expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
bool success = atom->compare_exchange_weak(expected_value_, desired_value_, MOS, MOF);
StoreResult(success ? JNI_TRUE : JNI_FALSE, result_);
@@ -496,7 +496,7 @@ class AtomicGetAndSetAccessor : public Object::Accessor<T> {
public:
AtomicGetAndSetAccessor(T new_value, JValue* result) : new_value_(new_value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->exchange(new_value_, MO);
StoreResult(old_value, result_);
@@ -540,7 +540,7 @@ class AtomicGetAndAddAccessor : public Object::Accessor<T> {
public:
AtomicGetAndAddAccessor(T addend, JValue* result) : addend_(addend), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
constexpr bool kIsFloatingPoint = std::is_floating_point<T>::value;
T old_value = AtomicGetAndAddOperator<T, kIsFloatingPoint, MO>::Apply(addr, addend_);
StoreResult(old_value, result_);
@@ -562,7 +562,7 @@ class AtomicGetAndAddWithByteSwapAccessor : public Object::Accessor<T> {
public:
AtomicGetAndAddWithByteSwapAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* const atom = reinterpret_cast<std::atomic<T>*>(addr);
T current_value = atom->load(std::memory_order_relaxed);
T sum;
@@ -591,7 +591,7 @@ class AtomicGetAndBitwiseOrAccessor : public Object::Accessor<T> {
public:
AtomicGetAndBitwiseOrAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->fetch_or(value_, MO);
StoreResult(old_value, result_);
@@ -610,7 +610,7 @@ class AtomicGetAndBitwiseAndAccessor : public Object::Accessor<T> {
public:
AtomicGetAndBitwiseAndAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->fetch_and(value_, MO);
StoreResult(old_value, result_);
@@ -630,7 +630,7 @@ class AtomicGetAndBitwiseXorAccessor : public Object::Accessor<T> {
public:
AtomicGetAndBitwiseXorAccessor(T value, JValue* result) : value_(value), result_(result) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
T old_value = atom->fetch_xor(value_, MO);
StoreResult(old_value, result_);
@@ -679,7 +679,7 @@ class TypeAdaptorAccessor : public Object::Accessor<T> {
explicit TypeAdaptorAccessor(Object::Accessor<U>* inner_accessor)
: inner_accessor_(inner_accessor) {}
- void Access(T* addr) OVERRIDE {
+ void Access(T* addr) override {
static_assert(sizeof(T) == sizeof(U), "bad conversion");
inner_accessor_->Access(reinterpret_cast<U*>(addr));
}
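All the VarHandle accessors above share one shape: a small final class whose virtual Access(T*) reinterprets the raw field address as std::atomic<T> and performs a single atomic operation, parameterized by memory order. A compilable sketch of that pattern with stand-in names (the reinterpret_cast mirrors what the code above does):

#include <atomic>
#include <cstdio>

template <typename T>
struct Accessor {
  virtual ~Accessor() = default;
  virtual void Access(T* addr) = 0;
};

template <typename T, std::memory_order kMo = std::memory_order_seq_cst>
struct GetAndAdd final : Accessor<T> {
  explicit GetAndAdd(T addend) : addend_(addend) {}
  void Access(T* addr) override {
    auto* atom = reinterpret_cast<std::atomic<T>*>(addr);  // as in var_handle.cc
    old_value_ = atom->fetch_add(addend_, kMo);
  }
  T addend_;
  T old_value_{};
};

int main() {
  alignas(std::atomic<int>) int field = 41;
  GetAndAdd<int> op(1);
  op.Access(&field);
  std::printf("old=%d new=%d\n", op.old_value_, field);  // old=41 new=42
}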
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index d47bc0d12e..72eced2333 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -184,7 +184,7 @@ bool Monitor::Install(Thread* self) {
if (locking_method_ != nullptr && UNLIKELY(locking_method_->IsProxyMethod())) {
// Grab another frame. Proxy methods are not helpful for lock profiling. This should be rare
// enough that it's OK to walk the stack twice.
- struct NextMethodVisitor FINAL : public StackVisitor {
+ struct NextMethodVisitor final : public StackVisitor {
explicit NextMethodVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread,
nullptr,
@@ -193,7 +193,7 @@ bool Monitor::Install(Thread* self) {
count_(0),
method_(nullptr),
dex_pc_(0) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
// Continue if this is a runtime method.
@@ -271,7 +271,7 @@ void Monitor::SetObject(mirror::Object* object) {
// Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
-struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
+struct NthCallerWithDexPcVisitor final : public StackVisitor {
explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -279,7 +279,7 @@ struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
dex_pc_(0),
current_frame_number_(0),
wanted_frame_number_(frame) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m == nullptr || m->IsRuntimeMethod()) {
// Runtime method, upcall, or resolution issue. Skip.
@@ -514,7 +514,7 @@ void Monitor::Lock(Thread* self) {
if (should_dump_stacks) {
// Very long contention. Dump stacks.
struct CollectStackTrace : public Closure {
- void Run(art::Thread* thread) OVERRIDE
+ void Run(art::Thread* thread) override
REQUIRES_SHARED(art::Locks::mutator_lock_) {
thread->DumpJavaStack(oss);
}
@@ -1574,7 +1574,7 @@ class MonitorDeflateVisitor : public IsMarkedVisitor {
public:
MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
- virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+ mirror::Object* IsMarked(mirror::Object* object) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Monitor::Deflate(self_, object)) {
DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
diff --git a/runtime/monitor_objects_stack_visitor.h b/runtime/monitor_objects_stack_visitor.h
index 5c962c3b26..c943402126 100644
--- a/runtime/monitor_objects_stack_visitor.h
+++ b/runtime/monitor_objects_stack_visitor.h
@@ -54,7 +54,7 @@ class MonitorObjectsStackVisitor : public StackVisitor {
kEndStackWalk,
};
- bool VisitFrame() FINAL REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() final REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
return true;
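Here VisitFrame() is declared final rather than just override: no subclass may override it further, which also lets the compiler devirtualize calls made through the concrete type. In miniature (stand-in names):

struct Walker {
  virtual ~Walker() = default;
  virtual bool VisitFrame() = 0;
};

struct MonitorWalker : Walker {
  bool VisitFrame() final { return true; }  // no subclass may override this
};

// With the concrete type known, the compiler can call (or inline)
// MonitorWalker::VisitFrame directly instead of going through the vtable.
bool WalkOnce(MonitorWalker& w) { return w.VisitFrame(); }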
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index bff8d7678c..c88748ffb8 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -34,7 +34,7 @@ namespace art {
class MonitorTest : public CommonRuntimeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
// Use a smaller heap
SetUpRuntimeOptionsForFillHeap(options);
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index b598df3eba..71fabd0250 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -163,33 +163,33 @@ class NullableScopedUtfChars {
void operator=(const NullableScopedUtfChars&);
};
-static std::unique_ptr<MemMap> AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
+static MemMap AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
if (end <= start) {
ScopedObjectAccess soa(env);
ThrowWrappedIOException("Bad range");
- return nullptr;
+ return MemMap::Invalid();
}
std::string error_message;
size_t length = static_cast<size_t>(end - start);
- std::unique_ptr<MemMap> dex_mem_map(MemMap::MapAnonymous("DEX data",
- nullptr,
- length,
- PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
- /* reuse */ false,
- &error_message));
- if (dex_mem_map == nullptr) {
+ MemMap dex_mem_map = MemMap::MapAnonymous("DEX data",
+ /* addr */ nullptr,
+ length,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_message);
+ if (!dex_mem_map.IsValid()) {
ScopedObjectAccess soa(env);
ThrowWrappedIOException("%s", error_message.c_str());
+ return MemMap::Invalid();
}
return dex_mem_map;
}
-static const DexFile* CreateDexFile(JNIEnv* env, std::unique_ptr<MemMap> dex_mem_map) {
+static const DexFile* CreateDexFile(JNIEnv* env, MemMap&& dex_mem_map) {
std::string location = StringPrintf("Anonymous-DexFile@%p-%p",
- dex_mem_map->Begin(),
- dex_mem_map->End());
+ dex_mem_map.Begin(),
+ dex_mem_map.End());
std::string error_message;
const ArtDexFileLoader dex_file_loader;
std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
@@ -213,7 +213,7 @@ static const DexFile* CreateDexFile(JNIEnv* env, std::unique_ptr<MemMap> dex_mem
return dex_file.release();
}
-static jobject CreateSingleDexFileCookie(JNIEnv* env, std::unique_ptr<MemMap> data) {
+static jobject CreateSingleDexFileCookie(JNIEnv* env, MemMap&& data) {
std::unique_ptr<const DexFile> dex_file(CreateDexFile(env, std::move(data)));
if (dex_file.get() == nullptr) {
DCHECK(env->ExceptionCheck());
@@ -236,14 +236,14 @@ static jobject DexFile_createCookieWithDirectBuffer(JNIEnv* env,
return nullptr;
}
- std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
- if (dex_mem_map == nullptr) {
+ MemMap dex_mem_map = AllocateDexMemoryMap(env, start, end);
+ if (!dex_mem_map.IsValid()) {
DCHECK(Thread::Current()->IsExceptionPending());
return nullptr;
}
size_t length = static_cast<size_t>(end - start);
- memcpy(dex_mem_map->Begin(), base_address, length);
+ memcpy(dex_mem_map.Begin(), base_address, length);
return CreateSingleDexFileCookie(env, std::move(dex_mem_map));
}
@@ -252,13 +252,13 @@ static jobject DexFile_createCookieWithArray(JNIEnv* env,
jbyteArray buffer,
jint start,
jint end) {
- std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
- if (dex_mem_map == nullptr) {
+ MemMap dex_mem_map = AllocateDexMemoryMap(env, start, end);
+ if (!dex_mem_map.IsValid()) {
DCHECK(Thread::Current()->IsExceptionPending());
return nullptr;
}
- auto destination = reinterpret_cast<jbyte*>(dex_mem_map.get()->Begin());
+ auto destination = reinterpret_cast<jbyte*>(dex_mem_map.Begin());
env->GetByteArrayRegion(buffer, start, end - start, destination);
return CreateSingleDexFileCookie(env, std::move(dex_mem_map));
}
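The through-line of these DexFile changes: AllocateDexMemoryMap() now returns MemMap by value and signals failure with MemMap::Invalid()/IsValid() instead of a null std::unique_ptr. A self-contained sketch of that idiom, using a hypothetical Buffer type in place of art::MemMap:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

class Buffer {
 public:
  static Buffer Invalid() { return Buffer(); }
  static Buffer Allocate(std::size_t size, std::string* error_msg) {
    if (size == 0) {
      *error_msg = "Bad range";
      return Invalid();  // was: 'return nullptr;' with unique_ptr<MemMap>
    }
    Buffer b;
    b.bytes_.resize(size);
    return b;
  }
  bool IsValid() const { return !bytes_.empty(); }
  std::uint8_t* Begin() { return bytes_.data(); }
 private:
  std::vector<std::uint8_t> bytes_;
};

int main() {
  std::string error;
  Buffer map = Buffer::Allocate(16, &error);
  if (!map.IsValid()) {
    std::fprintf(stderr, "%s\n", error.c_str());
    return 1;
  }
  map.Begin()[0] = 0xCA;  // fill the buffer, then let RAII release it
  return 0;
}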
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 7ac4086362..6f98a6d381 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -207,7 +207,7 @@ static void VMDebug_printLoadedClasses(JNIEnv* env, jclass, jint flags) {
public:
explicit DumpClassVisitor(int dump_flags) : flags_(dump_flags) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
klass->DumpClass(LOG_STREAM(ERROR), flags_);
return true;
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 9b3fd16ac0..0e619407e5 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -332,7 +332,7 @@ class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::String> string = root->AsString();
table_->operator[](string->ToModifiedUtf8()) = string;
}
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 5b47eaca86..72dae4791c 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -120,9 +120,9 @@ static void DoCollectNonDebuggableCallback(Thread* thread, void* data)
: StackVisitor(t, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
class_set_(class_set) {}
- ~NonDebuggableStacksVisitor() OVERRIDE {}
+ ~NonDebuggableStacksVisitor() override {}
- bool VisitFrame() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true;
}
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index 9c777cc277..496a6f3d09 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -21,22 +21,22 @@
namespace art {
-class NoopCompilerCallbacks FINAL : public CompilerCallbacks {
+class NoopCompilerCallbacks final : public CompilerCallbacks {
public:
NoopCompilerCallbacks() : CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp) {}
~NoopCompilerCallbacks() {}
- void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {
+ void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {
}
- void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
+ void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
// This is only used by compilers which need to be able to run without relocation even when it
// would normally be enabled. For example, the patchoat executable and dex2oat --image both need
// to disable relocation since both deal with writing out the images directly.

- bool IsRelocationPossible() OVERRIDE { return false; }
+ bool IsRelocationPossible() override { return false; }
- verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return nullptr; }
+ verifier::VerifierDeps* GetVerifierDeps() const override { return nullptr; }
private:
DISALLOW_COPY_AND_ASSIGN(NoopCompilerCallbacks);
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 58e16ed1b7..1c8de8fa79 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -102,12 +102,12 @@ class OatFileBase : public OatFile {
const std::string& elf_filename,
const std::string& location,
uint8_t* requested_base,
- uint8_t* oat_file_begin,
bool writable,
bool executable,
bool low_4gb,
const char* abs_dex_location,
- std::string* error_msg);
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg);
template <typename kOatFileBaseSubType>
static OatFileBase* OpenOatFile(int zip_fd,
@@ -116,12 +116,12 @@ class OatFileBase : public OatFile {
const std::string& vdex_filename,
const std::string& oat_filename,
uint8_t* requested_base,
- uint8_t* oat_file_begin,
bool writable,
bool executable,
bool low_4gb,
const char* abs_dex_location,
- std::string* error_msg);
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg);
protected:
OatFileBase(const std::string& filename, bool executable) : OatFile(filename, executable) {}
@@ -143,18 +143,18 @@ class OatFileBase : public OatFile {
std::string* error_msg);
virtual bool Load(const std::string& elf_filename,
- uint8_t* oat_file_begin,
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) = 0;
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg) = 0;
virtual bool Load(int oat_fd,
- uint8_t* oat_file_begin,
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) = 0;
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg) = 0;
bool ComputeFields(uint8_t* requested_base,
const std::string& file_path,
@@ -188,21 +188,21 @@ OatFileBase* OatFileBase::OpenOatFile(int zip_fd,
const std::string& elf_filename,
const std::string& location,
uint8_t* requested_base,
- uint8_t* oat_file_begin,
bool writable,
bool executable,
bool low_4gb,
const char* abs_dex_location,
- std::string* error_msg) {
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
std::unique_ptr<OatFileBase> ret(new kOatFileBaseSubType(location, executable));
ret->PreLoad();
if (!ret->Load(elf_filename,
- oat_file_begin,
writable,
executable,
low_4gb,
+ reservation,
error_msg)) {
return nullptr;
}
@@ -231,19 +231,19 @@ OatFileBase* OatFileBase::OpenOatFile(int zip_fd,
const std::string& vdex_location,
const std::string& oat_location,
uint8_t* requested_base,
- uint8_t* oat_file_begin,
bool writable,
bool executable,
bool low_4gb,
const char* abs_dex_location,
- std::string* error_msg) {
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
std::unique_ptr<OatFileBase> ret(new kOatFileBaseSubType(oat_location, executable));
if (!ret->Load(oat_fd,
- oat_file_begin,
writable,
executable,
low_4gb,
+ reservation,
error_msg)) {
return nullptr;
}
@@ -889,7 +889,7 @@ bool OatFileBase::Setup(int zip_fd, const char* abs_dex_location, std::string* e
// OatFile via dlopen //
////////////////////////
-class DlOpenOatFile FINAL : public OatFileBase {
+class DlOpenOatFile final : public OatFileBase {
public:
DlOpenOatFile(const std::string& filename, bool executable)
: OatFileBase(filename, executable),
@@ -911,7 +911,7 @@ class DlOpenOatFile FINAL : public OatFileBase {
protected:
const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
const uint8_t* ptr =
reinterpret_cast<const uint8_t*>(dlsym(dlopen_handle_, symbol_name.c_str()));
if (ptr == nullptr) {
@@ -920,26 +920,31 @@ class DlOpenOatFile FINAL : public OatFileBase {
return ptr;
}
- void PreLoad() OVERRIDE;
+ void PreLoad() override;
bool Load(const std::string& elf_filename,
- uint8_t* oat_file_begin,
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) OVERRIDE;
-
- bool Load(int, uint8_t*, bool, bool, bool, std::string*) {
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg) override;
+
+ bool Load(int oat_fd ATTRIBUTE_UNUSED,
+ bool writable ATTRIBUTE_UNUSED,
+ bool executable ATTRIBUTE_UNUSED,
+ bool low_4gb ATTRIBUTE_UNUSED,
+ /*inout*/MemMap* reservation ATTRIBUTE_UNUSED,
+ /*out*/std::string* error_msg ATTRIBUTE_UNUSED) override {
return false;
}
// Ask the linker where it mmaped the file and notify our mmap wrapper of the regions.
- void PreSetup(const std::string& elf_filename) OVERRIDE;
+ void PreSetup(const std::string& elf_filename) override;
private:
bool Dlopen(const std::string& elf_filename,
- uint8_t* oat_file_begin,
- std::string* error_msg);
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg);
// On the host, if the same library is loaded again with dlopen the same
// file handle is returned. This differs from the behavior of dlopen on the
@@ -952,12 +957,13 @@ class DlOpenOatFile FINAL : public OatFileBase {
// Guarded by host_dlopen_handles_lock_;
static std::unordered_set<void*> host_dlopen_handles_;
+ // Reservation and dummy memory map objects corresponding to the regions mapped by dlopen.
+ // Note: Must be destroyed after dlclose() as it can hold the owning reservation.
+ std::vector<MemMap> dlopen_mmaps_;
+
// dlopen handle during runtime.
void* dlopen_handle_; // TODO: Unique_ptr with custom deleter.
- // Dummy memory map objects corresponding to the regions mapped by dlopen.
- std::vector<std::unique_ptr<MemMap>> dlopen_mmaps_;
-
// The number of shared objects the linker told us about before loading. Used to
// (optimistically) optimize the PreSetup stage (see comment there).
size_t shared_objects_before_;
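The moved dlopen_mmaps_ member carries a destruction-order note. In C++ the destructor body runs first and members are then destroyed in reverse order of declaration, so hoisting a member to the top of the class makes it the last thing destroyed, after both the destructor body (where a dlclose() call would live) and every later-declared member. A minimal demonstration:

#include <cstdio>

struct Tracer {
  explicit Tracer(const char* n) : name(n) {}
  ~Tracer() { std::printf("destroying %s\n", name); }
  const char* name;
};

struct Holder {
  ~Holder() { std::printf("destructor body runs first\n"); }
  Tracer first{"first"};    // declared first => destroyed last
  Tracer second{"second"};  // declared last  => destroyed first
};

int main() { Holder h; }
// Prints: destructor body runs first / destroying second / destroying first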
@@ -975,9 +981,9 @@ void DlOpenOatFile::PreLoad() {
#else
// Count the entries in dl_iterate_phdr we get at this point in time.
struct dl_iterate_context {
- static int callback(struct dl_phdr_info *info ATTRIBUTE_UNUSED,
+ static int callback(dl_phdr_info* info ATTRIBUTE_UNUSED,
size_t size ATTRIBUTE_UNUSED,
- void *data) {
+ void* data) {
reinterpret_cast<dl_iterate_context*>(data)->count++;
return 0; // Continue iteration.
}
@@ -990,11 +996,11 @@ void DlOpenOatFile::PreLoad() {
}
bool DlOpenOatFile::Load(const std::string& elf_filename,
- uint8_t* oat_file_begin,
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) {
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg) {
// Use dlopen only when flagged to do so, and when it's OK to load things executable.
// TODO: Also try when not executable? The issue here could be re-mapping as writable (as
// !executable is a sign that we may want to patch), which may not be allowed for
@@ -1027,19 +1033,19 @@ bool DlOpenOatFile::Load(const std::string& elf_filename,
}
}
- bool success = Dlopen(elf_filename, oat_file_begin, error_msg);
+ bool success = Dlopen(elf_filename, reservation, error_msg);
DCHECK(dlopen_handle_ != nullptr || !success);
return success;
}
bool DlOpenOatFile::Dlopen(const std::string& elf_filename,
- uint8_t* oat_file_begin,
- std::string* error_msg) {
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
#ifdef __APPLE__
// The dl_iterate_phdr syscall is missing. There is a similar API on OSX,
// but let's fall back to the custom loading code for the time being.
- UNUSED(elf_filename, oat_file_begin);
+ UNUSED(elf_filename, reservation);
*error_msg = "Dlopen unsupported on Mac.";
return false;
#else
@@ -1056,15 +1062,85 @@ bool DlOpenOatFile::Dlopen(const std::string& elf_filename,
// times).
ANDROID_DLEXT_FORCE_FIXED_VADDR; // Take a non-zero vaddr as absolute
// (non-pic boot image).
- if (oat_file_begin != nullptr) {
- extinfo.flags |= ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS;
- extinfo.reserved_addr = oat_file_begin; // Use the requested addr if vaddr = 0 (pic boot image).
- }
+ if (reservation != nullptr) {
+ if (!reservation->IsValid()) {
+ *error_msg = StringPrintf("Invalid reservation for %s", elf_filename.c_str());
+ return false;
+ }
+ extinfo.flags |= ANDROID_DLEXT_RESERVED_ADDRESS; // Use the reserved memory range.
+ extinfo.reserved_addr = reservation->Begin();
+ extinfo.reserved_size = reservation->Size();
+ }
dlopen_handle_ = android_dlopen_ext(absolute_path.get(), RTLD_NOW, &extinfo);
+ if (reservation != nullptr && dlopen_handle_ != nullptr) {
+ // Find used pages from the reservation.
+ struct dl_iterate_context {
+ static int callback(dl_phdr_info* info, size_t size ATTRIBUTE_UNUSED, void* data) {
+ auto* context = reinterpret_cast<dl_iterate_context*>(data);
+ static_assert(std::is_same<Elf32_Half, Elf64_Half>::value, "Half must match");
+ using Elf_Half = Elf64_Half;
+
+ // See whether this callback corresponds to the file which we have just loaded.
+ uint8_t* reservation_begin = context->reservation->Begin();
+ bool contained_in_reservation = false;
+ for (Elf_Half i = 0; i < info->dlpi_phnum; i++) {
+ if (info->dlpi_phdr[i].p_type == PT_LOAD) {
+ uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
+ info->dlpi_phdr[i].p_vaddr);
+ size_t memsz = info->dlpi_phdr[i].p_memsz;
+ size_t offset = static_cast<size_t>(vaddr - reservation_begin);
+ if (offset < context->reservation->Size()) {
+ contained_in_reservation = true;
+ DCHECK_LE(memsz, context->reservation->Size() - offset);
+ } else if (vaddr < reservation_begin) {
+ // Check that there's no overlap with the reservation.
+ DCHECK_LE(memsz, static_cast<size_t>(reservation_begin - vaddr));
+ }
+ break; // It is sufficient to check the first PT_LOAD header.
+ }
+ }
+
+ if (contained_in_reservation) {
+ for (Elf_Half i = 0; i < info->dlpi_phnum; i++) {
+ if (info->dlpi_phdr[i].p_type == PT_LOAD) {
+ uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
+ info->dlpi_phdr[i].p_vaddr);
+ size_t memsz = info->dlpi_phdr[i].p_memsz;
+ size_t offset = static_cast<size_t>(vaddr - reservation_begin);
+ DCHECK_LT(offset, context->reservation->Size());
+ DCHECK_LE(memsz, context->reservation->Size() - offset);
+ context->max_size = std::max(context->max_size, offset + memsz);
+ }
+ }
+
+ return 1; // Stop iteration and return 1 from dl_iterate_phdr.
+ }
+ return 0; // Continue iteration and return 0 from dl_iterate_phdr when finished.
+ }
+
+ const MemMap* const reservation;
+ size_t max_size = 0u;
+ };
+ dl_iterate_context context = { reservation };
+
+ if (dl_iterate_phdr(dl_iterate_context::callback, &context) == 0) {
+ LOG(FATAL) << "Could not find the shared object mmapped to the reservation.";
+ UNREACHABLE();
+ }
+
+ // Take ownership of the memory used by the shared object. dlopen() does not assume
+ // full ownership of this memory and dlclose() shall just remap it as zero pages with
+ // PROT_NONE. We need to unmap the memory when destroying this oat file.
+ dlopen_mmaps_.push_back(reservation->TakeReservedMemory(context.max_size));
+ }
#else
- UNUSED(oat_file_begin);
static_assert(!kIsTargetBuild || kIsTargetLinux || kIsTargetFuchsia,
"host_dlopen_handles_ will leak handles");
+ if (reservation != nullptr) {
+ *error_msg = StringPrintf("dlopen() into reserved memory is unsupported on host for '%s'.",
+ elf_filename.c_str());
+ return false;
+ }
MutexLock mu(Thread::Current(), *Locks::host_dlopen_handles_lock_);
dlopen_handle_ = dlopen(absolute_path.get(), RTLD_NOW);
if (dlopen_handle_ != nullptr) {
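The reservation bookkeeping above is built on dl_iterate_phdr(3), which walks every loaded ELF object and exposes its program headers. A standalone Linux example of the same API, independent of ART, that just counts PT_LOAD segments per object:

#include <link.h>
#include <cstdio>

static int Callback(dl_phdr_info* info, size_t size, void* data) {
  (void)size;
  int loads = 0;
  for (ElfW(Half) i = 0; i < info->dlpi_phnum; ++i) {
    if (info->dlpi_phdr[i].p_type == PT_LOAD) ++loads;
  }
  ++*static_cast<int*>(data);
  std::printf("%s: load bias %p, %d PT_LOAD segment(s)\n",
              info->dlpi_name, reinterpret_cast<void*>(info->dlpi_addr), loads);
  return 0;  // 0 continues iteration; a nonzero value stops it and is returned
}

int main() {
  int visited = 0;
  dl_iterate_phdr(Callback, &visited);
  std::printf("visited %d objects\n", visited);
}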
@@ -1092,8 +1168,11 @@ void DlOpenOatFile::PreSetup(const std::string& elf_filename) {
UNREACHABLE();
#else
struct dl_iterate_context {
- static int callback(struct dl_phdr_info *info, size_t /* size */, void *data) {
+ static int callback(dl_phdr_info* info, size_t size ATTRIBUTE_UNUSED, void* data) {
auto* context = reinterpret_cast<dl_iterate_context*>(data);
+ static_assert(std::is_same<Elf32_Half, Elf64_Half>::value, "Half must match");
+ using Elf_Half = Elf64_Half;
+
context->shared_objects_seen++;
if (context->shared_objects_seen < context->shared_objects_before) {
// We haven't been called yet for anything we haven't seen before. Just continue.
@@ -1104,7 +1183,7 @@ void DlOpenOatFile::PreSetup(const std::string& elf_filename) {
// See whether this callback corresponds to the file which we have just loaded.
bool contains_begin = false;
- for (int i = 0; i < info->dlpi_phnum; i++) {
+ for (Elf_Half i = 0; i < info->dlpi_phnum; i++) {
if (info->dlpi_phdr[i].p_type == PT_LOAD) {
uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
info->dlpi_phdr[i].p_vaddr);
@@ -1117,13 +1196,13 @@ void DlOpenOatFile::PreSetup(const std::string& elf_filename) {
}
// Add dummy mmaps for this file.
if (contains_begin) {
- for (int i = 0; i < info->dlpi_phnum; i++) {
+ for (Elf_Half i = 0; i < info->dlpi_phnum; i++) {
if (info->dlpi_phdr[i].p_type == PT_LOAD) {
uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
info->dlpi_phdr[i].p_vaddr);
size_t memsz = info->dlpi_phdr[i].p_memsz;
- MemMap* mmap = MemMap::MapDummy(info->dlpi_name, vaddr, memsz);
- context->dlopen_mmaps_->push_back(std::unique_ptr<MemMap>(mmap));
+ MemMap mmap = MemMap::MapDummy(info->dlpi_name, vaddr, memsz);
+ context->dlopen_mmaps_->push_back(std::move(mmap));
}
}
return 1; // Stop iteration and return 1 from dl_iterate_phdr.
@@ -1131,7 +1210,7 @@ void DlOpenOatFile::PreSetup(const std::string& elf_filename) {
return 0; // Continue iteration and return 0 from dl_iterate_phdr when finished.
}
const uint8_t* const begin_;
- std::vector<std::unique_ptr<MemMap>>* const dlopen_mmaps_;
+ std::vector<MemMap>* const dlopen_mmaps_;
const size_t shared_objects_before;
size_t shared_objects_seen;
};
@@ -1156,20 +1235,19 @@ void DlOpenOatFile::PreSetup(const std::string& elf_filename) {
// OatFile via our own ElfFile implementation //
////////////////////////////////////////////////
-class ElfOatFile FINAL : public OatFileBase {
+class ElfOatFile final : public OatFileBase {
public:
ElfOatFile(const std::string& filename, bool executable) : OatFileBase(filename, executable) {}
static ElfOatFile* OpenElfFile(int zip_fd,
File* file,
const std::string& location,
- uint8_t* requested_base,
- uint8_t* oat_file_begin, // Override base if not null
bool writable,
bool executable,
bool low_4gb,
const char* abs_dex_location,
- std::string* error_msg);
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg);
bool InitializeFromElfFile(int zip_fd,
ElfFile* elf_file,
@@ -1179,7 +1257,7 @@ class ElfOatFile FINAL : public OatFileBase {
protected:
const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name,
- std::string* error_msg) const OVERRIDE {
+ std::string* error_msg) const override {
const uint8_t* ptr = elf_file_->FindDynamicSymbolAddress(symbol_name);
if (ptr == nullptr) {
*error_msg = "(Internal implementation could not find symbol)";
@@ -1187,33 +1265,33 @@ class ElfOatFile FINAL : public OatFileBase {
return ptr;
}
- void PreLoad() OVERRIDE {
+ void PreLoad() override {
}
bool Load(const std::string& elf_filename,
- uint8_t* oat_file_begin, // Override where the file is loaded to if not null
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) OVERRIDE;
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg) override;
bool Load(int oat_fd,
- uint8_t* oat_file_begin, // Override where the file is loaded to if not null
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) OVERRIDE;
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg) override;
- void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) OVERRIDE {
+ void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) override {
}
private:
bool ElfFileOpen(File* file,
- uint8_t* oat_file_begin, // Override where the file is loaded to if not null
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg);
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg);
private:
// Backing memory map for oat file during cross compilation.
@@ -1225,20 +1303,19 @@ class ElfOatFile FINAL : public OatFileBase {
ElfOatFile* ElfOatFile::OpenElfFile(int zip_fd,
File* file,
const std::string& location,
- uint8_t* requested_base,
- uint8_t* oat_file_begin, // Override base if not null
bool writable,
bool executable,
bool low_4gb,
const char* abs_dex_location,
- std::string* error_msg) {
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg) {
ScopedTrace trace("Open elf file " + location);
std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, executable));
bool success = oat_file->ElfFileOpen(file,
- oat_file_begin,
writable,
low_4gb,
executable,
+ reservation,
error_msg);
if (!success) {
CHECK(!error_msg->empty());
@@ -1246,7 +1323,7 @@ ElfOatFile* ElfOatFile::OpenElfFile(int zip_fd,
}
// Complete the setup.
- if (!oat_file->ComputeFields(requested_base, file->GetPath(), error_msg)) {
+ if (!oat_file->ComputeFields(/* requested_base */ nullptr, file->GetPath(), error_msg)) {
return nullptr;
}
@@ -1279,11 +1356,11 @@ bool ElfOatFile::InitializeFromElfFile(int zip_fd,
}
bool ElfOatFile::Load(const std::string& elf_filename,
- uint8_t* oat_file_begin, // Override where the file is loaded to if not null
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) {
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
if (file == nullptr) {
@@ -1291,19 +1368,19 @@ bool ElfOatFile::Load(const std::string& elf_filename,
return false;
}
return ElfOatFile::ElfFileOpen(file.get(),
- oat_file_begin,
writable,
executable,
low_4gb,
+ reservation,
error_msg);
}
bool ElfOatFile::Load(int oat_fd,
- uint8_t* oat_file_begin, // Override where the file is loaded to if not null
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) {
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
if (oat_fd != -1) {
std::unique_ptr<File> file = std::make_unique<File>(oat_fd, false);
@@ -1314,34 +1391,33 @@ bool ElfOatFile::Load(int oat_fd,
return false;
}
return ElfOatFile::ElfFileOpen(file.get(),
- oat_file_begin,
writable,
executable,
low_4gb,
+ reservation,
error_msg);
}
return false;
}
bool ElfOatFile::ElfFileOpen(File* file,
- uint8_t* oat_file_begin,
bool writable,
bool executable,
bool low_4gb,
- std::string* error_msg) {
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
// TODO: rename requested_base to oat_data_begin
elf_file_.reset(ElfFile::Open(file,
writable,
/*program_header_only*/true,
low_4gb,
- error_msg,
- oat_file_begin));
+ error_msg));
if (elf_file_ == nullptr) {
DCHECK(!error_msg->empty());
return false;
}
- bool loaded = elf_file_->Load(file, executable, low_4gb, error_msg);
+ bool loaded = elf_file_->Load(file, executable, low_4gb, reservation, error_msg);
DCHECK(loaded || !error_msg->empty());
return loaded;
}
@@ -1392,11 +1468,11 @@ OatFile* OatFile::Open(int zip_fd,
const std::string& oat_filename,
const std::string& oat_location,
uint8_t* requested_base,
- uint8_t* oat_file_begin,
bool executable,
bool low_4gb,
const char* abs_dex_location,
- std::string* error_msg) {
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
ScopedTrace trace("Open oat file " + oat_location);
CHECK(!oat_filename.empty()) << oat_location;
CheckLocation(oat_location);
@@ -1419,11 +1495,11 @@ OatFile* OatFile::Open(int zip_fd,
oat_filename,
oat_location,
requested_base,
- oat_file_begin,
false /* writable */,
executable,
low_4gb,
abs_dex_location,
+ reservation,
error_msg);
if (with_dlopen != nullptr) {
return with_dlopen;
@@ -1449,11 +1525,11 @@ OatFile* OatFile::Open(int zip_fd,
oat_filename,
oat_location,
requested_base,
- oat_file_begin,
false /* writable */,
executable,
low_4gb,
abs_dex_location,
+ reservation,
error_msg);
return with_internal;
}
@@ -1463,11 +1539,11 @@ OatFile* OatFile::Open(int zip_fd,
int oat_fd,
const std::string& oat_location,
uint8_t* requested_base,
- uint8_t* oat_file_begin,
bool executable,
bool low_4gb,
const char* abs_dex_location,
- std::string* error_msg) {
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
CHECK(!oat_location.empty()) << oat_location;
std::string vdex_location = GetVdexFilename(oat_location);
@@ -1478,11 +1554,11 @@ OatFile* OatFile::Open(int zip_fd,
vdex_location,
oat_location,
requested_base,
- oat_file_begin,
false /* writable */,
executable,
low_4gb,
abs_dex_location,
+ reservation,
error_msg);
return with_internal;
}
@@ -1496,12 +1572,11 @@ OatFile* OatFile::OpenWritable(int zip_fd,
return ElfOatFile::OpenElfFile(zip_fd,
file,
location,
- nullptr,
- nullptr,
- true,
- false,
+ /* writable */ true,
+ /* executable */ false,
/*low_4gb*/false,
abs_dex_location,
+ /* reservation */ nullptr,
error_msg);
}
@@ -1514,12 +1589,11 @@ OatFile* OatFile::OpenReadable(int zip_fd,
return ElfOatFile::OpenElfFile(zip_fd,
file,
location,
- nullptr,
- nullptr,
- false,
- false,
+ /* writable */ false,
+ /* executable */ false,
/*low_4gb*/false,
abs_dex_location,
+ /* reservation */ nullptr,
error_msg);
}
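Summing up the oat_file.cc changes: every loader path swaps the raw uint8_t* oat_file_begin address for an /*inout*/ MemMap* reservation, i.e. callers now pass a pre-reserved address range that loading consumes (TakeReservedMemory above carves the used prefix out of it). At the mmap level a reservation is just a PROT_NONE mapping; a hedged sketch of the underlying mechanism, not the art::MemMap API:

#include <cstddef>
#include <cstdio>
#include <sys/mman.h>

int main() {
  const std::size_t size = 16 * 4096;
  // PROT_NONE holds the address range without committing usable memory.
  void* reservation = mmap(nullptr, size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (reservation == MAP_FAILED) {
    std::perror("mmap");
    return 1;
  }
  // A loader could now place file contents inside [reservation, +size)
  // with MAP_FIXED, taking ownership of the pages it uses.
  munmap(reservation, size);  // release whatever was not consumed
  return 0;
}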
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 4ed26facf7..f20c603bf2 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -86,11 +86,11 @@ class OatFile {
const std::string& filename,
const std::string& location,
uint8_t* requested_base,
- uint8_t* oat_file_begin,
bool executable,
bool low_4gb,
const char* abs_dex_location,
- std::string* error_msg);
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg);
// Similar to OatFile::Open(const std::string...), but accepts input vdex and
// odex files as file descriptors. We also take zip_fd in case the vdex does not
@@ -100,11 +100,11 @@ class OatFile {
int oat_fd,
const std::string& oat_location,
uint8_t* requested_base,
- uint8_t* oat_file_begin,
bool executable,
bool low_4gb,
const char* abs_dex_location,
- std::string* error_msg);
+ /*inout*/MemMap* reservation, // Where to load if not null.
+ /*out*/std::string* error_msg);
// Open an oat file from an already opened File.
// Does not use dlopen underneath so cannot be used for runtime use
@@ -146,7 +146,7 @@ class OatFile {
const OatHeader& GetOatHeader() const;
- class OatMethod FINAL {
+ class OatMethod final {
public:
void LinkMethod(ArtMethod* method) const;
@@ -201,7 +201,7 @@ class OatFile {
friend class OatClass;
};
- class OatClass FINAL {
+ class OatClass final {
public:
ClassStatus GetStatus() const {
return status_;
@@ -444,7 +444,7 @@ class OatFile {
// support forward declarations of inner classes, and we want to
// forward-declare OatDexFile so that we can store an opaque pointer to an
// OatDexFile in DexFile.
-class OatDexFile FINAL {
+class OatDexFile final {
public:
// Opens the DexFile referred to by this OatDexFile from within the containing OatFile.
std::unique_ptr<const DexFile> OpenDexFile(std::string* error_msg) const;
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index f7c74cc23b..4ed7e35eee 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -36,7 +36,6 @@
#include "exec_utils.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
-#include "hidden_api.h"
#include "image.h"
#include "oat.h"
#include "runtime.h"
@@ -182,30 +181,6 @@ bool OatFileAssistant::IsInBootClassPath() {
return false;
}
-bool OatFileAssistant::Lock(std::string* error_msg) {
- CHECK(error_msg != nullptr);
- CHECK(flock_.get() == nullptr) << "OatFileAssistant::Lock already acquired";
-
- // Note the lock will only succeed for secondary dex files and in test
- // environment.
- //
- // The lock *will fail* for all primary apks in a production environment.
- // The app does not have permissions to create locks next to its dex location
- // (be it system, data or vendor partition). We also cannot use the odex or
- // oat location for the same reason.
- //
- // This is best effort and if it fails it's unlikely that we will be able
- // to generate oat files anyway.
- std::string lock_file_name = dex_location_ + "." + GetInstructionSetString(isa_) + ".flock";
-
- flock_ = LockedFile::Open(lock_file_name.c_str(), error_msg);
- if (flock_.get() == nullptr) {
- unlink(lock_file_name.c_str());
- return false;
- }
- return true;
-}
-
int OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target,
bool profile_changed,
bool downgrade,
@@ -221,72 +196,10 @@ int OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target,
return -dexopt_needed;
}
-// Figure out the currently specified compile filter option in the runtime.
-// Returns true on success, false if the compiler filter is invalid, in which
-// case error_msg describes the problem.
-static bool GetRuntimeCompilerFilterOption(CompilerFilter::Filter* filter,
- std::string* error_msg) {
- CHECK(filter != nullptr);
- CHECK(error_msg != nullptr);
-
- *filter = OatFileAssistant::kDefaultCompilerFilterForDexLoading;
- for (StringPiece option : Runtime::Current()->GetCompilerOptions()) {
- if (option.starts_with("--compiler-filter=")) {
- const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
- if (!CompilerFilter::ParseCompilerFilter(compiler_filter_string, filter)) {
- *error_msg = std::string("Unknown --compiler-filter value: ")
- + std::string(compiler_filter_string);
- return false;
- }
- }
- }
- return true;
-}
-
bool OatFileAssistant::IsUpToDate() {
return GetBestInfo().Status() == kOatUpToDate;
}
-OatFileAssistant::ResultOfAttemptToUpdate
-OatFileAssistant::MakeUpToDate(bool profile_changed,
- ClassLoaderContext* class_loader_context,
- std::string* error_msg) {
- // The method doesn't use zip_fd_ and directly opens dex files at dex_locations_.
- CHECK_EQ(-1, zip_fd_) << "MakeUpToDate should not be called with zip_fd";
-
- CompilerFilter::Filter target;
- if (!GetRuntimeCompilerFilterOption(&target, error_msg)) {
- return kUpdateNotAttempted;
- }
-
- OatFileInfo& info = GetBestInfo();
- // TODO(calin, jeffhao): the context should really be passed to GetDexOptNeeded: b/62269291.
- // This is actually not trivial in the current logic as it will interact with the collision
- // check:
- // - currently, if the context does not match but we have no collisions we still accept the
- // oat file.
- // - if GetDexOptNeeded would return kDex2OatFromScratch for a context mismatch and we make
- // the oat code up to date the collision check becomes useless.
- // - however, MakeUpToDate will not always succeed (e.g. for primary apks, or for dex files
- // loaded in other processes). So it boils down to how far do we want to complicate
- // the logic in order to enable the use of oat files. Maybe it's time to try to simplify it.
- switch (info.GetDexOptNeeded(
- target, profile_changed, /*downgrade*/ false, class_loader_context)) {
- case kNoDexOptNeeded:
- return kUpdateSucceeded;
-
- // TODO: For now, don't bother with all the different ways we can call
- // dex2oat to generate the oat file. Always generate the oat file as if it
- // were kDex2OatFromScratch.
- case kDex2OatFromScratch:
- case kDex2OatForBootImage:
- case kDex2OatForRelocation:
- case kDex2OatForFilter:
- return GenerateOatFileNoChecks(info, target, class_loader_context, error_msg);
- }
- UNREACHABLE();
-}
-
std::unique_ptr<OatFile> OatFileAssistant::GetBestOatFile() {
return GetBestInfo().ReleaseFileForUse();
}
@@ -615,243 +528,6 @@ static bool DexLocationToOdexNames(const std::string& location,
return true;
}
-// Prepare a subcomponent of the odex directory.
-// (i.e. create and set the expected permissions on the path `dir`).
-static bool PrepareDirectory(const std::string& dir, std::string* error_msg) {
- struct stat dir_stat;
- if (TEMP_FAILURE_RETRY(stat(dir.c_str(), &dir_stat)) == 0) {
- // The directory exists. Check if it is indeed a directory.
- if (!S_ISDIR(dir_stat.st_mode)) {
- *error_msg = dir + " is not a dir";
- return false;
- } else {
- // The dir is already on disk.
- return true;
- }
- }
-
- // Failed to stat. We need to create the directory.
- if (errno != ENOENT) {
- *error_msg = "Could not stat isa dir " + dir + ":" + strerror(errno);
- return false;
- }
-
- mode_t mode = S_IRWXU | S_IXGRP | S_IXOTH;
- if (mkdir(dir.c_str(), mode) != 0) {
- *error_msg = "Could not create dir " + dir + ":" + strerror(errno);
- return false;
- }
- if (chmod(dir.c_str(), mode) != 0) {
- *error_msg = "Could not create the oat dir " + dir + ":" + strerror(errno);
- return false;
- }
- return true;
-}
-
-// Prepares the odex directory for the given dex location.
-static bool PrepareOdexDirectories(const std::string& dex_location,
- const std::string& expected_odex_location,
- InstructionSet isa,
- std::string* error_msg) {
- std::string actual_odex_location;
- std::string oat_dir;
- std::string isa_dir;
- if (!DexLocationToOdexNames(
- dex_location, isa, &actual_odex_location, &oat_dir, &isa_dir, error_msg)) {
- return false;
- }
- DCHECK_EQ(expected_odex_location, actual_odex_location);
-
- if (!PrepareDirectory(oat_dir, error_msg)) {
- return false;
- }
- if (!PrepareDirectory(isa_dir, error_msg)) {
- return false;
- }
- return true;
-}
-
-class Dex2oatFileWrapper {
- public:
- explicit Dex2oatFileWrapper(File* file)
- : file_(file),
- unlink_file_at_destruction_(true) {
- }
-
- ~Dex2oatFileWrapper() {
- if (unlink_file_at_destruction_ && (file_ != nullptr)) {
- file_->Erase(/*unlink*/ true);
- }
- }
-
- File* GetFile() { return file_.get(); }
-
- void DisableUnlinkAtDestruction() {
- unlink_file_at_destruction_ = false;
- }
-
- private:
- std::unique_ptr<File> file_;
- bool unlink_file_at_destruction_;
-};
-
-OatFileAssistant::ResultOfAttemptToUpdate OatFileAssistant::GenerateOatFileNoChecks(
- OatFileAssistant::OatFileInfo& info,
- CompilerFilter::Filter filter,
- const ClassLoaderContext* class_loader_context,
- std::string* error_msg) {
- CHECK(error_msg != nullptr);
-
- Runtime* runtime = Runtime::Current();
- if (!runtime->IsDex2OatEnabled()) {
- *error_msg = "Generation of oat file for dex location " + dex_location_
- + " not attempted because dex2oat is disabled.";
- return kUpdateNotAttempted;
- }
-
- if (info.Filename() == nullptr) {
- *error_msg = "Generation of oat file for dex location " + dex_location_
- + " not attempted because the oat file name could not be determined.";
- return kUpdateNotAttempted;
- }
- const std::string& oat_file_name = *info.Filename();
- const std::string& vdex_file_name = GetVdexFilename(oat_file_name);
-
- // dex2oat ignores missing dex files and doesn't report an error.
- // Check explicitly here so we can detect the error properly.
- // TODO: Why does dex2oat behave that way?
- struct stat dex_path_stat;
- if (TEMP_FAILURE_RETRY(stat(dex_location_.c_str(), &dex_path_stat)) != 0) {
- *error_msg = "Could not access dex location " + dex_location_ + ":" + strerror(errno);
- return kUpdateNotAttempted;
- }
-
- // If this is the odex location, we need to create the odex file layout (../oat/isa/..)
- if (!info.IsOatLocation()) {
- if (!PrepareOdexDirectories(dex_location_, oat_file_name, isa_, error_msg)) {
- return kUpdateNotAttempted;
- }
- }
-
- // Set the permissions for the oat and the vdex files.
- // The user always gets read and write while the group and others propagate
- // the reading access of the original dex file.
- mode_t file_mode = S_IRUSR | S_IWUSR |
- (dex_path_stat.st_mode & S_IRGRP) |
- (dex_path_stat.st_mode & S_IROTH);
-
- Dex2oatFileWrapper vdex_file_wrapper(OS::CreateEmptyFile(vdex_file_name.c_str()));
- File* vdex_file = vdex_file_wrapper.GetFile();
- if (vdex_file == nullptr) {
- *error_msg = "Generation of oat file " + oat_file_name
- + " not attempted because the vdex file " + vdex_file_name
- + " could not be opened.";
- return kUpdateNotAttempted;
- }
-
- if (fchmod(vdex_file->Fd(), file_mode) != 0) {
- *error_msg = "Generation of oat file " + oat_file_name
- + " not attempted because the vdex file " + vdex_file_name
- + " could not be made world readable.";
- return kUpdateNotAttempted;
- }
-
- Dex2oatFileWrapper oat_file_wrapper(OS::CreateEmptyFile(oat_file_name.c_str()));
- File* oat_file = oat_file_wrapper.GetFile();
- if (oat_file == nullptr) {
- *error_msg = "Generation of oat file " + oat_file_name
- + " not attempted because the oat file could not be created.";
- return kUpdateNotAttempted;
- }
-
- if (fchmod(oat_file->Fd(), file_mode) != 0) {
- *error_msg = "Generation of oat file " + oat_file_name
- + " not attempted because the oat file could not be made world readable.";
- return kUpdateNotAttempted;
- }
-
- std::vector<std::string> args;
- args.push_back("--dex-file=" + dex_location_);
- args.push_back("--output-vdex-fd=" + std::to_string(vdex_file->Fd()));
- args.push_back("--oat-fd=" + std::to_string(oat_file->Fd()));
- args.push_back("--oat-location=" + oat_file_name);
- args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter));
- const std::string dex2oat_context = class_loader_context == nullptr
- ? OatFile::kSpecialSharedLibrary
- : class_loader_context->EncodeContextForDex2oat(/*base_dir*/ "");
- args.push_back("--class-loader-context=" + dex2oat_context);
-
- if (!Dex2Oat(args, error_msg)) {
- return kUpdateFailed;
- }
-
- if (vdex_file->FlushCloseOrErase() != 0) {
- *error_msg = "Unable to close vdex file " + vdex_file_name;
- return kUpdateFailed;
- }
-
- if (oat_file->FlushCloseOrErase() != 0) {
- *error_msg = "Unable to close oat file " + oat_file_name;
- return kUpdateFailed;
- }
-
- // Mark that the odex file has changed and we should try to reload.
- info.Reset();
- // We have compiled successfully. Disable the auto-unlink.
- vdex_file_wrapper.DisableUnlinkAtDestruction();
- oat_file_wrapper.DisableUnlinkAtDestruction();
-
- return kUpdateSucceeded;
-}
-
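The permission logic removed above gives the owner read/write and propagates only the read bits of the original dex file to group and others. A worked example of the same computation, using plain POSIX calls:

    #include <sys/stat.h>

    // dex mode 0644 -> oat/vdex mode 0644; dex mode 0600 -> oat/vdex mode 0600.
    mode_t OatModeForDex(const struct stat& dex_stat) {
      return S_IRUSR | S_IWUSR |                 // owner always gets rw
             (dex_stat.st_mode & S_IRGRP) |      // group read only if the dex had it
             (dex_stat.st_mode & S_IROTH);       // other read only if the dex had it
    }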
-bool OatFileAssistant::Dex2Oat(const std::vector<std::string>& args,
- std::string* error_msg) {
- Runtime* runtime = Runtime::Current();
- std::string image_location = ImageLocation();
- if (image_location.empty()) {
- *error_msg = "No image location found for Dex2Oat.";
- return false;
- }
-
- std::vector<std::string> argv;
- argv.push_back(runtime->GetCompilerExecutable());
- if (runtime->IsJavaDebuggable()) {
- argv.push_back("--debuggable");
- }
- runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
-
- if (!runtime->IsVerificationEnabled()) {
- argv.push_back("--compiler-filter=verify-none");
- }
-
- if (runtime->GetHiddenApiEnforcementPolicy() != hiddenapi::EnforcementPolicy::kNoChecks) {
- argv.push_back("--runtime-arg");
- argv.push_back("-Xhidden-api-checks");
- }
-
- if (runtime->MustRelocateIfPossible()) {
- argv.push_back("--runtime-arg");
- argv.push_back("-Xrelocate");
- } else {
- argv.push_back("--runtime-arg");
- argv.push_back("-Xnorelocate");
- }
-
- if (!kIsTargetBuild) {
- argv.push_back("--host");
- }
-
- argv.push_back("--boot-image=" + image_location);
-
- std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
- argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
-
- argv.insert(argv.end(), args.begin(), args.end());
-
- std::string command_line(android::base::Join(argv, ' '));
- return Exec(argv, error_msg);
-}
-
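The deleted Dex2Oat helper assembled a command line from runtime state and exec'd the compiler. A hedged sketch of the same shape, using only flags that appear verbatim in this diff; the fork/exec wrapper (Exec in the original) and the parameter names are stand-ins:

    #include <string>
    #include <vector>

    // Assembles a dex2oat argv of the same shape as the deleted helper.
    std::vector<std::string> BuildDex2OatArgs(const std::string& compiler_exe,
                                              const std::string& image_location,
                                              const std::string& dex_location,
                                              int oat_fd,
                                              int vdex_fd,
                                              const std::string& oat_file_name,
                                              const std::string& filter_name) {
      return {
          compiler_exe,
          "--runtime-arg", "-Xnorelocate",
          "--boot-image=" + image_location,
          "--dex-file=" + dex_location,
          "--output-vdex-fd=" + std::to_string(vdex_fd),  // write through open fds
          "--oat-fd=" + std::to_string(oat_fd),
          "--oat-location=" + oat_file_name,
          "--compiler-filter=" + filter_name,
      };
    }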
bool OatFileAssistant::DexLocationToOdexFilename(const std::string& location,
InstructionSet isa,
std::string* odex_filename,
@@ -885,16 +561,6 @@ bool OatFileAssistant::DexLocationToOatFilename(const std::string& location,
return GetDalvikCacheFilename(location.c_str(), cache_dir.c_str(), oat_filename, error_msg);
}
-std::string OatFileAssistant::ImageLocation() {
- Runtime* runtime = Runtime::Current();
- const std::vector<gc::space::ImageSpace*>& image_spaces =
- runtime->GetHeap()->GetBootImageSpaces();
- if (image_spaces.empty()) {
- return "";
- }
- return image_spaces[0]->GetImageLocation();
-}
-
const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() {
if (!required_dex_checksums_attempted_) {
required_dex_checksums_attempted_ = true;
@@ -1165,22 +831,22 @@ const OatFile* OatFileAssistant::OatFileInfo::GetFile() {
vdex_fd_,
oat_fd_,
filename_.c_str(),
- nullptr,
- nullptr,
+ /* requested_base */ nullptr,
executable,
- false /* low_4gb */,
+ /* low_4gb */ false,
oat_file_assistant_->dex_location_.c_str(),
+ /* reservation */ nullptr,
&error_msg));
}
} else {
file_.reset(OatFile::Open(/* zip_fd */ -1,
filename_.c_str(),
filename_.c_str(),
- nullptr,
- nullptr,
+ /* requested_base */ nullptr,
executable,
- false /* low_4gb */,
+ /* low_4gb */ false,
oat_file_assistant_->dex_location_.c_str(),
+ /* reservation */ nullptr,
&error_msg));
}
if (file_.get() == nullptr) {
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index a6d0961835..dbfbdf9fbc 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -48,11 +48,6 @@ class ImageSpace;
// dex location is in the boot class path.
class OatFileAssistant {
public:
- // The default compile filter to use when optimizing dex file at load time if they
- // are out of date.
- static const CompilerFilter::Filter kDefaultCompilerFilterForDexLoading =
- CompilerFilter::kQuicken;
-
enum DexOptNeeded {
// No dexopt should (or can) be done to update the apk/jar.
// Matches Java: dalvik.system.DexFile.NO_DEXOPT_NEEDED = 0
@@ -144,24 +139,6 @@ class OatFileAssistant {
// path.
bool IsInBootClassPath();
- // Obtains a lock on the target oat file.
- // Only one OatFileAssistant object can hold the lock for a target oat file
- // at a time. The Lock is released automatically when the OatFileAssistant
- // object goes out of scope. The Lock() method must not be called if the
- // lock has already been acquired.
- //
- // Returns true on success.
- // Returns false on error, in which case error_msg will contain more
- // information on the error.
- //
- // The 'error_msg' argument must not be null.
- //
- // This is intended to be used to avoid race conditions when multiple
- // processes generate oat files, such as when a foreground Activity and
- // a background Service both use DexClassLoaders pointing to the same dex
- // file.
- bool Lock(std::string* error_msg);
-
// Return what action needs to be taken to produce up-to-date code for this
// dex location. If "downgrade" is set to false, it verifies if the current
// compiler filter is at least as good as an oat file generated with the
@@ -187,33 +164,6 @@ class OatFileAssistant {
// irrespective of the compiler filter of the up-to-date code.
bool IsUpToDate();
- // Return code used when attempting to generate updated code.
- enum ResultOfAttemptToUpdate {
- kUpdateFailed, // We tried making the code up to date, but
- // encountered an unexpected failure.
- kUpdateNotAttempted, // We wanted to update the code, but determined we
- // should not make the attempt.
- kUpdateSucceeded // We successfully made the code up to date
- // (possibly by doing nothing).
- };
-
- // Attempts to generate or relocate the oat file as needed to make it up to
- // date based on the current runtime and compiler options.
- // profile_changed should be true to indicate the profile has recently
- // changed for this dex location.
- //
- // If the dex files need to be made up to date, class_loader_context will be
- // passed to dex2oat.
- //
- // Returns the result of attempting to update the code.
- //
- // If the result is not kUpdateSucceeded, the value of error_msg will be set
- // to a string describing why there was a failure or the update was not
- // attempted. error_msg must not be null.
- ResultOfAttemptToUpdate MakeUpToDate(bool profile_changed,
- ClassLoaderContext* class_loader_context,
- std::string* error_msg);
-
// Returns an oat file that can be used for loading dex files.
// Returns null if no suitable oat file was found.
//
@@ -284,18 +234,6 @@ class OatFileAssistant {
// Returns the status of the oat file for the dex location.
OatStatus OatFileStatus();
- // Executes dex2oat using the current runtime configuration overridden with
- // the given arguments. This does not check to see if dex2oat is enabled in
- // the runtime configuration.
- // Returns true on success.
- //
- // If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be null.
- //
- // TODO: The OatFileAssistant probably isn't the right place to have this
- // function.
- static bool Dex2Oat(const std::vector<std::string>& args, std::string* error_msg);
-
// Constructs the odex file name for the given dex location.
// Returns true on success, in which case odex_filename is set to the odex
// file name.
@@ -436,20 +374,6 @@ class OatFileAssistant {
bool file_released_ = false;
};
- // Generate the oat file for the given info from the dex file using the
- // current runtime compiler options, the specified filter and class loader
- // context.
- // This does not check the current status before attempting to generate the
- // oat file.
- //
- // If the result is not kUpdateSucceeded, the value of error_msg will be set
- // to a string describing why there was a failure or the update was not
- // attempted. error_msg must not be null.
- ResultOfAttemptToUpdate GenerateOatFileNoChecks(OatFileInfo& info,
- CompilerFilter::Filter target,
- const ClassLoaderContext* class_loader_context,
- std::string* error_msg);
-
// Return info for the best oat file.
OatFileInfo& GetBestInfo();
@@ -473,13 +397,6 @@ class OatFileAssistant {
// location.
OatStatus GivenOatFileStatus(const OatFile& file);
- // Returns the current image location.
- // Returns an empty string if the image location could not be retrieved.
- //
- // TODO: This method should belong with an image file manager, not
- // the oat file assistant.
- static std::string ImageLocation();
-
// Gets the dex checksums required for an up-to-date oat file.
// Returns cached_required_dex_checksums if the required checksums were
// located. Returns null if the required checksums were not found. The
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 0b3c61d474..5a2997809b 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -41,11 +41,6 @@
namespace art {
-static const std::string kSpecialSharedLibrary = "&"; // NOLINT [runtime/string] [4]
-static ClassLoaderContext* kSpecialSharedLibraryContext = nullptr;
-
-static constexpr char kDex2oatCmdLineHiddenApiArg[] = " --runtime-arg -Xhidden-api-checks";
-
class OatFileAssistantTest : public DexoptTest {
public:
void VerifyOptimizationStatus(const std::string& file,
@@ -68,14 +63,6 @@ class OatFileAssistantTest : public DexoptTest {
}
};
-class OatFileAssistantNoDex2OatTest : public DexoptTest {
- public:
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) {
- DexoptTest::SetUpRuntimeOptions(options);
- options->push_back(std::make_pair("-Xnodex2oat", nullptr));
- }
-};
-
class ScopedNonWritable {
public:
explicit ScopedNonWritable(const std::string& dex_location) {
@@ -109,6 +96,97 @@ static bool IsExecutedAsRoot() {
return geteuid() == 0;
}
+// Case: We have a MultiDEX file and up-to-date ODEX file for it with relative
+// encoded dex locations.
+// Expect: The oat file status is kNoDexOptNeeded.
+TEST_F(OatFileAssistantTest, RelativeEncodedDexLocation) {
+ std::string dex_location = GetScratchDir() + "/RelativeEncodedDexLocation.jar";
+ std::string odex_location = GetOdexDir() + "/RelativeEncodedDexLocation.odex";
+
+ // Create the dex file
+ Copy(GetMultiDexSrc1(), dex_location);
+
+ // Create the oat file with relative encoded dex location.
+ std::vector<std::string> args = {
+ "--dex-file=" + dex_location,
+ "--dex-location=" + std::string("RelativeEncodedDexLocation.jar"),
+ "--oat-file=" + odex_location,
+ "--compiler-filter=speed"
+ };
+
+ std::string error_msg;
+ ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
+
+ // Verify we can load both dex files.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(2u, dex_files.size());
+}
+
+TEST_F(OatFileAssistantTest, MakeUpToDateWithContext) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ std::string odex_location = GetOdexDir() + "/TestDex.odex";
+ std::string context_location = GetScratchDir() + "/ContextDex.jar";
+ Copy(GetDexSrc1(), dex_location);
+ Copy(GetDexSrc2(), context_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ std::string context_str = "PCL[" + context_location + "]";
+ std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
+ ASSERT_TRUE(context != nullptr);
+ ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
+
+ std::string error_msg;
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + dex_location);
+ args.push_back("--oat-file=" + odex_location);
+ args.push_back("--class-loader-context=" + context_str);
+ ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ EXPECT_NE(nullptr, oat_file.get());
+ EXPECT_EQ(context->EncodeContextForOatFile(""),
+ oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
+}
+
+TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ std::string odex_location = GetOdexDir() + "/TestDex.odex";
+ std::string context_location = GetScratchDir() + "/ContextDex.jar";
+ Copy(GetDexSrc1(), dex_location);
+ Copy(GetDexSrc2(), context_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ std::string context_str = "PCL[" + context_location + "]";
+ std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
+ ASSERT_TRUE(context != nullptr);
+ ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
+
+ std::string error_msg;
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + dex_location);
+ args.push_back("--oat-file=" + odex_location);
+ args.push_back("--class-loader-context=" + context_str);
+ ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
+
+ // A relative context simulates a dependent split context.
+ std::unique_ptr<ClassLoaderContext> relative_context =
+ ClassLoaderContext::Create("PCL[ContextDex.jar]");
+ EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(
+ CompilerFilter::kDefaultCompilerFilter,
+ /* downgrade */ false,
+ /* profile_changed */ false,
+ relative_context.get()));
+}
+
// Case: We have a DEX file, but no OAT file for it.
// Expect: The status is kDex2OatNeeded.
TEST_F(OatFileAssistantTest, DexNoOat) {
@@ -145,11 +223,6 @@ TEST_F(OatFileAssistantTest, NoDexNoOat) {
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles());
- // Trying to make the oat file up to date should not fail or crash.
- std::string error_msg;
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg));
-
// Trying to get the best oat file should fail, but not crash.
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
EXPECT_EQ(nullptr, oat_file.get());
@@ -584,37 +657,6 @@ TEST_F(OatFileAssistantTest, StrippedMultiDexNonMainOutOfDate) {
EXPECT_EQ(OatFileAssistant::kOatDexOutOfDate, oat_file_assistant.OatFileStatus());
}
-// Case: We have a MultiDEX file and up-to-date ODEX file for it with relative
-// encoded dex locations.
-// Expect: The oat file status is kNoDexOptNeeded.
-TEST_F(OatFileAssistantTest, RelativeEncodedDexLocation) {
- std::string dex_location = GetScratchDir() + "/RelativeEncodedDexLocation.jar";
- std::string odex_location = GetOdexDir() + "/RelativeEncodedDexLocation.odex";
-
- // Create the dex file
- Copy(GetMultiDexSrc1(), dex_location);
-
- // Create the oat file with relative encoded dex location.
- std::vector<std::string> args;
- args.push_back("--dex-file=" + dex_location);
- args.push_back("--dex-location=" + std::string("RelativeEncodedDexLocation.jar"));
- args.push_back("--oat-file=" + odex_location);
- args.push_back("--compiler-filter=speed");
-
- std::string error_msg;
- ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
-
- // Verify we can load both dex files.
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
-
- std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
- ASSERT_TRUE(oat_file.get() != nullptr);
- EXPECT_TRUE(oat_file->IsExecutable());
- std::vector<std::unique_ptr<const DexFile>> dex_files;
- dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
- EXPECT_EQ(2u, dex_files.size());
-}
-
// Case: We have a DEX file and an OAT file out of date with respect to the
// dex checksum.
TEST_F(OatFileAssistantTest, OatDexOutOfDate) {
@@ -872,13 +914,6 @@ TEST_F(OatFileAssistantTest, ResourceOnlyDex) {
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles());
- // Make the oat file up to date. This should have no effect.
- std::string error_msg;
- Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg)) <<
- error_msg;
-
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -1037,35 +1072,6 @@ TEST_F(OatFileAssistantTest, LoadNoExecOatUpToDate) {
EXPECT_EQ(1u, dex_files.size());
}
-// Case: We don't have a DEX file and can't write the oat file.
-// Expect: We should fail to generate the oat file without crashing.
-TEST_F(OatFileAssistantTest, GenNoDex) {
- if (IsExecutedAsRoot()) {
- // We cannot simulate non writable locations when executed as root: b/38000545.
- LOG(ERROR) << "Test skipped because it's running as root";
- return;
- }
-
- std::string dex_location = GetScratchDir() + "/GenNoDex.jar";
-
- ScopedNonWritable scoped_non_writable(dex_location);
- ASSERT_TRUE(scoped_non_writable.IsSuccessful());
-
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
- std::string error_msg;
- Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
- // We should get kUpdateSucceeded from MakeUpToDate since there's nothing
- // that can be done in this situation.
- ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg));
-
- // Verify it didn't create an oat in the default location (dalvik-cache).
- OatFileAssistant ofm(dex_location.c_str(), kRuntimeISA, false);
- EXPECT_EQ(OatFileAssistant::kOatCannotOpen, ofm.OatFileStatus());
- // Verify it didn't create the odex file in the default location (../oat/isa/...odex)
- EXPECT_EQ(OatFileAssistant::kOatCannotOpen, ofm.OdexFileStatus());
-}
-
// Turn an absolute path into a path relative to the current working
// directory.
static std::string MakePathRelative(const std::string& target) {
@@ -1131,13 +1137,6 @@ TEST_F(OatFileAssistantTest, ShortDexLocation) {
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles());
-
- // Trying to make it up to date should have no effect.
- std::string error_msg;
- Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg));
- EXPECT_TRUE(error_msg.empty());
}
// Case: Non-standard extension for dex file.
@@ -1156,11 +1155,12 @@ TEST_F(OatFileAssistantTest, LongDexExtension) {
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
}
+
// A task to generate a dex location. Used by the RaceToGenerate test.
class RaceGenerateTask : public Task {
public:
- explicit RaceGenerateTask(const std::string& dex_location, const std::string& oat_location)
- : dex_location_(dex_location), oat_location_(oat_location), loaded_oat_file_(nullptr)
+ RaceGenerateTask(const std::string& dex_location, const std::string& oat_location)
+ : dex_location_(dex_location), oat_location_(oat_location), loaded_oat_file_(nullptr)
{}
void Run(Thread* self ATTRIBUTE_UNUSED) {
@@ -1169,6 +1169,21 @@ class RaceGenerateTask : public Task {
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::vector<std::string> error_msgs;
const OatFile* oat_file = nullptr;
+ {
+ // Create the oat file.
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + dex_location_);
+ args.push_back("--oat-file=" + oat_location_);
+ std::string error_msg;
+ if (kIsTargetBuild) {
+ // Don't check whether dex2oat is successful: given we're running kNumThreads in
+ // parallel, low memory killer might just kill some of the dex2oat invocations.
+ DexoptTest::Dex2Oat(args, &error_msg);
+ } else {
+ ASSERT_TRUE(DexoptTest::Dex2Oat(args, &error_msg)) << error_msg;
+ }
+ }
+
dex_files = Runtime::Current()->GetOatFileManager().OpenDexFilesFromOat(
dex_location_.c_str(),
Runtime::Current()->GetSystemClassLoader(),
@@ -1176,8 +1191,9 @@ class RaceGenerateTask : public Task {
&oat_file,
&error_msgs);
CHECK(!dex_files.empty()) << android::base::Join(error_msgs, '\n');
- CHECK(dex_files[0]->GetOatDexFile() != nullptr) << dex_files[0]->GetLocation();
- loaded_oat_file_ = dex_files[0]->GetOatDexFile()->GetOatFile();
+ if (dex_files[0]->GetOatDexFile() != nullptr) {
+ loaded_oat_file_ = dex_files[0]->GetOatDexFile()->GetOatFile();
+ }
CHECK_EQ(loaded_oat_file_, oat_file);
}
@@ -1191,12 +1207,8 @@ class RaceGenerateTask : public Task {
const OatFile* loaded_oat_file_;
};
-// Test the case where multiple processes race to generate an oat file.
-// This simulates multiple processes using multiple threads.
-//
-// We want unique Oat files to be loaded even when there is a race to load.
-// TODO: The test case no longer tests locking the way it was intended since we now get multiple
-// copies of the same Oat files mapped at different locations.
+// Test the case where dex2oat invocations race with multiple processes trying to
+// load the oat file.
TEST_F(OatFileAssistantTest, RaceToGenerate) {
std::string dex_location = GetScratchDir() + "/RaceToGenerate.jar";
std::string oat_location = GetOdexDir() + "/RaceToGenerate.oat";
@@ -1209,31 +1221,32 @@ TEST_F(OatFileAssistantTest, RaceToGenerate) {
// take a while to generate.
Copy(GetLibCoreDexFileNames()[0], dex_location);
- const int kNumThreads = 32;
+ const size_t kNumThreads = 32;
Thread* self = Thread::Current();
ThreadPool thread_pool("Oat file assistant test thread pool", kNumThreads);
std::vector<std::unique_ptr<RaceGenerateTask>> tasks;
- for (int i = 0; i < kNumThreads; i++) {
+ for (size_t i = 0; i < kNumThreads; i++) {
std::unique_ptr<RaceGenerateTask> task(new RaceGenerateTask(dex_location, oat_location));
thread_pool.AddTask(self, task.get());
tasks.push_back(std::move(task));
}
thread_pool.StartWorkers(self);
- thread_pool.Wait(self, true, false);
+ thread_pool.Wait(self, /* do_work */ true, /* may_hold_locks */ false);
- // Verify every task got a unique oat file.
+ // Verify that tasks which got an oat file got a unique one.
std::set<const OatFile*> oat_files;
for (auto& task : tasks) {
const OatFile* oat_file = task->GetLoadedOatFile();
- EXPECT_TRUE(oat_files.find(oat_file) == oat_files.end());
- oat_files.insert(oat_file);
+ if (oat_file != nullptr) {
+ EXPECT_TRUE(oat_files.find(oat_file) == oat_files.end());
+ oat_files.insert(oat_file);
+ }
}
}
-// Case: We have a DEX file and an ODEX file, no OAT file, and dex2oat is
-// disabled.
+// Case: We have a DEX file and an ODEX file, and no OAT file.
// Expect: We should load the odex file non-executable.
-TEST_F(OatFileAssistantNoDex2OatTest, LoadDexOdexNoOat) {
+TEST_F(DexoptTest, LoadDexOdexNoOat) {
std::string dex_location = GetScratchDir() + "/LoadDexOdexNoOat.jar";
std::string odex_location = GetOdexDir() + "/LoadDexOdexNoOat.odex";
@@ -1252,10 +1265,9 @@ TEST_F(OatFileAssistantNoDex2OatTest, LoadDexOdexNoOat) {
EXPECT_EQ(1u, dex_files.size());
}
-// Case: We have a MultiDEX file and an ODEX file, no OAT file, and dex2oat is
-// disabled.
+// Case: We have a MultiDEX file and an ODEX file, and no OAT file.
// Expect: We should load the odex file non-executable.
-TEST_F(OatFileAssistantNoDex2OatTest, LoadMultiDexOdexNoOat) {
+TEST_F(DexoptTest, LoadMultiDexOdexNoOat) {
std::string dex_location = GetScratchDir() + "/LoadMultiDexOdexNoOat.jar";
std::string odex_location = GetOdexDir() + "/LoadMultiDexOdexNoOat.odex";
@@ -1274,36 +1286,6 @@ TEST_F(OatFileAssistantNoDex2OatTest, LoadMultiDexOdexNoOat) {
EXPECT_EQ(2u, dex_files.size());
}
-TEST_F(OatFileAssistantTest, RuntimeCompilerFilterOptionUsed) {
- std::string dex_location = GetScratchDir() + "/RuntimeCompilerFilterOptionUsed.jar";
- Copy(GetDexSrc1(), dex_location);
-
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-
- std::string error_msg;
- Runtime::Current()->AddCompilerOption("--compiler-filter=quicken");
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg)) <<
- error_msg;
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
- EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
- oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
-
- Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg))
- << error_msg;
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
-
- Runtime::Current()->AddCompilerOption("--compiler-filter=bogus");
- EXPECT_EQ(OatFileAssistant::kUpdateNotAttempted,
- oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg));
-}
-
TEST(OatFileAssistantUtilsTest, DexLocationToOdexFilename) {
std::string error_msg;
std::string odex_file;
@@ -1350,112 +1332,6 @@ TEST_F(OatFileAssistantTest, DexOptStatusValues) {
}
}
-// Verify that when no compiler filter is passed the default one from OatFileAssistant is used.
-TEST_F(OatFileAssistantTest, DefaultMakeUpToDateFilter) {
- std::string dex_location = GetScratchDir() + "/TestDex.jar";
- Copy(GetDexSrc1(), dex_location);
-
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-
- const CompilerFilter::Filter default_filter =
- OatFileAssistant::kDefaultCompilerFilterForDexLoading;
- std::string error_msg;
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg)) <<
- error_msg;
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- oat_file_assistant.GetDexOptNeeded(default_filter));
- std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
- EXPECT_NE(nullptr, oat_file.get());
- EXPECT_EQ(default_filter, oat_file->GetCompilerFilter());
-}
-
-TEST_F(OatFileAssistantTest, MakeUpToDateWithSpecialSharedLibrary) {
- std::string dex_location = GetScratchDir() + "/TestDex.jar";
- Copy(GetDexSrc1(), dex_location);
-
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-
- const CompilerFilter::Filter default_filter =
- OatFileAssistant::kDefaultCompilerFilterForDexLoading;
- std::string error_msg;
- int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- oat_file_assistant.GetDexOptNeeded(default_filter));
- std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
- EXPECT_NE(nullptr, oat_file.get());
- EXPECT_EQ(kSpecialSharedLibrary,
- oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
-}
-
-TEST_F(OatFileAssistantTest, MakeUpToDateWithContext) {
- std::string dex_location = GetScratchDir() + "/TestDex.jar";
- std::string context_location = GetScratchDir() + "/ContextDex.jar";
- Copy(GetDexSrc1(), dex_location);
- Copy(GetDexSrc2(), context_location);
-
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-
- const CompilerFilter::Filter default_filter =
- OatFileAssistant::kDefaultCompilerFilterForDexLoading;
- std::string error_msg;
- std::string context_str = "PCL[" + context_location + "]";
- std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
- ASSERT_TRUE(context != nullptr);
- ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
-
- int status = oat_file_assistant.MakeUpToDate(false, context.get(), &error_msg);
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- oat_file_assistant.GetDexOptNeeded(default_filter, false, false, context.get()));
-
- std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
- EXPECT_NE(nullptr, oat_file.get());
- EXPECT_EQ(context->EncodeContextForOatFile(""),
- oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
-}
-
-TEST_F(OatFileAssistantTest, MakeUpToDateWithHiddenApiDisabled) {
- hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
- hiddenapi::EnforcementPolicy::kNoChecks);
-
- std::string dex_location = GetScratchDir() + "/TestDexHiddenApiDisabled.jar";
- Copy(GetDexSrc1(), dex_location);
-
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
- std::string error_msg;
- int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-
- std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
- EXPECT_NE(nullptr, oat_file.get());
-
- const char* cmd_line = oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
- EXPECT_NE(nullptr, cmd_line);
- EXPECT_EQ(nullptr, strstr(cmd_line, kDex2oatCmdLineHiddenApiArg));
-}
-
-TEST_F(OatFileAssistantTest, MakeUpToDateWithHiddenApiEnabled) {
- hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
- hiddenapi::EnforcementPolicy::kBlacklistOnly);
-
- std::string dex_location = GetScratchDir() + "/TestDexHiddenApiEnabled.jar";
- Copy(GetDexSrc1(), dex_location);
-
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
- std::string error_msg;
- int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-
- std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
- EXPECT_NE(nullptr, oat_file.get());
-
- const char* cmd_line = oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
- EXPECT_NE(nullptr, cmd_line);
- EXPECT_NE(nullptr, strstr(cmd_line, kDex2oatCmdLineHiddenApiArg));
-}
-
TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
std::string dex_location = GetScratchDir() + "/TestDex.jar";
std::string context_location = GetScratchDir() + "/ContextDex.jar";
@@ -1464,19 +1340,12 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
- const CompilerFilter::Filter default_filter =
- OatFileAssistant::kDefaultCompilerFilterForDexLoading;
std::string error_msg;
std::string context_str = "PCL[" + context_location + "]";
std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
ASSERT_TRUE(context != nullptr);
ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
- int status = oat_file_assistant.MakeUpToDate(false, context.get(), &error_msg);
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- oat_file_assistant.GetDexOptNeeded(default_filter, false, false, context.get()));
-
// Update the context by overriding the jar file.
Copy(GetMultiDexSrc2(), context_location);
std::unique_ptr<ClassLoaderContext> updated_context = ClassLoaderContext::Create(context_str);
@@ -1484,88 +1353,10 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
// DexOptNeeded should advise compilation from scratch.
EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
oat_file_assistant.GetDexOptNeeded(
- default_filter, false, false, updated_context.get()));
-}
-
-TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
- std::string dex_location = GetScratchDir() + "/TestDex.jar";
- std::string context_location = GetScratchDir() + "/ContextDex.jar";
- Copy(GetDexSrc1(), dex_location);
- Copy(GetDexSrc2(), context_location);
-
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-
- const CompilerFilter::Filter default_filter =
- OatFileAssistant::kDefaultCompilerFilterForDexLoading;
- std::string error_msg;
- std::string context_str = "PCL[" + context_location + "]";
- std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
- ASSERT_TRUE(context != nullptr);
- ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
-
- int status = oat_file_assistant.MakeUpToDate(false, context.get(), &error_msg);
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-
- // A relative context simulates a dependent split context.
- std::unique_ptr<ClassLoaderContext> relative_context =
- ClassLoaderContext::Create("PCL[ContextDex.jar]");
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- oat_file_assistant.GetDexOptNeeded(
- default_filter, false, false, relative_context.get()));
-}
-
-TEST_F(OatFileAssistantTest, SystemOdex) {
- std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
- std::string odex_location = GetScratchDir() + "/OatUpToDate.odex";
- std::string system_location = GetAndroidRoot() + "/OatUpToDate.jar";
-
- std::string error_msg;
-
- Copy(GetDexSrc1(), dex_location);
- EXPECT_FALSE(LocationIsOnSystem(dex_location.c_str()));
-
- {
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- true,
- false);
- int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
- ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
- EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
- }
-
- {
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- true,
- true);
- int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
- ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
- EXPECT_FALSE(oat_file_assistant.GetBestOatFile()->IsExecutable());
- }
-
- Copy(GetDexSrc1(), system_location);
- EXPECT_TRUE(LocationIsOnSystem(system_location.c_str()));
-
- {
- OatFileAssistant oat_file_assistant(system_location.c_str(),
- kRuntimeISA,
- true,
- false);
- int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
- ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
- EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
- }
-
- {
- OatFileAssistant oat_file_assistant(system_location.c_str(),
- kRuntimeISA,
- true,
- true);
- int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
- ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
- EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
- }
+ CompilerFilter::kDefaultCompilerFilter,
+ /* downgrade */ false,
+ /* profile_changed */ false,
+ updated_context.get()));
}
// TODO: More Tests:
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 59a1045ba2..bcad4a3428 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -465,57 +465,15 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
!runtime->IsAotCompiler(),
only_use_system_oat_files_);
- // Lock the target oat location to avoid races generating and loading the
- // oat file.
- std::string error_msg;
- if (!oat_file_assistant.Lock(/*out*/&error_msg)) {
- // Don't worry too much if this fails. If it does fail, it's unlikely we
- // can generate an oat file anyway.
- VLOG(class_linker) << "OatFileAssistant::Lock: " << error_msg;
- }
-
- const OatFile* source_oat_file = nullptr;
-
- if (!oat_file_assistant.IsUpToDate()) {
- // Update the oat file on disk if we can, based on the --compiler-filter
- // option derived from the current runtime options.
- // This may fail, but that's okay. Best effort is all that matters here.
- // TODO(calin): b/64530081 b/66984396. Pass a null context to verify and compile
- // secondary dex files in isolation (and avoid to extract/verify the main apk
- // if it's in the class path). Note this trades correctness for performance
- // since the resulting slow down is unacceptable in some cases until b/64530081
- // is fixed.
- // We still pass the class loader context when the classpath string of the runtime
- // is not empty, which is the situation when ART is invoked standalone.
- ClassLoaderContext* actual_context = Runtime::Current()->GetClassPathString().empty()
- ? nullptr
- : context.get();
- switch (oat_file_assistant.MakeUpToDate(/*profile_changed*/ false,
- actual_context,
- /*out*/ &error_msg)) {
- case OatFileAssistant::kUpdateFailed:
- LOG(WARNING) << error_msg;
- break;
-
- case OatFileAssistant::kUpdateNotAttempted:
- // Avoid spamming the logs if we decided not to attempt making the oat
- // file up to date.
- VLOG(oat) << error_msg;
- break;
-
- case OatFileAssistant::kUpdateSucceeded:
- // Nothing to do.
- break;
- }
- }
-
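With the locking and MakeUpToDate logic removed, the load path here reduces to asking the assistant for the best file already on disk. A sketch of the resulting flow, using only calls visible elsewhere in this diff (the third constructor argument is assumed to select executable loading, as in the tests):

    OatFileAssistant oat_file_assistant(dex_location.c_str(),
                                        kRuntimeISA,
                                        /* load_executable */ true);
    std::unique_ptr<const OatFile> oat_file = oat_file_assistant.GetBestOatFile();
    if (oat_file != nullptr) {
      // Use the precompiled code; otherwise fall back to the dex files
      // and let the JIT/interpreter handle execution.
    }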
// Get the oat file on disk.
std::unique_ptr<const OatFile> oat_file(oat_file_assistant.GetBestOatFile().release());
VLOG(oat) << "OatFileAssistant(" << dex_location << ").GetBestOatFile()="
<< reinterpret_cast<uintptr_t>(oat_file.get())
<< " (executable=" << (oat_file != nullptr ? oat_file->IsExecutable() : false) << ")";
+ const OatFile* source_oat_file = nullptr;
CheckCollisionResult check_collision_result = CheckCollisionResult::kPerformedHasCollisions;
+ std::string error_msg;
if ((class_loader != nullptr || dex_elements != nullptr) && oat_file != nullptr) {
// Prevent oat files from being loaded if no class_loader or dex_elements are provided.
// This can happen when the deprecated DexFile.<init>(String) is called directly, and it
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index 12dfe20d56..51d8fca6c5 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -77,11 +77,11 @@ TEST_F(OatFileTest, LoadOat) {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
oat_location.c_str(),
oat_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr);
@@ -105,11 +105,11 @@ TEST_F(OatFileTest, ChangingMultiDexUncompressed) {
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
oat_location.c_str(),
oat_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ASSERT_EQ(2u, odex_file->GetOatDexFiles().size());
@@ -120,13 +120,13 @@ TEST_F(OatFileTest, ChangingMultiDexUncompressed) {
// And try to load again.
std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
- oat_location.c_str(),
- oat_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
+ oat_location,
+ oat_location,
+ /* requested_base */ nullptr,
+ /* executable */ false,
+ /* low_4gb */ false,
dex_location.c_str(),
+ /* reservation */ nullptr,
&error_msg));
EXPECT_TRUE(odex_file == nullptr);
EXPECT_NE(std::string::npos, error_msg.find("expected 2 uncompressed dex files, but found 1"))
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index a44e5a4b54..4d16eb537d 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -220,9 +220,6 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.Define({"-Xrelocate", "-Xnorelocate"})
.WithValues({true, false})
.IntoKey(M::Relocate)
- .Define({"-Xdex2oat", "-Xnodex2oat"})
- .WithValues({true, false})
- .IntoKey(M::Dex2Oat)
.Define({"-Ximage-dex2oat", "-Xnoimage-dex2oat"})
.WithValues({true, false})
.IntoKey(M::ImageDex2Oat)
@@ -518,9 +515,12 @@ bool ParsedOptions::DoParse(const RuntimeOptions& options,
LOG(INFO) << "setting boot class path to " << *args.Get(M::BootClassPath);
}
- if (args.GetOrDefault(M::UseJitCompilation) && args.GetOrDefault(M::Interpret)) {
- Usage("-Xusejit:true and -Xint cannot be specified together");
- Exit(0);
+ if (args.GetOrDefault(M::Interpret)) {
+ if (args.Exists(M::UseJitCompilation) && *args.Get(M::UseJitCompilation)) {
+ Usage("-Xusejit:true and -Xint cannot be specified together\n");
+ Exit(0);
+ }
+ args.Set(M::UseJitCompilation, false);
}
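The new parse rule gives -Xint an implicit -Xusejit:false while still rejecting an explicit contradiction, which matters now that UseJitCompilation defaults to true (see the runtime_options.def hunk below). A small decision sketch of that precedence; the function and its parameters are hypothetical:

    #include <optional>

    // Returns the effective JIT setting, or nullopt for "usage error".
    std::optional<bool> EffectiveUseJit(bool interpret,
                                        std::optional<bool> explicit_jit) {
      if (!interpret) {
        return explicit_jit.value_or(true);   // new default: JIT on
      }
      if (explicit_jit.value_or(false)) {
        return std::nullopt;                  // -Xint + -Xusejit:true conflict
      }
      return false;                           // -Xint implies no JIT
    }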
// Set a default boot class path if we didn't get an explicit one via command line.
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 36dea60367..f1e485b951 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -30,7 +30,7 @@ namespace proxy_test {
class ProxyTest : public CommonRuntimeTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
// The creation of a Proxy class uses WellKnownClasses. These are not normally initialized by
// CommonRuntimeTest so we need to do that now.
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 7f5717f736..7b92151c66 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -58,7 +58,7 @@ QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimizatio
full_fragment_done_(false) {}
// Finds catch handler.
-class CatchBlockStackVisitor FINAL : public StackVisitor {
+class CatchBlockStackVisitor final : public StackVisitor {
public:
CatchBlockStackVisitor(Thread* self,
Context* context,
@@ -72,7 +72,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
skip_frames_(skip_frames) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
if (method == nullptr) {
@@ -350,7 +350,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
}
// Prepares deoptimization.
-class DeoptimizeStackVisitor FINAL : public StackVisitor {
+class DeoptimizeStackVisitor final : public StackVisitor {
public:
DeoptimizeStackVisitor(Thread* self,
Context* context,
@@ -399,7 +399,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
}
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
ArtMethod* method = GetMethod();
if (method == nullptr || single_frame_done_) {
@@ -667,14 +667,14 @@ void QuickExceptionHandler::DoLongJump(bool smash_caller_saves) {
}
// Prints out methods with their type of frame.
-class DumpFramesWithTypeStackVisitor FINAL : public StackVisitor {
+class DumpFramesWithTypeStackVisitor final : public StackVisitor {
public:
explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
show_details_(show_details) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
if (show_details_) {
LOG(INFO) << "|> pc = " << std::hex << GetCurrentQuickFramePc();
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index facebda953..6878cc08c8 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -216,7 +216,6 @@ Runtime::Runtime()
must_relocate_(false),
is_concurrent_gc_enabled_(true),
is_explicit_gc_disabled_(false),
- dex2oat_enabled_(true),
image_dex2oat_enabled_(true),
default_stack_size_(0),
heap_(nullptr),
@@ -425,7 +424,7 @@ Runtime::~Runtime() {
low_4gb_arena_pool_.reset();
arena_pool_.reset();
jit_arena_pool_.reset();
- protected_fault_page_.reset();
+ protected_fault_page_.Reset();
MemMap::Shutdown();
// TODO: acquire a static mutex on Runtime to avoid racing.
@@ -1162,18 +1161,17 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
{
constexpr uintptr_t kSentinelAddr =
RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
- protected_fault_page_.reset(MemMap::MapAnonymous("Sentinel fault page",
- reinterpret_cast<uint8_t*>(kSentinelAddr),
- kPageSize,
- PROT_NONE,
- /* low_4g */ true,
- /* reuse */ false,
- /* error_msg */ nullptr));
- if (protected_fault_page_ == nullptr) {
+ protected_fault_page_ = MemMap::MapAnonymous("Sentinel fault page",
+ reinterpret_cast<uint8_t*>(kSentinelAddr),
+ kPageSize,
+ PROT_NONE,
+ /* low_4g */ true,
+ /* error_msg */ nullptr);
+ if (!protected_fault_page_.IsValid()) {
LOG(WARNING) << "Could not reserve sentinel fault page";
- } else if (reinterpret_cast<uintptr_t>(protected_fault_page_->Begin()) != kSentinelAddr) {
+ } else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
- protected_fault_page_.reset();
+ protected_fault_page_.Reset();
}
}
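The sentinel fault page reserves one PROT_NONE page at the bad-GPR base so stray jumps fault predictably; the change above only swaps unique_ptr<MemMap> for a value-type MemMap with IsValid()/Reset(). A raw-mmap sketch of the reservation, assuming POSIX, with kSentinelAddr and kPageSize as in the hunk; the address check mirrors the code above:

    #include <sys/mman.h>

    void* hint = reinterpret_cast<void*>(kSentinelAddr);
    void* page = mmap(hint, kPageSize, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, /* fd */ -1, /* offset */ 0);
    if (page == MAP_FAILED) {
      // Could not reserve the sentinel fault page at all.
    } else if (page != hint) {
      // The kernel placed it elsewhere; a wrongly placed sentinel is useless,
      // so release it (the code above does the equivalent with Reset()).
      munmap(page, kPageSize);
    }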
@@ -1196,7 +1194,6 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
must_relocate_ = runtime_options.GetOrDefault(Opt::Relocate);
is_zygote_ = runtime_options.Exists(Opt::Zygote);
is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
- dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::Dex2Oat);
image_dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::ImageDex2Oat);
dump_native_stack_on_sig_quit_ = runtime_options.GetOrDefault(Opt::DumpNativeStackOnSigQuit);
@@ -2639,7 +2636,7 @@ class UpdateEntryPointsClassVisitor : public ClassVisitor {
explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
: instrumentation_(instrumentation) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& m : klass->GetMethods(pointer_size)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
diff --git a/runtime/runtime.h b/runtime/runtime.h
index a98e8a81ed..f0bf7548af 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -29,6 +29,7 @@
#include "arch/instruction_set.h"
#include "base/macros.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
@@ -86,7 +87,6 @@ class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
-class MemMap;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
@@ -142,10 +142,6 @@ class Runtime {
return must_relocate_;
}
- bool IsDex2OatEnabled() const {
- return dex2oat_enabled_ && IsImageDex2OatEnabled();
- }
-
bool IsImageDex2OatEnabled() const {
return image_dex2oat_enabled_;
}
@@ -846,7 +842,6 @@ class Runtime {
bool must_relocate_;
bool is_concurrent_gc_enabled_;
bool is_explicit_gc_disabled_;
- bool dex2oat_enabled_;
bool image_dex2oat_enabled_;
std::string compiler_executable_;
@@ -1090,7 +1085,7 @@ class Runtime {
std::atomic<uint32_t> deoptimization_counts_[
static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];
- std::unique_ptr<MemMap> protected_fault_page_;
+ MemMap protected_fault_page_;
uint32_t verifier_logging_threshold_ms_;
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 794ac19c4b..aaedb23e45 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -50,7 +50,7 @@ namespace art {
class RuntimeCallbacksTest : public CommonRuntimeTest {
protected:
- void SetUp() OVERRIDE {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
Thread* self = Thread::Current();
@@ -60,7 +60,7 @@ class RuntimeCallbacksTest : public CommonRuntimeTest {
AddListener();
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
{
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -101,10 +101,10 @@ class ThreadLifecycleCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest
}
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddThreadLifecycleCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveThreadLifecycleCallback(&cb_);
}
@@ -117,7 +117,7 @@ class ThreadLifecycleCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest
};
struct Callback : public ThreadLifecycleCallback {
- void ThreadStart(Thread* self) OVERRIDE {
+ void ThreadStart(Thread* self) override {
if (state == CallbackState::kBase) {
state = CallbackState::kStarted;
stored_self = self;
@@ -126,7 +126,7 @@ class ThreadLifecycleCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest
}
}
- void ThreadDeath(Thread* self) OVERRIDE {
+ void ThreadDeath(Thread* self) override {
if (state == CallbackState::kStarted && self == stored_self) {
state = CallbackState::kDied;
} else {
@@ -190,19 +190,18 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackJava)
TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
std::string error_msg;
- std::unique_ptr<MemMap> stack(MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
- nullptr,
- 128 * kPageSize, // Just some small stack.
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- ASSERT_FALSE(stack == nullptr) << error_msg;
+ MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
+ /* addr */ nullptr,
+ 128 * kPageSize, // Just some small stack.
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(stack.IsValid()) << error_msg;
const char* reason = "ThreadLifecycleCallback test thread";
pthread_attr_t attr;
CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
- CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack->Begin(), stack->Size()), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack.Begin(), stack.Size()), reason);
pthread_t pthread;
CHECK_PTHREAD_CALL(pthread_create,
(&pthread,
@@ -220,10 +219,10 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttac
class ClassLoadCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddClassLoadCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveClassLoadCallback(&cb_);
}
@@ -253,14 +252,14 @@ class ClassLoadCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
}
struct Callback : public ClassLoadCallback {
- virtual void ClassPreDefine(const char* descriptor,
- Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
- Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
- const DexFile& initial_dex_file,
- const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
- /*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
- /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void ClassPreDefine(const char* descriptor,
+ Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
+ Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
+ const DexFile& initial_dex_file,
+ const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
+ /*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
+ /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const std::string& location = initial_dex_file.GetLocation();
std::string event =
std::string("PreDefine:") + descriptor + " <" +
@@ -268,14 +267,14 @@ class ClassLoadCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
data.push_back(event);
}
- void ClassLoad(Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ void ClassLoad(Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string tmp;
std::string event = std::string("Load:") + klass->GetDescriptor(&tmp);
data.push_back(event);
}
void ClassPrepare(Handle<mirror::Class> temp_klass,
- Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string tmp, tmp2;
std::string event = std::string("Prepare:") + klass->GetDescriptor(&tmp)
+ "[" + temp_klass->GetDescriptor(&tmp2) + "]";
@@ -320,15 +319,15 @@ TEST_F(ClassLoadCallbackRuntimeCallbacksTest, ClassLoadCallback) {
class RuntimeSigQuitCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddRuntimeSigQuitCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimeSigQuitCallback(&cb_);
}
struct Callback : public RuntimeSigQuitCallback {
- void SigQuit() OVERRIDE {
+ void SigQuit() override {
++sigquit_count;
}
@@ -363,20 +362,20 @@ TEST_F(RuntimeSigQuitCallbackRuntimeCallbacksTest, SigQuit) {
class RuntimePhaseCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&cb_);
}
- void TearDown() OVERRIDE {
+ void TearDown() override {
// Bypass RuntimeCallbacksTest::TearDown, as the runtime is already gone.
CommonRuntimeTest::TearDown();
}
struct Callback : public RuntimePhaseCallback {
- void NextRuntimePhase(RuntimePhaseCallback::RuntimePhase p) OVERRIDE {
+ void NextRuntimePhase(RuntimePhaseCallback::RuntimePhase p) override {
if (p == RuntimePhaseCallback::RuntimePhase::kInitialAgents) {
if (start_seen > 0 || init_seen > 0 || death_seen > 0) {
LOG(FATAL) << "Unexpected order";
@@ -435,10 +434,10 @@ TEST_F(RuntimePhaseCallbackRuntimeCallbacksTest, Phases) {
class MonitorWaitCallbacksTest : public RuntimeCallbacksTest {
protected:
- void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void AddListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->AddMonitorCallback(&cb_);
}
- void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
Runtime::Current()->GetRuntimeCallbacks()->RemoveMonitorCallback(&cb_);
}
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index ef21f9f9e0..ae1e08f10b 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -21,7 +21,7 @@
// This file defines the list of keys for RuntimeOptions.
// These can be used with RuntimeOptions.Get/Set/etc, for example:
-// RuntimeOptions opt; bool* dex2oat_enabled = opt.Get(RuntimeOptions::Dex2Oat);
+// RuntimeOptions opt; bool* image_dex2oat_enabled = opt.Get(RuntimeOptions::ImageDex2Oat);
//
// Column Descriptions:
// <<Type>> <<Key Name>> <<Default Value>>
@@ -69,7 +69,7 @@ RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier))
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
-RUNTIME_OPTIONS_KEY (bool, UseJitCompilation, false)
+RUNTIME_OPTIONS_KEY (bool, UseJitCompilation, true)
RUNTIME_OPTIONS_KEY (bool, DumpNativeStackOnSigQuit, true)
RUNTIME_OPTIONS_KEY (bool, MadviseRandomAccess, false)
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold)
@@ -88,7 +88,6 @@ RUNTIME_OPTIONS_KEY (std::vector<std::string>, \
RUNTIME_OPTIONS_KEY (std::string, JniTrace)
RUNTIME_OPTIONS_KEY (std::string, PatchOat)
RUNTIME_OPTIONS_KEY (bool, Relocate, kDefaultMustRelocate)
-RUNTIME_OPTIONS_KEY (bool, Dex2Oat, true)
RUNTIME_OPTIONS_KEY (bool, ImageDex2Oat, true)
RUNTIME_OPTIONS_KEY (bool, Interpret, false) // -Xint
// Disable the compiler for CC (for now).
diff --git a/runtime/stack.cc b/runtime/stack.cc
index ce99fb9591..eb9c661d18 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -461,7 +461,7 @@ size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
: StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}
- bool VisitFrame() OVERRIDE {
+ bool VisitFrame() override {
frames++;
return true;
}
@@ -487,7 +487,7 @@ bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next
next_dex_pc_(0) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (found_frame_) {
ArtMethod* method = GetMethod();
if (method != nullptr && !method->IsRuntimeMethod()) {
@@ -520,7 +520,7 @@ void StackVisitor::DescribeStack(Thread* thread) {
explicit DescribeStackVisitor(Thread* thread_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
return true;
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index df7f19d118..19fe4ea7c5 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -25,6 +25,12 @@
#include <sys/resource.h>
#include <sys/time.h>
+#if __has_feature(hwaddress_sanitizer)
+#include <sanitizer/hwasan_interface.h>
+#else
+#define __hwasan_tag_pointer(p, t) (p)
+#endif
+
#include <algorithm>
#include <bitset>
#include <cerrno>
@@ -623,7 +629,9 @@ void Thread::InstallImplicitProtection() {
#endif
volatile char space[kPageSize - (kAsanMultiplier * 256)];
char sink ATTRIBUTE_UNUSED = space[zero]; // NOLINT
- if (reinterpret_cast<uintptr_t>(space) >= target + kPageSize) {
+ // Remove tag from the pointer. Nop in non-hwasan builds.
+ uintptr_t addr = reinterpret_cast<uintptr_t>(__hwasan_tag_pointer(space, 0));
+ if (addr >= target + kPageSize) {
Touch(target);
}
zero *= 2; // Try to avoid tail recursion.
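Context for the hunk above: HWASan stores an 8-bit tag in the unused top byte of AArch64 pointers, so a tagged stack address reinterpreted as an integer can compare above an untagged target; __hwasan_tag_pointer(p, 0) strips the tag, and the fallback macro makes it a no-op elsewhere. A self-contained sketch of the same guard, mirroring the patch's fallback (the helper name is hypothetical):

    #ifndef __has_feature
    #define __has_feature(x) 0  // non-clang toolchains: assume no HWASan
    #endif
    #if __has_feature(hwaddress_sanitizer)
    #include <sanitizer/hwasan_interface.h>
    #else
    #define __hwasan_tag_pointer(p, t) (p)  // no-op without HWASan, as in the patch
    #endif
    #include <cstddef>
    #include <cstdint>

    // Compare stack addresses with the HWASan tag removed; otherwise the
    // tag bits in the top byte would take part in the comparison.
    bool AtOrAbovePage(const char* space, uintptr_t target, size_t page_size) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(__hwasan_tag_pointer(space, 0));
      return addr >= target + page_size;
    }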
@@ -1486,7 +1494,7 @@ class BarrierClosure : public Closure {
public:
explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
wrapped_->Run(self);
barrier_.Pass(self);
}
@@ -1844,7 +1852,7 @@ struct StackDumpVisitor : public MonitorObjectsStackVisitor {
static constexpr size_t kMaxRepetition = 3u;
VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
ObjPtr<mirror::Class> c = m->GetDeclaringClass();
@@ -1883,24 +1891,24 @@ struct StackDumpVisitor : public MonitorObjectsStackVisitor {
return VisitMethodResult::kContinueMethod;
}
- VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+ VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
return VisitMethodResult::kContinueMethod;
}
void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
PrintObject(obj, " - waiting on ", ThreadList::kInvalidThreadId);
}
void VisitSleepingObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
PrintObject(obj, " - sleeping on ", ThreadList::kInvalidThreadId);
}
void VisitBlockedOnObject(mirror::Object* obj,
ThreadState state,
uint32_t owner_tid)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
const char* msg;
switch (state) {
@@ -1919,7 +1927,7 @@ struct StackDumpVisitor : public MonitorObjectsStackVisitor {
PrintObject(obj, msg, owner_tid);
}
void VisitLockedObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
PrintObject(obj, " - locked ", ThreadList::kInvalidThreadId);
}
@@ -2216,7 +2224,7 @@ class MonitorExitVisitor : public SingleRootVisitor {
// NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ override NO_THREAD_SAFETY_ANALYSIS {
if (self_->HoldsLock(entered_monitor)) {
LOG(WARNING) << "Calling MonitorExit on object "
<< entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")"
@@ -2845,7 +2853,7 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
protected:
VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(
soaa_, m, GetDexPc(/* abort on error */ false));
@@ -2856,7 +2864,7 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
return VisitMethodResult::kContinueMethod;
}
- VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+ VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
lock_objects_.push_back({});
lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_);
@@ -2866,24 +2874,24 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu
}
void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
}
void VisitSleepingObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
}
void VisitBlockedOnObject(mirror::Object* obj,
ThreadState state ATTRIBUTE_UNUSED,
uint32_t owner_tid ATTRIBUTE_UNUSED)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
}
void VisitLockedObject(mirror::Object* obj)
- OVERRIDE
+ override
REQUIRES_SHARED(Locks::mutator_lock_) {
frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj));
}
@@ -3450,7 +3458,7 @@ Context* Thread::GetLongJumpContext() {
// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
-struct CurrentMethodVisitor FINAL : public StackVisitor {
+struct CurrentMethodVisitor final : public StackVisitor {
CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread,
@@ -3461,7 +3469,7 @@ struct CurrentMethodVisitor FINAL : public StackVisitor {
method_(nullptr),
dex_pc_(0),
abort_on_error_(abort_on_error) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
// Continue if this is a runtime method.
@@ -3857,7 +3865,7 @@ void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
class VerifyRootVisitor : public SingleRootVisitor {
public:
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ override REQUIRES_SHARED(Locks::mutator_lock_) {
VerifyObject(root);
}
};
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ba333f6dd9..cddc275839 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -199,7 +199,7 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack)
static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;
// A closure used by Thread::Dump.
-class DumpCheckpoint FINAL : public Closure {
+class DumpCheckpoint final : public Closure {
public:
DumpCheckpoint(std::ostream* os, bool dump_native_stack)
: os_(os),
@@ -211,7 +211,7 @@ class DumpCheckpoint FINAL : public Closure {
}
}
- void Run(Thread* thread) OVERRIDE {
+ void Run(Thread* thread) override {
// Note thread and self may not be equal if thread was already suspended at the point of the
// request.
Thread* self = Thread::Current();
@@ -1476,6 +1476,9 @@ void ThreadList::Unregister(Thread* self) {
list_.remove(self);
break;
}
+ // In the case where we are not suspended yet, sleep to give other threads time to execute.
+ // This is important if there are realtime threads. b/111277984
+ usleep(1);
}
// We failed to remove the thread due to a suspend request, loop and try again.
}
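The usleep(1) added above addresses starvation: a realtime-priority thread spinning in this retry loop can keep the thread that requested the suspension from ever being scheduled (the b/111277984 scenario in the comment). A generic sketch of the retry-with-tiny-sleep pattern, not ART code:

    #include <unistd.h>

    // Retry loop that sleeps briefly between attempts so lower-priority
    // threads holding the contended state still get CPU time under a
    // realtime scheduling policy.
    template <typename TryOnce>
    void RetryWithTinySleep(TryOnce try_once) {
      while (!try_once()) {
        usleep(1);  // sched_yield() alone may not help against RT threads
      }
    }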
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 26ca19054d..28fc59c814 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -46,19 +46,23 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
// Add an inaccessible page to catch stack overflow.
stack_size += kPageSize;
std::string error_msg;
- stack_.reset(MemMap::MapAnonymous(name.c_str(), nullptr, stack_size, PROT_READ | PROT_WRITE,
- false, false, &error_msg));
- CHECK(stack_.get() != nullptr) << error_msg;
- CHECK_ALIGNED(stack_->Begin(), kPageSize);
+ stack_ = MemMap::MapAnonymous(name.c_str(),
+ /* addr */ nullptr,
+ stack_size,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ &error_msg);
+ CHECK(stack_.IsValid()) << error_msg;
+ CHECK_ALIGNED(stack_.Begin(), kPageSize);
CheckedCall(mprotect,
"mprotect bottom page of thread pool worker stack",
- stack_->Begin(),
+ stack_.Begin(),
kPageSize,
PROT_NONE);
const char* reason = "new thread pool worker thread";
pthread_attr_t attr;
CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
- CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack_->Begin(), stack_->Size()), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack_.Begin(), stack_.Size()), reason);
CHECK_PTHREAD_CALL(pthread_create, (&pthread_, &attr, &Callback, this), reason);
CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), reason);
}
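This hunk is one instance of a MemMap migration that recurs through the patch: MemMap becomes a movable value type, so std::unique_ptr<MemMap>, -> dereferences, and null checks give way to direct members and IsValid(). A condensed sketch of the new idiom, stitched together from the call sites in this patch (assumes ART's mem_map.h and logging macros):

    std::string error_msg;
    MemMap stack = MemMap::MapAnonymous("worker stack",
                                        /* addr */ nullptr,
                                        stack_size,
                                        PROT_READ | PROT_WRITE,
                                        /* low_4gb */ false,
                                        &error_msg);
    CHECK(stack.IsValid()) << error_msg;  // was: CHECK(stack_.get() != nullptr)
    uint8_t* begin = stack.Begin();       // direct access replaces stack_->Begin()
    size_t size = stack.Size();
    MemMap moved = std::move(stack);      // move semantics replace mmap.release()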
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 2784953d69..98a1193e72 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -53,8 +53,8 @@ class ThreadPoolWorker {
static const size_t kDefaultStackSize = 1 * MB;
size_t GetStackSize() const {
- DCHECK(stack_.get() != nullptr);
- return stack_->Size();
+ DCHECK(stack_.IsValid());
+ return stack_.Size();
}
virtual ~ThreadPoolWorker();
@@ -71,7 +71,7 @@ class ThreadPoolWorker {
ThreadPool* const thread_pool_;
const std::string name_;
- std::unique_ptr<MemMap> stack_;
+ MemMap stack_;
pthread_t pthread_;
Thread* thread_;
diff --git a/runtime/trace.h b/runtime/trace.h
index 1fae250d77..5d9649320a 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -102,7 +102,7 @@ enum TraceAction {
// Class for recording event traces. Trace data is either collected
// synchronously during execution (TracingMode::kMethodTracingActive),
// or by a separate sampling thread (TracingMode::kSampleProfilingActive).
-class Trace FINAL : public instrumentation::InstrumentationListener {
+class Trace final : public instrumentation::InstrumentationListener {
public:
enum TraceFlag {
kTraceCountAllocs = 1,
@@ -181,57 +181,57 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
ArtMethod* method,
uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void MethodExited(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc,
const JValue& return_value)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void MethodUnwind(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void DexPcMoved(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t new_dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
- OVERRIDE;
+ override;
void FieldRead(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc,
ArtField* field)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void FieldWritten(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc,
ArtField* field,
const JValue& field_value)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void ExceptionThrown(Thread* thread,
Handle<mirror::Throwable> exception_object)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void Branch(Thread* thread,
ArtMethod* method,
uint32_t dex_pc,
int32_t dex_pc_offset)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void InvokeVirtualOrInterface(Thread* thread,
Handle<mirror::Object> this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
- REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) override;
// Reuse an old stack trace if it exists, otherwise allocate a new one.
static std::vector<ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 7adf140218..de6edd2ff3 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -39,7 +39,7 @@ class String;
} // namespace mirror
class InternTable;
-class Transaction FINAL {
+class Transaction final {
public:
static constexpr const char* kAbortExceptionDescriptor = "dalvik.system.TransactionAbortError";
static constexpr const char* kAbortExceptionSignature = "Ldalvik/system/TransactionAbortError;";
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 32aa86dc93..452cd8e359 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -144,23 +144,24 @@ std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr,
mmap_reuse = false;
}
CHECK(!mmap_reuse || mmap_addr != nullptr);
- std::unique_ptr<MemMap> mmap(MemMap::MapFileAtAddress(
+ MemMap mmap = MemMap::MapFileAtAddress(
mmap_addr,
vdex_length,
(writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
unquicken ? MAP_PRIVATE : MAP_SHARED,
file_fd,
- 0 /* start offset */,
+ /* start */ 0u,
low_4gb,
- mmap_reuse,
vdex_filename.c_str(),
- error_msg));
- if (mmap == nullptr) {
+ mmap_reuse,
+ /* reservation */ nullptr,
+ error_msg);
+ if (!mmap.IsValid()) {
*error_msg = "Failed to mmap file " + vdex_filename + " : " + *error_msg;
return nullptr;
}
- std::unique_ptr<VdexFile> vdex(new VdexFile(mmap.release()));
+ std::unique_ptr<VdexFile> vdex(new VdexFile(std::move(mmap)));
if (!vdex->IsValid()) {
*error_msg = "Vdex file is not valid";
return nullptr;
@@ -175,7 +176,7 @@ std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr,
/* decompile_return_instruction */ false);
// Update the quickening info size to pretend there isn't any.
size_t offset = vdex->GetDexSectionHeaderOffset();
- reinterpret_cast<DexSectionHeader*>(vdex->mmap_->Begin() + offset)->quickening_info_size_ = 0;
+ reinterpret_cast<DexSectionHeader*>(vdex->mmap_.Begin() + offset)->quickening_info_size_ = 0;
}
*error_msg = "Success";
@@ -299,10 +300,6 @@ void VdexFile::UnquickenDexFile(const DexFile& target_dex_file,
decompile_return_instruction);
}
}
- method.UnHideAccessFlags();
- }
- for (const ClassAccessor::Field& field : class_accessor.GetFields()) {
- field.UnHideAccessFlags();
}
}
}
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 866a57e7d2..a39ec3128f 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -153,7 +153,7 @@ class VdexFile {
typedef uint32_t VdexChecksum;
using QuickeningTableOffsetType = uint32_t;
- explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
+ explicit VdexFile(MemMap&& mmap) : mmap_(std::move(mmap)) {}
// Returns nullptr if the vdex file cannot be opened or is not valid.
// The mmap_* parameters can be left empty (nullptr/0/false) to allocate at a random address.
@@ -215,9 +215,9 @@ class VdexFile {
error_msg);
}
- const uint8_t* Begin() const { return mmap_->Begin(); }
- const uint8_t* End() const { return mmap_->End(); }
- size_t Size() const { return mmap_->Size(); }
+ const uint8_t* Begin() const { return mmap_.Begin(); }
+ const uint8_t* End() const { return mmap_.End(); }
+ size_t Size() const { return mmap_.Size(); }
const VerifierDepsHeader& GetVerifierDepsHeader() const {
return *reinterpret_cast<const VerifierDepsHeader*>(Begin());
@@ -260,7 +260,7 @@ class VdexFile {
}
bool IsValid() const {
- return mmap_->Size() >= sizeof(VerifierDepsHeader) && GetVerifierDepsHeader().IsValid();
+ return mmap_.Size() >= sizeof(VerifierDepsHeader) && GetVerifierDepsHeader().IsValid();
}
// This method is for iterating over the dex files in the vdex. If `cursor` is null,
@@ -328,7 +328,7 @@ class VdexFile {
return DexBegin() + GetDexSectionHeader().GetDexSize();
}
- std::unique_ptr<MemMap> mmap_;
+ MemMap mmap_;
DISALLOW_COPY_AND_ASSIGN(VdexFile);
};
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
index e67067cdde..e5e71a4d07 100644
--- a/runtime/verifier/instruction_flags.h
+++ b/runtime/verifier/instruction_flags.h
@@ -25,7 +25,7 @@
namespace art {
namespace verifier {
-class InstructionFlags FINAL {
+class InstructionFlags final {
public:
InstructionFlags() : flags_(0) {}
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 29da376091..3099b231e2 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -378,11 +378,11 @@ class RegType {
};
// Bottom type.
-class ConflictType FINAL : public RegType {
+class ConflictType final : public RegType {
public:
- bool IsConflict() const OVERRIDE { return true; }
+ bool IsConflict() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
// Get the singleton Conflict instance.
static const ConflictType* GetInstance() PURE;
@@ -396,7 +396,7 @@ class ConflictType FINAL : public RegType {
// Destroy the singleton instance.
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kConflict;
}
@@ -414,11 +414,11 @@ class ConflictType FINAL : public RegType {
// A variant of the bottom type used to specify an undefined value in the
// incoming registers.
// Merging with UndefinedType yields ConflictType which is the true bottom.
-class UndefinedType FINAL : public RegType {
+class UndefinedType final : public RegType {
public:
- bool IsUndefined() const OVERRIDE { return true; }
+ bool IsUndefined() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
// Get the singleton Undefined instance.
static const UndefinedType* GetInstance() PURE;
@@ -432,7 +432,7 @@ class UndefinedType FINAL : public RegType {
// Destroy the singleton instance.
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -453,7 +453,7 @@ class PrimitiveType : public RegType {
const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
};
class Cat1Type : public PrimitiveType {
@@ -462,10 +462,10 @@ class Cat1Type : public PrimitiveType {
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
};
-class IntegerType FINAL : public Cat1Type {
+class IntegerType final : public Cat1Type {
public:
- bool IsInteger() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsInteger() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const IntegerType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -473,7 +473,7 @@ class IntegerType FINAL : public Cat1Type {
static const IntegerType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kInteger;
}
@@ -487,10 +487,10 @@ class IntegerType FINAL : public Cat1Type {
static const IntegerType* instance_;
};
-class BooleanType FINAL : public Cat1Type {
+class BooleanType final : public Cat1Type {
public:
- bool IsBoolean() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsBoolean() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const BooleanType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -498,7 +498,7 @@ class BooleanType FINAL : public Cat1Type {
static const BooleanType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kBoolean;
}
@@ -513,10 +513,10 @@ class BooleanType FINAL : public Cat1Type {
static const BooleanType* instance_;
};
-class ByteType FINAL : public Cat1Type {
+class ByteType final : public Cat1Type {
public:
- bool IsByte() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsByte() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const ByteType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -524,7 +524,7 @@ class ByteType FINAL : public Cat1Type {
static const ByteType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kByte;
}
@@ -538,10 +538,10 @@ class ByteType FINAL : public Cat1Type {
static const ByteType* instance_;
};
-class ShortType FINAL : public Cat1Type {
+class ShortType final : public Cat1Type {
public:
- bool IsShort() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsShort() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const ShortType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -549,7 +549,7 @@ class ShortType FINAL : public Cat1Type {
static const ShortType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kShort;
}
@@ -562,10 +562,10 @@ class ShortType FINAL : public Cat1Type {
static const ShortType* instance_;
};
-class CharType FINAL : public Cat1Type {
+class CharType final : public Cat1Type {
public:
- bool IsChar() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsChar() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const CharType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -573,7 +573,7 @@ class CharType FINAL : public Cat1Type {
static const CharType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kChar;
}
@@ -587,10 +587,10 @@ class CharType FINAL : public Cat1Type {
static const CharType* instance_;
};
-class FloatType FINAL : public Cat1Type {
+class FloatType final : public Cat1Type {
public:
- bool IsFloat() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsFloat() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
static const FloatType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -598,7 +598,7 @@ class FloatType FINAL : public Cat1Type {
static const FloatType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kFloat;
}
@@ -619,11 +619,11 @@ class Cat2Type : public PrimitiveType {
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
};
-class LongLoType FINAL : public Cat2Type {
+class LongLoType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsLongLo() const OVERRIDE { return true; }
- bool IsLong() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsLongLo() const override { return true; }
+ bool IsLong() const override { return true; }
static const LongLoType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -631,7 +631,7 @@ class LongLoType FINAL : public Cat2Type {
static const LongLoType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kLongLo;
}
@@ -645,10 +645,10 @@ class LongLoType FINAL : public Cat2Type {
static const LongLoType* instance_;
};
-class LongHiType FINAL : public Cat2Type {
+class LongHiType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsLongHi() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsLongHi() const override { return true; }
static const LongHiType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -656,7 +656,7 @@ class LongHiType FINAL : public Cat2Type {
static const LongHiType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -670,11 +670,11 @@ class LongHiType FINAL : public Cat2Type {
static const LongHiType* instance_;
};
-class DoubleLoType FINAL : public Cat2Type {
+class DoubleLoType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsDoubleLo() const OVERRIDE { return true; }
- bool IsDouble() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsDoubleLo() const override { return true; }
+ bool IsDouble() const override { return true; }
static const DoubleLoType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -682,7 +682,7 @@ class DoubleLoType FINAL : public Cat2Type {
static const DoubleLoType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kDoubleLo;
}
@@ -696,10 +696,10 @@ class DoubleLoType FINAL : public Cat2Type {
static const DoubleLoType* instance_;
};
-class DoubleHiType FINAL : public Cat2Type {
+class DoubleHiType final : public Cat2Type {
public:
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- virtual bool IsDoubleHi() const OVERRIDE { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsDoubleHi() const override { return true; }
static const DoubleHiType* CreateInstance(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
@@ -707,7 +707,7 @@ class DoubleHiType FINAL : public Cat2Type {
static const DoubleHiType* GetInstance() PURE;
static void Destroy();
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -751,30 +751,30 @@ class ConstantType : public RegType {
}
}
- bool IsZero() const OVERRIDE {
+ bool IsZero() const override {
return IsPreciseConstant() && ConstantValue() == 0;
}
- bool IsOne() const OVERRIDE {
+ bool IsOne() const override {
return IsPreciseConstant() && ConstantValue() == 1;
}
- bool IsConstantChar() const OVERRIDE {
+ bool IsConstantChar() const override {
return IsConstant() && ConstantValue() >= 0 &&
ConstantValue() <= std::numeric_limits<uint16_t>::max();
}
- bool IsConstantByte() const OVERRIDE {
+ bool IsConstantByte() const override {
return IsConstant() &&
ConstantValue() >= std::numeric_limits<int8_t>::min() &&
ConstantValue() <= std::numeric_limits<int8_t>::max();
}
- bool IsConstantShort() const OVERRIDE {
+ bool IsConstantShort() const override {
return IsConstant() &&
ConstantValue() >= std::numeric_limits<int16_t>::min() &&
ConstantValue() <= std::numeric_limits<int16_t>::max();
}
- virtual bool IsConstantTypes() const OVERRIDE { return true; }
+ bool IsConstantTypes() const override { return true; }
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
@@ -782,7 +782,7 @@ class ConstantType : public RegType {
const uint32_t constant_;
};
-class PreciseConstType FINAL : public ConstantType {
+class PreciseConstType final : public ConstantType {
public:
PreciseConstType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -790,94 +790,94 @@ class PreciseConstType FINAL : public ConstantType {
CheckConstructorInvariants(this);
}
- bool IsPreciseConstant() const OVERRIDE { return true; }
+ bool IsPreciseConstant() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class PreciseConstLoType FINAL : public ConstantType {
+class PreciseConstLoType final : public ConstantType {
public:
PreciseConstLoType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsPreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsPreciseConstantLo() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class PreciseConstHiType FINAL : public ConstantType {
+class PreciseConstHiType final : public ConstantType {
public:
PreciseConstHiType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsPreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsPreciseConstantHi() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class ImpreciseConstType FINAL : public ConstantType {
+class ImpreciseConstType final : public ConstantType {
public:
ImpreciseConstType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsImpreciseConstant() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsImpreciseConstant() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class ImpreciseConstLoType FINAL : public ConstantType {
+class ImpreciseConstLoType final : public ConstantType {
public:
ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsImpreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsImpreciseConstantLo() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
-class ImpreciseConstHiType FINAL : public ConstantType {
+class ImpreciseConstHiType final : public ConstantType {
public:
ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
CheckConstructorInvariants(this);
}
- bool IsImpreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsImpreciseConstantHi() const override { return true; }
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kNotAssignable;
}
};
// Special "null" type that captures the semantics of null / bottom.
-class NullType FINAL : public RegType {
+class NullType final : public RegType {
public:
- bool IsNull() const OVERRIDE {
+ bool IsNull() const override {
return true;
}
@@ -892,15 +892,15 @@ class NullType FINAL : public RegType {
static void Destroy();
- std::string Dump() const OVERRIDE {
+ std::string Dump() const override {
return "null";
}
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
- bool IsConstantTypes() const OVERRIDE {
+ bool IsConstantTypes() const override {
return true;
}
@@ -925,15 +925,15 @@ class UninitializedType : public RegType {
uint16_t cache_id)
: RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
- bool IsUninitializedTypes() const OVERRIDE;
- bool IsNonZeroReferenceTypes() const OVERRIDE;
+ bool IsUninitializedTypes() const override;
+ bool IsNonZeroReferenceTypes() const override;
uint32_t GetAllocationPc() const {
DCHECK(IsUninitializedTypes());
return allocation_pc_;
}
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
@@ -942,7 +942,7 @@ class UninitializedType : public RegType {
};
// Similar to ReferenceType but not yet having been passed to a constructor.
-class UninitializedReferenceType FINAL : public UninitializedType {
+class UninitializedReferenceType final : public UninitializedType {
public:
UninitializedReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
@@ -953,16 +953,16 @@ class UninitializedReferenceType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- bool IsUninitializedReference() const OVERRIDE { return true; }
+ bool IsUninitializedReference() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
};
// Similar to UnresolvedReferenceType but not yet having been passed to a
// constructor.
-class UnresolvedUninitializedRefType FINAL : public UninitializedType {
+class UnresolvedUninitializedRefType final : public UninitializedType {
public:
UnresolvedUninitializedRefType(const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
@@ -971,19 +971,19 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedAndUninitializedReference() const OVERRIDE { return true; }
+ bool IsUnresolvedAndUninitializedReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
// Similar to UninitializedReferenceType but special case for the this argument
// of a constructor.
-class UninitializedThisReferenceType FINAL : public UninitializedType {
+class UninitializedThisReferenceType final : public UninitializedType {
public:
UninitializedThisReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
@@ -993,17 +993,17 @@ class UninitializedThisReferenceType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- virtual bool IsUninitializedThisReference() const OVERRIDE { return true; }
+ bool IsUninitializedThisReference() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
-class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
+class UnresolvedUninitializedThisRefType final : public UninitializedType {
public:
UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
uint16_t cache_id)
@@ -1012,19 +1012,19 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedAndUninitializedThisReference() const OVERRIDE { return true; }
+ bool IsUnresolvedAndUninitializedThisReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
// A type of register holding a reference to an Object of type GetClass or a
// sub-class.
-class ReferenceType FINAL : public RegType {
+class ReferenceType final : public RegType {
public:
ReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
@@ -1033,15 +1033,15 @@ class ReferenceType FINAL : public RegType {
CheckConstructorInvariants(this);
}
- bool IsReference() const OVERRIDE { return true; }
+ bool IsReference() const override { return true; }
- bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
+ bool IsNonZeroReferenceTypes() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
};
@@ -1049,22 +1049,22 @@ class ReferenceType FINAL : public RegType {
// A type of register holding a reference to an Object of type GetClass and
// only an object of that type.
-class PreciseReferenceType FINAL : public RegType {
+class PreciseReferenceType final : public RegType {
public:
PreciseReferenceType(ObjPtr<mirror::Class> klass,
const StringPiece& descriptor,
uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsPreciseReference() const OVERRIDE { return true; }
+ bool IsPreciseReference() const override { return true; }
- bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
+ bool IsNonZeroReferenceTypes() const override { return true; }
- bool HasClassVirtual() const OVERRIDE { return true; }
+ bool HasClassVirtual() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
};
@@ -1076,9 +1076,9 @@ class UnresolvedType : public RegType {
REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(nullptr, descriptor, cache_id) {}
- bool IsNonZeroReferenceTypes() const OVERRIDE;
+ bool IsNonZeroReferenceTypes() const override;
- AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ AssignmentType GetAssignmentTypeImpl() const override {
return AssignmentType::kReference;
}
};
@@ -1086,7 +1086,7 @@ class UnresolvedType : public RegType {
// Similar to ReferenceType except the Class couldn't be loaded. Assignability
// and other tests made of this type must be conservative.
-class UnresolvedReferenceType FINAL : public UnresolvedType {
+class UnresolvedReferenceType final : public UnresolvedType {
public:
UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -1094,18 +1094,18 @@ class UnresolvedReferenceType FINAL : public UnresolvedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedReference() const OVERRIDE { return true; }
+ bool IsUnresolvedReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
};
// Type representing the super-class of an unresolved type.
-class UnresolvedSuperClass FINAL : public UnresolvedType {
+class UnresolvedSuperClass final : public UnresolvedType {
public:
UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache,
uint16_t cache_id)
@@ -1116,19 +1116,19 @@ class UnresolvedSuperClass FINAL : public UnresolvedType {
CheckConstructorInvariants(this);
}
- bool IsUnresolvedSuperClass() const OVERRIDE { return true; }
+ bool IsUnresolvedSuperClass() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
uint16_t GetUnresolvedSuperClassChildId() const {
DCHECK(IsUnresolvedSuperClass());
return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
}
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
const uint16_t unresolved_child_id_;
const RegTypeCache* const reg_type_cache_;
@@ -1136,7 +1136,7 @@ class UnresolvedSuperClass FINAL : public UnresolvedType {
// A merge of unresolved (and resolved) types. If the types were resolved this may be
// Conflict or another known ReferenceType.
-class UnresolvedMergedType FINAL : public UnresolvedType {
+class UnresolvedMergedType final : public UnresolvedType {
public:
// Note: the constructor will copy the unresolved BitVector, not use it directly.
UnresolvedMergedType(const RegType& resolved,
@@ -1154,17 +1154,17 @@ class UnresolvedMergedType FINAL : public UnresolvedType {
return unresolved_types_;
}
- bool IsUnresolvedMergedReference() const OVERRIDE { return true; }
+ bool IsUnresolvedMergedReference() const override { return true; }
- bool IsUnresolvedTypes() const OVERRIDE { return true; }
+ bool IsUnresolvedTypes() const override { return true; }
- bool IsArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsObjectArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsArrayTypes() const override REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsObjectArrayTypes() const override REQUIRES_SHARED(Locks::mutator_lock_);
- std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
const RegTypeCache* const reg_type_cache_;
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 15a38f3fd7..0430d205af 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -1042,7 +1042,7 @@ TEST_F(RegTypeTest, ConstPrecision) {
class RegTypeOOMTest : public RegTypeTest {
protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ void SetUpRuntimeOptions(RuntimeOptions *options) override {
SetUpRuntimeOptionsForFillHeap(options);
// We must not appear to be a compiler, or we'll abort on the host.
diff --git a/simulator/code_simulator_arm64.h b/simulator/code_simulator_arm64.h
index 8b665292af..e726500452 100644
--- a/simulator/code_simulator_arm64.h
+++ b/simulator/code_simulator_arm64.h
@@ -36,11 +36,11 @@ class CodeSimulatorArm64 : public CodeSimulator {
static CodeSimulatorArm64* CreateCodeSimulatorArm64();
virtual ~CodeSimulatorArm64();
- void RunFrom(intptr_t code_buffer) OVERRIDE;
+ void RunFrom(intptr_t code_buffer) override;
- bool GetCReturnBool() const OVERRIDE;
- int32_t GetCReturnInt32() const OVERRIDE;
- int64_t GetCReturnInt64() const OVERRIDE;
+ bool GetCReturnBool() const override;
+ int32_t GetCReturnInt32() const override;
+ int64_t GetCReturnInt64() const override;
private:
CodeSimulatorArm64();
diff --git a/test/071-dexfile-map-clean/run b/test/071-dexfile-map-clean/run
index 9c100ec497..afa2ff7462 100755
--- a/test/071-dexfile-map-clean/run
+++ b/test/071-dexfile-map-clean/run
@@ -14,13 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Run without dex2oat so that we don't create oat/vdex files
-# when trying to load the secondary dex file.
-
-# In this way, the secondary dex file will be forced to be
-# loaded directly.
-#
-# In addition, make sure we call 'sync'
+# Make sure we call 'sync'
# before executing dalvikvm because otherwise
# it's highly likely the pushed JAR files haven't
# been committed to permanent storage yet,
@@ -28,4 +22,4 @@
# the memory is dirty (despite being file-backed).
# (Note: this was reproducible 100% of the time on
# a target angler device).
-./default-run "$@" --no-dex2oat --sync
+./default-run "$@" --sync
diff --git a/test/071-dexfile-map-clean/src/Main.java b/test/071-dexfile-map-clean/src/Main.java
index 8a196dd52e..e784ac601c 100644
--- a/test/071-dexfile-map-clean/src/Main.java
+++ b/test/071-dexfile-map-clean/src/Main.java
@@ -90,19 +90,6 @@ public class Main {
return true;
}
- // This test takes relies on dex2oat being skipped.
- // (enforced in 'run' file by using '--no-dex2oat'
- //
- // This could happen in a non-test situation
- // if a secondary dex file is loaded (but not yet maintenance-mode compiled)
- // with JIT.
- //
- // Or it could also happen if a secondary dex file is loaded and forced
- // into running into the interpreter (e.g. duplicate classes).
- //
- // Rather than relying on those weird fallbacks,
- // we force the runtime not to dex2oat the dex file to ensure
- // this test is repeatable and less brittle.
private static void testDexMemoryMaps() throws Exception {
// Ensure that the secondary dex file is mapped clean (directly from JAR file).
String smaps = new String(Files.readAllBytes(Paths.get("/proc/self/smaps")));
diff --git a/test/1000-non-moving-space-stress/expected.txt b/test/1000-non-moving-space-stress/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/1000-non-moving-space-stress/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/1000-non-moving-space-stress/info.txt b/test/1000-non-moving-space-stress/info.txt
new file mode 100644
index 0000000000..a20459f900
--- /dev/null
+++ b/test/1000-non-moving-space-stress/info.txt
@@ -0,0 +1,5 @@
+Regression test for a bug that used to trigger GC crashes during a
+sticky-bit CC (young-generation) collection involving an unreachable
+newly allocated object in the non-moving space with a dangling
+reference to an object cleared or moved from a newly allocated region
+of the region space.
diff --git a/test/1000-non-moving-space-stress/src-art/Main.java b/test/1000-non-moving-space-stress/src-art/Main.java
new file mode 100644
index 0000000000..18bfdd3c04
--- /dev/null
+++ b/test/1000-non-moving-space-stress/src-art/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import dalvik.system.VMRuntime;
+
+public class Main {
+
+ public static void main(String[] args) throws Exception {
+ VMRuntime runtime = VMRuntime.getRuntime();
+
+ try {
+ int N = 1024 * 1024;
+ int S = 512;
+ for (int n = 0; n < N; ++n) {
+ // Allocate unreachable objects.
+ $noinline$Alloc(runtime);
+ // Allocate an object with a substantial size to increase memory
+ // pressure and eventually trigger non-explicit garbage collection
+ // (explicit garbage collections triggered by java.lang.Runtime.gc()
+ // are always full GCs). Upon garbage collection, the objects
+ // allocated in $noinline$Alloc used to trigger a crash.
+ Object[] moving_array = new Object[S];
+ }
+ } catch (OutOfMemoryError e) {
+ // Stop here.
+ }
+ System.out.println("passed");
+ }
+
+ // When using the Concurrent Copying (CC) collector (default collector),
+ // this method allocates an object in the non-moving space and an object
+ // in the region space, makes the former reference the latter, and returns
+ // nothing (so that neither object is reachable upon return).
+ static void $noinline$Alloc(VMRuntime runtime) {
+ Object[] non_moving_array = (Object[]) runtime.newNonMovableArray(Object.class, 1);
+ // Small object, unlikely to trigger garbage collection.
+ non_moving_array[0] = new Object();
+ }
+
+}
diff --git a/test/116-nodex2oat/expected.txt b/test/116-nodex2oat/expected.txt
index 157dfc4ea4..c6c7daa21d 100644
--- a/test/116-nodex2oat/expected.txt
+++ b/test/116-nodex2oat/expected.txt
@@ -1,9 +1,2 @@
-Run -Xnodex2oat
JNI_OnLoad called
-Has oat is false, is dex2oat enabled is false.
-Run -Xdex2oat
-JNI_OnLoad called
-Has oat is true, is dex2oat enabled is true.
-Run default
-JNI_OnLoad called
-Has oat is true, is dex2oat enabled is true.
+Has oat is false.
diff --git a/test/116-nodex2oat/run b/test/116-nodex2oat/run
index d7984cece2..9063685fe7 100755
--- a/test/116-nodex2oat/run
+++ b/test/116-nodex2oat/run
@@ -24,20 +24,4 @@ if [[ "${flags}" == *--prebuild* || "${flags}" != *--no-prebuild* ]] ; then
exit 1
fi
-# Make sure we can run without an oat file.
-echo "Run -Xnodex2oat"
-${RUN} ${flags} --runtime-option -Xnodex2oat
-return_status1=$?
-
-# Make sure we can run with the oat file.
-echo "Run -Xdex2oat"
-${RUN} ${flags} --runtime-option -Xdex2oat
-return_status2=$?
-
-# Make sure we can run with the default settings.
-echo "Run default"
${RUN} ${flags}
-return_status3=$?
-
-# Make sure we don't silently ignore an early failure.
-(exit $return_status1) && (exit $return_status2) && (exit $return_status3)
diff --git a/test/116-nodex2oat/src/Main.java b/test/116-nodex2oat/src/Main.java
index 229735f4b8..5491c492a9 100644
--- a/test/116-nodex2oat/src/Main.java
+++ b/test/116-nodex2oat/src/Main.java
@@ -17,17 +17,8 @@
public class Main {
public static void main(String[] args) {
System.loadLibrary(args[0]);
- System.out.println(
- "Has oat is " + hasOatFile() + ", is dex2oat enabled is " + isDex2OatEnabled() + ".");
-
- if (hasOatFile() && !isDex2OatEnabled()) {
- throw new Error("Application with dex2oat disabled runs with an oat file");
- } else if (!hasOatFile() && isDex2OatEnabled()) {
- throw new Error("Application with dex2oat enabled runs without an oat file");
- }
+ System.out.println("Has oat is " + hasOatFile() + ".");
}
private native static boolean hasOatFile();
-
- private native static boolean isDex2OatEnabled();
}
diff --git a/test/117-nopatchoat/expected.txt b/test/117-nopatchoat/expected.txt
index 0cd4715d09..7a24e31775 100644
--- a/test/117-nopatchoat/expected.txt
+++ b/test/117-nopatchoat/expected.txt
@@ -1,12 +1,3 @@
-Run without dex2oat/patchoat
JNI_OnLoad called
-dex2oat & patchoat are disabled, has oat is true, has executable oat is expected.
-This is a function call
-Run with dexoat/patchoat
-JNI_OnLoad called
-dex2oat & patchoat are enabled, has oat is true, has executable oat is expected.
-This is a function call
-Run default
-JNI_OnLoad called
-dex2oat & patchoat are enabled, has oat is true, has executable oat is expected.
+Has oat is true, has executable oat is expected.
This is a function call
diff --git a/test/117-nopatchoat/run b/test/117-nopatchoat/run
index 0627fe5069..4c33f7a450 100755
--- a/test/117-nopatchoat/run
+++ b/test/117-nopatchoat/run
@@ -34,20 +34,4 @@ if [[ "${flags}" == *--no-relocate* ]] ; then
exit 1
fi
-# Make sure we can run without relocation
-echo "Run without dex2oat/patchoat"
-${RUN} ${flags} --runtime-option -Xnodex2oat
-return_status1=$?
-
-# Make sure we can run with the oat file.
-echo "Run with dexoat/patchoat"
-${RUN} ${flags} --runtime-option -Xdex2oat
-return_status2=$?
-
-# Make sure we can run with the default settings.
-echo "Run default"
${RUN} ${flags}
-return_status3=$?
-
-# Make sure we don't silently ignore an early failure.
-(exit $return_status1) && (exit $return_status2) && (exit $return_status3)
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
index 816eb171a4..ef47ab9ee4 100644
--- a/test/117-nopatchoat/src/Main.java
+++ b/test/117-nopatchoat/src/Main.java
@@ -23,18 +23,13 @@ public class Main {
// Hitting this condition should be rare and ideally we would prevent it from happening but
// there is no way to do so without major changes to the run-test framework.
boolean executable_correct = (needsRelocation() ?
- hasExecutableOat() == (isDex2OatEnabled() || isRelocationDeltaZero()) :
+ hasExecutableOat() == isRelocationDeltaZero() :
hasExecutableOat() == true);
System.out.println(
- "dex2oat & patchoat are " + ((isDex2OatEnabled()) ? "enabled" : "disabled") +
- ", has oat is " + hasOatFile() + ", has executable oat is " + (
+ "Has oat is " + hasOatFile() + ", has executable oat is " + (
executable_correct ? "expected" : "not expected") + ".");
- if (!hasOatFile() && isDex2OatEnabled()) {
- throw new Error("Application with dex2oat enabled runs without an oat file");
- }
-
System.out.println(functionCall());
}
@@ -47,8 +42,6 @@ public class Main {
return ret.substring(0, ret.length() - 1);
}
- private native static boolean isDex2OatEnabled();
-
private native static boolean needsRelocation();
private native static boolean hasOatFile();
diff --git a/test/118-noimage-dex2oat/run b/test/118-noimage-dex2oat/run
index e1e2577ae3..d68b0a0b2c 100644
--- a/test/118-noimage-dex2oat/run
+++ b/test/118-noimage-dex2oat/run
@@ -47,12 +47,12 @@ bpath_arg="--runtime-option -Xbootclasspath:${bpath}"
# Make sure we can run without an oat file.
echo "Run -Xnoimage-dex2oat"
-${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat --runtime-option -Xnodex2oat
+${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat
return_status1=$?
# Make sure we cannot run without an oat file without fallback.
echo "Run -Xnoimage-dex2oat -Xno-dex-file-fallback"
-${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat --runtime-option -Xnodex2oat \
+${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat \
--runtime-option -Xno-dex-file-fallback
return_status2=$?
diff --git a/test/134-nodex2oat-nofallback/run b/test/134-nodex2oat-nofallback/run
index 33265ac471..359d22dfa0 100755
--- a/test/134-nodex2oat-nofallback/run
+++ b/test/134-nodex2oat-nofallback/run
@@ -17,6 +17,6 @@
flags="${@}"
# Make sure we cannot run without an oat file without fallback.
-${RUN} ${flags} --runtime-option -Xnodex2oat --runtime-option -Xno-dex-file-fallback
+${RUN} ${flags} --runtime-option -Xno-dex-file-fallback
# Suppress the exit value. This isn't expected to be successful.
echo "Exit status:" $?
diff --git a/test/134-nodex2oat-nofallback/src/Main.java b/test/134-nodex2oat-nofallback/src/Main.java
index 086ffb9295..73f67c4e38 100644
--- a/test/134-nodex2oat-nofallback/src/Main.java
+++ b/test/134-nodex2oat-nofallback/src/Main.java
@@ -17,17 +17,8 @@
public class Main {
public static void main(String[] args) {
System.loadLibrary(args[0]);
- System.out.println(
- "Has oat is " + hasOat() + ", is dex2oat enabled is " + isDex2OatEnabled() + ".");
-
- if (hasOat() && !isDex2OatEnabled()) {
- throw new Error("Application with dex2oat disabled runs with an oat file");
- } else if (!hasOat() && isDex2OatEnabled()) {
- throw new Error("Application with dex2oat enabled runs without an oat file");
- }
+ System.out.println("Has oat is " + hasOat());
}
private native static boolean hasOat();
-
- private native static boolean isDex2OatEnabled();
}
diff --git a/test/138-duplicate-classes-check2/run b/test/138-duplicate-classes-check2/run
deleted file mode 100755
index 8494ad9aad..0000000000
--- a/test/138-duplicate-classes-check2/run
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# We want to run as no-dex-file-fallback to confirm that even though the -ex file has a symbolic
-# reference to A, there's no class-def, so we don't detect a collision.
-exec ${RUN} --runtime-option -Xno-dex-file-fallback "${@}"
diff --git a/test/147-stripped-dex-fallback/run b/test/147-stripped-dex-fallback/run
index 37c3e1fd88..1f1d22e64e 100755
--- a/test/147-stripped-dex-fallback/run
+++ b/test/147-stripped-dex-fallback/run
@@ -21,4 +21,4 @@ if [[ "${flags}" == *--no-prebuild* ]] ; then
exit 1
fi
-${RUN} ${flags} --strip-dex --runtime-option -Xnodex2oat
+${RUN} ${flags} --strip-dex
diff --git a/test/167-visit-locks/visit_locks.cc b/test/167-visit-locks/visit_locks.cc
index e79c880639..8955f5a08e 100644
--- a/test/167-visit-locks/visit_locks.cc
+++ b/test/167-visit-locks/visit_locks.cc
@@ -42,7 +42,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_testVisitLocks(JNIEnv*, jclass) {
: StackVisitor(thread, context, StackWalkKind::kIncludeInlinedFrames) {
}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
// Ignore runtime methods.
diff --git a/test/1945-proxy-method-arguments/get_args.cc b/test/1945-proxy-method-arguments/get_args.cc
index 211ae10ab0..859e229d9e 100644
--- a/test/1945-proxy-method-arguments/get_args.cc
+++ b/test/1945-proxy-method-arguments/get_args.cc
@@ -27,7 +27,7 @@ namespace art {
namespace {
// Visit a proxy method Quick frame at a given depth.
-class GetProxyQuickFrameVisitor FINAL : public StackVisitor {
+class GetProxyQuickFrameVisitor final : public StackVisitor {
public:
GetProxyQuickFrameVisitor(art::Thread* target, art::Context* ctx, size_t frame_depth)
REQUIRES_SHARED(art::Locks::mutator_lock_)
@@ -36,7 +36,7 @@ class GetProxyQuickFrameVisitor FINAL : public StackVisitor {
frame_depth_(frame_depth),
quick_frame_(nullptr) {}
- bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true;
}
diff --git a/test/203-multi-checkpoint/multi_checkpoint.cc b/test/203-multi-checkpoint/multi_checkpoint.cc
index 0799b6ed2d..424e9f1a96 100644
--- a/test/203-multi-checkpoint/multi_checkpoint.cc
+++ b/test/203-multi-checkpoint/multi_checkpoint.cc
@@ -28,7 +28,7 @@ struct TestClosure : public Closure {
bool second_run;
bool second_run_interleaved;
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
if (!first_run_start) {
CHECK(!second_run);
@@ -62,7 +62,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_checkCheckpointsRun(JNIEnv*, jclass)
}
struct SetupClosure : public Closure {
- void Run(Thread* self) OVERRIDE {
+ void Run(Thread* self) override {
CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
ScopedObjectAccess soa(self);
MutexLock tscl_mu(self, *Locks::thread_suspend_count_lock_);
diff --git a/test/305-other-fault-handler/fault_handler.cc b/test/305-other-fault-handler/fault_handler.cc
index 211d142a2b..db3f1f4bf7 100644
--- a/test/305-other-fault-handler/fault_handler.cc
+++ b/test/305-other-fault-handler/fault_handler.cc
@@ -29,21 +29,22 @@
namespace art {
-class TestFaultHandler FINAL : public FaultHandler {
+class TestFaultHandler final : public FaultHandler {
public:
explicit TestFaultHandler(FaultManager* manager)
: FaultHandler(manager),
- map_error_(""),
+ map_error_(),
target_map_(MemMap::MapAnonymous("test-305-mmap",
/* addr */ nullptr,
/* byte_count */ kPageSize,
/* prot */ PROT_NONE,
/* low_4gb */ false,
/* reuse */ false,
+ /* reservation */ nullptr,
/* error_msg */ &map_error_,
/* use_ashmem */ false)),
was_hit_(false) {
- CHECK(target_map_ != nullptr) << "Unable to create segfault target address " << map_error_;
+ CHECK(target_map_.IsValid()) << "Unable to create segfault target address " << map_error_;
manager_->AddHandler(this, /*in_generated_code*/false);
}
@@ -51,7 +52,7 @@ class TestFaultHandler FINAL : public FaultHandler {
manager_->RemoveHandler(this);
}
- bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+ bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) override {
CHECK_EQ(sig, SIGSEGV);
CHECK_EQ(reinterpret_cast<uint32_t*>(siginfo->si_addr),
GetTargetPointer()) << "Segfault on unexpected address!";
@@ -59,16 +60,16 @@ class TestFaultHandler FINAL : public FaultHandler {
was_hit_ = true;
LOG(INFO) << "SEGV Caught. mprotecting map.";
- CHECK(target_map_->Protect(PROT_READ | PROT_WRITE)) << "Failed to mprotect R/W";
+ CHECK(target_map_.Protect(PROT_READ | PROT_WRITE)) << "Failed to mprotect R/W";
LOG(INFO) << "Setting value to be read.";
*GetTargetPointer() = kDataValue;
LOG(INFO) << "Changing prot to be read-only.";
- CHECK(target_map_->Protect(PROT_READ)) << "Failed to mprotect R-only";
+ CHECK(target_map_.Protect(PROT_READ)) << "Failed to mprotect R-only";
return true;
}
void CauseSegfault() {
- CHECK_EQ(target_map_->GetProtect(), PROT_NONE);
+ CHECK_EQ(target_map_.GetProtect(), PROT_NONE);
// This will segfault. The handler should deal with it though and we will get a value out of it.
uint32_t data = *GetTargetPointer();
@@ -78,19 +79,19 @@ class TestFaultHandler FINAL : public FaultHandler {
CHECK(was_hit_);
CHECK_EQ(data, kDataValue) << "Unexpected read value from mmap";
- CHECK_EQ(target_map_->GetProtect(), PROT_READ);
+ CHECK_EQ(target_map_.GetProtect(), PROT_READ);
LOG(INFO) << "Success!";
}
private:
uint32_t* GetTargetPointer() {
- return reinterpret_cast<uint32_t*>(target_map_->Begin() + 8);
+ return reinterpret_cast<uint32_t*>(target_map_.Begin() + 8);
}
static constexpr uint32_t kDataValue = 0xDEADBEEF;
std::string map_error_;
- std::unique_ptr<MemMap> target_map_;
+ MemMap target_map_;
bool was_hit_;
};
diff --git a/test/565-checker-condition-liveness/src/Main.java b/test/565-checker-condition-liveness/src/Main.java
index 6b6619fa43..25ec3f53b1 100644
--- a/test/565-checker-condition-liveness/src/Main.java
+++ b/test/565-checker-condition-liveness/src/Main.java
@@ -30,26 +30,52 @@ public class Main {
public static int p(float arg) {
return (arg > 5.0f) ? 0 : -1;
}
+
+ /// CHECK-START-{ARM,ARM64}: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[23,25]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,23,25]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,23,25]
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[23,25]
+ /// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
+ /// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
+ /// CHECK-DAG: ArrayLength liveness:22
+ /// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:24
+ /// CHECK-DAG: TryBoundary
+
+ /// CHECK-START-{ARM,ARM64}-DEBUGGABLE: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[11,23,25]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,23,25]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,23,25]
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[23,25]
+ /// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
+ /// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
+ /// CHECK-DAG: ArrayLength liveness:22
+ /// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:24
+ /// CHECK-DAG: TryBoundary
- /// CHECK-START: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
- /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[21,25]
- /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,21,25]
- /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,21,25]
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[21,25]
+ // X86 and X86_64 generate the ArrayLength at its use site, meaning only the BoundsCheck will have environment uses.
+ /// CHECK-START-{X86,X86_64}: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[25]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,25]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,25]
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[25]
/// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
/// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
+ /// CHECK-DAG: ArrayLength liveness:22
/// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:24
/// CHECK-DAG: TryBoundary
- /// CHECK-START-DEBUGGABLE: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
- /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[11,21,25]
- /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,21,25]
- /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,21,25]
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[21,25]
+ /// CHECK-START-{X86,X86_64}-DEBUGGABLE: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[11,25]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,25]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,25]
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[25]
/// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
/// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
+ /// CHECK-DAG: ArrayLength liveness:22
/// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:24
/// CHECK-DAG: TryBoundary
+
//
// A value live at a throwing instruction in a try block may be copied by
// the exception handler to its location at the top of the catch block.
@@ -60,22 +86,44 @@ public class Main {
}
}
- /// CHECK-START: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-START-{ARM,ARM64}: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
/// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[]
- /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,17,21]
- /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,17,21]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,19,21]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,19,21]
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[]
/// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
/// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:16
+ /// CHECK-DAG: ArrayLength liveness:18
/// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
- /// CHECK-START-DEBUGGABLE: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
- /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[11,17,21]
- /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,17,21]
- /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,17,21]
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[17,21]
+ /// CHECK-START-{ARM,ARM64}-DEBUGGABLE: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[11,19,21]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,19,21]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,19,21]
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[19,21]
/// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
/// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:16
+ /// CHECK-DAG: ArrayLength liveness:18
+ /// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
+
+ /// CHECK-START-{X86,X86_64}: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,21]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,21]
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[]
+ /// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
+ /// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:16
+ /// CHECK-DAG: ArrayLength liveness:18
+ /// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
+
+ /// CHECK-START-{X86,X86_64}-DEBUGGABLE: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[11,21]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,21]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,21]
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[21]
+ /// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
+ /// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:16
+ /// CHECK-DAG: ArrayLength liveness:18
/// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
public static void testBoundsCheck(int x, Object y, int[] a) {
a[1] = x;
@@ -83,20 +131,32 @@ public class Main {
/// CHECK-START: void Main.testDeoptimize(int, java.lang.Object, int[]) liveness (after)
/// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[25]
- /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[13,19,25]
- /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[13,19,25]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[13,21,25]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[13,21,25]
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 env_uses:[25]
/// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:12
/// CHECK-DAG: NullCheck env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:18
+ /// CHECK-DAG: ArrayLength liveness:20
+ /// CHECK-DAG: Deoptimize env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:24
+
+ /// CHECK-START-{ARM,ARM64}-DEBUGGABLE: void Main.testDeoptimize(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[13,21,25]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[13,21,25]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[13,21,25]
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 env_uses:[21,25]
+ /// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:12
+ /// CHECK-DAG: NullCheck env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:18
+ /// CHECK-DAG: ArrayLength liveness:20
/// CHECK-DAG: Deoptimize env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:24
- /// CHECK-START-DEBUGGABLE: void Main.testDeoptimize(int, java.lang.Object, int[]) liveness (after)
- /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[13,19,25]
- /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[13,19,25]
- /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[13,19,25]
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 env_uses:[19,25]
+ /// CHECK-START-{X86,X86_64}-DEBUGGABLE: void Main.testDeoptimize(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[13,21,25]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[13,21,25]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[13,21,25]
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 env_uses:[21,25]
/// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:12
/// CHECK-DAG: NullCheck env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:18
+ /// CHECK-DAG: ArrayLength liveness:20
/// CHECK-DAG: Deoptimize env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:24
//
// A value that's not live in compiled code may still be needed in interpreter,
diff --git a/test/616-cha-unloading/cha_unload.cc b/test/616-cha-unloading/cha_unload.cc
index b17be6bd07..b5166ce1a7 100644
--- a/test/616-cha-unloading/cha_unload.cc
+++ b/test/616-cha-unloading/cha_unload.cc
@@ -35,7 +35,7 @@ class FindPointerAllocatorVisitor : public AllocatorVisitor {
explicit FindPointerAllocatorVisitor(void* ptr) : is_found(false), ptr_(ptr) {}
bool Visit(LinearAlloc* alloc)
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
is_found = alloc->Contains(ptr_);
return !is_found;
}
diff --git a/test/638-checker-inline-cache-intrinsic/run b/test/638-checker-inline-cache-intrinsic/run
index f43681dd56..15403100f5 100644
--- a/test/638-checker-inline-cache-intrinsic/run
+++ b/test/638-checker-inline-cache-intrinsic/run
@@ -14,4 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# Set threshold to 100 to match the iterations done in the test.
+# Pass --verbose-methods to only generate the CFG of these methods.
exec ${RUN} --jit --runtime-option -Xjitthreshold:100 -Xcompiler-option --verbose-methods=inlineMonomorphic,knownReceiverType,stringEquals $@
diff --git a/test/652-deopt-intrinsic/run b/test/652-deopt-intrinsic/run
index 97d1ff16bb..1acedf9add 100755
--- a/test/652-deopt-intrinsic/run
+++ b/test/652-deopt-intrinsic/run
@@ -15,4 +15,8 @@
# limitations under the License.
# Ensure this test is not subject to code collection.
-exec ${RUN} "$@" --runtime-option -Xjitinitialsize:32M
+# We also need at least a few invocations of the method Main.$noinline$doCall
+# to ensure the inline cache sees the two types being passed to the method. Pass
+# a large number in case some invocation kinds carry extra weight (e.g.
+# compiler-to-interpreter transitions).
+exec ${RUN} "$@" --runtime-option -Xjitinitialsize:32M --runtime-option -Xjitthreshold:1000
diff --git a/test/667-jit-jni-stub/run b/test/667-jit-jni-stub/run
index f235c6bc90..b7ce9132ab 100755
--- a/test/667-jit-jni-stub/run
+++ b/test/667-jit-jni-stub/run
@@ -16,4 +16,4 @@
# Disable AOT compilation of JNI stubs.
# Ensure this test is not subject to unexpected code collection.
-${RUN} "${@}" --no-prebuild --no-dex2oat --runtime-option -Xjitinitialsize:32M
+${RUN} "${@}" --no-prebuild --runtime-option -Xjitinitialsize:32M
diff --git a/test/667-jit-jni-stub/src/Main.java b/test/667-jit-jni-stub/src/Main.java
index 794308d6e1..234c5daf4c 100644
--- a/test/667-jit-jni-stub/src/Main.java
+++ b/test/667-jit-jni-stub/src/Main.java
@@ -18,7 +18,7 @@ public class Main {
public static void main(String[] args) throws Exception {
System.loadLibrary(args[0]);
if (isAotCompiled(Main.class, "hasJit")) {
- throw new Error("This test must be run with --no-prebuild --no-dex2oat!");
+ throw new Error("This test must be run with --no-prebuild!");
}
if (!hasJit()) {
return;
diff --git a/test/677-fsi/expected.txt b/test/677-fsi/expected.txt
index c7fb8fed77..2b073430b6 100644
--- a/test/677-fsi/expected.txt
+++ b/test/677-fsi/expected.txt
@@ -1,3 +1,2 @@
oat file has dex code, but APK has uncompressed dex code
-oat file has dex code, but APK has uncompressed dex code
Hello World
diff --git a/test/677-fsi2/expected.txt b/test/677-fsi2/expected.txt
index de008470fe..557db03de9 100644
--- a/test/677-fsi2/expected.txt
+++ b/test/677-fsi2/expected.txt
@@ -1,4 +1 @@
-Run default
-Hello World
-Run without dex2oat
Hello World
diff --git a/test/677-fsi2/run b/test/677-fsi2/run
index 039a6a78f0..651f082863 100644
--- a/test/677-fsi2/run
+++ b/test/677-fsi2/run
@@ -14,12 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-echo "Run default"
${RUN} $@ --runtime-option -Xonly-use-system-oat-files
-return_status1=$?
-
-echo "Run without dex2oat"
-${RUN} $@ --no-dex2oat --runtime-option -Xonly-use-system-oat-files
-return_status2=$?
-
-(exit $return_status1) && (exit $return_status2)
diff --git a/test/718-zipfile-finalizer/expected.txt b/test/718-zipfile-finalizer/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/718-zipfile-finalizer/expected.txt
diff --git a/test/718-zipfile-finalizer/info.txt b/test/718-zipfile-finalizer/info.txt
new file mode 100644
index 0000000000..c8b827e63c
--- /dev/null
+++ b/test/718-zipfile-finalizer/info.txt
@@ -0,0 +1,2 @@
+Test that ZipFile.finalize doesn't throw exceptions
+in the presence of a partially constructed instance.
diff --git a/test/718-zipfile-finalizer/src/Main.java b/test/718-zipfile-finalizer/src/Main.java
new file mode 100644
index 0000000000..3eb439b7a6
--- /dev/null
+++ b/test/718-zipfile-finalizer/src/Main.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.zip.ZipFile;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ // By throwing an exception while setting up the constructor's
+ // arguments, we end up with a partially constructed
+ // ZipFile.
+ try {
+ new ZipFile(null, throwException(), null);
+ throw new Error("Expected Exception");
+ } catch (Exception e) {
+ // expected
+ }
+
+ // Run finalizers. The golden file of this test checks
+ // that no exception is thrown from finalizers.
+ System.gc();
+ System.runFinalization();
+ }
+
+ public static int throwException() throws Exception {
+ throw new Exception();
+ }
+}
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index d2f69ef3de..521f9a6c72 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -87,7 +87,7 @@ extern "C" JNIEXPORT jint JNICALL Java_art_Test906_iterateThroughHeapCount(
jint Handle(jlong class_tag ATTRIBUTE_UNUSED,
jlong size ATTRIBUTE_UNUSED,
jlong* tag_ptr ATTRIBUTE_UNUSED,
- jint length ATTRIBUTE_UNUSED) OVERRIDE {
+ jint length ATTRIBUTE_UNUSED) override {
counter++;
if (counter == stop_after) {
return JVMTI_VISIT_ABORT;
@@ -120,7 +120,7 @@ extern "C" JNIEXPORT jint JNICALL Java_art_Test906_iterateThroughHeapData(
jintArray lengths) {
class DataIterationConfig : public IterationConfig {
public:
- jint Handle(jlong class_tag, jlong size, jlong* tag_ptr, jint length) OVERRIDE {
+ jint Handle(jlong class_tag, jlong size, jlong* tag_ptr, jint length) override {
class_tags_.push_back(class_tag);
sizes_.push_back(size);
tags_.push_back(*tag_ptr);
@@ -164,7 +164,7 @@ extern "C" JNIEXPORT void JNICALL Java_art_Test906_iterateThroughHeapAdd(
jint Handle(jlong class_tag ATTRIBUTE_UNUSED,
jlong size ATTRIBUTE_UNUSED,
jlong* tag_ptr,
- jint length ATTRIBUTE_UNUSED) OVERRIDE {
+ jint length ATTRIBUTE_UNUSED) override {
jlong current_tag = *tag_ptr;
if (current_tag != 0) {
*tag_ptr = current_tag + 10;
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index b07554ca46..b0e0f07db8 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -41,8 +41,6 @@ namespace Test913Heaps {
using android::base::StringPrintf;
-#define FINAL final
-#define OVERRIDE override
#define UNREACHABLE __builtin_unreachable
extern "C" JNIEXPORT void JNICALL Java_art_Test913_forceGarbageCollection(
@@ -144,7 +142,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
jint stop_after,
jint follow_set,
jobject jniRef) {
- class PrintIterationConfig FINAL : public IterationConfig {
+ class PrintIterationConfig final : public IterationConfig {
public:
PrintIterationConfig(jint _stop_after, jint _follow_set)
: counter_(0),
@@ -160,7 +158,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
jlong* tag_ptr,
jlong* referrer_tag_ptr,
jint length,
- void* user_data ATTRIBUTE_UNUSED) OVERRIDE {
+ void* user_data ATTRIBUTE_UNUSED) override {
jlong tag = *tag_ptr;
// Ignore any jni-global roots with untagged classes. These can be from the environment,
@@ -303,7 +301,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
}
protected:
- std::string PrintArrowType() const OVERRIDE {
+ std::string PrintArrowType() const override {
char* name = nullptr;
if (info_.jni_local.method != nullptr) {
jvmti_env->GetMethodName(info_.jni_local.method, &name, nullptr, nullptr);
@@ -349,7 +347,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
}
protected:
- std::string PrintArrowType() const OVERRIDE {
+ std::string PrintArrowType() const override {
char* name = nullptr;
if (info_.stack_local.method != nullptr) {
jvmti_env->GetMethodName(info_.stack_local.method, &name, nullptr, nullptr);
@@ -391,7 +389,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
: Elem(referrer, referree, size, length), string_(string) {}
protected:
- std::string PrintArrowType() const OVERRIDE {
+ std::string PrintArrowType() const override {
return string_;
}
diff --git a/test/916-obsolete-jit/run b/test/916-obsolete-jit/run
index b6d406fd99..c6e62ae6cd 100755
--- a/test/916-obsolete-jit/run
+++ b/test/916-obsolete-jit/run
@@ -14,12 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# We are testing the redefinition of compiled code but with jvmti we only allow
-# jitted compiled code so always add the --jit argument.
-if [[ "$@" == *"--jit"* ]]; then
- other_args=""
-else
- other_args="--jit"
-fi
-./default-run "$@" ${other_args} \
- --jvmti
+./default-run "$@" --jvmti
diff --git a/test/988-method-trace/expected.txt b/test/988-method-trace/expected.txt
index 75ee112c60..b263308573 100644
--- a/test/988-method-trace/expected.txt
+++ b/test/988-method-trace/expected.txt
@@ -70,6 +70,27 @@ fibonacci(30)=832040
fibonacci(5)=5
.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
+=> art.Test988$NativeOp()
+.=> public java.lang.Object()
+.<= public java.lang.Object() -> <null: null>
+<= art.Test988$NativeOp() -> <null: null>
+=> public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator)
+.=> public int art.Test988$NativeOp.applyAsInt(int)
+..=> static int art.Test988.nativeFibonacci(int)
+..<= static int art.Test988.nativeFibonacci(int) -> <class java.lang.Integer: 5>
+.<= public int art.Test988$NativeOp.applyAsInt(int) -> <class java.lang.Integer: 5>
+.=> public art.Test988$FibResult(java.lang.String,int,int)
+..=> public java.lang.Object()
+..<= public java.lang.Object() -> <null: null>
+.<= public art.Test988$FibResult(java.lang.String,int,int) -> <null: null>
+.=> public boolean java.util.ArrayList.add(java.lang.Object)
+..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+fibonacci(5)=5
+.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
=> art.Test988$IterOp()
.=> public java.lang.Object()
.<= public java.lang.Object() -> <null: null>
@@ -147,8 +168,8 @@ fibonacci(5)=5
.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: Bad argument: -19 < 0
art.Test988.iter_fibonacci(Test988.java:255)
art.Test988$IterOp.applyAsInt(Test988.java:250)
- art.Test988.doFibTest(Test988.java:378)
- art.Test988.run(Test988.java:336)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:344)
<additional hidden frames>
>
....<= public java.lang.Throwable(java.lang.String) -> <null: null>
@@ -167,8 +188,8 @@ fibonacci(5)=5
fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
art.Test988.iter_fibonacci(Test988.java:255)
art.Test988$IterOp.applyAsInt(Test988.java:250)
- art.Test988.doFibTest(Test988.java:378)
- art.Test988.run(Test988.java:336)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:344)
<additional hidden frames>
.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
@@ -250,8 +271,8 @@ fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: Bad argument: -19 < 0
art.Test988.fibonacci(Test988.java:277)
art.Test988$RecurOp.applyAsInt(Test988.java:272)
- art.Test988.doFibTest(Test988.java:378)
- art.Test988.run(Test988.java:337)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:345)
<additional hidden frames>
>
....<= public java.lang.Throwable(java.lang.String) -> <null: null>
@@ -270,8 +291,53 @@ fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
art.Test988.fibonacci(Test988.java:277)
art.Test988$RecurOp.applyAsInt(Test988.java:272)
- art.Test988.doFibTest(Test988.java:378)
- art.Test988.run(Test988.java:337)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:345)
+ <additional hidden frames>
+
+.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
+=> art.Test988$NativeOp()
+.=> public java.lang.Object()
+.<= public java.lang.Object() -> <null: null>
+<= art.Test988$NativeOp() -> <null: null>
+=> public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator)
+.=> public int art.Test988$NativeOp.applyAsInt(int)
+..=> static int art.Test988.nativeFibonacci(int)
+...=> public java.lang.Error(java.lang.String)
+....=> public java.lang.Throwable(java.lang.String)
+.....=> public java.lang.Object()
+.....<= public java.lang.Object() -> <null: null>
+.....=> public static final java.util.List java.util.Collections.emptyList()
+.....<= public static final java.util.List java.util.Collections.emptyList() -> <class java.util.Collections$EmptyList: []>
+.....=> public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace()
+......=> private static java.lang.Object java.lang.Throwable.nativeFillInStackTrace()
+......<= private static java.lang.Object java.lang.Throwable.nativeFillInStackTrace() -> <class [Ljava.lang.Object;: <non-deterministic>>
+.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: bad argument
+ art.Test988.nativeFibonacci(Native Method)
+ art.Test988$NativeOp.applyAsInt(Test988.java:287)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:346)
+ <additional hidden frames>
+>
+....<= public java.lang.Throwable(java.lang.String) -> <null: null>
+...<= public java.lang.Error(java.lang.String) -> <null: null>
+..<= static int art.Test988.nativeFibonacci(int) EXCEPTION
+.<= public int art.Test988$NativeOp.applyAsInt(int) EXCEPTION
+.=> public art.Test988$FibThrow(java.lang.String,int,java.lang.Throwable)
+..=> public java.lang.Object()
+..<= public java.lang.Object() -> <null: null>
+.<= public art.Test988$FibThrow(java.lang.String,int,java.lang.Throwable) -> <null: null>
+.=> public boolean java.util.ArrayList.add(java.lang.Object)
+..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+fibonacci(-19) -> java.lang.Error: bad argument
+ art.Test988.nativeFibonacci(Native Method)
+ art.Test988$NativeOp.applyAsInt(Test988.java:287)
+ art.Test988.doFibTest(Test988.java:388)
+ art.Test988.run(Test988.java:346)
<additional hidden frames>
.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
diff --git a/test/988-method-trace/src/art/Test988.java b/test/988-method-trace/src/art/Test988.java
index 5720d1d87d..075e075c0f 100644
--- a/test/988-method-trace/src/art/Test988.java
+++ b/test/988-method-trace/src/art/Test988.java
@@ -282,6 +282,13 @@ public class Test988 {
}
}
+ static final class NativeOp implements IntUnaryOperator {
+ public int applyAsInt(int x) {
+ return nativeFibonacci(x);
+ }
+ }
+ static native int nativeFibonacci(int n);
+
static final class TestRunnableInvokeHandler implements InvocationHandler {
public Object invoke(Object proxy, Method m, Object[] args) throws Throwable {
return null;
@@ -333,8 +340,10 @@ public class Test988 {
Thread.currentThread());
doFibTest(30, new IterOp());
doFibTest(5, new RecurOp());
+ doFibTest(5, new NativeOp());
doFibTest(-19, new IterOp());
doFibTest(-19, new RecurOp());
+ doFibTest(-19, new NativeOp());
runnable.run();
@@ -358,6 +367,7 @@ public class Test988 {
ArrayList.class.toString();
RecurOp.class.toString();
IterOp.class.toString();
+ NativeOp.class.toString();
StringBuilder.class.toString();
Runnable.class.toString();
TestRunnableInvokeHandler.class.toString();
diff --git a/libartbase/base/fuchsia_compat.h b/test/988-method-trace/trace_fib.cc
index 018bac0528..682f273ac1 100644
--- a/libartbase/base/fuchsia_compat.h
+++ b/test/988-method-trace/trace_fib.cc
@@ -14,23 +14,28 @@
* limitations under the License.
*/
-#ifndef ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
-#define ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
+#include <jni.h>
-// stubs for features lacking in Fuchsia
+namespace art {
+namespace Test988MethodTrace {
-struct rlimit {
- int rlim_cur;
-};
-
-#define RLIMIT_FSIZE (1)
-#define RLIM_INFINITY (-1)
-static int getrlimit(int resource, struct rlimit *rlim) {
- LOG(FATAL) << "getrlimit not available for Fuchsia";
+extern "C" JNIEXPORT jint JNICALL Java_art_Test988_nativeFibonacci(JNIEnv* env, jclass, jint n) {
+ if (n < 0) {
+ env->ThrowNew(env->FindClass("java/lang/Error"), "bad argument");
+ return -1;
+ } else if (n == 0) {
+ return 0;
+ }
+ jint x = 1;
+ jint y = 1;
+ for (jint i = 3; i <= n; i++) {
+ jint z = x + y;
+ x = y;
+ y = z;
+ }
+ return y;
}
-static int ashmem_create_region(const char *name, size_t size) {
- LOG(FATAL) << "ashmem_create_region not available for Fuchsia";
-}
+} // namespace Test988MethodTrace
+} // namespace art
-#endif // ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
diff --git a/test/Android.bp b/test/Android.bp
index a3de382059..e2656516ef 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -262,6 +262,7 @@ art_cc_defaults {
"984-obsolete-invoke/obsolete_invoke.cc",
"986-native-method-bind/native_bind.cc",
"987-agent-bind/agent_bind.cc",
+ "988-method-trace/trace_fib.cc",
"989-method-trace-throw/method_trace.cc",
"991-field-trace-2/field_trace.cc",
"992-source-data/source_file.cc",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 53d4c372c4..ffaa2cd3c7 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -49,6 +49,7 @@ TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmti-target libopenjdkjvmtid-target
TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar
TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 9344b24b5d..da79164f12 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -71,13 +71,6 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_runtimeIsSoftFail(JNIEnv* env AT
return Runtime::Current()->IsVerificationSoftFail() ? JNI_TRUE : JNI_FALSE;
}
-// public static native boolean isDex2OatEnabled();
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isDex2OatEnabled(JNIEnv* env ATTRIBUTE_UNUSED,
- jclass cls ATTRIBUTE_UNUSED) {
- return Runtime::Current()->IsDex2OatEnabled();
-}
-
// public static native boolean hasImage();
extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasImage(JNIEnv* env ATTRIBUTE_UNUSED,
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index 192274e5ae..d74d2efa12 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -77,7 +77,7 @@ struct MethodIsInterpretedVisitor : public StackVisitor {
prev_was_runtime_(true),
require_deoptable_(require_deoptable) {}
- virtual bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (goal_ == GetMethod()) {
method_is_interpreted_ = (require_deoptable_ && prev_was_runtime_) || IsShadowFrame();
method_found_ = true;
diff --git a/test/dexdump/bytecodes.txt b/test/dexdump/bytecodes.txt
index e1a381ec09..1ed66e8a10 100644
--- a/test/dexdump/bytecodes.txt
+++ b/test/dexdump/bytecodes.txt
@@ -176,7 +176,7 @@ Class #3 -
ins : 1
outs : 1
insns size : 4 16-bit code units
-0009a8: |[0009a8] com.google.android.test.R.attr.<init>:()V
+0009a8: |[0009a8] com.google.android.test.R$attr.<init>:()V
0009b8: 7010 1900 0000 |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@0019
0009be: 0e00 |0003: return-void
catches : (none)
@@ -228,7 +228,7 @@ Class #4 -
ins : 1
outs : 1
insns size : 4 16-bit code units
-0009c0: |[0009c0] com.google.android.test.R.drawable.<init>:()V
+0009c0: |[0009c0] com.google.android.test.R$drawable.<init>:()V
0009d0: 7010 1900 0000 |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@0019
0009d6: 0e00 |0003: return-void
catches : (none)
diff --git a/test/dexdump/bytecodes.xml b/test/dexdump/bytecodes.xml
index d08c2e929c..d4ee3a7eee 100755
--- a/test/dexdump/bytecodes.xml
+++ b/test/dexdump/bytecodes.xml
@@ -71,7 +71,7 @@
>
</constructor>
</class>
-<class name="R.attr"
+<class name="R$attr"
extends="java.lang.Object"
interface="false"
abstract="false"
@@ -79,15 +79,15 @@
final="true"
visibility="public"
>
-<constructor name="R.attr"
- type="com.google.android.test.R.attr"
+<constructor name="R$attr"
+ type="com.google.android.test.R$attr"
static="false"
final="false"
visibility="public"
>
</constructor>
</class>
-<class name="R.drawable"
+<class name="R$drawable"
extends="java.lang.Object"
interface="false"
abstract="false"
@@ -105,8 +105,8 @@
value="2130837504"
>
</field>
-<constructor name="R.drawable"
- type="com.google.android.test.R.drawable"
+<constructor name="R$drawable"
+ type="com.google.android.test.R$drawable"
static="false"
final="false"
visibility="public"
diff --git a/test/dexdump/checkers.xml b/test/dexdump/checkers.xml
index 4e56ea2d66..3d3bac2d69 100755
--- a/test/dexdump/checkers.xml
+++ b/test/dexdump/checkers.xml
@@ -181,7 +181,7 @@
final="true"
visibility="public"
>
-<parameter name="arg0" type="android.content.SharedPreferences.Editor">
+<parameter name="arg0" type="android.content.SharedPreferences$Editor">
</parameter>
</method>
<method name="a"
diff --git a/test/dexdump/invoke-custom.txt b/test/dexdump/invoke-custom.txt
index cfab248168..1bfa0532b2 100644
--- a/test/dexdump/invoke-custom.txt
+++ b/test/dexdump/invoke-custom.txt
@@ -58,7 +58,7 @@ Class #0 -
ins : 2
outs : 2
insns size : 4 16-bit code units
-001b18: |[001b18] TestBadBootstrapArguments.TestersConstantCallSite.<init>:(Ljava/lang/invoke/MethodHandle;)V
+001b18: |[001b18] TestBadBootstrapArguments$TestersConstantCallSite.<init>:(Ljava/lang/invoke/MethodHandle;)V
001b28: 7020 d200 1000 |0000: invoke-direct {v0, v1}, Ljava/lang/invoke/ConstantCallSite;.<init>:(Ljava/lang/invoke/MethodHandle;)V // method@00d2
001b2e: 0e00 |0003: return-void
catches : (none)
@@ -537,7 +537,7 @@ Class #2 -
ins : 2
outs : 1
insns size : 4 16-bit code units
-002abc: |[002abc] TestInvocationKinds.Widget.<init>:(I)V
+002abc: |[002abc] TestInvocationKinds$Widget.<init>:(I)V
002acc: 7010 bf00 0000 |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@00bf
002ad2: 0e00 |0003: return-void
catches : (none)
@@ -586,7 +586,7 @@ Class #3 -
ins : 1
outs : 1
insns size : 4 16-bit code units
-002ee8: |[002ee8] TestInvokeCustomWithConcurrentThreads.1.<init>:()V
+002ee8: |[002ee8] TestInvokeCustomWithConcurrentThreads$1.<init>:()V
002ef8: 7010 cf00 0000 |0000: invoke-direct {v0}, Ljava/lang/ThreadLocal;.<init>:()V // method@00cf
002efe: 0e00 |0003: return-void
catches : (none)
@@ -605,7 +605,7 @@ Class #3 -
ins : 1
outs : 1
insns size : 13 16-bit code units
-002ea0: |[002ea0] TestInvokeCustomWithConcurrentThreads.1.initialValue:()Ljava/lang/Integer;
+002ea0: |[002ea0] TestInvokeCustomWithConcurrentThreads$1.initialValue:()Ljava/lang/Integer;
002eb0: 7100 6500 0000 |0000: invoke-static {}, LTestInvokeCustomWithConcurrentThreads;.access$000:()Ljava/util/concurrent/atomic/AtomicInteger; // method@0065
002eb6: 0c00 |0003: move-result-object v0
002eb8: 6e10 f100 0000 |0004: invoke-virtual {v0}, Ljava/util/concurrent/atomic/AtomicInteger;.getAndIncrement:()I // method@00f1
@@ -628,7 +628,7 @@ Class #3 -
ins : 1
outs : 1
insns size : 5 16-bit code units
-002ecc: |[002ecc] TestInvokeCustomWithConcurrentThreads.1.initialValue:()Ljava/lang/Object;
+002ecc: |[002ecc] TestInvokeCustomWithConcurrentThreads$1.initialValue:()Ljava/lang/Object;
002edc: 6e10 6100 0100 |0000: invoke-virtual {v1}, LTestInvokeCustomWithConcurrentThreads$1;.initialValue:()Ljava/lang/Integer; // method@0061
002ee2: 0c00 |0003: move-result-object v0
002ee4: 1100 |0004: return-object v0
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 713fd35523..bd58ae37ec 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -59,7 +59,7 @@ USE_JVMTI="n"
VERIFY="y" # y=yes,n=no,s=softfail
ZYGOTE=""
DEX_VERIFY=""
-USE_DEX2OAT_AND_PATCHOAT="y"
+USE_PATCHOAT="y"
INSTRUCTION_SET_FEATURES=""
ARGS=""
EXTERNAL_LOG_TAGS="n" # if y respect externally set ANDROID_LOG_TAGS.
@@ -166,14 +166,9 @@ while true; do
shift
BOOT_IMAGE="$1"
shift
- elif [ "x$1" = "x--no-dex2oat" ]; then
- DEX2OAT="-Xcompiler:${FALSE_BIN}"
- USE_DEX2OAT_AND_PATCHOAT="n"
- PREBUILD="n" # Do not use prebuilt odex, either.
- shift
elif [ "x$1" = "x--no-patchoat" ]; then
PATCHOAT="-Xpatchoat:${FALSE_BIN}"
- USE_DEX2OAT_AND_PATCHOAT="n"
+ USE_PATCHOAT="n"
shift
elif [ "x$1" = "x--relocate" ]; then
RELOCATE="y"
@@ -549,6 +544,7 @@ if [ "$HAVE_IMAGE" = "n" ]; then
fi
bpath="${framework}/core-libart${bpath_suffix}.jar"
bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar"
+ bpath="${bpath}:${framework}/core-simple${bpath_suffix}.jar"
bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
@@ -588,7 +584,7 @@ if [ "$USE_GDB" = "y" ]; then
fi
if [ "$INTERPRETER" = "y" ]; then
- INT_OPTS="-Xint"
+ INT_OPTS="${INT_OPTS} -Xint"
if [ "$VERIFY" = "y" ] ; then
INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=quicken"
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=quicken"
@@ -604,7 +600,7 @@ if [ "$INTERPRETER" = "y" ]; then
fi
if [ "$JIT" = "y" ]; then
- INT_OPTS="-Xusejit:true"
+ INT_OPTS="${INT_OPTS} -Xusejit:true"
if [ "$VERIFY" = "y" ] ; then
INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=quicken"
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=quicken"
@@ -613,6 +609,8 @@ if [ "$JIT" = "y" ]; then
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=assume-verified"
DEX_VERIFY="${DEX_VERIFY} -Xverify:none"
fi
+else
+ INT_OPTS="${INT_OPTS} -Xusejit:false"
fi
JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
@@ -810,7 +808,7 @@ RUN_TEST_ASAN_OPTIONS="${RUN_TEST_ASAN_OPTIONS}detect_leaks=0"
if [ "$EXTERNAL_LOG_TAGS" = "n" ]; then
if [ "$DEV_MODE" = "y" ]; then
export ANDROID_LOG_TAGS='*:d'
- elif [ "$USE_DEX2OAT_AND_PATCHOAT" = "n" ]; then
+ elif [ "$USE_PATCHOAT" = "n" ]; then
# All tests would log the error of failing dex2oat/patchoat. Be silent here and only
# log fatal events.
export ANDROID_LOG_TAGS='*:s'
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 7322a35884..0a179c7093 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -76,7 +76,7 @@
},
{
"tests" : "629-vdex-speed",
- "variant": "interp-ac | no-dex2oat | interpreter | jit | relocate-npatchoat",
+ "variant": "interp-ac | interpreter | jit | relocate-npatchoat",
"description": "629 requires compilation."
},
{
@@ -163,7 +163,7 @@
},
{
"tests": "147-stripped-dex-fallback",
- "variant": "no-dex2oat | no-image | relocate-npatchoat",
+ "variant": "no-image | relocate-npatchoat",
"description": ["147-stripped-dex-fallback is disabled because it",
"requires --prebuild."]
},
@@ -174,7 +174,7 @@
"119-noimage-patchoat",
"137-cfi",
"138-duplicate-classes-check2"],
- "variant": "no-dex2oat | no-image | relocate-npatchoat",
+ "variant": "no-image | relocate-npatchoat",
"description": ["All these tests check that we have sane behavior if we",
"don't have a patchoat or dex2oat. Therefore we",
"shouldn't run them in situations where we actually",
@@ -246,7 +246,7 @@
"613-inlining-dex-cache",
"626-set-resolved-string",
"638-checker-inline-cache-intrinsic"],
- "variant": "trace | stream",
+ "variant": "trace | stream",
"description": ["These tests expect JIT compilation, which is",
"suppressed when tracing."]
},
@@ -257,6 +257,11 @@
"suppressed when tracing."]
},
{
+ "tests": "638-checker-inline-cache-intrinsic",
+ "variant": "interpreter | interp-ac",
+ "description": ["Test expects JIT compilation"]
+ },
+ {
"tests": "597-deopt-invoke-stub",
"variant": "speed-profile | interp-ac | interpreter | optimizing | trace | stream",
"description": ["This test expects JIT compilation and no AOT for",
@@ -644,9 +649,9 @@
},
{
"tests": "660-clinit",
- "variant": "no-image | no-dex2oat | no-prebuild | jvmti-stress | redefine-stress",
+ "variant": "no-image | no-prebuild | jvmti-stress | redefine-stress",
"description": ["Tests <clinit> for app images, which --no-image, --no-prebuild, ",
- "--no-dex2oat, and --redefine-stress do not create"]
+ "and --redefine-stress do not create"]
},
{
"tests": ["961-default-iface-resolution-gen",
@@ -669,7 +674,7 @@
},
{
"tests": "661-oat-writer-layout",
- "variant": "interp-ac | interpreter | jit | no-dex2oat | no-prebuild | no-image | trace | redefine-stress | jvmti-stress",
+ "variant": "interp-ac | interpreter | jit | no-prebuild | no-image | trace | redefine-stress | jvmti-stress",
"description": ["Test is designed to only check --compiler-filter=speed"]
},
{
@@ -986,6 +991,7 @@
"678-quickening",
"679-locks",
"999-redefine-hiddenapi",
+ "1000-non-moving-space-stress",
"1951-monitor-enter-no-suspend"],
"variant": "jvm",
"description": ["Doesn't run on RI."]
@@ -1004,7 +1010,7 @@
},
{
"tests": "677-fsi",
- "variant": "no-dex2oat | no-image | no-prebuild | relocate-npatchoat | jvm",
+ "variant": "no-image | no-prebuild | relocate-npatchoat | jvm",
"description": ["Test requires a successful dex2oat invocation"]
},
{
@@ -1038,12 +1044,6 @@
"description": ["Test timing out under gcstress possibly due to slower unwinding by libbacktrace"]
},
{
- "tests": ["624-checker-stringops"],
- "variant": "optimizing & gcstress | speed-profile & gcstress",
- "bug": "b/111545159",
- "description": ["Seem to expose some error with our gc when run in these configurations"]
- },
- {
"tests": ["021-string2"],
"variant": "jit & debuggable",
"bug": "b/109791792",
diff --git a/test/run-test b/test/run-test
index d90eccdf75..ef173026c1 100755
--- a/test/run-test
+++ b/test/run-test
@@ -148,7 +148,6 @@ jvmti_redefine_stress="false"
strace="false"
always_clean="no"
never_clean="no"
-have_dex2oat="yes"
have_patchoat="yes"
have_image="yes"
multi_image_suffix=""
@@ -195,9 +194,6 @@ while true; do
lib="libdvm.so"
runtime="dalvik"
shift
- elif [ "x$1" = "x--no-dex2oat" ]; then
- have_dex2oat="no"
- shift
elif [ "x$1" = "x--no-patchoat" ]; then
have_patchoat="no"
shift
@@ -580,10 +576,6 @@ if [ "$have_patchoat" = "no" ]; then
run_args="${run_args} --no-patchoat"
fi
-if [ "$have_dex2oat" = "no" ]; then
- run_args="${run_args} --no-dex2oat"
-fi
-
if [ ! "$runtime" = "jvm" ]; then
run_args="${run_args} --lib $lib"
fi
@@ -639,11 +631,6 @@ if [ "$bisection_search" = "yes" -a "$prebuild_mode" = "yes" ]; then
usage="yes"
fi
-if [ "$bisection_search" = "yes" -a "$have_dex2oat" = "no" ]; then
- err_echo "--bisection-search and --no-dex2oat are mutually exclusive"
- usage="yes"
-fi
-
if [ "$bisection_search" = "yes" -a "$have_patchoat" = "no" ]; then
err_echo "--bisection-search and --no-patchoat are mutually exclusive"
usage="yes"
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index d5a4d454a1..84490bf0e4 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -184,7 +184,7 @@ target_config = {
'run-test' : ['--relocate-npatchoat']
},
'art-no-dex2oat' : {
- 'run-test' : ['--no-dex2oat']
+ # Deprecated configuration.
},
'art-heap-poisoning' : {
'run-test' : ['--interpreter',
@@ -265,13 +265,11 @@ target_config = {
'ART_USE_READ_BARRIER' : 'false'
}
},
- # TODO: Remove this configuration, when the ART Buildbot is no
- # longer using it for 'host-x86_64-valgrind'.
- 'art-gtest-valgrind64': {
- # Disabled: Valgrind is no longer supported.
- # 'make' : 'valgrind-test-art-host64',
- 'env': {
- 'ART_USE_READ_BARRIER' : 'false'
+ 'art-generational-cc': {
+ 'make' : 'test-art-host-gtest',
+ 'run-test' : [],
+ 'env' : {
+ 'ART_USE_GENERATIONAL_CC' : 'true'
}
},
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index e8d4290d28..10c8619307 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -145,7 +145,7 @@ def gather_test_info():
VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image', 'multipicimage'}
VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
- VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'no-dex2oat', 'prebuild'}
+ VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'prebuild'}
VARIANT_TYPE_DICT['cdex_level'] = {'cdex-none', 'cdex-fast'}
VARIANT_TYPE_DICT['relocate'] = {'relocate-npatchoat', 'relocate', 'no-relocate'}
VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
@@ -414,8 +414,6 @@ def run_tests(tests):
options_test += ' --prebuild'
elif prebuild == 'no-prebuild':
options_test += ' --no-prebuild'
- elif prebuild == 'no-dex2oat':
- options_test += ' --no-prebuild --no-dex2oat'
if cdex_level:
# Add option and remove the cdex- prefix.
diff --git a/test/ti-agent/ti_macros.h b/test/ti-agent/ti_macros.h
index d91338324f..a871270dcf 100644
--- a/test/ti-agent/ti_macros.h
+++ b/test/ti-agent/ti_macros.h
@@ -19,8 +19,6 @@
#include "android-base/macros.h"
-#define FINAL final
-#define OVERRIDE override
#define UNREACHABLE __builtin_unreachable
#endif // ART_TEST_TI_AGENT_TI_MACROS_H_
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 8c748172bb..3b5230fd02 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -165,6 +165,7 @@ LOCAL_STATIC_JAVA_LIBRARIES := ahat junit-host
LOCAL_IS_HOST_MODULE := true
LOCAL_MODULE_TAGS := tests
LOCAL_MODULE := ahat-tests
+LOCAL_TEST_CONFIG := ahat-tests.xml
LOCAL_COMPATIBILITY_SUITE := general-tests
include $(BUILD_HOST_JAVA_LIBRARY)
AHAT_TEST_JAR := $(LOCAL_BUILT_MODULE)
diff --git a/tools/ahat/AndroidTest.xml b/tools/ahat/ahat-tests.xml
index b07905a9a7..b07905a9a7 100644
--- a/tools/ahat/AndroidTest.xml
+++ b/tools/ahat/ahat-tests.xml
diff --git a/tools/art b/tools/art
index aebf5a6778..9c032c0cb4 100644
--- a/tools/art
+++ b/tools/art
@@ -16,28 +16,9 @@
# shell dialect that should work on the host (e.g. bash), and
# Android (e.g. mksh).
-# Globals
-ART_BINARY=dalvikvm
-DELETE_ANDROID_DATA="no"
-LAUNCH_WRAPPER=
-LIBART=libart.so
-JIT_PROFILE="no"
-ALLOW_DEFAULT_JDWP="no"
-VERBOSE="no"
-CLEAN_OAT_FILES="yes"
-EXTRA_OPTIONS=()
-
-# Follow all sym links to get the program name.
-if [ z"$BASH_SOURCE" != z ]; then
- PROG_NAME="$BASH_SOURCE"
-else
- PROG_NAME="$0"
-fi
-while [ -h "$PROG_NAME" ]; do
- # On Mac OS, readlink -f doesn't work.
- PROG_NAME="$(readlink "$PROG_NAME")"
-done
-
+######################################
+# Functions
+######################################
function find_libdir() {
# Get the actual file, $1 is the ART_BINARY_PATH and may be a symbolic link.
# Use realpath instead of readlink because Android does not have a readlink.
@@ -48,29 +29,6 @@ function find_libdir() {
fi
}
-function replace_compiler_filter_with_quicken() {
- ARGS_WITH_QUICKEN=("$@")
-
- found="false"
- ((index=0))
- while ((index <= $#)); do
- what="${ARGS_WITH_QUICKEN[$index]}"
-
- case "$what" in
- --compiler-filter=*)
- ARGS_WITH_QUICKEN[$index]="--compiler-filter=quicken"
- found="true"
- ;;
- esac
-
- ((index++))
- shift
- done
- if [ "$found" != "true" ]; then
- ARGS_WITH_QUICKEN=(-Xcompiler-option --compiler-filter=quicken "${ARGS_WITH_QUICKEN[@]}")
- fi
-}
-
function usage() {
cat 1>&2 <<EOF
Usage: art [OPTIONS] [--] [ART_OPTIONS] CLASS
@@ -224,19 +182,81 @@ function detect_boot_image_location() {
echo "$image_location"
}
-# If android logging is not explicitly set, only print warnings and errors.
-if [ -z "$ANDROID_LOG_TAGS" ]; then
- ANDROID_LOG_TAGS='*:w'
-fi
+function run_dex2oat() {
+ local class_loader_context=
+ for dex_file in "${DEX2OAT_CLASSPATH[@]}"
+ do
+ while [ -h "$dex_file" ]; do
+ # On Mac OS, readlink -f doesn't work.
+ dex_file="$(readlink "$dex_file")"
+ done
+ # Create oat file directory.
+ verbose_run mkdir -p $(dirname "$dex_file")/oat/$ISA
+ local oat_file=$(basename "$dex_file")
+ local oat_file=$(dirname "$dex_file")/oat/$ISA/${oat_file%.*}.odex
+ # When running dex2oat use the exact same context as when running dalvikvm.
+ # (see run_art function)
+ verbose_run ANDROID_DATA=$ANDROID_DATA \
+ ANDROID_ROOT=$ANDROID_ROOT \
+ LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
+ PATH=$ANDROID_ROOT/bin:$PATH \
+ LD_USE_LOAD_BIAS=1 \
+ ANDROID_LOG_TAGS=$ANDROID_LOG_TAGS \
+ $DEX2OAT_BINARY_PATH \
+ --runtime-arg -Xnorelocate \
+ --boot-image=$DEX2OAT_BOOT_IMAGE \
+ --instruction-set=$ISA \
+ --class-loader-context="PCL[$class_loader_context]" \
+ "${DEX2OAT_FLAGS[@]}" \
+ --dex-file=$dex_file \
+ --oat-file=$oat_file
+ if [[ -n $class_loader_context ]]; then
+ class_loader_context+=":"
+ fi
+ class_loader_context+="$dex_file"
+ done
+}
+
+# Extract the dex2oat flags from the list of arguments.
+# -Xcompiler-option arguments are stored in DEX2OAT_FLAGS array
+# -cp argument is split by ':' and stored in DEX2OAT_CLASSPATH
+# -Ximage argument is stored in DEX2OAT_BOOT_IMAGE
+function extract_dex2oat_flags() {
+ while [ $# -gt 0 ]; do
+ case $1 in
+ -Xcompiler-option)
+ DEX2OAT_FLAGS+=("$2")
+ shift
+ ;;
+ -Ximage:*)
+ DEX2OAT_BOOT_IMAGE=$1
+ # Remove '-Ximage:' from the argument.
+ DEX2OAT_BOOT_IMAGE=${DEX2OAT_BOOT_IMAGE##-Ximage:}
+ ;;
+ -cp)
+        # Reset any previously parsed classpath, since dalvikvm
+ # only supports one -cp argument.
+ DEX2OAT_CLASSPATH=()
+ # TODO: support -classpath and CLASSPATH
+ local oifs=$IFS
+ IFS=':'
+ for classpath_elem in $2
+ do
+ DEX2OAT_CLASSPATH+=("$classpath_elem")
+ done
+ shift
+ IFS=$oifs
+ ;;
+ esac
+ shift
+ done
+}
# Runs dalvikvm, returns its exit code.
# (Oat directories are cleaned up in between runs)
function run_art() {
- local image_location="$(detect_boot_image_location)"
local ret
- # First cleanup any left-over 'oat' files from the last time dalvikvm was run.
- cleanup_oat_directory_for_classpath "$@"
# Run dalvikvm.
verbose_run ANDROID_DATA="$ANDROID_DATA" \
ANDROID_ROOT="$ANDROID_ROOT" \
@@ -247,7 +267,7 @@ function run_art() {
$LAUNCH_WRAPPER $ART_BINARY_PATH $lib \
-XXlib:"$LIBART" \
-Xnorelocate \
- -Ximage:"$image_location" \
+ -Ximage:"$DEFAULT_IMAGE_LOCATION" \
"$@"
ret=$?
@@ -258,6 +278,23 @@ function run_art() {
return $ret
}
+######################################
+# Globals
+######################################
+ART_BINARY=dalvikvm
+DEX2OAT_BINARY=dex2oat
+DELETE_ANDROID_DATA="no"
+LAUNCH_WRAPPER=
+LIBART=libart.so
+JIT_PROFILE="no"
+ALLOW_DEFAULT_JDWP="no"
+VERBOSE="no"
+CLEAN_OAT_FILES="yes"
+EXTRA_OPTIONS=()
+DEX2OAT_FLAGS=()
+DEX2OAT_CLASSPATH=()
+
+# Parse arguments
while [[ "$1" = "-"* ]]; do
case "$1" in
--)
@@ -275,6 +312,7 @@ while [[ "$1" = "-"* ]]; do
;& # Fallthrough
--debug)
LIBART="libartd.so"
+ DEX2OAT_BINARY=dex2oatd
# Expect that debug mode wants all checks.
EXTRA_OPTIONS+=(-XX:SlowDebug=true)
;;
@@ -329,6 +367,17 @@ if [ $# -eq 0 ]; then
exit 1
fi
+# Follow all sym links to get the program name.
+if [[ -n "$BASH_SOURCE" ]]; then
+ PROG_NAME="$BASH_SOURCE"
+else
+ PROG_NAME="$0"
+fi
+while [ -h "$PROG_NAME" ]; do
+ # On Mac OS, readlink -f doesn't work.
+ PROG_NAME="$(readlink "$PROG_NAME")"
+done
+
PROG_DIR="$(cd "${PROG_NAME%/*}" ; pwd -P)"
ANDROID_ROOT=$PROG_DIR/..
ART_BINARY_PATH=$ANDROID_ROOT/bin/$ART_BINARY
@@ -341,8 +390,32 @@ EOF
exit 1
fi
+DEX2OAT_BINARY_PATH=$ANDROID_ROOT/bin/$DEX2OAT_BINARY
+
+if [ ! -x "$DEX2OAT_BINARY_PATH" ]; then
+ echo "Warning: Android Compiler not found: $DEX2OAT_BINARY_PATH"
+fi
+
+######################################
+# Main program
+######################################
+
+# If android logging is not explicitly set, only print warnings and errors.
+if [ -z "$ANDROID_LOG_TAGS" ]; then
+ ANDROID_LOG_TAGS='*:w'
+fi
+
LIBDIR="$(find_libdir $ART_BINARY_PATH)"
LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBDIR
+DEFAULT_IMAGE_LOCATION="$(detect_boot_image_location)"
+DEX2OAT_BOOT_IMAGE="$DEFAULT_IMAGE_LOCATION"
+ISA=$(LD_LIBRARY_PATH=$LD_LIBRARY_PATH $ART_BINARY_PATH -showversion | (read art version number isa && echo $isa))
+
+# Extract the dex2oat flags from the list of arguments.
+# -Xcompiler-option arguments are stored in DEX2OAT_FLAGS array
+# -cp argument is split by ':' and stored in DEX2OAT_CLASSPATH
+# -Ximage argument is stored in DEX2OAT_BOOT_IMAGE
+extract_dex2oat_flags "$@"
# If ANDROID_DATA is the system ANDROID_DATA or is not set, use our own,
# and ensure we delete it at the end.
@@ -360,31 +433,34 @@ fi
if [ "$PERF" != "" ]; then
LAUNCH_WRAPPER="perf record -g --call-graph dwarf -F 10000 -o $ANDROID_DATA/perf.data -e cycles:u $LAUNCH_WRAPPER"
- EXTRA_OPTIONS+=(-Xcompiler-option --generate-debug-info)
+ DEX2OAT_FLAGS+=(--generate-debug-info)
fi
if [ "$ALLOW_DEFAULT_JDWP" = "no" ]; then
EXTRA_OPTIONS+=(-XjdwpProvider:none)
fi
+# First cleanup any left-over 'oat' files from the last time dalvikvm was run.
+cleanup_oat_directory_for_classpath "$@"
+
+# Protect additional arguments in quotes to preserve whitespace (used by
+# run-jdwp-test.sh when running on device), '$' (may be used as part of
+# classpath) and other special characters when evaluated.
+EXTRA_OPTIONS+=("$@")
+
if [ "$JIT_PROFILE" = "yes" ]; then
# Create the profile. The runtime expects profiles to be created before
# execution.
PROFILE_PATH="$ANDROID_DATA/primary.prof"
touch "$PROFILE_PATH"
- # Replace the compiler filter with quicken so that we
- # can capture the profile.
- ARGS_WITH_QUICKEN=
- replace_compiler_filter_with_quicken "$@"
-
run_art -Xjitsaveprofilinginfo \
-Xps-min-methods-to-save:1 \
-Xps-min-classes-to-save:1 \
-Xps-min-notification-before-wake:10 \
-Xps-profile-path:$PROFILE_PATH \
-Xusejit:true \
- "${ARGS_WITH_QUICKEN[@]}" \
+          "${EXTRA_OPTIONS[@]}" \
&> "$ANDROID_DATA/profile_gen.log"
EXIT_STATUS=$?
@@ -400,13 +476,20 @@ if [ "$JIT_PROFILE" = "yes" ]; then
rm -rf "$ANDROID_DATA/dalvik-cache"
# Append arguments so next invocation of run_art uses the profile.
- EXTRA_OPTIONS+=(-Xcompiler-option --profile-file="$PROFILE_PATH")
+ DEX2OAT_FLAGS+=(--profile-file="$PROFILE_PATH")
fi
-# Protect additional arguments in quotes to preserve whitespaces (used by
-# run-jdwp-test.sh when running on device), '$' (may be used as part of
-# classpath) and other special characters when evaluated.
-EXTRA_OPTIONS+=("$@")
+if [ -x "$DEX2OAT_BINARY_PATH" ]; then
+ # Run dex2oat before launching ART to generate the oat files for the classpath.
+ run_dex2oat
+fi
+
+# Do not continue if the dex2oat failed.
+EXIT_STATUS=$?
+if [ $EXIT_STATUS != 0 ]; then
+ echo "Failed dex2oat invocation" >&2
+ exit $EXIT_STATUS
+fi
run_art "${EXTRA_OPTIONS[@]}"
EXIT_STATUS=$?
diff --git a/tools/art_verifier/art_verifier.cc b/tools/art_verifier/art_verifier.cc
index fc62410889..bb43e67bb6 100644
--- a/tools/art_verifier/art_verifier.cc
+++ b/tools/art_verifier/art_verifier.cc
@@ -92,8 +92,7 @@ struct MethodVerifierArgs : public CmdlineArgs {
protected:
using Base = CmdlineArgs;
- virtual ParseStatus ParseCustom(const StringPiece& option,
- std::string* error_msg) OVERRIDE {
+ ParseStatus ParseCustom(const StringPiece& option, std::string* error_msg) override {
{
ParseStatus base_parse = Base::ParseCustom(option, error_msg);
if (base_parse != kParseUnknownArgument) {
@@ -119,7 +118,7 @@ struct MethodVerifierArgs : public CmdlineArgs {
return kParseOk;
}
- virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+ ParseStatus ParseChecks(std::string* error_msg) override {
// Perform the parent checks.
ParseStatus parent_checks = Base::ParseChecks(error_msg);
if (parent_checks != kParseOk) {
@@ -166,16 +165,16 @@ struct MethodVerifierArgs : public CmdlineArgs {
};
struct MethodVerifierMain : public CmdlineMain<MethodVerifierArgs> {
- bool NeedsRuntime() OVERRIDE {
+ bool NeedsRuntime() override {
return true;
}
- bool ExecuteWithoutRuntime() OVERRIDE {
+ bool ExecuteWithoutRuntime() override {
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
- bool ExecuteWithRuntime(Runtime* runtime) OVERRIDE {
+ bool ExecuteWithRuntime(Runtime* runtime) override {
CHECK(args_ != nullptr);
const size_t dex_reps = args_->dex_file_verifier_
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java
new file mode 100644
index 0000000000..eb54a332eb
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java
@@ -0,0 +1,66 @@
+package com.android.class2greylist;
+
+import org.apache.bcel.Const;
+import org.apache.bcel.classfile.FieldOrMethod;
+import org.apache.bcel.classfile.JavaClass;
+
+import java.util.Locale;
+
+/**
+ * Encapsulates context for a single annotation on a class member.
+ */
+public class AnnotationContext {
+
+ public final Status status;
+ public final FieldOrMethod member;
+ public final JavaClass definingClass;
+ public final String signatureFormatString;
+
+ public AnnotationContext(
+ Status status,
+ FieldOrMethod member,
+ JavaClass definingClass,
+ String signatureFormatString) {
+ this.status = status;
+ this.member = member;
+ this.definingClass = definingClass;
+ this.signatureFormatString = signatureFormatString;
+ }
+
+ /**
+ * @return the full descriptor of enclosing class.
+ */
+ public String getClassDescriptor() {
+ // JavaClass.getName() returns the Java-style name (with . not /), so we must fetch
+ // the original class name from the constant pool.
+ return definingClass.getConstantPool().getConstantString(
+ definingClass.getClassNameIndex(), Const.CONSTANT_Class);
+ }
+
+ /**
+ * @return the full descriptor of this member, in the format expected in
+ * the greylist.
+ */
+ public String getMemberDescriptor() {
+ return String.format(Locale.US, signatureFormatString,
+ getClassDescriptor(), member.getName(), member.getSignature());
+ }
+
+ /**
+ * Report an error in this context. The final error message will include
+ * the class and member names, and the source file name.
+ */
+ public void reportError(String message, Object... args) {
+ StringBuilder error = new StringBuilder();
+ error.append(definingClass.getSourceFileName())
+ .append(": ")
+ .append(definingClass.getClassName())
+ .append(".")
+ .append(member.getName())
+ .append(": ")
+ .append(String.format(Locale.US, message, args));
+
+ status.error(error.toString());
+ }
+
+}
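
To make the format strings concrete: the visitors that construct this context pass "L%s;->%s%s" for methods and "L%s;->%s:%s" for fields. A minimal, self-contained sketch with invented values (JDK only) of what getMemberDescriptor() then produces:

    import java.util.Locale;

    public class DescriptorFormatDemo {
        public static void main(String[] args) {
            String method = String.format(Locale.US, "L%s;->%s%s",
                    "a/b/Class", "method", "()Ljava/lang/String;");
            String field = String.format(Locale.US, "L%s;->%s:%s",
                    "a/b/Class", "field", "I");
            System.out.println(method); // La/b/Class;->method()Ljava/lang/String;
            System.out.println(field);  // La/b/Class;->field:I
        }
    }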
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationHandler.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationHandler.java
new file mode 100644
index 0000000000..92d2ab6d79
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationHandler.java
@@ -0,0 +1,11 @@
+package com.android.class2greylist;
+
+import org.apache.bcel.classfile.AnnotationEntry;
+
+/**
+ * Interface for an annotation handler, which handles individual annotations on
+ * class members.
+ */
+public interface AnnotationHandler {
+ void handleAnnotation(AnnotationEntry annotation, AnnotationContext context);
+}
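
As a sketch of the contract, a hypothetical handler (illustration only, not part of this change) that just logs what it is given; the context supplies the member descriptor and error reporting:

    package com.android.class2greylist;

    import org.apache.bcel.classfile.AnnotationEntry;

    // Hypothetical handler, for illustration only.
    public class LoggingAnnotationHandler implements AnnotationHandler {
        @Override
        public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
            System.out.println(annotation.getAnnotationType()
                    + " -> " + context.getMemberDescriptor());
        }
    }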
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
index 5914b26331..b805b307a3 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
@@ -16,78 +16,40 @@
package com.android.class2greylist;
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.bcel.Const;
import org.apache.bcel.classfile.AnnotationEntry;
import org.apache.bcel.classfile.DescendingVisitor;
-import org.apache.bcel.classfile.ElementValuePair;
import org.apache.bcel.classfile.EmptyVisitor;
import org.apache.bcel.classfile.Field;
import org.apache.bcel.classfile.FieldOrMethod;
import org.apache.bcel.classfile.JavaClass;
import org.apache.bcel.classfile.Method;
-import java.util.Locale;
-import java.util.Set;
-import java.util.function.Predicate;
+import java.util.Map;
/**
- * Visits a JavaClass instance and pulls out all members annotated with a
- * specific annotation. The signatures of such members are passed to {@link
- * Status#greylistEntry(String)}. Any errors result in a call to {@link
- * Status#error(String)}.
- *
- * If the annotation has a property "expectedSignature" the generated signature
- * will be verified against the one specified there. If it differs, an error
- * will be generated.
+ * Visits a JavaClass instance and passes any annotated members to an {@link AnnotationHandler}
+ * according to the map provided.
*/
public class AnnotationVisitor extends EmptyVisitor {
- private static final String EXPECTED_SIGNATURE = "expectedSignature";
-
private final JavaClass mClass;
- private final String mAnnotationType;
- private final Predicate<Member> mMemberFilter;
private final Status mStatus;
private final DescendingVisitor mDescendingVisitor;
+ private final Map<String, AnnotationHandler> mAnnotationHandlers;
/**
- * Represents a member of a class file (a field or method).
+ * Creates a visitor for a class.
+ *
+ * @param clazz Class to visit
+ * @param status For reporting debug information
+     * @param handlers Map of {@link AnnotationHandler}. The keys should be annotation
+     *                 names, in class descriptor format.
*/
- @VisibleForTesting
- public static class Member {
-
- /**
- * Signature of this member.
- */
- public final String signature;
- /**
- * Indicates if this is a synthetic bridge method.
- */
- public final boolean bridge;
-
- public Member(String signature, boolean bridge) {
- this.signature = signature;
- this.bridge = bridge;
- }
- }
-
- public AnnotationVisitor(
- JavaClass clazz, String annotation, Set<String> publicApis, Status status) {
- this(clazz,
- annotation,
- member -> !(member.bridge && publicApis.contains(member.signature)),
- status);
- }
-
- @VisibleForTesting
- public AnnotationVisitor(
- JavaClass clazz, String annotation, Predicate<Member> memberFilter, Status status) {
+ public AnnotationVisitor(JavaClass clazz, Status status,
+ Map<String, AnnotationHandler> handlers) {
mClass = clazz;
- mAnnotationType = annotation;
- mMemberFilter = memberFilter;
mStatus = status;
+ mAnnotationHandlers = handlers;
mDescendingVisitor = new DescendingVisitor(clazz, this);
}
@@ -96,13 +58,6 @@ public class AnnotationVisitor extends EmptyVisitor {
mDescendingVisitor.visit();
}
- private static String getClassDescriptor(JavaClass clazz) {
- // JavaClass.getName() returns the Java-style name (with . not /), so we must fetch
- // the original class name from the constant pool.
- return clazz.getConstantPool().getConstantString(
- clazz.getClassNameIndex(), Const.CONSTANT_Class);
- }
-
@Override
public void visitMethod(Method method) {
visitMember(method, "L%s;->%s%s");
@@ -114,51 +69,15 @@ public class AnnotationVisitor extends EmptyVisitor {
}
private void visitMember(FieldOrMethod member, String signatureFormatString) {
- JavaClass definingClass = (JavaClass) mDescendingVisitor.predecessor();
mStatus.debug("Visit member %s : %s", member.getName(), member.getSignature());
+ AnnotationContext context = new AnnotationContext(mStatus, member,
+ (JavaClass) mDescendingVisitor.predecessor(), signatureFormatString);
for (AnnotationEntry a : member.getAnnotationEntries()) {
- if (mAnnotationType.equals(a.getAnnotationType())) {
- mStatus.debug("Member has annotation %s", mAnnotationType);
- // For fields, the same access flag means volatile, so only check for methods.
- boolean bridge = (member instanceof Method)
- && (member.getAccessFlags() & Const.ACC_BRIDGE) != 0;
- if (bridge) {
- mStatus.debug("Member is a bridge", mAnnotationType);
- }
- String signature = String.format(Locale.US, signatureFormatString,
- getClassDescriptor(definingClass), member.getName(), member.getSignature());
- for (ElementValuePair property : a.getElementValuePairs()) {
- switch (property.getNameString()) {
- case EXPECTED_SIGNATURE:
- String expected = property.getValue().stringifyValue();
- // Don't enforce for bridge methods; they're generated so won't match.
- if (!bridge && !signature.equals(expected)) {
- error(definingClass, member,
- "Expected signature does not match generated:\n"
- + "Expected: %s\n"
- + "Generated: %s", expected, signature);
- }
- break;
- }
- }
- if (mMemberFilter.test(new Member(signature, bridge))) {
- mStatus.greylistEntry(signature);
- }
+ if (mAnnotationHandlers.containsKey(a.getAnnotationType())) {
+ mStatus.debug("Member has annotation %s for which we have a handler",
+ a.getAnnotationType());
+ mAnnotationHandlers.get(a.getAnnotationType()).handleAnnotation(a, context);
}
}
}
-
- private void error(JavaClass clazz, FieldOrMethod member, String message, Object... args) {
- StringBuilder error = new StringBuilder();
- error.append(clazz.getSourceFileName())
- .append(": ")
- .append(clazz.getClassName())
- .append(".")
- .append(member.getName())
- .append(": ")
- .append(String.format(Locale.US, message, args));
-
- mStatus.error(error.toString());
- }
-
}
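
A wiring sketch for the new dispatch model (clazz, status and greylistHandler are assumed to be supplied by the caller, e.g. from JarReader as in Class2Greylist below); the visitor itself no longer knows about any particular annotation:

    package com.android.class2greylist;

    import com.google.common.collect.ImmutableMap;
    import org.apache.bcel.classfile.JavaClass;
    import java.util.Map;

    public class VisitorWiringDemo {
        static void visitOneClass(JavaClass clazz, Status status,
                AnnotationHandler greylistHandler) {
            // Keys are annotation class descriptors; unknown annotations are ignored.
            Map<String, AnnotationHandler> handlers = ImmutableMap.of(
                    "Landroid/annotation/UnsupportedAppUsage;", greylistHandler);
            new AnnotationVisitor(clazz, status, handlers).visit();
        }
    }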
diff --git a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
index abc9421e65..92620760e7 100644
--- a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
+++ b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
@@ -16,6 +16,9 @@
package com.android.class2greylist;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
@@ -30,7 +33,11 @@ import org.apache.commons.cli.ParseException;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
+import java.util.Arrays;
import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
import java.util.Set;
/**
@@ -39,7 +46,18 @@ import java.util.Set;
*/
public class Class2Greylist {
- private static final String ANNOTATION_TYPE = "Landroid/annotation/UnsupportedAppUsage;";
+ private static final String GREYLIST_ANNOTATION = "Landroid/annotation/UnsupportedAppUsage;";
+ private static final Set<String> WHITELIST_ANNOTATIONS = ImmutableSet.of();
+
+ private final Status mStatus;
+ private final String mPublicApiListFile;
+ private final String[] mPerSdkOutputFiles;
+ private final String mWhitelistFile;
+ private final String[] mJarFiles;
+ private final GreylistConsumer mOutput;
+ private final Set<Integer> mAllowedSdkVersions;
+ private final Set<String> mPublicApis;
+
public static void main(String[] args) {
Options options = new Options();
@@ -49,11 +67,31 @@ public class Class2Greylist {
.withDescription("Public API list file. Used to de-dupe bridge methods.")
.create("p"));
options.addOption(OptionBuilder
+ .withLongOpt("write-greylist")
+ .hasArgs()
+ .withDescription(
+ "Specify file to write greylist to. Can be specified multiple times. " +
+ "Format is either just a filename, or \"int:filename\". If an integer is " +
+ "given, members with a matching maxTargetSdk are written to the file; if " +
+ "no integer is given, members with no maxTargetSdk are written.")
+ .create("g"));
+ options.addOption(OptionBuilder
+ .withLongOpt("write-whitelist")
+ .hasArgs(1)
+ .withDescription("Specify file to write whitelist to.")
+ .create('w'));
+ options.addOption(OptionBuilder
.withLongOpt("debug")
.hasArgs(0)
.withDescription("Enable debug")
.create("d"));
options.addOption(OptionBuilder
+ .withLongOpt("dump-all-members")
+ .withDescription("Dump all members from jar files to stdout. Ignore annotations. " +
+ "Do not use in conjunction with any other arguments.")
+ .hasArgs(0)
+ .create('m'));
+ options.addOption(OptionBuilder
.withLongOpt("help")
.hasArgs(0)
.withDescription("Show this help")
@@ -72,7 +110,7 @@ public class Class2Greylist {
if (cmd.hasOption('h')) {
help(options);
}
- String publicApiFilename = cmd.getOptionValue('p', null);
+
String[] jarFiles = cmd.getArgs();
if (jarFiles.length == 0) {
@@ -82,37 +120,130 @@ public class Class2Greylist {
Status status = new Status(cmd.hasOption('d'));
- Set<String> publicApis;
- if (publicApiFilename != null) {
+ if (cmd.hasOption('m')) {
+ dumpAllMembers(status, jarFiles);
+ } else {
try {
- publicApis = Sets.newHashSet(
- Files.readLines(new File(publicApiFilename), Charset.forName("UTF-8")));
+ Class2Greylist c2gl = new Class2Greylist(
+ status,
+ cmd.getOptionValue('p', null),
+ cmd.getOptionValues('g'),
+ cmd.getOptionValue('w', null),
+ jarFiles);
+ c2gl.main();
} catch (IOException e) {
status.error(e);
- System.exit(1);
- return;
}
+ }
+
+ if (status.ok()) {
+ System.exit(0);
} else {
- publicApis = Collections.emptySet();
+ System.exit(1);
}
+ }
+
+ @VisibleForTesting
+ Class2Greylist(Status status, String publicApiListFile, String[] perSdkLevelOutputFiles,
+ String whitelistOutputFile, String[] jarFiles) throws IOException {
+ mStatus = status;
+ mPublicApiListFile = publicApiListFile;
+ mPerSdkOutputFiles = perSdkLevelOutputFiles;
+ mWhitelistFile = whitelistOutputFile;
+ mJarFiles = jarFiles;
+ if (mPerSdkOutputFiles != null) {
+ Map<Integer, String> outputFiles = readGreylistMap(mStatus, mPerSdkOutputFiles);
+ mOutput = new FileWritingGreylistConsumer(mStatus, outputFiles, mWhitelistFile);
+ mAllowedSdkVersions = outputFiles.keySet();
+ } else {
+            // TODO: remove this once per-SDK greylist support is integrated into the
+            // build. Right now, mPerSdkOutputFiles is always null because the build
+            // never passes the corresponding command line flags. Once the build is
+            // updated, this block can be removed.
+ mOutput = new SystemOutGreylistConsumer();
+ mAllowedSdkVersions = new HashSet<>(Arrays.asList(null, 26, 28));
+ }
+
+ if (mPublicApiListFile != null) {
+ mPublicApis = Sets.newHashSet(
+ Files.readLines(new File(mPublicApiListFile), Charset.forName("UTF-8")));
+ } else {
+ mPublicApis = Collections.emptySet();
+ }
+ }
+
+ private Map<String, AnnotationHandler> createAnnotationHandlers() {
+ return ImmutableMap.<String, AnnotationHandler>builder()
+ .put(GreylistAnnotationHandler.ANNOTATION_NAME,
+ new GreylistAnnotationHandler(
+ mStatus, mOutput, mPublicApis, mAllowedSdkVersions))
+ .put(CovariantReturnTypeHandler.ANNOTATION_NAME,
+ new CovariantReturnTypeHandler(mOutput, mPublicApis))
+ .put(CovariantReturnTypeMultiHandler.ANNOTATION_NAME,
+ new CovariantReturnTypeMultiHandler(mOutput, mPublicApis))
+ .build();
+ }
+
+ private void main() throws IOException {
+ Map<String, AnnotationHandler> handlers = createAnnotationHandlers();
+ for (String jarFile : mJarFiles) {
+ mStatus.debug("Processing jar file %s", jarFile);
+ try {
+ JarReader reader = new JarReader(mStatus, jarFile);
+ reader.stream().forEach(clazz -> new AnnotationVisitor(clazz, mStatus, handlers)
+ .visit());
+ reader.close();
+ } catch (IOException e) {
+ mStatus.error(e);
+ }
+ }
+ mOutput.close();
+ }
+
+ @VisibleForTesting
+ static Map<Integer, String> readGreylistMap(Status status, String[] argValues) {
+ Map<Integer, String> map = new HashMap<>();
+ for (String sdkFile : argValues) {
+ Integer maxTargetSdk = null;
+ String filename;
+ int colonPos = sdkFile.indexOf(':');
+ if (colonPos != -1) {
+ try {
+ maxTargetSdk = Integer.valueOf(sdkFile.substring(0, colonPos));
+ } catch (NumberFormatException nfe) {
+ status.error("Not a valid integer: %s from argument value '%s'",
+ sdkFile.substring(0, colonPos), sdkFile);
+ }
+ filename = sdkFile.substring(colonPos + 1);
+ if (filename.length() == 0) {
+ status.error("Not a valid file name: %s from argument value '%s'",
+ filename, sdkFile);
+ }
+ } else {
+ maxTargetSdk = null;
+ filename = sdkFile;
+ }
+ if (map.containsKey(maxTargetSdk)) {
+ status.error("Multiple output files for maxTargetSdk %s", maxTargetSdk);
+ } else {
+ map.put(maxTargetSdk, filename);
+ }
+ }
+ return map;
+ }
+
+ private static void dumpAllMembers(Status status, String[] jarFiles) {
for (String jarFile : jarFiles) {
status.debug("Processing jar file %s", jarFile);
try {
JarReader reader = new JarReader(status, jarFile);
- reader.stream().forEach(clazz -> new AnnotationVisitor(clazz, ANNOTATION_TYPE,
- publicApis, status).visit());
+ reader.stream().forEach(clazz -> new MemberDumpingVisitor(clazz, status)
+ .visit());
reader.close();
} catch (IOException e) {
status.error(e);
}
}
- if (status.ok()) {
- System.exit(0);
- } else {
- System.exit(1);
- }
-
}
private static void help(Options options) {
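
To illustrate the "int:filename" format accepted by -g and parsed by readGreylistMap above, a sketch with invented file names (map iteration order is not guaranteed):

    package com.android.class2greylist;

    import java.util.Map;

    public class GreylistMapDemo {
        public static void main(String[] args) {
            Status status = new Status(false /* debug */);
            Map<Integer, String> map = Class2Greylist.readGreylistMap(status, new String[] {
                    "greylist.txt",             // no prefix: members with no maxTargetSdk
                    "26:greylist-max-26.txt",   // members with maxTargetSdk == 26
                    "28:greylist-max-28.txt"}); // members with maxTargetSdk == 28
            System.out.println(map); // e.g. {null=greylist.txt, 26=..., 28=...}
        }
    }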
diff --git a/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java b/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java
new file mode 100644
index 0000000000..afd15b4c59
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java
@@ -0,0 +1,89 @@
+package com.android.class2greylist;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.bcel.classfile.AnnotationEntry;
+import org.apache.bcel.classfile.ElementValuePair;
+import org.apache.bcel.classfile.Method;
+
+import java.util.Locale;
+import java.util.Set;
+
+/**
+ * Handles {@code CovariantReturnType} annotations, generating whitelist
+ * entries from them.
+ *
+ * <p>A whitelist entry is generated with the same descriptor as the original
+ * method, but with the return type replaced with that specified by the
+ * {@link #RETURN_TYPE} property.
+ *
+ * <p>Methods are also validated against the public API list, to assert that
+ * the annotated method is already a public API.
+ */
+public class CovariantReturnTypeHandler implements AnnotationHandler {
+
+ private static final String SHORT_NAME = "CovariantReturnType";
+ public static final String ANNOTATION_NAME = "Ldalvik/annotation/codegen/CovariantReturnType;";
+
+ private static final String RETURN_TYPE = "returnType";
+
+ private final GreylistConsumer mConsumer;
+ private final Set<String> mPublicApis;
+
+ public CovariantReturnTypeHandler(GreylistConsumer consumer, Set<String> publicApis) {
+ mConsumer = consumer;
+ mPublicApis = publicApis;
+ }
+
+ @Override
+ public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
+ // Verify that the annotation has been applied to what we expect, and
+ // has the right form. Note, this should not strictly be necessary, as
+ // the annotation has a target of just 'method' and the property
+ // returnType does not have a default value, but checking makes the code
+ // less brittle to future changes.
+ if (!(context.member instanceof Method)) {
+ context.reportError("Cannot specify %s on a field", RETURN_TYPE);
+ return;
+ }
+ String returnType = findReturnType(annotation);
+ if (returnType == null) {
+ context.reportError("No %s set on @%s", RETURN_TYPE, SHORT_NAME);
+ return;
+ }
+ if (!mPublicApis.contains(context.getMemberDescriptor())) {
+ context.reportError("Found @%s on non-SDK method", SHORT_NAME);
+ return;
+ }
+
+        // Generate the signature of the overload that we expect the annotation will
+ // cause the platform dexer to create.
+ String typeSignature = context.member.getSignature();
+ int closingBrace = typeSignature.indexOf(')');
+ Preconditions.checkState(closingBrace != -1,
+ "No ) found in method type signature %s", typeSignature);
+ typeSignature = new StringBuilder()
+ .append(typeSignature.substring(0, closingBrace + 1))
+ .append(returnType)
+ .toString();
+ String signature = String.format(Locale.US, context.signatureFormatString,
+ context.getClassDescriptor(), context.member.getName(), typeSignature);
+
+ if (mPublicApis.contains(signature)) {
+ context.reportError("Signature %s generated from @%s already exists as a public API",
+ signature, SHORT_NAME);
+ return;
+ }
+ mConsumer.whitelistEntry(signature);
+ }
+
+ private String findReturnType(AnnotationEntry a) {
+ for (ElementValuePair property : a.getElementValuePairs()) {
+ if (property.getNameString().equals(RETURN_TYPE)) {
+ return property.getValue().stringifyValue();
+ }
+ }
+ // not found
+ return null;
+ }
+}
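
The overload signature generated above is a plain substring splice at the closing parenthesis of the type signature; a self-contained sketch with invented values:

    public class ReturnTypeRewriteDemo {
        public static void main(String[] args) {
            String typeSignature = "(I)Ljava/lang/String;"; // original method signature
            String returnType = "Ljava/lang/Integer;";      // from the annotation
            int closingBrace = typeSignature.indexOf(')');
            // Keep everything up to and including ')', then append the new type.
            String rewritten = typeSignature.substring(0, closingBrace + 1) + returnType;
            System.out.println(rewritten); // (I)Ljava/lang/Integer;
        }
    }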
diff --git a/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeMultiHandler.java b/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeMultiHandler.java
new file mode 100644
index 0000000000..bd0bf79169
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeMultiHandler.java
@@ -0,0 +1,72 @@
+package com.android.class2greylist;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+import org.apache.bcel.classfile.AnnotationElementValue;
+import org.apache.bcel.classfile.AnnotationEntry;
+import org.apache.bcel.classfile.ArrayElementValue;
+import org.apache.bcel.classfile.ElementValue;
+import org.apache.bcel.classfile.ElementValuePair;
+
+import java.util.Set;
+
+/**
+ * Handles {@code CovariantReturnType$CovariantReturnTypes} annotations, which
+ * are generated by the compiler when multiple {@code CovariantReturnType}
+ * annotations appear on a single method.
+ *
+ * <p>The enclosed annotations are passed to {@link CovariantReturnTypeHandler}.
+ */
+public class CovariantReturnTypeMultiHandler implements AnnotationHandler {
+
+ public static final String ANNOTATION_NAME =
+ "Ldalvik/annotation/codegen/CovariantReturnType$CovariantReturnTypes;";
+
+ private static final String VALUE = "value";
+
+ private final CovariantReturnTypeHandler mWrappedHandler;
+ private final String mInnerAnnotationName;
+
+ public CovariantReturnTypeMultiHandler(GreylistConsumer consumer, Set<String> publicApis) {
+ this(consumer, publicApis, CovariantReturnTypeHandler.ANNOTATION_NAME);
+ }
+
+ @VisibleForTesting
+ public CovariantReturnTypeMultiHandler(GreylistConsumer consumer, Set<String> publicApis,
+ String innerAnnotationName) {
+ mWrappedHandler = new CovariantReturnTypeHandler(consumer, publicApis);
+ mInnerAnnotationName = innerAnnotationName;
+ }
+
+ @Override
+ public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
+ // Verify that the annotation has the form we expect
+ ElementValuePair value = findValue(annotation);
+ if (value == null) {
+ context.reportError("No value found on CovariantReturnType$CovariantReturnTypes");
+ return;
+ }
+ Preconditions.checkArgument(value.getValue() instanceof ArrayElementValue);
+ ArrayElementValue array = (ArrayElementValue) value.getValue();
+
+ // call wrapped handler on each enclosed annotation:
+ for (ElementValue v : array.getElementValuesArray()) {
+ Preconditions.checkArgument(v instanceof AnnotationElementValue);
+ AnnotationElementValue aev = (AnnotationElementValue) v;
+ Preconditions.checkArgument(
+ aev.getAnnotationEntry().getAnnotationType().equals(mInnerAnnotationName));
+ mWrappedHandler.handleAnnotation(aev.getAnnotationEntry(), context);
+ }
+ }
+
+ private ElementValuePair findValue(AnnotationEntry a) {
+ for (ElementValuePair property : a.getElementValuePairs()) {
+ if (property.getNameString().equals(VALUE)) {
+ return property;
+ }
+ }
+ // not found
+ return null;
+ }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
new file mode 100644
index 0000000000..9f334677c3
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
@@ -0,0 +1,66 @@
+package com.android.class2greylist;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.PrintStream;
+import java.util.HashMap;
+import java.util.Map;
+
+public class FileWritingGreylistConsumer implements GreylistConsumer {
+
+ private final Status mStatus;
+ private final Map<Integer, PrintStream> mSdkToPrintStreamMap;
+ private final PrintStream mWhitelistStream;
+
+ private static PrintStream openFile(String filename) throws FileNotFoundException {
+ if (filename == null) {
+ return null;
+ }
+ return new PrintStream(new FileOutputStream(new File(filename)));
+ }
+
+ private static Map<Integer, PrintStream> openFiles(
+ Map<Integer, String> filenames) throws FileNotFoundException {
+ Map<Integer, PrintStream> streams = new HashMap<>();
+ for (Map.Entry<Integer, String> entry : filenames.entrySet()) {
+ streams.put(entry.getKey(), openFile(entry.getValue()));
+ }
+ return streams;
+ }
+
+ public FileWritingGreylistConsumer(Status status, Map<Integer, String> sdkToFilenameMap,
+ String whitelistFile) throws FileNotFoundException {
+ mStatus = status;
+ mSdkToPrintStreamMap = openFiles(sdkToFilenameMap);
+ mWhitelistStream = openFile(whitelistFile);
+ }
+
+ @Override
+ public void greylistEntry(String signature, Integer maxTargetSdk) {
+ PrintStream p = mSdkToPrintStreamMap.get(maxTargetSdk);
+ if (p == null) {
+            mStatus.error("No output file for signature %s with maxTargetSdk of %s", signature,
+ maxTargetSdk == null ? "<absent>" : maxTargetSdk.toString());
+ return;
+ }
+ p.println(signature);
+ }
+
+ @Override
+ public void whitelistEntry(String signature) {
+ if (mWhitelistStream != null) {
+ mWhitelistStream.println(signature);
+ }
+ }
+
+ @Override
+ public void close() {
+ for (PrintStream p : mSdkToPrintStreamMap.values()) {
+ p.close();
+ }
+ if (mWhitelistStream != null) {
+ mWhitelistStream.close();
+ }
+ }
+}
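
A usage sketch with invented paths: one PrintStream per maxTargetSdk bucket (keyed by null for members with no maxTargetSdk) plus an optional whitelist stream:

    package com.android.class2greylist;

    import java.io.FileNotFoundException;
    import java.util.HashMap;
    import java.util.Map;

    public class ConsumerDemo {
        public static void main(String[] args) throws FileNotFoundException {
            Map<Integer, String> sdkToFile = new HashMap<>();
            sdkToFile.put(null, "greylist.txt");      // members with no maxTargetSdk
            sdkToFile.put(28, "greylist-max-28.txt"); // members with maxTargetSdk == 28
            GreylistConsumer consumer = new FileWritingGreylistConsumer(
                    new Status(false), sdkToFile, "whitelist.txt");
            consumer.greylistEntry("La/b/Class;->method()V", 28);
            consumer.whitelistEntry("La/b/Class;->method()I");
            consumer.close();
        }
    }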
diff --git a/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java b/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java
new file mode 100644
index 0000000000..460f2c3c22
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java
@@ -0,0 +1,146 @@
+package com.android.class2greylist;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+
+import org.apache.bcel.Const;
+import org.apache.bcel.classfile.AnnotationEntry;
+import org.apache.bcel.classfile.ElementValue;
+import org.apache.bcel.classfile.ElementValuePair;
+import org.apache.bcel.classfile.FieldOrMethod;
+import org.apache.bcel.classfile.Method;
+import org.apache.bcel.classfile.SimpleElementValue;
+
+import java.util.Set;
+import java.util.function.Predicate;
+
+/**
+ * Processes {@code UnsupportedAppUsage} annotations to generate greylist
+ * entries.
+ *
+ * Any annotations with a {@link #EXPECTED_SIGNATURE} property will have their
+ * generated signature verified against this, and an error will be reported if
+ * it does not match. Exclusions are made for bridge methods.
+ *
+ * Any {@link #MAX_TARGET_SDK} properties will be validated against the given
+ * set of valid values, then passed through to the greylist consumer.
+ */
+public class GreylistAnnotationHandler implements AnnotationHandler {
+
+ public static final String ANNOTATION_NAME = "Landroid/annotation/UnsupportedAppUsage;";
+
+ // properties of greylist annotations:
+ private static final String EXPECTED_SIGNATURE = "expectedSignature";
+ private static final String MAX_TARGET_SDK = "maxTargetSdk";
+
+ private final Status mStatus;
+ private final Predicate<GreylistMember> mGreylistFilter;
+ private final GreylistConsumer mGreylistConsumer;
+ private final Set<Integer> mValidMaxTargetSdkValues;
+
+ /**
+ * Represents a member of a class file (a field or method).
+ */
+ @VisibleForTesting
+ public static class GreylistMember {
+
+ /**
+ * Signature of this member.
+ */
+ public final String signature;
+ /**
+ * Indicates if this is a synthetic bridge method.
+ */
+ public final boolean bridge;
+ /**
+     * Max target SDK property of this member, if it is set, else null.
+     *
+     * Note: even though the annotation itself specifies a default value,
+     * that default value is not encoded into instances of the annotation
+     * in class files. So when no value is specified in source, this field
+     * will be null.
+ */
+ public final Integer maxTargetSdk;
+
+ public GreylistMember(String signature, boolean bridge, Integer maxTargetSdk) {
+ this.signature = signature;
+ this.bridge = bridge;
+ this.maxTargetSdk = maxTargetSdk;
+ }
+ }
+
+ public GreylistAnnotationHandler(
+ Status status,
+ GreylistConsumer greylistConsumer,
+ Set<String> publicApis,
+ Set<Integer> validMaxTargetSdkValues) {
+ this(status, greylistConsumer,
+ member -> !(member.bridge && publicApis.contains(member.signature)),
+ validMaxTargetSdkValues);
+ }
+
+ @VisibleForTesting
+ public GreylistAnnotationHandler(
+ Status status,
+ GreylistConsumer greylistConsumer,
+ Predicate<GreylistMember> greylistFilter,
+ Set<Integer> validMaxTargetSdkValues) {
+ mStatus = status;
+ mGreylistConsumer = greylistConsumer;
+ mGreylistFilter = greylistFilter;
+ mValidMaxTargetSdkValues = validMaxTargetSdkValues;
+ }
+
+ @Override
+ public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
+ FieldOrMethod member = context.member;
+ boolean bridge = (member instanceof Method)
+ && (member.getAccessFlags() & Const.ACC_BRIDGE) != 0;
+ if (bridge) {
+ mStatus.debug("Member is a bridge");
+ }
+ String signature = context.getMemberDescriptor();
+ Integer maxTargetSdk = null;
+ for (ElementValuePair property : annotation.getElementValuePairs()) {
+ switch (property.getNameString()) {
+ case EXPECTED_SIGNATURE:
+ verifyExpectedSignature(context, property, signature, bridge);
+ break;
+ case MAX_TARGET_SDK:
+ maxTargetSdk = verifyAndGetMaxTargetSdk(context, property);
+ break;
+ }
+ }
+ if (mGreylistFilter.test(new GreylistMember(signature, bridge, maxTargetSdk))) {
+ mGreylistConsumer.greylistEntry(signature, maxTargetSdk);
+ }
+ }
+
+ private void verifyExpectedSignature(AnnotationContext context, ElementValuePair property,
+ String signature, boolean isBridge) {
+ String expected = property.getValue().stringifyValue();
+ // Don't enforce for bridge methods; they're generated so won't match.
+ if (!isBridge && !signature.equals(expected)) {
+ context.reportError("Expected signature does not match generated:\n"
+ + "Expected: %s\n"
+ + "Generated: %s", expected, signature);
+ }
+ }
+
+ private Integer verifyAndGetMaxTargetSdk(AnnotationContext context, ElementValuePair property) {
+ if (property.getValue().getElementValueType() != ElementValue.PRIMITIVE_INT) {
+ context.reportError("Expected property %s to be of type int; got %d",
+ property.getNameString(), property.getValue().getElementValueType());
+ }
+ int value = ((SimpleElementValue) property.getValue()).getValueInt();
+ if (!mValidMaxTargetSdkValues.contains(value)) {
+ context.reportError("Invalid value for %s: got %d, expected one of [%s]",
+ property.getNameString(),
+ value,
+ Joiner.on(",").join(mValidMaxTargetSdkValues));
+ return null;
+ }
+ return value;
+ }
+
+}
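
The default filter wired up by the first constructor drops only synthetic bridge methods whose signature already appears in the public API list; a sketch with invented signatures:

    package com.android.class2greylist;

    import com.google.common.collect.ImmutableSet;
    import java.util.Set;
    import java.util.function.Predicate;

    public class BridgeFilterDemo {
        public static void main(String[] args) {
            Set<String> publicApis = ImmutableSet.of("La/b/C;->m()V");
            Predicate<GreylistAnnotationHandler.GreylistMember> filter =
                    m -> !(m.bridge && publicApis.contains(m.signature));
            // A bridge method duplicating a public API is dropped...
            System.out.println(filter.test(new GreylistAnnotationHandler.GreylistMember(
                    "La/b/C;->m()V", true, null)));  // false
            // ...but a non-bridge member with the same signature is kept.
            System.out.println(filter.test(new GreylistAnnotationHandler.GreylistMember(
                    "La/b/C;->m()V", false, null))); // true
        }
    }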
diff --git a/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
new file mode 100644
index 0000000000..fd855e88ed
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
@@ -0,0 +1,20 @@
+package com.android.class2greylist;
+
+public interface GreylistConsumer {
+ /**
+ * Handle a new greylist entry.
+ *
+ * @param signature Signature of the member.
+ * @param maxTargetSdk maxTargetSdk value from the annotation, or null if none set.
+ */
+ void greylistEntry(String signature, Integer maxTargetSdk);
+
+ /**
+ * Handle a new whitelist entry.
+ *
+ * @param signature Signature of the member.
+ */
+ void whitelistEntry(String signature);
+
+ void close();
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java b/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java
new file mode 100644
index 0000000000..6677a3f3ab
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java
@@ -0,0 +1,47 @@
+package com.android.class2greylist;
+
+import org.apache.bcel.classfile.DescendingVisitor;
+import org.apache.bcel.classfile.EmptyVisitor;
+import org.apache.bcel.classfile.Field;
+import org.apache.bcel.classfile.FieldOrMethod;
+import org.apache.bcel.classfile.JavaClass;
+import org.apache.bcel.classfile.Method;
+
+/**
+ * A class file visitor that simply prints to stdout the signature of every member within the class.
+ */
+public class MemberDumpingVisitor extends EmptyVisitor {
+
+ private final Status mStatus;
+ private final DescendingVisitor mDescendingVisitor;
+
+ /**
+ * Creates a visitor for a class.
+ *
+ * @param clazz Class to visit
+ */
+ public MemberDumpingVisitor(JavaClass clazz, Status status) {
+ mStatus = status;
+ mDescendingVisitor = new DescendingVisitor(clazz, this);
+ }
+
+ public void visit() {
+ mDescendingVisitor.visit();
+ }
+
+ @Override
+ public void visitMethod(Method method) {
+ visitMember(method, "L%s;->%s%s");
+ }
+
+ @Override
+ public void visitField(Field field) {
+ visitMember(field, "L%s;->%s:%s");
+ }
+
+ private void visitMember(FieldOrMethod member, String signatureFormatString) {
+ AnnotationContext context = new AnnotationContext(mStatus, member,
+ (JavaClass) mDescendingVisitor.predecessor(), signatureFormatString);
+ System.out.println(context.getMemberDescriptor());
+ }
+}
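
How the --dump-all-members path drives this visitor, mirroring dumpAllMembers in Class2Greylist (jar name invented; JarReader used exactly as there):

    package com.android.class2greylist;

    import java.io.IOException;

    public class DumpDemo {
        public static void main(String[] args) throws IOException {
            Status status = new Status(false);
            JarReader reader = new JarReader(status, "framework-classes.jar");
            reader.stream().forEach(clazz -> new MemberDumpingVisitor(clazz, status).visit());
            reader.close();
        }
    }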
diff --git a/tools/class2greylist/src/com/android/class2greylist/Status.java b/tools/class2greylist/src/com/android/class2greylist/Status.java
index d7078986d9..b5ee9f138f 100644
--- a/tools/class2greylist/src/com/android/class2greylist/Status.java
+++ b/tools/class2greylist/src/com/android/class2greylist/Status.java
@@ -42,16 +42,12 @@ public class Status {
mHasErrors = true;
}
- public void error(String message) {
+ public void error(String message, Object... args) {
System.err.print(ERROR);
- System.err.println(message);
+ System.err.println(String.format(Locale.US, message, args));
mHasErrors = true;
}
- public void greylistEntry(String signature) {
- System.out.println(signature);
- }
-
public boolean ok() {
return !mHasErrors;
}
diff --git a/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
new file mode 100644
index 0000000000..ad5ad705b4
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
@@ -0,0 +1,18 @@
+package com.android.class2greylist;
+
+public class SystemOutGreylistConsumer implements GreylistConsumer {
+ @Override
+ public void greylistEntry(String signature, Integer maxTargetSdk) {
+ System.out.println(signature);
+ }
+
+ @Override
+ public void whitelistEntry(String signature) {
+ // Ignore. This class is only used when no grey/white lists are
+ // specified, so we have nowhere to write whitelist entries.
+ }
+
+ @Override
+ public void close() {
+ }
+}
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/AnnotationHandlerTestBase.java b/tools/class2greylist/test/src/com/android/class2greylist/AnnotationHandlerTestBase.java
new file mode 100644
index 0000000000..8f4a76f765
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/AnnotationHandlerTestBase.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.withSettings;
+
+import com.android.javac.Javac;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+
+import java.io.IOException;
+
+public class AnnotationHandlerTestBase {
+
+ @Rule
+ public TestName mTestName = new TestName();
+
+ protected Javac mJavac;
+ protected GreylistConsumer mConsumer;
+ protected Status mStatus;
+
+ @Before
+ public void baseSetup() throws IOException {
+ System.out.println(String.format("\n============== STARTING TEST: %s ==============\n",
+ mTestName.getMethodName()));
+ mConsumer = mock(GreylistConsumer.class);
+ mStatus = mock(Status.class, withSettings().verboseLogging());
+ mJavac = new Javac();
+ }
+
+ protected void assertNoErrors() {
+ verify(mStatus, never()).error(any(Throwable.class));
+ verify(mStatus, never()).error(any(), any());
+ }
+}
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java b/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java
new file mode 100644
index 0000000000..cb75dd30ed
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.MockitoAnnotations.initMocks;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.mockito.Mock;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class Class2GreylistTest {
+
+ @Mock
+ Status mStatus;
+ @Rule
+ public TestName mTestName = new TestName();
+
+ @Before
+ public void setup() throws IOException {
+ System.out.println(String.format("\n============== STARTING TEST: %s ==============\n",
+ mTestName.getMethodName()));
+ initMocks(this);
+ }
+
+ @Test
+ public void testReadGreylistMap() throws IOException {
+ Map<Integer, String> map = Class2Greylist.readGreylistMap(mStatus,
+ new String[]{"noApi", "1:apiOne", "3:apiThree"});
+ verifyZeroInteractions(mStatus);
+ assertThat(map).containsExactly(null, "noApi", 1, "apiOne", 3, "apiThree");
+ }
+
+ @Test
+ public void testReadGreylistMapDuplicate() throws IOException {
+ Class2Greylist.readGreylistMap(mStatus,
+ new String[]{"noApi", "1:apiOne", "1:anotherOne"});
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+
+ @Test
+ public void testReadGreylistMapDuplicateNoApi() {
+ Class2Greylist.readGreylistMap(mStatus,
+ new String[]{"noApi", "anotherNoApi", "1:apiOne"});
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+
+ @Test
+ public void testReadGreylistMapInvalidInt() throws IOException {
+ Class2Greylist.readGreylistMap(mStatus, new String[]{"noApi", "a:apiOne"});
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+
+ @Test
+ public void testReadGreylistMapNoFilename() throws IOException {
+ Class2Greylist.readGreylistMap(mStatus, new String[]{"noApi", "1:"});
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+}
+
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java
new file mode 100644
index 0000000000..10fae9b11c
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import static java.util.Collections.emptySet;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class CovariantReturnTypeHandlerTest extends AnnotationHandlerTestBase {
+
+ private static final String ANNOTATION = "Lannotation/Annotation;";
+
+ @Before
+ public void setup() throws IOException {
+ // To keep the test simpler and more concise, we don't use the real
+ // @CovariantReturnType annotation here, but use our own @Annotation.
+ // It doesn't have to match the real annotation, just have the same
+ // property (returnType).
+ mJavac.addSource("annotation.Annotation", Joiner.on('\n').join(
+ "package annotation;",
+ "import static java.lang.annotation.RetentionPolicy.CLASS;",
+ "import java.lang.annotation.Retention;",
+ "@Retention(CLASS)",
+ "public @interface Annotation {",
+ " Class<?> returnType();",
+ "}"));
+ }
+
+ @Test
+ public void testReturnTypeWhitelisted() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Annotation;",
+ "public class Class {",
+ " @Annotation(returnType=Integer.class)",
+ " public String method() {return null;}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION,
+ new CovariantReturnTypeHandler(
+ mConsumer,
+ ImmutableSet.of("La/b/Class;->method()Ljava/lang/String;")));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+ assertNoErrors();
+ verify(mConsumer, times(1)).whitelistEntry(eq("La/b/Class;->method()Ljava/lang/Integer;"));
+ }
+
+ @Test
+ public void testAnnotatedMemberNotPublicApi() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Annotation;",
+ "public class Class {",
+ " @Annotation(returnType=Integer.class)",
+ " public String method() {return null;}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION,
+ new CovariantReturnTypeHandler(
+ mConsumer,
+ emptySet()));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+
+ @Test
+ public void testReturnTypeAlreadyWhitelisted() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Annotation;",
+ "public class Class {",
+ " @Annotation(returnType=Integer.class)",
+ " public String method() {return null;}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION,
+ new CovariantReturnTypeHandler(
+ mConsumer,
+ ImmutableSet.of(
+ "La/b/Class;->method()Ljava/lang/String;",
+ "La/b/Class;->method()Ljava/lang/Integer;"
+ )));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+
+ @Test
+ public void testAnnotationOnField() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Annotation;",
+ "public class Class {",
+ " @Annotation(returnType=Integer.class)",
+ " public String field;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION,
+ new CovariantReturnTypeHandler(
+ mConsumer,
+ emptySet()));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+}
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java
new file mode 100644
index 0000000000..7f4ce62002
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import static java.util.Collections.emptySet;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class CovariantReturnTypeMultiHandlerTest extends AnnotationHandlerTestBase {
+
+
+ @Before
+ public void setup() throws IOException {
+ // To keep the test simpler and more concise, we don't use the real
+ // @CovariantReturnType annotation here, but use our own @Annotation
+ // and @Annotation.Multi that have the same semantics. It doesn't have
+ // to match the real annotation, just have the same properties
+ // (returnType and value).
+ mJavac.addSource("annotation.Annotation", Joiner.on('\n').join(
+ "package annotation;",
+ "import static java.lang.annotation.RetentionPolicy.CLASS;",
+ "import java.lang.annotation.Repeatable;",
+ "import java.lang.annotation.Retention;",
+ "@Repeatable(Annotation.Multi.class)",
+ "@Retention(CLASS)",
+ "public @interface Annotation {",
+ " Class<?> returnType();",
+ " @Retention(CLASS)",
+ " @interface Multi {",
+ " Annotation[] value();",
+ " }",
+ "}"));
+ }
+
+ @Test
+ public void testReturnTypeMulti() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Annotation;",
+ "public class Class {",
+ " @Annotation(returnType=Integer.class)",
+ " @Annotation(returnType=Long.class)",
+ " public String method() {return null;}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of("Lannotation/Annotation$Multi;",
+ new CovariantReturnTypeMultiHandler(
+ mConsumer,
+ ImmutableSet.of("La/b/Class;->method()Ljava/lang/String;"),
+ "Lannotation/Annotation;"));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+ assertNoErrors();
+ ArgumentCaptor<String> whitelist = ArgumentCaptor.forClass(String.class);
+ verify(mConsumer, times(2)).whitelistEntry(whitelist.capture());
+ assertThat(whitelist.getAllValues()).containsExactly(
+ "La/b/Class;->method()Ljava/lang/Integer;",
+ "La/b/Class;->method()Ljava/lang/Long;"
+ );
+ }
+
+ @Test
+ public void testReturnTypeMultiNotPublicApi() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Annotation;",
+ "public class Class {",
+ " @Annotation(returnType=Integer.class)",
+ " @Annotation(returnType=Long.class)",
+ " public String method() {return null;}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of("Lannotation/Annotation$Multi;",
+ new CovariantReturnTypeMultiHandler(
+ mConsumer,
+ emptySet(),
+ "Lannotation/Annotation;"));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+ verify(mStatus, atLeastOnce()).error(any(), any());
+ }
+}
diff --git a/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java b/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java
index 20c959db12..1a4bfb8283 100644
--- a/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java
@@ -14,62 +14,53 @@
* limitations under the License.
*/
-package com.android.javac;
+package com.android.class2greylist;
import static com.google.common.truth.Truth.assertThat;
import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.withSettings;
-import com.android.class2greylist.AnnotationVisitor;
-import com.android.class2greylist.Status;
+import static java.util.Collections.emptySet;
import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import org.junit.Before;
-import org.junit.Rule;
import org.junit.Test;
-import org.junit.rules.TestName;
import org.mockito.ArgumentCaptor;
import java.io.IOException;
+import java.util.Map;
import java.util.Set;
+import java.util.function.Predicate;
-public class AnnotationVisitorTest {
+public class GreylistAnnotationHandlerTest extends AnnotationHandlerTestBase {
private static final String ANNOTATION = "Lannotation/Anno;";
- @Rule
- public TestName mTestName = new TestName();
-
- private Javac mJavac;
- private Status mStatus;
-
@Before
public void setup() throws IOException {
- System.out.println(String.format("\n============== STARTING TEST: %s ==============\n",
- mTestName.getMethodName()));
- mStatus = mock(Status.class, withSettings().verboseLogging());
- mJavac = new Javac();
mJavac.addSource("annotation.Anno", Joiner.on('\n').join(
"package annotation;",
- "import static java.lang.annotation.RetentionPolicy.CLASS;",
- "import java.lang.annotation.Retention;",
- "import java.lang.annotation.Target;",
+ "import static java.lang.annotation.RetentionPolicy.CLASS;",
+ "import java.lang.annotation.Retention;",
"@Retention(CLASS)",
"public @interface Anno {",
" String expectedSignature() default \"\";",
+ " int maxTargetSdk() default Integer.MAX_VALUE;",
"}"));
}
- private void assertNoErrors() {
- verify(mStatus, never()).error(any(Throwable.class));
- verify(mStatus, never()).error(any(String.class));
+ private GreylistAnnotationHandler createGreylistHandler(
+ Predicate<GreylistAnnotationHandler.GreylistMember> greylistFilter,
+ Set<Integer> validMaxTargetSdkValues) {
+ return new GreylistAnnotationHandler(
+ mStatus, mConsumer, greylistFilter, validMaxTargetSdkValues);
}
@Test
@@ -83,12 +74,13 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
+ ).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
}
@@ -103,12 +95,13 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
+ ).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;-><init>()V");
}
@@ -123,12 +116,13 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
+ ).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->i:I");
}
@@ -143,12 +137,13 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
+ ).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
}
@@ -163,10 +158,11 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
+ ).visit();
- verify(mStatus, times(1)).error(any(String.class));
+ verify(mStatus, times(1)).error(any(), any());
}
@Test
@@ -182,12 +178,13 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class$Inner"), ANNOTATION, x -> true,
- mStatus).visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class$Inner"), mStatus,
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
+ ).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class$Inner;->method()V");
}
@@ -200,11 +197,12 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
+ ).visit();
assertNoErrors();
- verify(mStatus, never()).greylistEntry(any(String.class));
+ verify(mConsumer, never()).greylistEntry(any(String.class), any());
}
@Test
@@ -218,12 +216,13 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()))
+ ).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
}
@@ -245,15 +244,15 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true, mStatus)
- .visit();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mStatus, times(2)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Class;->method(Ljava/lang/String;)V");
@@ -277,15 +276,15 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true, mStatus)
- .visit();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
- .visit();
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mStatus, times(2)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Class;->method(Ljava/lang/String;)V");
@@ -313,17 +312,17 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(
- mJavac.getCompiledClass("a.b.Interface"), ANNOTATION, x -> true, mStatus).visit();
- new AnnotationVisitor(
- mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true, mStatus).visit();
- new AnnotationVisitor(
- mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus).visit();
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Interface"), mStatus, handlerMap)
+ .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mStatus, times(2)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Base;->method(Ljava/lang/Object;)V");
@@ -350,15 +349,20 @@ public class AnnotationVisitorTest {
Set<String> publicApis = Sets.newHashSet(
"La/b/Base;->method(Ljava/lang/Object;)V",
"La/b/Class;->method(Ljava/lang/Object;)V");
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, publicApis,
- mStatus).visit();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, publicApis,
- mStatus).visit();
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION,
+ new GreylistAnnotationHandler(
+ mStatus,
+ mConsumer,
+ publicApis,
+ emptySet()));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// The bridge method generated for the above is a public API, so it should be excluded
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
}
@@ -373,12 +377,14 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION,
- member -> !member.bridge, // exclude bridge methods
- mStatus).visit();
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(
+ member -> !member.bridge, // exclude bridge methods
+ emptySet()));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->field:I");
}
@@ -393,8 +399,73 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION,
- x -> true, mStatus).visit();
- verify(mStatus, times(1)).error(any(String.class));
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, emptySet()));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+ verify(mStatus, times(1)).error(any(), any());
+ }
+
+ @Test
+ public void testMethodMaxTargetSdk() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno(maxTargetSdk=1)",
+ " public int field;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(
+ x -> true,
+ ImmutableSet.of(1)));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+ assertNoErrors();
+ ArgumentCaptor<Integer> maxTargetSdk = ArgumentCaptor.forClass(Integer.class);
+ verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture());
+ assertThat(maxTargetSdk.getValue()).isEqualTo(1);
+ }
+
+ @Test
+ public void testMethodNoMaxTargetSdk() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno",
+ " public int field;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(
+ x -> true,
+ ImmutableSet.of(1)));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+ assertNoErrors();
+ ArgumentCaptor<Integer> maxTargetSdk = ArgumentCaptor.forClass(Integer.class);
+ verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture());
+ assertThat(maxTargetSdk.getValue()).isEqualTo(null);
+ }
+
+ @Test
+ public void testMethodMaxTargetSdkOutOfRange() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno(maxTargetSdk=2)",
+ " public int field;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ Map<String, AnnotationHandler> handlerMap =
+ ImmutableMap.of(ANNOTATION, createGreylistHandler(
+ x -> true,
+ ImmutableSet.of(1)));
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+ verify(mStatus, times(1)).error(any(), any());
}
+
}
diff --git a/tools/dexanalyze/Android.bp b/tools/dexanalyze/Android.bp
index 9515ca5c50..a85bf562af 100644
--- a/tools/dexanalyze/Android.bp
+++ b/tools/dexanalyze/Android.bp
@@ -22,6 +22,7 @@ cc_defaults {
"dexanalyze.cc",
"dexanalyze_bytecode.cc",
"dexanalyze_experiments.cc",
+ "dexanalyze_strings.cc",
],
target: {
android: {
diff --git a/tools/dexanalyze/dexanalyze.cc b/tools/dexanalyze/dexanalyze.cc
index 841719b821..040f41ba5d 100644
--- a/tools/dexanalyze/dexanalyze.cc
+++ b/tools/dexanalyze/dexanalyze.cc
@@ -23,6 +23,7 @@
#include "dexanalyze_bytecode.h"
#include "dexanalyze_experiments.h"
+#include "dexanalyze_strings.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
diff --git a/tools/dexanalyze/dexanalyze_bytecode.cc b/tools/dexanalyze/dexanalyze_bytecode.cc
index 1c5a5d548b..659a940e8b 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.cc
+++ b/tools/dexanalyze/dexanalyze_bytecode.cc
@@ -50,6 +50,53 @@ static inline SafeMap<T, U> SortByOrder(const SafeMap<T, U>& usage, Order order)
return ret;
}
+template <typename A, typename B>
+std::ostream& operator <<(std::ostream& os, const std::pair<A, B>& pair) {
+ return os << "{" << pair.first << ", " << pair.second << "}";
+}
+
+template <typename T, typename... Args, template <typename...> class ArrayType>
+SafeMap<size_t, T> MakeUsageMap(const ArrayType<T, Args...>& array) {
+ SafeMap<size_t, T> ret;
+ for (size_t i = 0; i < array.size(); ++i) {
+ if (array[i] > 0) {
+ ret.Put(i, array[i]);
+ }
+ }
+ return ret;
+}
+
+template <typename T, typename U, typename... Args, template <typename...> class Map>
+void PrintMostUsed(std::ostream& os,
+ const Map<T, U, Args...>& usage,
+ size_t max_count,
+ std::function<void(std::ostream& os, T)> printer =
+ [](std::ostream& os, T v) {
+ os << v;
+ }) {
+ std::vector<std::pair<U, T>> sorted;
+ uint64_t total = 0u;
+ for (const auto& pair : usage) {
+ sorted.emplace_back(pair.second, pair.first);
+ total += pair.second;
+ }
+ std::sort(sorted.rbegin(), sorted.rend());
+ uint64_t other = 0u;
+ for (auto&& pair : sorted) {
+ if (max_count > 0) {
+ os << Percent(pair.first, total) << " : ";
+ printer(os, pair.second);
+ os << "\n";
+ --max_count;
+ } else {
+ other += pair.first;
+ }
+ }
+ if (other != 0u) {
+ os << "other: " << Percent(other, total) << "\n";
+ }
+}
+
static inline std::ostream& operator<<(std::ostream& os, const std::vector<uint8_t>& bytes) {
os << std::hex;
for (const uint8_t& c : bytes) {
@@ -125,32 +172,42 @@ void NewRegisterInstructions::Dump(std::ostream& os, uint64_t total_size) const
std::vector<std::pair<size_t, std::vector<uint8_t>>> pairs;
for (auto&& pair : instruction_freq_) {
if (pair.second > 0 && !pair.first.empty()) {
- // Savings exclude one byte per occurrence and one occurence from having the macro
+ // Savings exclude one byte per occurrence and one occurrence from having the macro
// dictionary.
pairs.emplace_back((pair.second - 1) * (pair.first.size() - 1), pair.first);
}
}
std::sort(pairs.rbegin(), pairs.rend());
static constexpr size_t kMaxMacros = 128;
+ static constexpr size_t kMaxPrintedMacros = 32;
uint64_t top_instructions_savings = 0u;
for (size_t i = 0; i < kMaxMacros && i < pairs.size(); ++i) {
top_instructions_savings += pairs[i].first;
}
if (verbose_level_ >= VerboseLevel::kNormal) {
+ os << "Move result register distribution" << "\n";
+ PrintMostUsed(os, MakeUsageMap(move_result_reg_), 16);
+ os << "First arg register usage\n";
+ std::function<void(std::ostream& os, size_t)> printer = [&](std::ostream& os, size_t idx) {
+ os << Instruction::Name(static_cast<Instruction::Code>(idx));
+ };
+ PrintMostUsed(os, MakeUsageMap(first_arg_reg_count_), 16, printer);
+ os << "Most used field linkage pairs\n";
+ PrintMostUsed(os, field_linkage_counts_, 32);
+ os << "Current extended " << extended_field_ << "\n";
+ os << "Most used method linkage pairs\n";
+ PrintMostUsed(os, method_linkage_counts_, 32);
+ os << "Current extended " << extended_method_ << "\n";
os << "Top " << kMaxMacros << " instruction bytecode sizes and hex dump" << "\n";
for (size_t i = 0; i < kMaxMacros && i < pairs.size(); ++i) {
auto bytes = pairs[i].second;
// Remove opcode bytes.
bytes.erase(bytes.begin());
- os << Percent(pairs[i].first, total_size) << " "
- << Instruction::Name(static_cast<Instruction::Code>(pairs[i].second[0]))
- << "(" << bytes << ")\n";
- }
- os << "Move result register distribution" << "\n";
- const size_t move_result_total =
- std::accumulate(move_result_reg_.begin(), move_result_reg_.end(), 0u);
- for (size_t i = 0; i < move_result_reg_.size(); ++i) {
- os << i << ": " << Percent(move_result_reg_[i], move_result_total) << "\n";
+ if (i < kMaxPrintedMacros) {
+ os << Percent(pairs[i].first, total_size) << " "
+ << Instruction::Name(static_cast<Instruction::Code>(pairs[i].second[0]))
+ << "(" << bytes << ")\n";
+ }
}
}
os << "Top instructions 1b macro savings "
@@ -164,16 +221,7 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
std::map<size_t, TypeLinkage>& types) {
TypeLinkage& current_type = types[current_class_type.index_];
bool skip_next = false;
- size_t last_start = 0u;
- for (auto inst = code_item.begin(); ; ++inst) {
- if (!count_types && last_start != buffer_.size()) {
- // Register the instruction blob.
- ++instruction_freq_[std::vector<uint8_t>(buffer_.begin() + last_start, buffer_.end())];
- last_start = buffer_.size();
- }
- if (inst == code_item.end()) {
- break;
- }
+ for (auto inst = code_item.begin(); inst != code_item.end(); ++inst) {
if (verbose_level_ >= VerboseLevel::kEverything) {
std::cout << std::endl;
std::cout << inst->DumpString(nullptr);
@@ -188,6 +236,7 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
bool is_iget = false;
const Instruction::Code opcode = inst->Opcode();
Instruction::Code new_opcode = opcode;
+ ++opcode_count_[opcode];
switch (opcode) {
case Instruction::IGET:
case Instruction::IGET_WIDE:
@@ -294,7 +343,8 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
} else {
uint32_t type_idx = current_type.types_.Get(holder_type.index_);
uint32_t field_idx = types[holder_type.index_].fields_.Get(dex_field_idx);
- ExtendPrefix(&type_idx, &field_idx);
+ ++field_linkage_counts_[std::make_pair(type_idx, field_idx)];
+ extended_field_ += ExtendPrefix(&type_idx, &field_idx) ? 1u : 0u;
if (InstNibbles(new_opcode, {out_reg >> 4, out_reg & 0xF, type_idx, field_idx})) {
continue;
}
@@ -319,6 +369,7 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
} else {
uint32_t args[6] = {};
uint32_t arg_count = inst->GetVarArgs(args);
+ const uint32_t first_arg_reg = code_item.RegistersSize() - code_item.InsSize();
bool next_move_result = false;
uint32_t dest_reg = 0;
@@ -334,31 +385,38 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
}
}
- bool result = false;
uint32_t type_idx = current_type.types_.Get(receiver_type.index_);
uint32_t local_idx = types[receiver_type.index_].methods_.Get(method_idx);
- ExtendPrefix(&type_idx, &local_idx);
- ExtendPrefix(&dest_reg, &local_idx);
- if (arg_count == 0) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx});
- } else if (arg_count == 1) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0]});
- } else if (arg_count == 2) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
- args[1]});
- } else if (arg_count == 3) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
- args[1], args[2]});
- } else if (arg_count == 4) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
- args[1], args[2], args[3]});
- } else if (arg_count == 5) {
- result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
- args[1], args[2], args[3], args[4]});
- }
+ ++method_linkage_counts_[std::make_pair(type_idx, local_idx)];
+
+ // If true, we always put the return value in r0.
+ static constexpr bool kMoveToDestReg = true;
- if (result) {
+ std::vector<uint32_t> new_args;
+ if (kMoveToDestReg && arg_count % 2 == 1) {
+ // Use the extra nibble to sneak in part of the method index.
+ new_args.push_back(local_idx >> 4);
+ local_idx &= ~0xF0;
+ }
+ extended_method_ += ExtendPrefix(&type_idx, &local_idx) ? 1u : 0u;
+ new_args.push_back(type_idx);
+ new_args.push_back(local_idx);
+ if (!kMoveToDestReg) {
+ ExtendPrefix(&dest_reg, &local_idx);
+ new_args.push_back(dest_reg);
+ }
+ for (size_t i = 0; i < arg_count; ++i) {
+ if (args[i] == first_arg_reg) {
+ ++first_arg_reg_count_[opcode];
+ break;
+ }
+ }
+ new_args.insert(new_args.end(), args, args + arg_count);
+ if (InstNibbles(opcode, new_args)) {
skip_next = next_move_result;
+ if (kMoveToDestReg && dest_reg != 0u) {
+ CHECK(InstNibbles(Instruction::MOVE, {dest_reg >> 4, dest_reg & 0xF}));
+ }
continue;
}
}
@@ -466,22 +524,25 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
void NewRegisterInstructions::Add(Instruction::Code opcode, const Instruction& inst) {
const uint8_t* start = reinterpret_cast<const uint8_t*>(&inst);
+ const size_t buffer_start = buffer_.size();
buffer_.push_back(opcode);
buffer_.insert(buffer_.end(), start + 1, start + 2 * inst.SizeInCodeUnits());
+ // Register the instruction blob.
+ ++instruction_freq_[std::vector<uint8_t>(buffer_.begin() + buffer_start, buffer_.end())];
}
-void NewRegisterInstructions::ExtendPrefix(uint32_t* value1, uint32_t* value2) {
+bool NewRegisterInstructions::ExtendPrefix(uint32_t* value1, uint32_t* value2) {
if (*value1 < 16 && *value2 < 16) {
- return;
+ return false;
}
if ((*value1 >> 4) == 1 && *value2 < 16) {
InstNibbles(0xE5, {});
*value1 ^= 1u << 4;
- return;
+ return true;
} else if ((*value2 >> 4) == 1 && *value1 < 16) {
InstNibbles(0xE6, {});
*value2 ^= 1u << 4;
- return;
+ return true;
}
if (*value1 < 256 && *value2 < 256) {
// Extend each value by 4 bits.
@@ -498,16 +559,6 @@ void NewRegisterInstructions::ExtendPrefix(uint32_t* value1, uint32_t* value2) {
}
*value1 &= 0xF;
*value2 &= 0xF;
-}
-
-bool NewRegisterInstructions::InstNibblesAndIndex(uint8_t opcode,
- uint16_t idx,
- const std::vector<uint32_t>& args) {
- if (!InstNibbles(opcode, args)) {
- return false;
- }
- buffer_.push_back(static_cast<uint8_t>(idx >> 8));
- buffer_.push_back(static_cast<uint8_t>(idx));
return true;
}
@@ -526,6 +577,7 @@ bool NewRegisterInstructions::InstNibbles(uint8_t opcode, const std::vector<uint
return false;
}
}
+ const size_t buffer_start = buffer_.size();
buffer_.push_back(opcode);
for (size_t i = 0; i < args.size(); i += 2) {
buffer_.push_back(args[i] << 4);
@@ -536,6 +588,8 @@ bool NewRegisterInstructions::InstNibbles(uint8_t opcode, const std::vector<uint
while (buffer_.size() % alignment_ != 0) {
buffer_.push_back(0);
}
+ // Register the instruction blob.
+ ++instruction_freq_[std::vector<uint8_t>(buffer_.begin() + buffer_start, buffer_.end())];
return true;
}
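
The PrintMostUsed helper added to dexanalyze_bytecode.cc above sorts a usage map by count and reports the top entries as percentages, folding the tail into an "other" bucket. A minimal standalone sketch of the same idea; std::map and the Percent() stand-in are assumptions here, not the ART SafeMap and helpers:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Stand-in for ART's Percent() helper.
std::string Percent(uint64_t value, uint64_t total) {
  return std::to_string(value * 100.0 / total) + "%";
}

void PrintMostUsed(std::ostream& os,
                   const std::map<std::string, uint64_t>& usage,
                   size_t max_count) {
  std::vector<std::pair<uint64_t, std::string>> sorted;
  uint64_t total = 0u;
  for (const auto& pair : usage) {
    sorted.emplace_back(pair.second, pair.first);
    total += pair.second;
  }
  std::sort(sorted.rbegin(), sorted.rend());  // Highest count first.
  uint64_t other = 0u;
  for (const auto& pair : sorted) {
    if (max_count > 0) {
      os << Percent(pair.first, total) << " : " << pair.second << "\n";
      --max_count;
    } else {
      other += pair.first;  // Fold the tail into one bucket.
    }
  }
  if (other != 0u) {
    os << "other: " << Percent(other, total) << "\n";
  }
}

int main() {
  PrintMostUsed(std::cout, {{"iget", 40}, {"invoke-virtual", 35}, {"move", 25}}, 2);
}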
diff --git a/tools/dexanalyze/dexanalyze_bytecode.h b/tools/dexanalyze/dexanalyze_bytecode.h
index ed40ba7d9b..015801f516 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.h
+++ b/tools/dexanalyze/dexanalyze_bytecode.h
@@ -54,7 +54,11 @@ struct TypeLinkage {
class NewRegisterInstructions : public Experiment {
public:
- explicit NewRegisterInstructions(uint64_t experiments) : experiments_(experiments) {}
+ explicit NewRegisterInstructions(uint64_t experiments)
+ : experiments_(experiments),
+ move_result_reg_(256),
+ first_arg_reg_count_(256),
+ opcode_count_(256) {}
void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files);
void Dump(std::ostream& os, uint64_t total_size) const;
@@ -64,9 +68,8 @@ class NewRegisterInstructions : public Experiment {
bool count_types,
std::map<size_t, TypeLinkage>& types);
void Add(Instruction::Code opcode, const Instruction& inst);
- bool InstNibblesAndIndex(uint8_t opcode, uint16_t idx, const std::vector<uint32_t>& args);
bool InstNibbles(uint8_t opcode, const std::vector<uint32_t>& args);
- void ExtendPrefix(uint32_t* value1, uint32_t* value2);
+ bool ExtendPrefix(uint32_t* value1, uint32_t* value2);
bool Enabled(BytecodeExperiment experiment) const {
return experiments_ & (1u << static_cast<uint64_t>(experiment));
}
@@ -77,7 +80,13 @@ class NewRegisterInstructions : public Experiment {
uint64_t deduped_size_ = 0u;
uint64_t dex_code_bytes_ = 0u;
uint64_t experiments_ = std::numeric_limits<uint64_t>::max();
- std::array<size_t, 256> move_result_reg_;
+ uint64_t extended_field_ = 0u;
+ uint64_t extended_method_ = 0u;
+ std::vector<size_t> move_result_reg_;
+ std::vector<size_t> first_arg_reg_count_;
+ std::vector<size_t> opcode_count_;
+ std::map<std::pair<uint32_t, uint32_t>, size_t> method_linkage_counts_;
+ std::map<std::pair<uint32_t, uint32_t>, size_t> field_linkage_counts_;
std::map<std::vector<uint8_t>, size_t> instruction_freq_;
// Output instruction buffer.
std::vector<uint8_t> buffer_;
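
ExtendPrefix now returns bool so callers can count how often an escape prefix was needed (the new extended_field_ and extended_method_ counters above). A compilable sketch of just the one-bit escape path, mirroring the 0xE5/0xE6 opcodes from the diff; the 8-bit extension path and the real buffer plumbing are elided:

#include <cstdint>
#include <iostream>
#include <vector>

std::vector<uint8_t> out;  // Stands in for the instruction buffer.

void EmitEscape(uint8_t opcode) { out.push_back(opcode); }

// Returns whether an escape prefix was emitted (the new bool return above).
bool ExtendPrefix(uint32_t* value1, uint32_t* value2) {
  if (*value1 < 16 && *value2 < 16) {
    return false;  // Both operands fit in a nibble; no prefix needed.
  }
  if ((*value1 >> 4) == 1 && *value2 < 16) {
    EmitEscape(0xE5);    // "High bit set on operand 1", as in the diff.
    *value1 ^= 1u << 4;  // Strip the bit the escape now carries.
    return true;
  }
  if ((*value2 >> 4) == 1 && *value1 < 16) {
    EmitEscape(0xE6);    // Symmetric escape for operand 2.
    *value2 ^= 1u << 4;
    return true;
  }
  // Larger values take the 8-bit extension path (elided here).
  return true;
}

int main() {
  uint32_t a = 19, b = 3;  // 19 needs one escape; 3 fits in a nibble.
  bool extended = ExtendPrefix(&a, &b);
  std::cout << extended << " a=" << a << " b=" << b
            << " escapes=" << out.size() << "\n";  // Prints: 1 a=3 b=3 escapes=1
}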
diff --git a/tools/dexanalyze/dexanalyze_experiments.cc b/tools/dexanalyze/dexanalyze_experiments.cc
index 1f6fe4694e..b124f433b3 100644
--- a/tools/dexanalyze/dexanalyze_experiments.cc
+++ b/tools/dexanalyze/dexanalyze_experiments.cc
@@ -208,137 +208,6 @@ void AnalyzeDebugInfo::Dump(std::ostream& os, uint64_t total_size) const {
<< Percent(total_unique_non_header_bytes_, total_size) << "\n";
}
-void AnalyzeStrings::ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
- std::set<std::string> unique_strings;
- for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
- for (size_t i = 0; i < dex_file->NumStringIds(); ++i) {
- uint32_t length = 0;
- const char* data = dex_file->StringDataAndUtf16LengthByIdx(dex::StringIndex(i), &length);
- // Analyze if the string has any UTF16 chars.
- bool have_wide_char = false;
- const char* ptr = data;
- for (size_t j = 0; j < length; ++j) {
- have_wide_char = have_wide_char || GetUtf16FromUtf8(&ptr) >= 0x100;
- }
- if (have_wide_char) {
- wide_string_bytes_ += 2 * length;
- } else {
- ascii_string_bytes_ += length;
- }
- string_data_bytes_ += ptr - data;
- unique_strings.insert(data);
- }
- }
- // Unique strings only since we want to exclude savings from multidex duplication.
- std::vector<std::string> strings(unique_strings.begin(), unique_strings.end());
- unique_strings.clear();
-
- // Tunable parameters.
- static const size_t kMinPrefixLen = 1;
- static const size_t kMaxPrefixLen = 255;
- static const size_t kPrefixConstantCost = 4;
- static const size_t kPrefixIndexCost = 2;
-
- // Calculate total shared prefix.
- std::vector<size_t> shared_len;
- prefixes_.clear();
- for (size_t i = 0; i < strings.size(); ++i) {
- size_t best_len = 0;
- if (i > 0) {
- best_len = std::max(best_len, PrefixLen(strings[i], strings[i - 1]));
- }
- if (i < strings.size() - 1) {
- best_len = std::max(best_len, PrefixLen(strings[i], strings[i + 1]));
- }
- best_len = std::min(best_len, kMaxPrefixLen);
- std::string prefix;
- if (best_len >= kMinPrefixLen) {
- prefix = strings[i].substr(0, best_len);
- ++prefixes_[prefix];
- }
- total_prefix_index_cost_ += kPrefixIndexCost;
- }
- // Optimize the result by moving long prefixes to shorter ones if it causes savings.
- while (true) {
- bool have_savings = false;
- auto it = prefixes_.begin();
- std::vector<std::string> longest;
- for (const auto& pair : prefixes_) {
- longest.push_back(pair.first);
- }
- std::sort(longest.begin(), longest.end(), [](const std::string& a, const std::string& b) {
- return a.length() > b.length();
- });
- // Do longest first since this provides the best results.
- for (const std::string& s : longest) {
- it = prefixes_.find(s);
- CHECK(it != prefixes_.end());
- const std::string& prefix = it->first;
- int64_t best_savings = 0u;
- int64_t best_len = -1;
- for (int64_t len = prefix.length() - 1; len >= 0; --len) {
- auto found = prefixes_.find(prefix.substr(0, len));
- if (len != 0 && found == prefixes_.end()) {
- continue;
- }
- // Calculate savings from downgrading the prefix.
- int64_t savings = kPrefixConstantCost + prefix.length() -
- (prefix.length() - len) * it->second;
- if (savings > best_savings) {
- best_savings = savings;
- best_len = len;
- break;
- }
- }
- if (best_len != -1) {
- prefixes_[prefix.substr(0, best_len)] += it->second;
- it = prefixes_.erase(it);
- optimization_savings_ += best_savings;
- have_savings = true;
- } else {
- ++it;
- }
- }
- if (!have_savings) {
- break;
- }
- }
- total_num_prefixes_ += prefixes_.size();
- for (const auto& pair : prefixes_) {
- // 4 bytes for an offset, one for length.
- total_prefix_dict_ += pair.first.length();
- total_prefix_table_ += kPrefixConstantCost;
- total_prefix_savings_ += pair.first.length() * pair.second;
- }
-}
-
-void AnalyzeStrings::Dump(std::ostream& os, uint64_t total_size) const {
- os << "Total string data bytes " << Percent(string_data_bytes_, total_size) << "\n";
- os << "UTF-16 string data bytes " << Percent(wide_string_bytes_, total_size) << "\n";
- os << "ASCII string data bytes " << Percent(ascii_string_bytes_, total_size) << "\n";
-
- // Prefix based strings.
- os << "Total shared prefix bytes " << Percent(total_prefix_savings_, total_size) << "\n";
- os << "Prefix dictionary cost " << Percent(total_prefix_dict_, total_size) << "\n";
- os << "Prefix table cost " << Percent(total_prefix_table_, total_size) << "\n";
- os << "Prefix index cost " << Percent(total_prefix_index_cost_, total_size) << "\n";
- int64_t net_savings = total_prefix_savings_;
- net_savings -= total_prefix_dict_;
- net_savings -= total_prefix_table_;
- net_savings -= total_prefix_index_cost_;
- os << "Prefix dictionary elements " << total_num_prefixes_ << "\n";
- os << "Optimization savings " << Percent(optimization_savings_, total_size) << "\n";
- os << "Prefix net savings " << Percent(net_savings, total_size) << "\n";
- if (verbose_level_ >= VerboseLevel::kEverything) {
- std::vector<std::pair<std::string, size_t>> pairs(prefixes_.begin(), prefixes_.end());
- // Sort lexicographically.
- std::sort(pairs.begin(), pairs.end());
- for (const auto& pair : pairs) {
- os << pair.first << " : " << pair.second << "\n";
- }
- }
-}
-
void CountDexIndices::ProcessDexFiles(
const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
std::set<std::string> unique_field_names;
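
AnalyzeStrings moves from this file to dexanalyze_strings.cc (below). Its prefix scan compares each string only with its two neighbors, which suffices because the input is sorted: the longest prefix a string shares with any other string is always realized at an adjacent entry. A self-contained check of that property, with PrefixLen re-implemented as a stand-in for the ART helper of the same name:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Stand-in for ART's PrefixLen() helper.
size_t PrefixLen(const std::string& a, const std::string& b) {
  size_t len = 0;
  while (len < a.size() && len < b.size() && a[len] == b[len]) {
    ++len;
  }
  return len;
}

int main() {
  std::vector<std::string> strings = {"Ljava/lang/Object;", "Ljava/lang/String;",
                                      "Ljava/util/List;", "La/b/Class;"};
  std::sort(strings.begin(), strings.end());
  for (size_t i = 0; i < strings.size(); ++i) {
    size_t neighbors = 0;
    size_t any = 0;
    if (i > 0) {
      neighbors = std::max(neighbors, PrefixLen(strings[i], strings[i - 1]));
    }
    if (i + 1 < strings.size()) {
      neighbors = std::max(neighbors, PrefixLen(strings[i], strings[i + 1]));
    }
    for (size_t j = 0; j < strings.size(); ++j) {
      if (j != i) {
        any = std::max(any, PrefixLen(strings[i], strings[j]));
      }
    }
    assert(neighbors == any);  // Adjacent entries always realize the maximum.
  }
  return 0;
}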
diff --git a/tools/dexanalyze/dexanalyze_experiments.h b/tools/dexanalyze/dexanalyze_experiments.h
index 4e66b3cf3b..55d2f44e99 100644
--- a/tools/dexanalyze/dexanalyze_experiments.h
+++ b/tools/dexanalyze/dexanalyze_experiments.h
@@ -62,30 +62,11 @@ class Experiment {
VerboseLevel verbose_level_ = VerboseLevel::kNormal;
};
-// Analyze string data and strings accessed from code.
-class AnalyzeStrings : public Experiment {
- public:
- void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
- void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
-
- private:
- int64_t wide_string_bytes_ = 0u;
- int64_t ascii_string_bytes_ = 0u;
- int64_t string_data_bytes_ = 0u;
- int64_t total_prefix_savings_ = 0u;
- int64_t total_prefix_dict_ = 0u;
- int64_t total_prefix_table_ = 0u;
- int64_t total_prefix_index_cost_ = 0u;
- int64_t total_num_prefixes_ = 0u;
- int64_t optimization_savings_ = 0u;
- std::unordered_map<std::string, size_t> prefixes_;
-};
-
// Analyze debug info sizes.
class AnalyzeDebugInfo : public Experiment {
public:
- void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
- void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
+ void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
+ void Dump(std::ostream& os, uint64_t total_size) const override;
private:
int64_t total_bytes_ = 0u;
@@ -110,8 +91,8 @@ class AnalyzeDebugInfo : public Experiment {
// Count numbers of dex indices.
class CountDexIndices : public Experiment {
public:
- void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
- void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
+ void ProcessDexFile(const DexFile& dex_file) override;
+ void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
void Dump(std::ostream& os, uint64_t total_size) const;
@@ -181,9 +162,9 @@ class CountDexIndices : public Experiment {
// Measure various code metrics including args per invoke-virtual, fill/spill move patterns.
class CodeMetrics : public Experiment {
public:
- void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
+ void ProcessDexFile(const DexFile& dex_file) override;
- void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
+ void Dump(std::ostream& os, uint64_t total_size) const override;
private:
static constexpr size_t kMaxArgCount = 6;
diff --git a/tools/dexanalyze/dexanalyze_strings.cc b/tools/dexanalyze/dexanalyze_strings.cc
new file mode 100644
index 0000000000..863e4ee4b3
--- /dev/null
+++ b/tools/dexanalyze/dexanalyze_strings.cc
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dexanalyze_strings.h"
+
+#include <algorithm>
+#include <iomanip>
+#include <iostream>
+#include <queue>
+
+#include "dex/class_accessor-inl.h"
+#include "dex/code_item_accessors-inl.h"
+#include "dex/dex_instruction-inl.h"
+
+namespace art {
+namespace dexanalyze {
+
+// Tunable parameters.
+static const size_t kMinPrefixLen = 1;
+static const size_t kMaxPrefixLen = 255;
+static const size_t kPrefixConstantCost = 4;
+static const size_t kPrefixIndexCost = 2;
+
+// Node value = (distance from root) * (occurrences - 1).
+class MatchTrie {
+ public:
+ void Add(const std::string& str) {
+ MatchTrie* node = this;
+ size_t depth = 0u;
+ for (uint8_t c : str) {
+ ++depth;
+ if (node->nodes_[c] == nullptr) {
+ MatchTrie* new_node = new MatchTrie();
+ node->nodes_[c].reset(new_node);
+ new_node->parent_ = node;
+ new_node->depth_ = depth;
+ new_node->incoming_ = c;
+ node = new_node;
+ } else {
+ node = node->nodes_[c].get();
+ }
+ ++node->count_;
+ }
+ node->is_end_ = true;
+ }
+
+ // Returns the length of the longest prefix and whether it ends at a leaf node.
+ std::pair<size_t, bool> LongestPrefix(const std::string& str) const {
+ const MatchTrie* node = this;
+ const MatchTrie* best_node = this;
+ size_t depth = 0u;
+ size_t best_depth = 0u;
+ for (uint8_t c : str) {
+ if (node->nodes_[c] == nullptr) {
+ break;
+ }
+ node = node->nodes_[c].get();
+ ++depth;
+ if (node->is_end_) {
+ best_depth = depth;
+ best_node = node;
+ }
+ }
+ bool is_leaf = true;
+ for (const std::unique_ptr<MatchTrie>& cur_node : best_node->nodes_) {
+ if (cur_node != nullptr) {
+ is_leaf = false;
+ }
+ }
+ return {best_depth, is_leaf};
+ }
+
+ int32_t Savings() const {
+ int32_t cost = kPrefixConstantCost;
+ int32_t first_used = 0u;
+ if (chosen_suffix_count_ == 0u) {
+ cost += depth_;
+ }
+ uint32_t extra_savings = 0u;
+ for (MatchTrie* cur = parent_; cur != nullptr; cur = cur->parent_) {
+ if (cur->chosen_) {
+ first_used = cur->depth_;
+ if (cur->chosen_suffix_count_ == 0u) {
+ // First suffix for the chosen parent, remove the cost of the dictionary entry.
+ extra_savings += first_used;
+ }
+ break;
+ }
+ }
+ return count_ * (depth_ - first_used) - cost + extra_savings;
+ }
+
+ template <typename T, typename... Args, template <typename...> class Queue>
+ T PopRealTop(Queue<T, Args...>& queue) {
+ auto pair = queue.top();
+ queue.pop();
+ // Keep updating values until one sticks.
+ while (pair.second->Savings() != pair.first) {
+ pair.first = pair.second->Savings();
+ queue.push(pair);
+ pair = queue.top();
+ queue.pop();
+ }
+ return pair;
+ }
+
+ std::vector<std::string> ExtractPrefixes(size_t max) {
+ std::vector<std::string> ret;
+ // Make a priority queue and adaptively update it. Each node's value is the savings from
+ // picking it. Insert all of the interesting nodes into the queue (more than one child, or
+ // the end of a possible prefix).
+ std::priority_queue<std::pair<int32_t, MatchTrie*>> queue;
+ // Add all of the nodes to the queue.
+ std::vector<MatchTrie*> work(1, this);
+ while (!work.empty()) {
+ MatchTrie* elem = work.back();
+ work.pop_back();
+ size_t num_childs = 0u;
+ for (const std::unique_ptr<MatchTrie>& child : elem->nodes_) {
+ if (child != nullptr) {
+ work.push_back(child.get());
+ ++num_childs;
+ }
+ }
+ if (num_childs > 1u || elem->is_end_) {
+ queue.emplace(elem->Savings(), elem);
+ }
+ }
+ std::priority_queue<std::pair<int32_t, MatchTrie*>> prefixes;
+ // The savings can only ever go down for a given node, never up.
+ while (max != 0u && !queue.empty()) {
+ std::pair<int32_t, MatchTrie*> pair = PopRealTop(queue);
+ if (pair.second != this && pair.first > 0) {
+ // Pick this node.
+ uint32_t count = pair.second->count_;
+ pair.second->chosen_ = true;
+ for (MatchTrie* cur = pair.second->parent_; cur != this; cur = cur->parent_) {
+ if (cur->chosen_) {
+ break;
+ }
+ cur->count_ -= count;
+ }
+ for (MatchTrie* cur = pair.second->parent_; cur != this; cur = cur->parent_) {
+ ++cur->chosen_suffix_count_;
+ }
+ prefixes.emplace(pair.first, pair.second);
+ --max;
+ } else {
+ // Negative or no expected savings; the node was already popped, so just skip it.
+ }
+ }
+ while (!prefixes.empty()) {
+ std::pair<int32_t, MatchTrie*> pair = PopRealTop(prefixes);
+ if (pair.first <= 0) {
+ continue;
+ }
+ std::vector<uint8_t> chars;
+ for (MatchTrie* cur = pair.second; cur != this; cur = cur->parent_) {
+ chars.push_back(cur->incoming_);
+ }
+ ret.push_back(std::string(chars.rbegin(), chars.rend()));
+ // LOG(INFO) << pair.second->Savings() << " : " << ret.back();
+ }
+ return ret;
+ }
+
+ std::unique_ptr<MatchTrie> nodes_[256];
+ MatchTrie* parent_ = nullptr;
+ uint32_t count_ = 0u;
+ int32_t depth_ = 0u;
+ int32_t savings_ = 0u;
+ uint8_t incoming_ = 0u;
+ // If the current node is the end of a possible prefix.
+ bool is_end_ = false;
+ // If the current node is chosen to be a used prefix.
+ bool chosen_ = false;
+ // If the current node is a prefix of a longer chosen prefix.
+ uint32_t chosen_suffix_count_ = 0u;
+};
+
+void AnalyzeStrings::ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
+ std::set<std::string> unique_strings;
+ // Accumulate the strings.
+ for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ for (size_t i = 0; i < dex_file->NumStringIds(); ++i) {
+ uint32_t length = 0;
+ const char* data = dex_file->StringDataAndUtf16LengthByIdx(dex::StringIndex(i), &length);
+ // Analyze if the string has any UTF16 chars.
+ bool have_wide_char = false;
+ const char* ptr = data;
+ for (size_t j = 0; j < length; ++j) {
+ have_wide_char = have_wide_char || GetUtf16FromUtf8(&ptr) >= 0x100;
+ }
+ if (have_wide_char) {
+ wide_string_bytes_ += 2 * length;
+ } else {
+ ascii_string_bytes_ += length;
+ }
+ string_data_bytes_ += ptr - data;
+ unique_strings.insert(data);
+ }
+ }
+ // Unique strings only since we want to exclude savings from multidex duplication.
+ ProcessStrings(std::vector<std::string>(unique_strings.begin(), unique_strings.end()), 1);
+}
+
+void AnalyzeStrings::ProcessStrings(const std::vector<std::string>& strings, size_t iterations) {
+ if (iterations == 0u) {
+ return;
+ }
+ // Calculate total shared prefix.
+ std::vector<size_t> shared_len;
+ prefixes_.clear();
+ std::unique_ptr<MatchTrie> prefix_construct(new MatchTrie());
+ for (size_t i = 0; i < strings.size(); ++i) {
+ size_t best_len = 0;
+ if (i > 0) {
+ best_len = std::max(best_len, PrefixLen(strings[i], strings[i - 1]));
+ }
+ if (i < strings.size() - 1) {
+ best_len = std::max(best_len, PrefixLen(strings[i], strings[i + 1]));
+ }
+ best_len = std::min(best_len, kMaxPrefixLen);
+ std::string prefix;
+ if (best_len >= kMinPrefixLen) {
+ prefix = strings[i].substr(0, best_len);
+ prefix_construct->Add(prefix);
+ ++prefixes_[prefix];
+ total_shared_prefix_bytes_ += best_len;
+ }
+ total_prefix_index_cost_ += kPrefixIndexCost;
+ }
+
+ static constexpr size_t kPrefixBits = 15;
+ static constexpr size_t kShortLen = (1u << (16 - kPrefixBits)) - 1;
+ std::unique_ptr<MatchTrie> prefix_trie(new MatchTrie());
+ static constexpr bool kUseGreedyTrie = true;
+ if (kUseGreedyTrie) {
+ std::vector<std::string> prefixes(prefix_construct->ExtractPrefixes(1 << kPrefixBits));
+ for (auto&& str : prefixes) {
+ prefix_trie->Add(str);
+ }
+ } else {
+ // Optimize the result by moving long prefixes to shorter ones if it causes additional savings.
+ while (true) {
+ bool have_savings = false;
+ auto it = prefixes_.begin();
+ std::vector<std::string> longest;
+ for (const auto& pair : prefixes_) {
+ longest.push_back(pair.first);
+ }
+ std::sort(longest.begin(), longest.end(), [](const std::string& a, const std::string& b) {
+ return a.length() > b.length();
+ });
+ // Do longest first since this provides the best results.
+ for (const std::string& s : longest) {
+ it = prefixes_.find(s);
+ CHECK(it != prefixes_.end());
+ const std::string& prefix = it->first;
+ int64_t best_savings = 0u;
+ int64_t best_len = -1;
+ for (int64_t len = prefix.length() - 1; len >= 0; --len) {
+ auto found = prefixes_.find(prefix.substr(0, len));
+ if (len != 0 && found == prefixes_.end()) {
+ continue;
+ }
+ // Calculate savings from downgrading the prefix.
+ int64_t savings = kPrefixConstantCost + prefix.length() -
+ (prefix.length() - len) * it->second;
+ if (savings > best_savings) {
+ best_savings = savings;
+ best_len = len;
+ break;
+ }
+ }
+ if (best_len != -1) {
+ prefixes_[prefix.substr(0, best_len)] += it->second;
+ it = prefixes_.erase(it);
+ optimization_savings_ += best_savings;
+ have_savings = true;
+ } else {
+ ++it;
+ }
+ }
+ if (!have_savings) {
+ break;
+ }
+ }
+ for (auto&& pair : prefixes_) {
+ prefix_trie->Add(pair.first);
+ }
+ }
+
+ // Count longest prefixes.
+ std::set<std::string> used_prefixes;
+ std::vector<std::string> suffix;
+ for (const std::string& str : strings) {
+ auto pair = prefix_trie->LongestPrefix(str);
+ const size_t len = pair.first;
+ if (len >= kMinPrefixLen) {
+ ++strings_used_prefixed_;
+ total_prefix_savings_ += len;
+ used_prefixes.insert(str.substr(0, len));
+ }
+ suffix.push_back(str.substr(len));
+ if (suffix.back().size() < kShortLen) {
+ ++short_strings_;
+ } else {
+ ++long_strings_;
+ }
+ }
+ std::sort(suffix.begin(), suffix.end());
+ for (const std::string& prefix : used_prefixes) {
+ // 4 bytes for an offset, one for length.
+ auto pair = prefix_trie->LongestPrefix(prefix);
+ CHECK_EQ(pair.first, prefix.length());
+ if (pair.second) {
+ // Only need to add to dictionary if it's a leaf, otherwise we can reuse string data of the
+ // other prefix.
+ total_prefix_dict_ += prefix.size();
+ }
+ total_prefix_table_ += kPrefixConstantCost;
+ }
+ ProcessStrings(suffix, iterations - 1);
+}
+
+void AnalyzeStrings::Dump(std::ostream& os, uint64_t total_size) const {
+ os << "Total string data bytes " << Percent(string_data_bytes_, total_size) << "\n";
+ os << "UTF-16 string data bytes " << Percent(wide_string_bytes_, total_size) << "\n";
+ os << "ASCII string data bytes " << Percent(ascii_string_bytes_, total_size) << "\n";
+
+ // Prefix based strings.
+ os << "Total shared prefix bytes " << Percent(total_shared_prefix_bytes_, total_size) << "\n";
+ os << "Prefix dictionary cost " << Percent(total_prefix_dict_, total_size) << "\n";
+ os << "Prefix table cost " << Percent(total_prefix_table_, total_size) << "\n";
+ os << "Prefix index cost " << Percent(total_prefix_index_cost_, total_size) << "\n";
+ int64_t net_savings = total_prefix_savings_ + short_strings_;
+ net_savings -= total_prefix_dict_;
+ net_savings -= total_prefix_table_;
+ net_savings -= total_prefix_index_cost_;
+ os << "Prefix dictionary elements " << total_num_prefixes_ << "\n";
+ os << "Optimization savings " << Percent(optimization_savings_, total_size) << "\n";
+ os << "Prefix net savings " << Percent(net_savings, total_size) << "\n";
+ os << "Strings using prefix "
+ << Percent(strings_used_prefixed_, total_prefix_index_cost_ / kPrefixIndexCost) << "\n";
+ os << "Short strings " << Percent(short_strings_, short_strings_ + long_strings_) << "\n";
+ if (verbose_level_ >= VerboseLevel::kEverything) {
+ std::vector<std::pair<std::string, size_t>> pairs(prefixes_.begin(), prefixes_.end());
+ // Sort lexicographically.
+ std::sort(pairs.begin(), pairs.end());
+ for (const auto& pair : pairs) {
+ os << pair.first << " : " << pair.second << "\n";
+ }
+ }
+}
+
+} // namespace dexanalyze
+} // namespace art
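
The trie's Savings() above boils down to: a prefix of depth d chosen for c strings saves about c * (d - first_used) bytes, and costs a table entry plus, when no longer chosen prefix shares its bytes, the d dictionary bytes. A simplified, compilable version of that arithmetic (the extra_savings credit for a parent's first suffix is omitted, and the numbers are illustrative only):

#include <cstdint>
#include <iostream>

int32_t Savings(int32_t depth, int32_t count, int32_t first_used,
                bool parent_covers_dict_entry) {
  const int32_t kPrefixConstantCost = 4;  // Same constant as in the diff.
  int32_t cost = kPrefixConstantCost;
  if (!parent_covers_dict_entry) {
    cost += depth;  // The dictionary must store the prefix bytes themselves.
  }
  return count * (depth - first_used) - cost;
}

int main() {
  // "Ljava/lang/" (depth 11) shared by 1000 strings, no chosen ancestor:
  std::cout << Savings(11, 1000, 0, false) << "\n";  // 1000 * 11 - 15 = 10985
  // A depth-3 prefix used only twice cannot pay for its dictionary entry:
  std::cout << Savings(3, 2, 0, false) << "\n";      // 2 * 3 - 7 = -1
}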
diff --git a/tools/dexanalyze/dexanalyze_strings.h b/tools/dexanalyze/dexanalyze_strings.h
new file mode 100644
index 0000000000..32702a60cb
--- /dev/null
+++ b/tools/dexanalyze/dexanalyze_strings.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TOOLS_DEXANALYZE_DEXANALYZE_STRINGS_H_
+#define ART_TOOLS_DEXANALYZE_DEXANALYZE_STRINGS_H_
+
+#include <array>
+#include <vector>
+#include <map>
+
+#include "base/safe_map.h"
+#include "dexanalyze_experiments.h"
+#include "dex/code_item_accessors.h"
+#include "dex/utf-inl.h"
+
+namespace art {
+namespace dexanalyze {
+
+// Analyze string data and strings accessed from code.
+class AnalyzeStrings : public Experiment {
+ public:
+ void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
+ void Dump(std::ostream& os, uint64_t total_size) const override;
+
+ private:
+ void ProcessStrings(const std::vector<std::string>& strings, size_t iterations);
+
+ int64_t wide_string_bytes_ = 0u;
+ int64_t ascii_string_bytes_ = 0u;
+ int64_t string_data_bytes_ = 0u;
+ int64_t total_shared_prefix_bytes_ = 0u;
+ int64_t total_prefix_savings_ = 0u;
+ int64_t total_prefix_dict_ = 0u;
+ int64_t total_prefix_table_ = 0u;
+ int64_t total_prefix_index_cost_ = 0u;
+ int64_t total_num_prefixes_ = 0u;
+ int64_t optimization_savings_ = 0u;
+ int64_t strings_used_prefixed_ = 0u;
+ int64_t short_strings_ = 0u;
+ int64_t long_strings_ = 0u;
+ std::unordered_map<std::string, size_t> prefixes_;
+};
+
+} // namespace dexanalyze
+} // namespace art
+
+#endif // ART_TOOLS_DEXANALYZE_DEXANALYZE_STRINGS_H_
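
For the overall accounting in AnalyzeStrings::Dump(), net savings are the prefix bytes removed plus a one-byte credit per short suffix (reading that credit as a smaller length encoding is an assumption), minus dictionary bytes, per-prefix table entries, and per-string prefix indices. A toy computation with invented figures:

#include <cstdint>
#include <iostream>

int main() {
  const int64_t kPrefixConstantCost = 4;  // Per-prefix table entry, as in the diff.
  const int64_t kPrefixIndexCost = 2;     // Per-string prefix index, as in the diff.
  int64_t num_strings = 100000;           // Invented corpus size.
  int64_t num_prefixes = 4096;            // Invented dictionary size.
  int64_t prefix_savings = 900000;        // Invented: shared prefix bytes removed.
  int64_t short_strings = 60000;          // Invented: suffixes short enough for the credit.
  int64_t dict_bytes = 50000;             // Invented: unique prefix bytes stored once.
  int64_t net = prefix_savings + short_strings
      - dict_bytes
      - num_prefixes * kPrefixConstantCost
      - num_strings * kPrefixIndexCost;
  std::cout << "net savings: " << net << " bytes\n";  // 693616
}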
diff --git a/tools/golem/build-target.sh b/tools/golem/build-target.sh
index 921a8cbe36..45c9125930 100755
--- a/tools/golem/build-target.sh
+++ b/tools/golem/build-target.sh
@@ -367,7 +367,7 @@ if [[ "$make_tarball" == "make_tarball" ]]; then
dirs_rooted+=("$root_dir/$tar_dir")
done
- execute tar -czf "${tarball}" "${dirs_rooted[@]}" --exclude .git --exclude .gitignore
+ execute tar -czf "${tarball}" --exclude ".git" --exclude ".gitignore" "${dirs_rooted[@]}"
tar_result=$?
if [[ $tar_result -ne 0 ]]; then
[[ -f $tarball ]] && rm $tarball
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index bf8a1b755e..6d9b6fbe40 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -235,7 +235,7 @@ class DexMember {
const bool is_method_;
};
-class ClassPath FINAL {
+class ClassPath final {
public:
ClassPath(const std::vector<std::string>& dex_paths, bool open_writable) {
OpenDexFiles(dex_paths, open_writable);
@@ -316,7 +316,7 @@ class ClassPath FINAL {
std::vector<std::unique_ptr<const DexFile>> dex_files_;
};
-class HierarchyClass FINAL {
+class HierarchyClass final {
public:
HierarchyClass() {}
@@ -455,7 +455,7 @@ class HierarchyClass FINAL {
std::vector<HierarchyClass*> extended_by_;
};
-class Hierarchy FINAL {
+class Hierarchy final {
public:
explicit Hierarchy(ClassPath& classpath) : classpath_(classpath) {
BuildClassHierarchy();
@@ -559,7 +559,7 @@ class Hierarchy FINAL {
std::map<std::string, HierarchyClass> classes_;
};
-class HiddenApi FINAL {
+class HiddenApi final {
public:
HiddenApi() {}
diff --git a/tools/jfuzz/Android.bp b/tools/jfuzz/Android.bp
new file mode 100644
index 0000000000..f0d8b3779d
--- /dev/null
+++ b/tools/jfuzz/Android.bp
@@ -0,0 +1,29 @@
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Fuzzer tool.
+cc_binary_host {
+ name: "jfuzz",
+ srcs: ["jfuzz.cc"],
+ cflags: [
+ "-O0",
+ "-g",
+ "-Wall",
+ ],
+ target: {
+ windows: {
+ enabled: true,
+ },
+ },
+}
diff --git a/tools/jfuzz/Android.mk b/tools/jfuzz/Android.mk
deleted file mode 100644
index c7002d67ec..0000000000
--- a/tools/jfuzz/Android.mk
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Fuzzer tool.
-
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := jfuzz.cc
-LOCAL_CFLAGS += -O0 -g -Wall
-LOCAL_MODULE_HOST_OS := darwin linux windows
-LOCAL_MODULE := jfuzz
-include $(BUILD_HOST_EXECUTABLE)
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 264217ead6..3ef78d5718 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -224,5 +224,15 @@
"libcore.javax.crypto.CipherInputStreamTest#testCloseTwice",
"libcore.libcore.io.BlockGuardOsTest#test_android_getaddrinfo_networkPolicy",
"libcore.libcore.io.BlockGuardOsTest#test_checkNewMethodsInPosix"]
+},
+{
+ description: "fdsan doesn't exist on the host",
+ result: EXEC_FAILED,
+ modes: [host],
+ bug: 113177877,
+ names: ["libcore.libcore.io.FdsanTest#testFileInputStream",
+ "libcore.libcore.io.FdsanTest#testFileOutputStream",
+ "libcore.libcore.io.FdsanTest#testRandomAccessFile",
+ "libcore.libcore.io.FdsanTest#testParcelFileDescriptor"]
}
]
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
index 942a4e0fc6..23533af02b 100644
--- a/tools/libcore_gcstress_debug_failures.txt
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -10,14 +10,26 @@
modes: [device],
names: ["jsr166.CompletableFutureTest#testCompleteOnTimeout_completed",
"jsr166.CompletableFutureTest#testDelayedExecutor",
+ "jsr166.ExecutorsTest#testTimedCallable",
+ "jsr166.RecursiveActionTest#testJoinIgnoresInterruptsOutsideForkJoinPool",
"libcore.libcore.icu.TransliteratorTest#testAll",
"libcore.libcore.icu.RelativeDateTimeFormatterTest#test_bug25821045",
"libcore.libcore.icu.RelativeDateTimeFormatterTest#test_bug25883157",
"libcore.java.lang.ref.ReferenceQueueTest#testRemoveWithDelayedResultAndTimeout",
+ "libcore.java.text.DecimalFormatTest#testWhitespaceError",
+ "libcore.java.text.DecimalFormatTest#testWhitespaceTolerated",
+ "libcore.java.text.DecimalFormatTest#test_exponentSeparator",
+ "libcore.java.text.DecimalFormatTest#test_setMaximumFractionDigitsAffectsRoundingMode",
+ "libcore.java.util.jar.OldJarFileTest#test_ConstructorLjava_io_File",
+ "libcore.java.util.jar.OldJarFileTest#test_ConstructorLjava_lang_StringZ",
+ "libcore.java.util.jar.OldJarInputStreamTest#test_read$ZII",
"libcore.java.util.TimeZoneTest#testSetDefaultDeadlock",
"libcore.javax.crypto.CipherBasicsTest#testBasicEncryption",
+ "org.apache.harmony.tests.java.lang.ref.ReferenceQueueTest#test_removeJ",
"org.apache.harmony.tests.java.text.MessageFormatTest#test_parseLjava_lang_String",
- "org.apache.harmony.tests.java.util.TimerTest#testThrowingTaskKillsTimerThread"]
+ "org.apache.harmony.tests.java.util.ControlTest#test_toBundleName_LStringLLocale",
+ "org.apache.harmony.tests.java.util.TimerTest#testThrowingTaskKillsTimerThread"
+ ]
},
{
description: "Sometimes times out with gcstress and debug.",
diff --git a/tools/libcore_gcstress_failures.txt b/tools/libcore_gcstress_failures.txt
index 6840f9ebec..965e85c359 100644
--- a/tools/libcore_gcstress_failures.txt
+++ b/tools/libcore_gcstress_failures.txt
@@ -33,15 +33,5 @@
"org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext",
"libcore.java.text.DecimalFormatTest#testCurrencySymbolSpacing",
"libcore.java.text.SimpleDateFormatTest#testLocales"]
-},
-{
- description: "GC crash",
- result: EXEC_FAILED,
- bug: 111545159,
- names: ["org.apache.harmony.tests.java.util.AbstractSequentialListTest#test_addAllILjava_util_Collection",
- "org.apache.harmony.tests.java.util.HashtableTest#test_putLjava_lang_ObjectLjava_lang_Object",
- "org.apache.harmony.tests.java.util.VectorTest#test_addAllILjava_util_Collection",
- "org.apache.harmony.tests.java.util.VectorTest#test_addAllLjava_util_Collection",
- "org.apache.harmony.tests.java.io.BufferedWriterTest#test_write_LStringII_Exception"]
}
]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index a435f2e03e..b0b5810dcc 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -311,10 +311,10 @@ if [[ "$plugin" != "" ]]; then
vm_args="$vm_args --vm-arg $plugin"
fi
-if $use_jit; then
- vm_args="$vm_args --vm-arg -Xcompiler-option --vm-arg --compiler-filter=quicken"
- debuggee_args="$debuggee_args -Xcompiler-option --compiler-filter=quicken"
-fi
+# Because we're running debuggable, we discard any AOT code.
+# Therefore we run dex2oat with 'quicken' to avoid spending time compiling.
+vm_args="$vm_args --vm-arg -Xcompiler-option --vm-arg --compiler-filter=quicken"
+debuggee_args="$debuggee_args -Xcompiler-option --compiler-filter=quicken"
if $instant_jit; then
debuggee_args="$debuggee_args -Xjitthreshold:0"
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 240d63c6d3..2d39b2a9f8 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -157,7 +157,7 @@ fi
# timeout when being asked to run packages, and some tests go above
# the default timeout.
if $gcstress && $debug && $device_mode; then
- vogar_args="$vogar_args --timeout 960"
+ vogar_args="$vogar_args --timeout 1440"
else
vogar_args="$vogar_args --timeout 480"
fi
@@ -195,4 +195,7 @@ esac
# Run the tests using vogar.
echo "Running tests for the following test packages:"
echo ${working_packages[@]} | tr " " "\n"
-vogar $vogar_args $expectations $(cparg $DEPS) ${working_packages[@]}
+
+cmd="vogar $vogar_args $expectations $(cparg $DEPS) ${working_packages[@]}"
+echo "Running $cmd"
+eval $cmd
diff --git a/tools/tracefast-plugin/tracefast.cc b/tools/tracefast-plugin/tracefast.cc
index ed6ac3d199..4ea5b2ddd9 100644
--- a/tools/tracefast-plugin/tracefast.cc
+++ b/tools/tracefast-plugin/tracefast.cc
@@ -38,7 +38,7 @@ static constexpr const char* kTracerInstrumentationKey = "tracefast_TRAMPOLINE";
static constexpr bool kNeedsInterpreter = false;
#endif // TRACEFAST_INTERPRETER
-class Tracer FINAL : public art::instrumentation::InstrumentationListener {
+class Tracer final : public art::instrumentation::InstrumentationListener {
public:
Tracer() {}
@@ -46,40 +46,40 @@ class Tracer FINAL : public art::instrumentation::InstrumentationListener {
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> return_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const art::JValue& return_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void MethodUnwind(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void DexPcMoved(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t new_dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void FieldRead(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::ArtField* field ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
@@ -87,7 +87,7 @@ class Tracer FINAL : public art::instrumentation::InstrumentationListener {
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::ArtField* field ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> field_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
@@ -95,32 +95,32 @@ class Tracer FINAL : public art::instrumentation::InstrumentationListener {
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::ArtField* field ATTRIBUTE_UNUSED,
const art::JValue& field_value ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void ExceptionThrown(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void ExceptionHandled(art::Thread* self ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Throwable> throwable ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void Branch(art::Thread* thread ATTRIBUTE_UNUSED,
art::ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void InvokeVirtualOrInterface(art::Thread* thread ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
art::ArtMethod* caller ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
art::ArtMethod* callee ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
void WatchedFramePop(art::Thread* thread ATTRIBUTE_UNUSED,
const art::ShadowFrame& frame ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
private:
DISALLOW_COPY_AND_ASSIGN(Tracer);
@@ -149,7 +149,7 @@ class TraceFastPhaseCB : public art::RuntimePhaseCallback {
TraceFastPhaseCB() {}
void NextRuntimePhase(art::RuntimePhaseCallback::RuntimePhase phase)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (phase == art::RuntimePhaseCallback::RuntimePhase::kInit) {
art::ScopedThreadSuspension sts(art::Thread::Current(),
art::ThreadState::kWaitingForMethodTracingStart);
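
The Tracer above declares two MethodExited overloads, which is where the override keyword earns its keep: a mistyped parameter silently introduces a new overload, while override turns the mismatch into a compile error. A hedged sketch with simplified, hypothetical signatures (not the real InstrumentationListener API):

    class Instrumentation {
     public:
      virtual ~Instrumentation() {}
      virtual void MethodExited(int dex_pc, long return_value) {}
      virtual void MethodExited(int dex_pc, double return_value) {}
    };

    class Tracer final : public Instrumentation {
     public:
      void MethodExited(int dex_pc, long return_value) override {}
      // Mistyping the second parameter (say, float instead of double) now
      // fails to compile; without "override" it would quietly become an
      // unrelated overload that the caller never invokes.
      void MethodExited(int dex_pc, double return_value) override {}
    };

    int main() {
      Tracer tracer;
      tracer.MethodExited(0, 1L);
      tracer.MethodExited(0, 2.0);
      return 0;
    }
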
diff --git a/tools/veridex/flow_analysis.h b/tools/veridex/flow_analysis.h
index 9c86024711..865b9df03d 100644
--- a/tools/veridex/flow_analysis.h
+++ b/tools/veridex/flow_analysis.h
@@ -192,8 +192,8 @@ class FlowAnalysisCollector : public VeriFlowAnalysis {
return uses_;
}
- RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) OVERRIDE;
- void AnalyzeFieldSet(const Instruction& instruction) OVERRIDE;
+ RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) override;
+ void AnalyzeFieldSet(const Instruction& instruction) override;
private:
// List of reflection uses found, concrete and abstract.
@@ -212,8 +212,8 @@ class FlowAnalysisSubstitutor : public VeriFlowAnalysis {
return uses_;
}
- RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) OVERRIDE;
- void AnalyzeFieldSet(const Instruction& instruction) OVERRIDE;
+ RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) override;
+ void AnalyzeFieldSet(const Instruction& instruction) override;
private:
// List of reflection uses found, concrete and abstract.