-rw-r--r--  build/Android.gtest.mk  5
-rw-r--r--  cmdline/cmdline_parser_test.cc  2
-rw-r--r--  compiler/Android.mk  6
-rw-r--r--  compiler/cfi_test.h  4
-rw-r--r--  compiler/common_compiler_test.cc  28
-rw-r--r--  compiler/common_compiler_test.h  13
-rw-r--r--  compiler/compiled_method.cc  2
-rw-r--r--  compiler/dex/dataflow_iterator-inl.h  8
-rw-r--r--  compiler/dex/dataflow_iterator.h  2
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc  2
-rw-r--r--  compiler/dex/global_value_numbering_test.cc  3
-rw-r--r--  compiler/dex/gvn_dead_code_elimination.cc  10
-rw-r--r--  compiler/dex/gvn_dead_code_elimination.h  2
-rw-r--r--  compiler/dex/local_value_numbering_test.cc  2
-rw-r--r--  compiler/dex/mir_analysis.cc  6
-rw-r--r--  compiler/dex/mir_dataflow.cc  43
-rw-r--r--  compiler/dex/mir_field_info.h  4
-rw-r--r--  compiler/dex/mir_graph.cc  61
-rw-r--r--  compiler/dex/mir_graph.h  69
-rw-r--r--  compiler/dex/mir_method_info.h  5
-rw-r--r--  compiler/dex/mir_optimization.cc  120
-rw-r--r--  compiler/dex/pass_driver.h  2
-rw-r--r--  compiler/dex/pass_driver_me.h  7
-rw-r--r--  compiler/dex/pass_driver_me_post_opt.cc  2
-rw-r--r--  compiler/dex/post_opt_passes.h  20
-rw-r--r--  compiler/dex/quick/arm/assemble_arm.cc  18
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc  16
-rw-r--r--  compiler/dex/quick/arm/fp_arm.cc  6
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc  28
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc  12
-rw-r--r--  compiler/dex/quick/arm64/assemble_arm64.cc  16
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc  16
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc  2
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc  14
-rw-r--r--  compiler/dex/quick/codegen_util.cc  2
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc  10
-rw-r--r--  compiler/dex/quick/gen_common.cc  34
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc  16
-rw-r--r--  compiler/dex/quick/gen_loadstore.cc  2
-rw-r--r--  compiler/dex/quick/mips/assemble_mips.cc  8
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc  4
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc  8
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc  12
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc  44
-rw-r--r--  compiler/dex/quick/mir_to_lir.h  20
-rw-r--r--  compiler/dex/quick/quick_cfi_test.cc  4
-rw-r--r--  compiler/dex/quick/quick_compiler.cc  6
-rw-r--r--  compiler/dex/quick/ralloc_util.cc  2
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc  16
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc  2
-rwxr-xr-x  compiler/dex/quick/x86/fp_x86.cc  12
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc  6
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc  2
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc  12
-rw-r--r--  compiler/dex/ssa_transformation.cc  40
-rw-r--r--  compiler/dex/type_inference.cc  1067
-rw-r--r--  compiler/dex/type_inference.h  443
-rw-r--r--  compiler/dex/type_inference_test.cc  2044
-rw-r--r--  compiler/dex/verification_results.cc  2
-rw-r--r--  compiler/dex/verified_method.cc  4
-rw-r--r--  compiler/dex/verified_method.h  2
-rw-r--r--  compiler/dex/vreg_analysis.cc  452
-rw-r--r--  compiler/driver/compiler_driver-inl.h  6
-rw-r--r--  compiler/driver/compiler_driver.cc  68
-rw-r--r--  compiler/driver/compiler_driver.h  45
-rw-r--r--  compiler/driver/compiler_driver_test.cc  85
-rw-r--r--  compiler/driver/dex_compilation_unit.h  3
-rw-r--r--  compiler/dwarf/dwarf_constants.h  22
-rw-r--r--  compiler/dwarf/dwarf_test.cc  10
-rw-r--r--  compiler/dwarf/dwarf_test.h  68
-rw-r--r--  compiler/dwarf/headers.h  9
-rw-r--r--  compiler/dwarf/register.h  1
-rw-r--r--  compiler/elf_builder.h  252
-rw-r--r--  compiler/elf_writer_debug.cc  91
-rw-r--r--  compiler/elf_writer_debug.h  16
-rw-r--r--  compiler/elf_writer_quick.cc  197
-rw-r--r--  compiler/elf_writer_quick.h  10
-rw-r--r--  compiler/elf_writer_test.cc  8
-rw-r--r--  compiler/image_test.cc  18
-rw-r--r--  compiler/image_writer.cc  26
-rw-r--r--  compiler/jit/jit_compiler.cc  2
-rw-r--r--  compiler/jni/quick/calling_convention.cc  4
-rw-r--r--  compiler/jni/quick/jni_compiler.cc  12
-rw-r--r--  compiler/linker/relative_patcher_test.h  2
-rw-r--r--  compiler/oat_test.cc  4
-rw-r--r--  compiler/oat_writer.cc  18
-rw-r--r--  compiler/oat_writer.h  8
-rw-r--r--  compiler/optimizing/boolean_simplifier.cc  11
-rw-r--r--  compiler/optimizing/builder.cc  16
-rw-r--r--  compiler/optimizing/code_generator.cc  24
-rw-r--r--  compiler/optimizing/code_generator_arm.cc  32
-rw-r--r--  compiler/optimizing/code_generator_arm.h  4
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  412
-rw-r--r--  compiler/optimizing/code_generator_arm64.h  20
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  196
-rw-r--r--  compiler/optimizing/code_generator_x86.h  8
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  289
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h  4
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc  81
-rw-r--r--  compiler/optimizing/dead_code_elimination.h  11
-rw-r--r--  compiler/optimizing/graph_checker.cc  65
-rw-r--r--  compiler/optimizing/graph_visualizer.cc  4
-rw-r--r--  compiler/optimizing/gvn.cc  2
-rw-r--r--  compiler/optimizing/inliner.cc  2
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc  97
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc  2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc  2
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc  4
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc  65
-rw-r--r--  compiler/optimizing/locations.h  23
-rw-r--r--  compiler/optimizing/nodes.cc  326
-rw-r--r--  compiler/optimizing/nodes.h  133
-rw-r--r--  compiler/optimizing/optimization.cc  4
-rw-r--r--  compiler/optimizing/optimization.h  2
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  62
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h  12
-rw-r--r--  compiler/optimizing/parallel_move_resolver.cc  309
-rw-r--r--  compiler/optimizing/parallel_move_resolver.h  120
-rw-r--r--  compiler/optimizing/parallel_move_test.cc  344
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc  105
-rw-r--r--  compiler/optimizing/register_allocator.cc  49
-rw-r--r--  compiler/optimizing/register_allocator.h  8
-rw-r--r--  compiler/optimizing/register_allocator_test.cc  4
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h  38
-rw-r--r--  compiler/optimizing/stack_map_stream.cc  359
-rw-r--r--  compiler/optimizing/stack_map_stream.h  420
-rw-r--r--  compiler/optimizing/stack_map_test.cc  42
-rw-r--r--  compiler/output_stream_test.cc  6
-rw-r--r--  compiler/utils/arm/assembler_arm.h  14
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h  6
-rw-r--r--  compiler/utils/assembler.cc  8
-rw-r--r--  compiler/utils/assembler.h  18
-rw-r--r--  compiler/utils/assembler_thumb_test.cc  94
-rw-r--r--  compiler/utils/dedupe_set.h  4
-rw-r--r--  compiler/utils/growable_array.h  8
-rw-r--r--  compiler/utils/mips/assembler_mips.h  14
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h  6
-rw-r--r--  compiler/utils/test_dex_file_builder.h  372
-rw-r--r--  compiler/utils/test_dex_file_builder_test.cc  84
-rw-r--r--  compiler/utils/x86/assembler_x86.h  14
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc  145
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h  27
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc  129
-rw-r--r--  dalvikvm/dalvikvm.cc  23
-rw-r--r--  dex2oat/dex2oat.cc  149
-rw-r--r--  disassembler/disassembler.cc  2
-rw-r--r--  imgdiag/imgdiag.cc  16
-rw-r--r--  oatdump/oatdump.cc  11
-rw-r--r--  runtime/Android.mk  2
-rw-r--r--  runtime/arch/arm/context_arm.h  2
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc  15
-rw-r--r--  runtime/arch/arm/fault_handler_arm.cc  2
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S  6
-rw-r--r--  runtime/arch/arm64/context_arm64.h  2
-rw-r--r--  runtime/arch/arm64/entrypoints_init_arm64.cc  17
-rw-r--r--  runtime/arch/arm64/fault_handler_arm64.cc  2
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S  6
-rw-r--r--  runtime/arch/mips/context_mips.h  2
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc  17
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S  6
-rw-r--r--  runtime/arch/mips64/context_mips64.h  2
-rw-r--r--  runtime/arch/mips64/entrypoints_init_mips64.cc  23
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S  6
-rw-r--r--  runtime/arch/stub_test.cc  18
-rw-r--r--  runtime/arch/x86/context_x86.h  2
-rw-r--r--  runtime/arch/x86/entrypoints_init_x86.cc  26
-rw-r--r--  runtime/arch/x86/fault_handler_x86.cc  3
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S  12
-rw-r--r--  runtime/arch/x86_64/context_x86_64.h  2
-rw-r--r--  runtime/arch/x86_64/entrypoints_init_x86_64.cc  27
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S  10
-rw-r--r--  runtime/art_field-inl.h  8
-rw-r--r--  runtime/art_field.h  2
-rw-r--r--  runtime/base/bit_vector.cc  26
-rw-r--r--  runtime/base/bit_vector.h  2
-rw-r--r--  runtime/base/bit_vector_test.cc  44
-rw-r--r--  runtime/base/hex_dump.cc  2
-rw-r--r--  runtime/base/logging.cc  2
-rw-r--r--  runtime/base/logging.h  2
-rw-r--r--  runtime/base/macros.h  5
-rw-r--r--  runtime/base/mutex-inl.h  27
-rw-r--r--  runtime/base/mutex.cc  58
-rw-r--r--  runtime/base/mutex.h  4
-rw-r--r--  runtime/base/mutex_test.cc  7
-rw-r--r--  runtime/base/scoped_flock.cc  9
-rw-r--r--  runtime/base/stl_util.h  20
-rw-r--r--  runtime/base/variant_map.h  2
-rw-r--r--  runtime/base/variant_map_test.cc  2
-rw-r--r--  runtime/check_jni.cc  8
-rw-r--r--  runtime/class_linker-inl.h  8
-rw-r--r--  runtime/class_linker.cc  274
-rw-r--r--  runtime/class_linker.h  16
-rw-r--r--  runtime/class_linker_test.cc  180
-rw-r--r--  runtime/common_runtime_test.cc  81
-rw-r--r--  runtime/common_runtime_test.h  7
-rw-r--r--  runtime/common_throws.cc  64
-rw-r--r--  runtime/debugger.cc  19
-rw-r--r--  runtime/debugger.h  2
-rw-r--r--  runtime/dex_file-inl.h  2
-rw-r--r--  runtime/dex_file.cc  105
-rw-r--r--  runtime/dex_file.h  41
-rw-r--r--  runtime/dex_file_test.cc  28
-rw-r--r--  runtime/dex_file_verifier.cc  14
-rw-r--r--  runtime/dex_file_verifier.h  6
-rw-r--r--  runtime/dex_file_verifier_test.cc  10
-rw-r--r--  runtime/dex_instruction.h  2
-rw-r--r--  runtime/dex_method_iterator.h  28
-rw-r--r--  runtime/elf.h  32
-rw-r--r--  runtime/elf_file.cc  584
-rw-r--r--  runtime/elf_file.h  10
-rw-r--r--  runtime/elf_file_impl.h  28
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h  20
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc  29
-rw-r--r--  runtime/entrypoints/interpreter/interpreter_entrypoints.cc  2
-rw-r--r--  runtime/entrypoints/jni/jni_entrypoints.cc  10
-rw-r--r--  runtime/entrypoints/quick/quick_cast_entrypoints.cc  4
-rw-r--r--  runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc  2
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc  4
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  6
-rw-r--r--  runtime/exception_test.cc  2
-rw-r--r--  runtime/gc/accounting/atomic_stack.h  4
-rw-r--r--  runtime/gc/accounting/card_table.cc  8
-rw-r--r--  runtime/gc/accounting/card_table.h  2
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h  2
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc  10
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc  4
-rw-r--r--  runtime/gc/allocator/rosalloc.cc  30
-rw-r--r--  runtime/gc/collector/mark_sweep.cc  8
-rw-r--r--  runtime/gc/collector/semi_space.cc  2
-rw-r--r--  runtime/gc/heap-inl.h  2
-rw-r--r--  runtime/gc/heap.cc  12
-rw-r--r--  runtime/gc/heap.h  4
-rw-r--r--  runtime/gc/reference_queue.cc  4
-rw-r--r--  runtime/gc/space/bump_pointer_space.h  2
-rw-r--r--  runtime/gc/space/dlmalloc_space-inl.h  6
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc  6
-rw-r--r--  runtime/gc/space/image_space.cc  10
-rw-r--r--  runtime/gc/space/image_space.h  4
-rw-r--r--  runtime/gc/space/large_object_space.cc  21
-rw-r--r--  runtime/gc/space/malloc_space.cc  4
-rw-r--r--  runtime/gc/space/malloc_space.h  2
-rw-r--r--  runtime/gc/space/region_space.h  2
-rw-r--r--  runtime/gc/space/rosalloc_space-inl.h  6
-rw-r--r--  runtime/gc/space/rosalloc_space.cc  18
-rw-r--r--  runtime/gc_map.h  2
-rw-r--r--  runtime/hprof/hprof.cc  6
-rw-r--r--  runtime/indirect_reference_table-inl.h  2
-rw-r--r--  runtime/indirect_reference_table.cc  17
-rw-r--r--  runtime/indirect_reference_table.h  4
-rw-r--r--  runtime/indirect_reference_table_test.cc  2
-rw-r--r--  runtime/instrumentation.cc  12
-rw-r--r--  runtime/intern_table.cc  12
-rw-r--r--  runtime/intern_table.h  2
-rw-r--r--  runtime/intern_table_test.cc  4
-rw-r--r--  runtime/interpreter/interpreter.cc  44
-rw-r--r--  runtime/interpreter/interpreter_common.cc  10
-rw-r--r--  runtime/interpreter/interpreter_common.h  4
-rw-r--r--  runtime/interpreter/interpreter_goto_table_impl.cc  277
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc  264
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc  2
-rw-r--r--  runtime/jdwp/jdwp.h  2
-rw-r--r--  runtime/jdwp/jdwp_expand_buf.cc  2
-rw-r--r--  runtime/jdwp/jdwp_handler.cc  2
-rw-r--r--  runtime/jit/jit_code_cache.h  4
-rw-r--r--  runtime/jni_internal.cc  17
-rw-r--r--  runtime/jni_internal_test.cc  16
-rw-r--r--  runtime/mapping_table.h  2
-rw-r--r--  runtime/mem_map.cc  9
-rw-r--r--  runtime/mem_map.h  14
-rw-r--r--  runtime/memory_region.cc  2
-rw-r--r--  runtime/mirror/abstract_method.cc  44
-rw-r--r--  runtime/mirror/abstract_method.h  73
-rw-r--r--  runtime/mirror/art_method-inl.h  4
-rw-r--r--  runtime/mirror/art_method.cc  40
-rw-r--r--  runtime/mirror/art_method.h  8
-rw-r--r--  runtime/mirror/class-inl.h  22
-rw-r--r--  runtime/mirror/class.cc  22
-rw-r--r--  runtime/mirror/class.h  28
-rw-r--r--  runtime/mirror/dex_cache_test.cc  4
-rw-r--r--  runtime/mirror/field.h  7
-rw-r--r--  runtime/mirror/iftable-inl.h  2
-rw-r--r--  runtime/mirror/iftable.h  10
-rw-r--r--  runtime/mirror/method.cc  103
-rw-r--r--  runtime/mirror/method.h  93
-rw-r--r--  runtime/mirror/object-inl.h  10
-rw-r--r--  runtime/mirror/object.cc  2
-rw-r--r--  runtime/mirror/object_array-inl.h  4
-rw-r--r--  runtime/mirror/object_test.cc  136
-rw-r--r--  runtime/mirror/stack_trace_element.cc  4
-rw-r--r--  runtime/mirror/string-inl.h  2
-rw-r--r--  runtime/mirror/string.cc  4
-rw-r--r--  runtime/mirror/throwable.cc  6
-rw-r--r--  runtime/monitor.cc  93
-rw-r--r--  runtime/monitor_android.cc  2
-rw-r--r--  runtime/monitor_pool.h  3
-rw-r--r--  runtime/monitor_test.cc  4
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc  24
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc  14
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc  45
-rw-r--r--  runtime/native/dalvik_system_VMStack.cc  21
-rw-r--r--  runtime/native/java_lang_Class.cc  236
-rw-r--r--  runtime/native/java_lang_DexCache.cc  8
-rw-r--r--  runtime/native/java_lang_String.cc  2
-rw-r--r--  runtime/native/java_lang_Thread.cc  12
-rw-r--r--  runtime/native/java_lang_reflect_Array.cc  15
-rw-r--r--  runtime/native/java_lang_reflect_Constructor.cc  35
-rw-r--r--  runtime/native/java_lang_reflect_Method.cc  6
-rw-r--r--  runtime/native/java_lang_reflect_Proxy.cc  7
-rw-r--r--  runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc  4
-rw-r--r--  runtime/nth_caller_visitor.h  8
-rw-r--r--  runtime/oat_file.cc  6
-rw-r--r--  runtime/oat_file.h  14
-rw-r--r--  runtime/oat_file_assistant.cc  8
-rw-r--r--  runtime/oat_file_assistant.h  28
-rw-r--r--  runtime/oat_file_assistant_test.cc  2
-rw-r--r--  runtime/object_callbacks.h  5
-rw-r--r--  runtime/os_linux.cc  4
-rw-r--r--  runtime/parsed_options.cc  6
-rw-r--r--  runtime/parsed_options_test.cc  33
-rw-r--r--  runtime/prebuilt_tools_test.cc  66
-rw-r--r--  runtime/primitive.h  2
-rw-r--r--  runtime/profiler.cc  6
-rw-r--r--  runtime/proxy_test.cc  61
-rw-r--r--  runtime/reference_table.cc  6
-rw-r--r--  runtime/reference_table_test.cc  4
-rw-r--r--  runtime/reflection.cc  24
-rw-r--r--  runtime/reflection.h  3
-rw-r--r--  runtime/reflection_test.cc  26
-rw-r--r--  runtime/runtime.cc  45
-rw-r--r--  runtime/runtime.h  6
-rw-r--r--  runtime/runtime_linux.cc  27
-rw-r--r--  runtime/scoped_thread_state_change.h  18
-rw-r--r--  runtime/signal_catcher.cc  14
-rw-r--r--  runtime/signal_set.h  2
-rw-r--r--  runtime/stack.cc  133
-rw-r--r--  runtime/stack.h  14
-rw-r--r--  runtime/thread-inl.h  6
-rw-r--r--  runtime/thread.cc  31
-rw-r--r--  runtime/thread.h  41
-rw-r--r--  runtime/thread_linux.cc  14
-rw-r--r--  runtime/thread_list.cc  14
-rw-r--r--  runtime/thread_list.h  6
-rw-r--r--  runtime/thread_pool.cc  2
-rw-r--r--  runtime/thread_pool.h  4
-rw-r--r--  runtime/trace.h  4
-rw-r--r--  runtime/utils.cc  44
-rw-r--r--  runtime/utils.h  2
-rw-r--r--  runtime/utils_test.cc  25
-rw-r--r--  runtime/verifier/dex_gc_map.cc  2
-rw-r--r--  runtime/verifier/dex_gc_map.h  2
-rw-r--r--  runtime/verifier/method_verifier.h  8
-rw-r--r--  runtime/verifier/method_verifier_test.cc  2
-rw-r--r--  runtime/verifier/reg_type.h  6
-rw-r--r--  runtime/verifier/reg_type_cache-inl.h  2
-rw-r--r--  runtime/well_known_classes.cc  12
-rw-r--r--  runtime/well_known_classes.h  1
-rw-r--r--  runtime/zip_archive.cc  2
-rw-r--r--  runtime/zip_archive.h  2
-rw-r--r--  runtime/zip_archive_test.cc  4
-rw-r--r--  sigchainlib/sigchain.cc  12
-rw-r--r--  test/004-JniTest/jni_test.cc  4
-rw-r--r--  test/004-SignalTest/signaltest.cc  2
-rw-r--r--  test/008-exceptions/expected.txt  13
-rw-r--r--  test/008-exceptions/src/Main.java  41
-rw-r--r--  test/068-classloader/expected.txt  2
-rw-r--r--  test/068-classloader/src-ex/MutationTarget.java  22
-rw-r--r--  test/068-classloader/src-ex/Mutator.java  25
-rw-r--r--  test/068-classloader/src/Main.java  70
-rw-r--r--  test/080-oom-throw/expected.txt  1
-rw-r--r--  test/080-oom-throw/src/Main.java  49
-rw-r--r--  test/104-growth-limit/src/Main.java  6
-rw-r--r--  test/458-checker-instruction-simplification/src/Main.java  160
-rw-r--r--  test/463-checker-boolean-simplifier/src/Main.java  15
-rw-r--r--  test/474-checker-boolean-input/src/Main.java  83
-rw-r--r--  test/474-fp-sub-neg/expected.txt  2
-rw-r--r--  test/474-fp-sub-neg/info.txt  5
-rw-r--r--  test/474-fp-sub-neg/src/Main.java  45
-rw-r--r--  test/475-simplify-mul-zero/expected.txt  1
-rw-r--r--  test/475-simplify-mul-zero/info.txt  2
-rw-r--r--  test/475-simplify-mul-zero/src/Main.java  28
-rw-r--r--  test/476-checker-ctor-memory-barrier/expected.txt  0
-rw-r--r--  test/476-checker-ctor-memory-barrier/info.txt  2
-rw-r--r--  test/476-checker-ctor-memory-barrier/src/Main.java  147
-rw-r--r--  test/477-checker-bound-type/expected.txt  0
-rw-r--r--  test/477-checker-bound-type/info.txt  3
-rw-r--r--  test/477-checker-bound-type/src/Main.java  61
-rw-r--r--  test/477-long-to-float-conversion-precision/expected.txt  0
-rw-r--r--  test/477-long-to-float-conversion-precision/info.txt  1
-rw-r--r--  test/477-long-to-float-conversion-precision/src/Main.java  41
-rw-r--r--  test/478-checker-inliner-nested-loop/expected.txt  0
-rw-r--r--  test/478-checker-inliner-nested-loop/info.txt  2
-rw-r--r--  test/478-checker-inliner-nested-loop/src/Main.java  57
-rw-r--r--  test/479-regression-implicit-null-check/expected.txt  0
-rw-r--r--  test/479-regression-implicit-null-check/info.txt  2
-rw-r--r--  test/479-regression-implicit-null-check/src/Main.java  50
-rw-r--r--  test/480-checker-dead-blocks/expected.txt  0
-rw-r--r--  test/480-checker-dead-blocks/info.txt  1
-rw-r--r--  test/480-checker-dead-blocks/src/Main.java  147
-rw-r--r--  test/Android.run-test.mk  23
-rwxr-xr-x  test/etc/run-test-jar  4
-rw-r--r--  tools/art  1
-rwxr-xr-x  tools/run-jdwp-tests.sh  9
402 files changed, 11974 insertions, 4776 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 7283710bfa..7d76795714 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -61,7 +61,7 @@ $(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX)
# Dex file dependencies for each gtest.
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MultiDex MyClass Nested Statics StaticsFromCode
-ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod
+ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
@@ -171,6 +171,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/oat_file_test.cc \
runtime/oat_file_assistant_test.cc \
runtime/parsed_options_test.cc \
+ runtime/prebuilt_tools_test.cc \
runtime/reference_table_test.cc \
runtime/thread_pool_test.cc \
runtime/transaction_test.cc \
@@ -190,6 +191,7 @@ COMPILER_GTEST_COMMON_SRC_FILES := \
compiler/dex/mir_graph_test.cc \
compiler/dex/mir_optimization_test.cc \
compiler/dex/quick/quick_cfi_test.cc \
+ compiler/dex/type_inference_test.cc \
compiler/dwarf/dwarf_test.cc \
compiler/driver/compiler_driver_test.cc \
compiler/elf_writer_test.cc \
@@ -226,6 +228,7 @@ COMPILER_GTEST_COMMON_SRC_FILES := \
compiler/utils/arena_allocator_test.cc \
compiler/utils/dedupe_set_test.cc \
compiler/utils/swap_space_test.cc \
+ compiler/utils/test_dex_file_builder_test.cc \
compiler/utils/arm/managed_register_arm_test.cc \
compiler/utils/arm64/managed_register_arm64_test.cc \
compiler/utils/x86/managed_register_x86_test.cc \
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 9f873b321a..1386439f72 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -23,7 +23,7 @@
#include "gtest/gtest.h"
#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
- reinterpret_cast<void*>(NULL));
+ reinterpret_cast<void*>(nullptr));
namespace art {
bool UsuallyEquals(double expected, double actual);
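Much of this change is a mechanical NULL to nullptr migration, as in the EXPECT_NULL macro above. The practical difference is that nullptr has its own type, std::nullptr_t, so it behaves predictably in overload resolution, whereas NULL is just an integer constant. A standalone illustration (not ART code):

    void Take(int) {}
    void Take(void*) {}

    int main() {
      // Take(NULL);  // Picks Take(int), is ambiguous, or even picks
                      // Take(void*), depending on how NULL is defined.
      Take(nullptr);  // Always selects Take(void*): nullptr converts to any
                      // pointer type but never to an integer.
      return 0;
    }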
diff --git a/compiler/Android.mk b/compiler/Android.mk
index ac95abdd8d..3f5271d31f 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -23,6 +23,7 @@ LIBART_COMPILER_SRC_FILES := \
dex/global_value_numbering.cc \
dex/gvn_dead_code_elimination.cc \
dex/local_value_numbering.cc \
+ dex/type_inference.cc \
dex/quick/arm/assemble_arm.cc \
dex/quick/arm/call_arm.cc \
dex/quick/arm/fp_arm.cc \
@@ -124,13 +125,14 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/optimizing_compiler.cc \
optimizing/parallel_move_resolver.cc \
optimizing/prepare_for_register_allocation.cc \
+ optimizing/primitive_type_propagation.cc \
+ optimizing/reference_type_propagation.cc \
optimizing/register_allocator.cc \
optimizing/side_effects_analysis.cc \
optimizing/ssa_builder.cc \
optimizing/ssa_liveness_analysis.cc \
optimizing/ssa_phi_elimination.cc \
- optimizing/primitive_type_propagation.cc \
- optimizing/reference_type_propagation.cc \
+ optimizing/stack_map_stream.cc \
trampolines/trampoline_compiler.cc \
utils/arena_bit_vector.cc \
utils/arm/assembler_arm.cc \
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index cdb1b9e9a5..f7501d2dda 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -22,6 +22,7 @@
#include <sstream>
#include "arch/instruction_set.h"
+#include "dwarf/dwarf_constants.h"
#include "dwarf/dwarf_test.h"
#include "dwarf/headers.h"
#include "disassembler/disassembler.h"
@@ -45,7 +46,8 @@ class CFITest : public dwarf::DwarfTest {
// Pretty-print CFI opcodes.
constexpr bool is64bit = false;
dwarf::DebugFrameOpCodeWriter<> initial_opcodes;
- dwarf::WriteEhFrameCIE(is64bit, dwarf::Reg(8), initial_opcodes, &eh_frame_data_);
+ dwarf::WriteEhFrameCIE(is64bit, dwarf::DW_EH_PE_absptr, dwarf::Reg(8),
+ initial_opcodes, &eh_frame_data_);
std::vector<uintptr_t> eh_frame_patches;
dwarf::WriteEhFrameFDE(is64bit, 0, 0, actual_asm.size(), &actual_cfi,
&eh_frame_data_, &eh_frame_patches);
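The CIE writer now takes the FDE address encoding explicitly instead of assuming one. In the LSB .eh_frame format the encoding byte combines a value format (low nibble) with an application mode (high nibble); DW_EH_PE_absptr means addresses are stored as raw absolute pointers. A sketch, assuming the standard constants below are among the lines added to dwarf_constants.h:

    // 0x00: absolute pointer, as passed to WriteEhFrameCIE above.
    const uint8_t abs_encoding = dwarf::DW_EH_PE_absptr;
    // 0x1b: 4-byte signed PC-relative offset, a common compact alternative.
    const uint8_t pcrel4_encoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;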
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 96d90bb443..5a9e04f5dd 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -140,6 +140,27 @@ void CommonCompilerTest::MakeExecutable(mirror::ClassLoader* class_loader, const
}
}
+// Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
+// driver assumes ownership of the set, so the test should properly release the set.
+std::unordered_set<std::string>* CommonCompilerTest::GetImageClasses() {
+ // Empty set: by default no classes are retained in the image.
+ return new std::unordered_set<std::string>();
+}
+
+// Get the set of compiled classes given to the compiler-driver in SetUp. Note: the compiler
+// driver assumes ownership of the set, so the test should properly release the set.
+std::unordered_set<std::string>* CommonCompilerTest::GetCompiledClasses() {
+ // Null, no selection of compiled-classes.
+ return nullptr;
+}
+
+// Get the set of compiled methods given to the compiler-driver in SetUp. Note: the compiler
+// driver assumes ownership of the set, so the test should properly release the set.
+std::unordered_set<std::string>* CommonCompilerTest::GetCompiledMethods() {
+ // Null, no selection of compiled-methods.
+ return nullptr;
+}
+
void CommonCompilerTest::SetUp() {
CommonRuntimeTest::SetUp();
{
@@ -165,7 +186,10 @@ void CommonCompilerTest::SetUp() {
method_inliner_map_.get(),
compiler_kind, instruction_set,
instruction_set_features_.get(),
- true, new std::set<std::string>, nullptr,
+ true,
+ GetImageClasses(),
+ GetCompiledClasses(),
+ GetCompiledMethods(),
2, true, true, "", timer_.get(), -1, ""));
}
// We typically don't generate an image in unit tests, disable this optimization by default.
@@ -239,7 +263,7 @@ void CommonCompilerTest::CompileVirtualMethod(Handle<mirror::ClassLoader> class_
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
- CHECK(method != NULL) << "Virtual method not found: "
+ CHECK(method != nullptr) << "Virtual method not found: "
<< class_name << "." << method_name << signature;
CompileMethod(method);
}
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index d7b210d571..8d80a2da5c 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_COMMON_COMPILER_TEST_H_
#include <list>
+#include <unordered_set>
#include <vector>
#include "common_runtime_test.h"
@@ -56,6 +57,18 @@ class CommonCompilerTest : public CommonRuntimeTest {
virtual void SetUpRuntimeOptions(RuntimeOptions *options);
+ // Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
+ // driver assumes ownership of the set, so the test should properly release the set.
+ virtual std::unordered_set<std::string>* GetImageClasses();
+
+ // Get the set of compiled classes given to the compiler-driver in SetUp. Note: the compiler
+ // driver assumes ownership of the set, so the test should properly release the set.
+ virtual std::unordered_set<std::string>* GetCompiledClasses();
+
+ // Get the set of compiled methods given to the compiler-driver in SetUp. Note: the compiler
+ // driver assumes ownership of the set, so the test should properly release the set.
+ virtual std::unordered_set<std::string>* GetCompiledMethods();
+
virtual void TearDown();
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
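These new virtual hooks let a test subclass control what the CompilerDriver built in SetUp() treats as image classes and as its compiled-class/method filters. A minimal sketch of a fixture overriding one hook; the fixture name and class descriptor are illustrative:

    class ImageClassesTest : public CommonCompilerTest {
     protected:
      // Ownership of the returned set passes to the CompilerDriver.
      std::unordered_set<std::string>* GetImageClasses() override {
        return new std::unordered_set<std::string>({"Ljava/lang/Object;"});
      }
    };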
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 4f7a970fdd..d1acada6dd 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -108,7 +108,7 @@ const void* CompiledCode::CodePointer(const void* code_pointer,
}
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 6e25db6f04..83dfc28844 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -23,7 +23,7 @@ namespace art {
// Single forward pass over the nodes.
inline BasicBlock* DataflowIterator::ForwardSingleNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we not yet at the end?
if (idx_ < end_idx_) {
@@ -38,7 +38,7 @@ inline BasicBlock* DataflowIterator::ForwardSingleNext() {
// Repeat full forward passes over all nodes until no change occurs during a complete pass.
inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we at the end and have we changed something?
if ((idx_ >= end_idx_) && changed_ == true) {
@@ -61,7 +61,7 @@ inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
// Single reverse pass over the nodes.
inline BasicBlock* DataflowIterator::ReverseSingleNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we not yet at the end?
if (idx_ >= 0) {
@@ -76,7 +76,7 @@ inline BasicBlock* DataflowIterator::ReverseSingleNext() {
// Repeat full backwards passes over all nodes until no change occurs during a complete pass.
inline BasicBlock* DataflowIterator::ReverseRepeatNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we done and we changed something during the last iteration?
if ((idx_ < 0) && changed_) {
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 2a06cec9a0..097c2a40b4 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -72,7 +72,7 @@ namespace art {
: mir_graph_(mir_graph),
start_idx_(start_idx),
end_idx_(end_idx),
- block_id_list_(NULL),
+ block_id_list_(nullptr),
idx_(0),
repeats_(0),
changed_(false) {}
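The Repeat variants above implement a fixed-point loop: they keep re-running full passes over the blocks for as long as any call reported a change. The usual consumption pattern in MIRGraph passes looks roughly like this (the pass body is hypothetical):

    bool change = false;
    RepeatingPreOrderDfsIterator iter(mir_graph);
    for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(change)) {
      change = ApplyTransfer(bb);  // True if this block's dataflow state changed.
    }
    // Next() only returns nullptr after a complete pass with no reported change.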
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ef94d8b66f..d1ddfda545 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -301,7 +301,7 @@ extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::C
art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
UNUSED(invoke_type);
if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
- art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
+ art::DexCompilationUnit unit(nullptr, class_loader, art::Runtime::Current()->GetClassLinker(),
dex_file, code_item, class_def_idx, method_idx, access_flags,
driver.GetVerifiedMethod(&dex_file, method_idx));
art::optimizer::DexCompiler dex_compiler(driver, unit, dex_to_dex_compilation_level);
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index b4559ef375..c538d0beee 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -15,7 +15,6 @@
*/
#include "base/logging.h"
-#include "dataflow_iterator.h"
#include "dataflow_iterator-inl.h"
#include "dex/mir_field_info.h"
#include "global_value_numbering.h"
@@ -260,10 +259,8 @@ class GlobalValueNumberingTest : public testing::Test {
mir->ssa_rep = &ssa_reps_[i];
mir->ssa_rep->num_uses = def->num_uses;
mir->ssa_rep->uses = const_cast<int32_t*>(def->uses); // Not modified by LVN.
- mir->ssa_rep->fp_use = nullptr; // Not used by LVN.
mir->ssa_rep->num_defs = def->num_defs;
mir->ssa_rep->defs = const_cast<int32_t*>(def->defs); // Not modified by LVN.
- mir->ssa_rep->fp_def = nullptr; // Not used by LVN.
mir->dalvikInsn.opcode = def->opcode;
mir->offset = i; // LVN uses offset only for debug output
mir->optimization_flags = 0u;
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index ec12221f3c..d7f36f787e 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -478,7 +478,7 @@ void GvnDeadCodeElimination::ChangeBinOp2AddrToPlainBinOp(MIR* mir) {
mir->dalvikInsn.opcode - Instruction::ADD_INT_2ADDR + Instruction::ADD_INT);
}
-MIR* GvnDeadCodeElimination::CreatePhi(int s_reg, bool fp) {
+MIR* GvnDeadCodeElimination::CreatePhi(int s_reg) {
int v_reg = mir_graph_->SRegToVReg(s_reg);
MIR* phi = mir_graph_->NewMIR();
phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
@@ -491,11 +491,9 @@ MIR* GvnDeadCodeElimination::CreatePhi(int s_reg, bool fp) {
mir_graph_->AllocateSSADefData(phi, 1);
phi->ssa_rep->defs[0] = s_reg;
- phi->ssa_rep->fp_def[0] = fp;
size_t num_uses = bb_->predecessors.size();
mir_graph_->AllocateSSAUseData(phi, num_uses);
- std::fill_n(phi->ssa_rep->fp_use, num_uses, fp);
size_t idx = 0u;
for (BasicBlockId pred_id : bb_->predecessors) {
BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
@@ -523,14 +521,12 @@ MIR* GvnDeadCodeElimination::RenameSRegDefOrCreatePhi(uint16_t def_change, uint1
// defining MIR for that dalvik reg, the preserved values must come from its predecessors

// and we need to create a new Phi (a degenerate Phi if there's only a single predecessor).
if (def_change == kNPos) {
- bool fp = mir_to_kill->ssa_rep->fp_def[0];
if (wide) {
DCHECK_EQ(new_s_reg + 1, mir_to_kill->ssa_rep->defs[1]);
- DCHECK_EQ(fp, mir_to_kill->ssa_rep->fp_def[1]);
DCHECK_EQ(mir_graph_->SRegToVReg(new_s_reg) + 1, mir_graph_->SRegToVReg(new_s_reg + 1));
- CreatePhi(new_s_reg + 1, fp); // High word Phi.
+ CreatePhi(new_s_reg + 1); // High word Phi.
}
- return CreatePhi(new_s_reg, fp);
+ return CreatePhi(new_s_reg);
} else {
DCHECK_LT(def_change, last_change);
DCHECK_LE(last_change, vreg_chains_.NumMIRs());
diff --git a/compiler/dex/gvn_dead_code_elimination.h b/compiler/dex/gvn_dead_code_elimination.h
index 9a19f29970..f2378f2ced 100644
--- a/compiler/dex/gvn_dead_code_elimination.h
+++ b/compiler/dex/gvn_dead_code_elimination.h
@@ -128,7 +128,7 @@ class GvnDeadCodeElimination : public DeletableArenaObject<kArenaAllocMisc> {
void KillMIR(MIRData* data);
static void KillMIR(MIR* mir);
static void ChangeBinOp2AddrToPlainBinOp(MIR* mir);
- MIR* CreatePhi(int s_reg, bool fp);
+ MIR* CreatePhi(int s_reg);
MIR* RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change, MIR* mir_to_kill);
// Update state variables going backwards through a MIR.
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 566527ad42..0393410867 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -158,10 +158,8 @@ class LocalValueNumberingTest : public testing::Test {
mir->ssa_rep = &ssa_reps_[i];
mir->ssa_rep->num_uses = def->num_uses;
mir->ssa_rep->uses = const_cast<int32_t*>(def->uses); // Not modified by LVN.
- mir->ssa_rep->fp_use = nullptr; // Not used by LVN.
mir->ssa_rep->num_defs = def->num_defs;
mir->ssa_rep->defs = const_cast<int32_t*>(def->defs); // Not modified by LVN.
- mir->ssa_rep->fp_def = nullptr; // Not used by LVN.
mir->dalvikInsn.opcode = def->opcode;
mir->offset = i; // LVN uses offset only for debug output
mir->optimization_flags = 0u;
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 3d7a640ce3..9099e8a54d 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -968,7 +968,7 @@ void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
* edges until we reach an explicit branch or return.
*/
BasicBlock* ending_bb = bb;
- if (ending_bb->last_mir_insn != NULL) {
+ if (ending_bb->last_mir_insn != nullptr) {
uint32_t ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
while ((ending_flags & kAnBranch) == 0) {
ending_bb = GetBasicBlock(ending_bb->fall_through);
@@ -998,7 +998,7 @@ void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
bool done = false;
while (!done) {
tbb->visited = true;
- for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = tbb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
// Skip any MIR pseudo-op.
continue;
@@ -1195,7 +1195,7 @@ bool MIRGraph::SkipCompilation(std::string* skip_message) {
ClearAllVisitedFlags();
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
AnalyzeBlock(bb, &stats);
}
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 2a920a4e29..b4aec98e01 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -123,7 +123,7 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_UA | DF_NULL_CHK_A | DF_REF_A,
// 1F CHK_CAST vAA, type@BBBB
- DF_UA | DF_REF_A | DF_UMS,
+ DF_UA | DF_REF_A | DF_CHK_CAST | DF_UMS,
// 20 INSTANCE_OF vA, vB, type@CCCC
DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
@@ -159,10 +159,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_NOP,
// 2B PACKED_SWITCH vAA, +BBBBBBBB
- DF_UA,
+ DF_UA | DF_CORE_A,
// 2C SPARSE_SWITCH vAA, +BBBBBBBB
- DF_UA,
+ DF_UA | DF_CORE_A,
// 2D CMPL_FLOAT vAA, vBB, vCC
DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
@@ -180,22 +180,22 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
// 32 IF_EQ vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 33 IF_NE vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 34 IF_LT vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 35 IF_GE vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 36 IF_GT vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 37 IF_LE vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 38 IF_EQZ vAA, +BBBB
DF_UA,
@@ -989,7 +989,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
MIR* mir;
ArenaBitVector *use_v, *def_v, *live_in_v;
- if (bb->data_flow_info == NULL) return false;
+ if (bb->data_flow_info == nullptr) return false;
use_v = bb->data_flow_info->use_v =
new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapUse);
@@ -998,7 +998,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
live_in_v = bb->data_flow_info->live_in_v =
new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapLiveIn);
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
uint64_t df_attributes = GetDataFlowAttributes(mir);
MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
@@ -1080,8 +1080,6 @@ void MIRGraph::AllocateSSAUseData(MIR *mir, int num_uses) {
if (mir->ssa_rep->num_uses_allocated < num_uses) {
mir->ssa_rep->uses = arena_->AllocArray<int32_t>(num_uses, kArenaAllocDFInfo);
- // NOTE: will be filled in during type & size inference pass
- mir->ssa_rep->fp_use = arena_->AllocArray<bool>(num_uses, kArenaAllocDFInfo);
}
}
@@ -1090,7 +1088,6 @@ void MIRGraph::AllocateSSADefData(MIR *mir, int num_defs) {
if (mir->ssa_rep->num_defs_allocated < num_defs) {
mir->ssa_rep->defs = arena_->AllocArray<int32_t>(num_defs, kArenaAllocDFInfo);
- mir->ssa_rep->fp_def = arena_->AllocArray<bool>(num_defs, kArenaAllocDFInfo);
}
}
@@ -1191,7 +1188,7 @@ void MIRGraph::DataFlowSSAFormatExtended(MIR* mir) {
/* Entry function to convert a block into SSA representation */
bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) return false;
+ if (bb->data_flow_info == nullptr) return false;
/*
* Pruned SSA form: Insert phi nodes for each dalvik register marked in phi_node_blocks
@@ -1214,7 +1211,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
}
}
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
mir->ssa_rep =
static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
kArenaAllocDFInfo));
@@ -1287,35 +1284,27 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
if (df_attributes & DF_HAS_USES) {
num_uses = 0;
if (df_attributes & DF_UA) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vA, num_uses++);
if (df_attributes & DF_A_WIDE) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
}
}
if (df_attributes & DF_UB) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vB, num_uses++);
if (df_attributes & DF_B_WIDE) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
}
}
if (df_attributes & DF_UC) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vC, num_uses++);
if (df_attributes & DF_C_WIDE) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
}
}
}
if (df_attributes & DF_HAS_DEFS) {
- mir->ssa_rep->fp_def[0] = df_attributes & DF_FP_A;
HandleSSADef(mir->ssa_rep->defs, d_insn->vA, 0);
if (df_attributes & DF_A_WIDE) {
- mir->ssa_rep->fp_def[1] = df_attributes & DF_FP_A;
HandleSSADef(mir->ssa_rep->defs, d_insn->vA+1, 1);
}
}
@@ -1413,8 +1402,8 @@ void MIRGraph::CountUses(BasicBlock* bb) {
return;
}
uint32_t weight = GetUseCountWeight(bb);
- for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
- if (mir->ssa_rep == NULL) {
+ for (MIR* mir = bb->first_mir_insn; (mir != nullptr); mir = mir->next) {
+ if (mir->ssa_rep == nullptr) {
continue;
}
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
@@ -1459,7 +1448,7 @@ bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
void MIRGraph::VerifyDataflow() {
/* Verify if all blocks are connected as claimed */
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
VerifyPredInfo(bb);
}
}
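The table entries changed above are consumed as a single 64-bit attribute word per opcode. For example, the new DF_SAME_TYPE_AB bit on the IF_cc opcodes gives the type inference pass a constraint it can propagate in both directions (sketch, assuming a MIR* in hand):

    uint64_t df_attributes = GetDataFlowAttributes(mir);
    if ((df_attributes & DF_SAME_TYPE_AB) != 0u) {
      // IF_EQ/IF_NE/...: vA and vB must end up with the same type, whether
      // core, reference or fp, so a type learned for one side also applies
      // to the other.
    }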
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index ca5695893e..e4570fd8d3 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -124,7 +124,7 @@ class MirFieldInfo {
uint16_t declaring_field_idx_;
// The type index of the class declaring the field, 0 if unresolved.
uint16_t declaring_class_idx_;
- // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+ // The dex file that defines the class containing the field and the field, null if unresolved.
const DexFile* declaring_dex_file_;
};
@@ -179,6 +179,7 @@ class MirIFieldLoweringInfo : public MirFieldInfo {
friend class GlobalValueNumberingTest;
friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
+ friend class TypeInferenceTest;
};
class MirSFieldLoweringInfo : public MirFieldInfo {
@@ -254,6 +255,7 @@ class MirSFieldLoweringInfo : public MirFieldInfo {
friend class GlobalValueNumberingTest;
friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
+ friend class TypeInferenceTest;
};
} // namespace art
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 4d340387f2..b5c42f11ac 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -81,15 +81,15 @@ const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
};
MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
- : reg_location_(NULL),
+ : reg_location_(nullptr),
block_id_map_(std::less<unsigned int>(), arena->Adapter()),
cu_(cu),
ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
- vreg_to_ssa_map_(NULL),
- ssa_last_defs_(NULL),
- is_constant_v_(NULL),
- constant_values_(NULL),
+ vreg_to_ssa_map_(nullptr),
+ ssa_last_defs_(nullptr),
+ is_constant_v_(nullptr),
+ constant_values_(nullptr),
use_counts_(arena->Adapter()),
raw_use_counts_(arena->Adapter()),
num_reachable_blocks_(0),
@@ -106,24 +106,24 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
max_nested_loops_(0u),
- i_dom_list_(NULL),
+ i_dom_list_(nullptr),
temp_scoped_alloc_(),
block_list_(arena->Adapter(kArenaAllocBBList)),
- try_block_addr_(NULL),
- entry_block_(NULL),
- exit_block_(NULL),
- current_code_item_(NULL),
+ try_block_addr_(nullptr),
+ entry_block_(nullptr),
+ exit_block_(nullptr),
+ current_code_item_(nullptr),
m_units_(arena->Adapter()),
method_stack_(arena->Adapter()),
current_method_(kInvalidEntry),
current_offset_(kInvalidEntry),
def_count_(0),
- opcode_count_(NULL),
+ opcode_count_(nullptr),
num_ssa_regs_(0),
extended_basic_blocks_(arena->Adapter()),
method_sreg_(0),
attributes_(METHOD_IS_LEAF), // Start with leaf assumption, change on encountering invoke.
- checkstats_(NULL),
+ checkstats_(nullptr),
arena_(arena),
backward_branches_(0),
forward_branches_(0),
@@ -185,13 +185,13 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
DCHECK_GT(code_offset, orig_block->start_offset);
MIR* insn = orig_block->first_mir_insn;
- MIR* prev = NULL; // Will be set to instruction before split.
+ MIR* prev = nullptr; // Will be set to instruction before split.
while (insn) {
if (insn->offset == code_offset) break;
prev = insn;
insn = insn->next;
}
- if (insn == NULL) {
+ if (insn == nullptr) {
LOG(FATAL) << "Break split failed";
}
// Now insn is at the instruction where we want to split, namely
@@ -530,7 +530,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
size = switch_data[1];
first_key = switch_data[2] | (switch_data[3] << 16);
target_table = reinterpret_cast<const int*>(&switch_data[4]);
- keyTable = NULL; // Make the compiler happy.
+ keyTable = nullptr; // Make the compiler happy.
/*
* Sparse switch data format:
* ushort ident = 0x0200 magic value
@@ -695,9 +695,10 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
current_method_ = m_units_.size();
current_offset_ = 0;
// TODO: will need to snapshot stack image and use that as the mir context identification.
- m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
- dex_file, current_code_item_, class_def_idx, method_idx, access_flags,
- cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
+ m_units_.push_back(new (arena_) DexCompilationUnit(
+ cu_, class_loader, Runtime::Current()->GetClassLinker(), dex_file,
+ current_code_item_, class_def_idx, method_idx, access_flags,
+ cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
const uint16_t* code_ptr = current_code_item_->insns_;
const uint16_t* code_end =
current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;
@@ -717,8 +718,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
// If this is the first method, set up default entry and exit blocks.
if (current_method_ == 0) {
- DCHECK(entry_block_ == NULL);
- DCHECK(exit_block_ == NULL);
+ DCHECK(entry_block_ == nullptr);
+ DCHECK(exit_block_ == nullptr);
DCHECK_EQ(GetNumBlocks(), 0U);
// Use id 0 to represent a null block.
BasicBlock* null_block = CreateNewBB(kNullBlock);
@@ -754,7 +755,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
insn->m_unit_index = current_method_;
int width = ParseInsn(code_ptr, &insn->dalvikInsn);
Instruction::Code opcode = insn->dalvikInsn.opcode;
- if (opcode_count_ != NULL) {
+ if (opcode_count_ != nullptr) {
opcode_count_[static_cast<int>(opcode)]++;
}
@@ -878,7 +879,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
}
void MIRGraph::ShowOpcodeStats() {
- DCHECK(opcode_count_ != NULL);
+ DCHECK(opcode_count_ != nullptr);
LOG(INFO) << "Opcode Count";
for (int i = 0; i < kNumPackedOpcodes; i++) {
if (opcode_count_[i] != 0) {
@@ -946,7 +947,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
return;
}
file = fopen(fpath.c_str(), "w");
- if (file == NULL) {
+ if (file == nullptr) {
PLOG(ERROR) << "Could not open " << fpath << " for DumpCFG.";
return;
}
@@ -960,7 +961,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
for (idx = 0; idx < num_blocks; idx++) {
int block_idx = all_blocks ? idx : dfs_order_[idx];
BasicBlock* bb = GetBasicBlock(block_idx);
- if (bb == NULL) continue;
+ if (bb == nullptr) continue;
if (bb->block_type == kDead) continue;
if (bb->hidden) continue;
if (bb->block_type == kEntryBlock) {
@@ -1500,8 +1501,8 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
}
nop = true;
}
- int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
- int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
+ int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
+ int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
// Note that this does not check the MIR's opcode in all cases. In cases where it
@@ -1529,7 +1530,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
for (int i = 0; i < uses; i++) {
str.append(" ");
str.append(GetSSANameWithConst(ssa_rep->uses[i], show_singles));
- if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
+ if (!show_singles && (reg_location_ != nullptr) && reg_location_[i].wide) {
// For the listing, skip the high sreg.
i++;
}
@@ -1622,7 +1623,7 @@ std::string MIRGraph::GetSSAName(int ssa_reg) {
// Similar to GetSSAName, but if ssa name represents an immediate show that as well.
std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
- if (reg_location_ == NULL) {
+ if (reg_location_ == nullptr) {
// Pre-SSA - just use the standard name.
return GetSSAName(ssa_reg);
}
@@ -1715,7 +1716,7 @@ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bo
CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
kArenaAllocMisc));
MIR* move_result_mir = FindMoveResult(bb, mir);
- if (move_result_mir == NULL) {
+ if (move_result_mir == nullptr) {
info->result.location = kLocInvalid;
} else {
info->result = GetRawDest(move_result_mir);
@@ -2293,7 +2294,7 @@ bool MIR::DecodedInstruction::GetConstant(int64_t* ptr_value, bool* wide) const
void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
// Reset flags for all MIRs in bb.
- for (MIR* mir = first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
mir->optimization_flags &= (~reset_flags);
}
}
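The switch-payload comment in ProcessCanSwitch() above describes the two Dalvik formats. Decoding the sparse variant, where a sorted keys array is followed by a parallel targets array, looks roughly like this (standalone sketch, not the MIRGraph code itself):

    #include <cstdint>

    struct SparseSwitchView {
      const int32_t* keys;     // size entries, sorted low to high.
      const int32_t* targets;  // Branch offsets, parallel to keys.
      uint16_t size;
    };

    SparseSwitchView DecodeSparseSwitch(const uint16_t* switch_data) {
      // switch_data[0] holds the 0x0200 magic, switch_data[1] the entry count.
      SparseSwitchView view;
      view.size = switch_data[1];
      view.keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
      view.targets = view.keys + view.size;
      return view;
    }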
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 85b13448da..0db54bf23c 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -39,6 +39,7 @@ class DexFileMethodInliner;
class GlobalValueNumbering;
class GvnDeadCodeElimination;
class PassManager;
+class TypeInference;
// Forward declaration.
class MIRGraph;
@@ -64,6 +65,7 @@ enum DataFlowAttributePos {
kNullTransferSrc0, // Object copy src[0] -> dst.
kNullTransferSrcN, // Phi null check state transfer.
kRangeCheckC, // Range check of C.
+ kCheckCastA, // Check cast of A.
kFPA,
kFPB,
kFPC,
@@ -73,6 +75,7 @@ enum DataFlowAttributePos {
kRefA,
kRefB,
kRefC,
+ kSameTypeAB, // A and B have the same type but it can be core/ref/fp (IF_cc).
kUsesMethodStar, // Implicit use of Method*.
kUsesIField, // Accesses an instance field (IGET/IPUT).
kUsesSField, // Accesses a static field (SGET/SPUT).
@@ -101,6 +104,7 @@ enum DataFlowAttributePos {
#define DF_NULL_TRANSFER_0 (UINT64_C(1) << kNullTransferSrc0)
#define DF_NULL_TRANSFER_N (UINT64_C(1) << kNullTransferSrcN)
#define DF_RANGE_CHK_C (UINT64_C(1) << kRangeCheckC)
+#define DF_CHK_CAST (UINT64_C(1) << kCheckCastA)
#define DF_FP_A (UINT64_C(1) << kFPA)
#define DF_FP_B (UINT64_C(1) << kFPB)
#define DF_FP_C (UINT64_C(1) << kFPC)
@@ -110,6 +114,7 @@ enum DataFlowAttributePos {
#define DF_REF_A (UINT64_C(1) << kRefA)
#define DF_REF_B (UINT64_C(1) << kRefB)
#define DF_REF_C (UINT64_C(1) << kRefC)
+#define DF_SAME_TYPE_AB (UINT64_C(1) << kSameTypeAB)
#define DF_UMS (UINT64_C(1) << kUsesMethodStar)
#define DF_IFIELD (UINT64_C(1) << kUsesIField)
#define DF_SFIELD (UINT64_C(1) << kUsesSField)
@@ -217,13 +222,11 @@ struct BasicBlockDataFlow {
*/
struct SSARepresentation {
int32_t* uses;
- bool* fp_use;
int32_t* defs;
- bool* fp_def;
- int16_t num_uses_allocated;
- int16_t num_defs_allocated;
- int16_t num_uses;
- int16_t num_defs;
+ uint16_t num_uses_allocated;
+ uint16_t num_defs_allocated;
+ uint16_t num_uses;
+ uint16_t num_defs;
static uint32_t GetStartUseIndex(Instruction::Code opcode);
};
@@ -334,7 +337,8 @@ class MIR : public ArenaObject<kArenaAllocMIR> {
// SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
// the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
uint32_t sfield_lowering_info;
- // INVOKE data index, points to MIRGraph::method_lowering_infos_.
+ // INVOKE data index, points to MIRGraph::method_lowering_infos_. Also used for inlined
+ // CONST and MOVE insn (with MIR_CALLEE) to remember the invoke for type inference.
uint32_t method_lowering_info;
} meta;
@@ -598,7 +602,7 @@ class MIRGraph {
BasicBlock* GetBasicBlock(unsigned int block_id) const {
DCHECK_LT(block_id, block_list_.size()); // NOTE: NullBasicBlockId is 0.
- return (block_id == NullBasicBlockId) ? NULL : block_list_[block_id];
+ return (block_id == NullBasicBlockId) ? nullptr : block_list_[block_id];
}
size_t GetBasicBlockListCount() const {
@@ -647,6 +651,10 @@ class MIRGraph {
*/
void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
+ bool HasCheckCast() const {
+ return (merged_df_flags_ & DF_CHK_CAST) != 0u;
+ }
+
bool HasFieldAccess() const {
return (merged_df_flags_ & (DF_IFIELD | DF_SFIELD)) != 0u;
}
@@ -691,8 +699,16 @@ class MIRGraph {
void DoCacheMethodLoweringInfo();
const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
- DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.size());
- return method_lowering_infos_[mir->meta.method_lowering_info];
+ return GetMethodLoweringInfo(mir->meta.method_lowering_info);
+ }
+
+ const MirMethodLoweringInfo& GetMethodLoweringInfo(uint32_t lowering_info) const {
+ DCHECK_LT(lowering_info, method_lowering_infos_.size());
+ return method_lowering_infos_[lowering_info];
+ }
+
+ size_t GetMethodLoweringInfoCount() const {
+ return method_lowering_infos_.size();
}
void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);
@@ -1073,7 +1089,9 @@ class MIRGraph {
bool EliminateNullChecksGate();
bool EliminateNullChecks(BasicBlock* bb);
void EliminateNullChecksEnd();
+ void InferTypesStart();
bool InferTypes(BasicBlock* bb);
+ void InferTypesEnd();
bool EliminateClassInitChecksGate();
bool EliminateClassInitChecks(BasicBlock* bb);
void EliminateClassInitChecksEnd();
@@ -1100,34 +1118,6 @@ class MIRGraph {
return temp_.gvn.sfield_ids[mir->meta.sfield_lowering_info];
}
- /*
- * Type inference handling helpers. Because Dalvik's bytecode is not fully typed,
- * we have to do some work to figure out the sreg type. For some operations it is
- * clear based on the opcode (i.e. ADD_FLOAT v0, v1, v2), but for others (MOVE), we
- * may never know the "real" type.
- *
- * We perform the type inference operation by using an iterative walk over
- * the graph, propagating types "defined" by typed opcodes to uses and defs in
- * non-typed opcodes (such as MOVE). The Setxx(index) helpers are used to set defined
- * types on typed opcodes (such as ADD_INT). The Setxx(index, is_xx) form is used to
- * propagate types through non-typed opcodes such as PHI and MOVE. The is_xx flag
- * tells whether our guess of the type is based on a previously typed definition.
- * If so, the defined type takes precedence. Note that it's possible to have the same sreg
- * show multiple defined types because dx treats constants as untyped bit patterns.
- * The return value of the Setxx() helpers says whether or not the Setxx() action changed
- * the current guess, and is used to know when to terminate the iterative walk.
- */
- bool SetFp(int index, bool is_fp);
- bool SetFp(int index);
- bool SetCore(int index, bool is_core);
- bool SetCore(int index);
- bool SetRef(int index, bool is_ref);
- bool SetRef(int index);
- bool SetWide(int index, bool is_wide);
- bool SetWide(int index);
- bool SetHigh(int index, bool is_high);
- bool SetHigh(int index);
-
bool PuntToInterpreter() {
return punt_to_interpreter_;
}
@@ -1252,7 +1242,6 @@ class MIRGraph {
static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
- bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);
protected:
int FindCommonParent(int block1, int block2);
@@ -1399,6 +1388,7 @@ class MIRGraph {
ArenaBitVector* work_live_vregs;
ArenaBitVector** def_block_matrix; // num_vregs x num_blocks_.
ArenaBitVector** phi_node_blocks; // num_vregs x num_blocks_.
+ TypeInference* ti;
} ssa;
// Global value numbering.
struct {
@@ -1458,6 +1448,7 @@ class MIRGraph {
friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
friend class TopologicalSortOrderTest;
+ friend class TypeInferenceTest;
friend class QuickCFITest;
};
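The block comment deleted above described the old in-graph type inference: iteratively merging type guesses into each sreg until no Setxx() call reports a change. The dedicated pass in compiler/dex/type_inference.cc added by this change replaces it, but the fixed-point idea is the same. A toy version of one such merge helper, using a simplified stand-in for the per-register record:

    struct TypeGuess { bool fp; bool core; bool ref; };  // Simplified stand-in.

    // Merge an "is floating point" guess into a register's record; the return
    // value tells the caller whether another pass over the graph is needed.
    bool SetFp(TypeGuess* guess, bool is_fp) {
      if (is_fp && !guess->fp) {
        guess->fp = true;  // Guess strengthened; keep iterating.
        return true;
      }
      return false;        // No new information; iteration converges once
                           // every merge in a full pass returns false.
    }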
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 7230c462cd..946c74becf 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -88,7 +88,7 @@ class MirMethodInfo {
// The type index of the class declaring the method, 0 if unresolved.
uint16_t declaring_class_idx_;
// The dex file that defines the class containing the method and the method,
- // nullptr if unresolved.
+ // null if unresolved.
const DexFile* declaring_dex_file_;
};
@@ -223,7 +223,7 @@ class MirMethodLoweringInfo : public MirMethodInfo {
uintptr_t direct_code_;
uintptr_t direct_method_;
// Before Resolve(), target_dex_file_ and target_method_idx_ hold the verification-based
- // devirtualized invoke target if available, nullptr and 0u otherwise.
+ // devirtualized invoke target if available, null and 0u otherwise.
// After Resolve() they hold the actual target method that will be called; it will be either
// a devirtualized target method or the compilation's unit's dex file and MethodIndex().
const DexFile* target_dex_file_;
@@ -232,6 +232,7 @@ class MirMethodLoweringInfo : public MirMethodInfo {
int stats_flags_;
friend class MirOptimizationTest;
+ friend class TypeInferenceTest;
};
} // namespace art
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 9d7b4b4dfd..467c14ed55 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -25,6 +25,7 @@
#include "gvn_dead_code_elimination.h"
#include "local_value_numbering.h"
#include "mir_field_info.h"
+#include "type_inference.h"
#include "quick/dex_file_method_inliner.h"
#include "quick/dex_file_to_method_inliner_map.h"
#include "stack.h"
@@ -54,7 +55,7 @@ void MIRGraph::SetConstantWide(int32_t ssa_reg, int64_t value) {
void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
MIR* mir;
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
// Skip pass if BB has MIR without SSA representation.
if (mir->ssa_rep == nullptr) {
return;
@@ -115,11 +116,11 @@ void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
/* Advance to next strictly dominated MIR node in an extended basic block */
MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
BasicBlock* bb = *p_bb;
- if (mir != NULL) {
+ if (mir != nullptr) {
mir = mir->next;
- while (mir == NULL) {
+ while (mir == nullptr) {
bb = GetBasicBlock(bb->fall_through);
- if ((bb == NULL) || Predecessors(bb) != 1) {
+ if ((bb == nullptr) || Predecessors(bb) != 1) {
// mir is null and we cannot proceed further.
break;
} else {
@@ -133,7 +134,7 @@ MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
/*
* To be used at an invoke mir. If the logically next mir node represents
- * a move-result, return it. Else, return NULL. If a move-result exists,
+ * a move-result, return it. Else, return nullptr. If a move-result exists,
* it is required to immediately follow the invoke with no intervening
* opcodes or incoming arcs. However, if the result of the invoke is not
* used, a move-result may not be present.
@@ -141,7 +142,7 @@ MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
BasicBlock* tbb = bb;
mir = AdvanceMIR(&tbb, mir);
- while (mir != NULL) {
+ while (mir != nullptr) {
if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
@@ -151,7 +152,7 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
mir = AdvanceMIR(&tbb, mir);
} else {
- mir = NULL;
+ mir = nullptr;
}
}
return mir;
@@ -159,29 +160,29 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
if (bb->block_type == kDead) {
- return NULL;
+ return nullptr;
}
DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
|| (bb->block_type == kExitBlock));
BasicBlock* bb_taken = GetBasicBlock(bb->taken);
BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
- if (((bb_fall_through == NULL) && (bb_taken != NULL)) &&
+ if (((bb_fall_through == nullptr) && (bb_taken != nullptr)) &&
((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
// Follow simple unconditional branches.
bb = bb_taken;
} else {
// Follow simple fallthrough.
- bb = (bb_taken != NULL) ? NULL : bb_fall_through;
+ bb = (bb_taken != nullptr) ? nullptr : bb_fall_through;
}
- if (bb == NULL || (Predecessors(bb) != 1)) {
- return NULL;
+ if (bb == nullptr || (Predecessors(bb) != 1)) {
+ return nullptr;
}
DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
return bb;
}
static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
if (mir->ssa_rep->uses[i] == ssa_name) {
@@ -190,11 +191,11 @@ static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
}
}
}
- return NULL;
+ return nullptr;
}
static SelectInstructionKind SelectKind(MIR* mir) {
- // Work with the case when mir is nullptr.
+ // Handle the case when mir is null.
if (mir == nullptr) {
return kSelectNone;
}
@@ -255,7 +256,8 @@ size_t MIRGraph::GetNumAvailableVRTemps() {
}
// Calculate remaining ME temps available.
- size_t remaining_me_temps = max_available_non_special_compiler_temps_ - reserved_temps_for_backend_;
+ size_t remaining_me_temps = max_available_non_special_compiler_temps_ -
+ reserved_temps_for_backend_;
if (num_non_special_compiler_temps_ >= remaining_me_temps) {
return 0;
@@ -346,7 +348,8 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
size_t available_temps = GetNumAvailableVRTemps();
if (available_temps <= 0 || (available_temps <= 1 && wide)) {
if (verbose) {
- LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str << " are available.";
+ LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str
+ << " are available.";
}
return nullptr;
}
@@ -364,8 +367,8 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
if (verbose) {
- LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v" << compiler_temp->v_reg
- << " and s" << compiler_temp->s_reg_low << " has been created.";
+ LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v"
+ << compiler_temp->v_reg << " and s" << compiler_temp->s_reg_low << " has been created.";
}
if (wide) {
@@ -477,8 +480,8 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
allocator.get()));
}
- while (bb != NULL) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ while (bb != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
// TUNING: use the returned value number for CSE.
if (use_lvn) {
local_valnum->GetValueNumber(mir);
@@ -537,7 +540,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
// Bitcode doesn't allow this optimization.
break;
}
- if (mir->next != NULL) {
+ if (mir->next != nullptr) {
MIR* mir_next = mir->next;
// Make sure result of cmp is used by next insn and nowhere else
if (IsInstructionIfCcZ(mir_next->dalvikInsn.opcode) &&
@@ -574,7 +577,6 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
// Copy the SSA information that is relevant.
mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
mir_next->ssa_rep->uses = mir->ssa_rep->uses;
- mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
mir_next->ssa_rep->num_defs = 0;
mir->ssa_rep->num_uses = 0;
mir->ssa_rep->num_defs = 0;
@@ -594,12 +596,12 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
BasicBlock* ft = GetBasicBlock(bb->fall_through);
- DCHECK(ft != NULL);
+ DCHECK(ft != nullptr);
BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
BasicBlock* ft_tk = GetBasicBlock(ft->taken);
BasicBlock* tk = GetBasicBlock(bb->taken);
- DCHECK(tk != NULL);
+ DCHECK(tk != nullptr);
BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
BasicBlock* tk_tk = GetBasicBlock(tk->taken);
@@ -608,7 +610,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
* transfers to the rejoin block and the fall_though edge goes to a block that
* unconditionally falls through to the rejoin block.
*/
- if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
+ if ((tk_ft == nullptr) && (ft_tk == nullptr) && (tk_tk == ft_ft) &&
(Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
/*
* Okay - we have the basic diamond shape.
@@ -628,7 +630,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
MIR* if_false = ft->first_mir_insn;
// It's possible that the target of the select isn't used - skip those (rare) cases.
MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
- if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
+ if ((phi != nullptr) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
/*
* We'll convert the IF_EQZ/IF_NEZ to a SELECT. We need to find the
* Phi node in the merge block and delete it (while using the SSA name
@@ -668,16 +670,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
mir->ssa_rep->uses = src_ssa;
mir->ssa_rep->num_uses = 3;
}
- mir->ssa_rep->num_defs = 1;
- mir->ssa_rep->defs = arena_->AllocArray<int32_t>(1, kArenaAllocDFInfo);
- mir->ssa_rep->fp_def = arena_->AllocArray<bool>(1, kArenaAllocDFInfo);
- mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
- // Match type of uses to def.
- mir->ssa_rep->fp_use = arena_->AllocArray<bool>(mir->ssa_rep->num_uses,
- kArenaAllocDFInfo);
- for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
- mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
- }
+ AllocateSSADefData(mir, 1);
/*
* There is usually a Phi node in the join block for our two cases. If the
* Phi node only contains our two cases as input, we will use the result
@@ -721,7 +714,8 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
}
}
}
- bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
+ bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) :
+ nullptr;
}
if (use_lvn && UNLIKELY(!global_valnum->Good())) {
LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
@@ -732,9 +726,9 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
/* Collect stats on number of checks removed */
void MIRGraph::CountChecks(class BasicBlock* bb) {
- if (bb->data_flow_info != NULL) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
- if (mir->ssa_rep == NULL) {
+ if (bb->data_flow_info != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->ssa_rep == nullptr) {
continue;
}
uint64_t df_attributes = GetDataFlowAttributes(mir);
@@ -935,7 +929,7 @@ bool MIRGraph::EliminateNullChecksGate() {
// reset MIR_MARK
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
mir->optimization_flags &= ~MIR_MARK;
}
}
@@ -1010,7 +1004,7 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
// no intervening uses.
// Walk through the instruction in the block, updating as necessary
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
uint64_t df_attributes = GetDataFlowAttributes(mir);
if ((df_attributes & DF_NULL_TRANSFER_N) != 0u) {
@@ -1121,7 +1115,7 @@ void MIRGraph::EliminateNullChecksEnd() {
// converge MIR_MARK with MIR_IGNORE_NULL_CHECK
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
uint16_t mirMarkAdjustedToIgnoreNullCheck =
@@ -1131,23 +1125,26 @@ void MIRGraph::EliminateNullChecksEnd() {
}
}
+void MIRGraph::InferTypesStart() {
+ DCHECK(temp_scoped_alloc_ != nullptr);
+ temp_.ssa.ti = new (temp_scoped_alloc_.get()) TypeInference(this, temp_scoped_alloc_.get());
+}
+
/*
* Perform type and size inference for a basic block.
*/
bool MIRGraph::InferTypes(BasicBlock* bb) {
if (bb->data_flow_info == nullptr) return false;
- bool infer_changed = false;
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
- if (mir->ssa_rep == NULL) {
- continue;
- }
-
- // Propagate type info.
- infer_changed = InferTypeAndSize(bb, mir, infer_changed);
- }
+ DCHECK(temp_.ssa.ti != nullptr);
+ return temp_.ssa.ti->Apply(bb);
+}
- return infer_changed;
+void MIRGraph::InferTypesEnd() {
+ DCHECK(temp_.ssa.ti != nullptr);
+ temp_.ssa.ti->Finish();
+ delete temp_.ssa.ti;
+ temp_.ssa.ti = nullptr;
}
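Taken together, the three methods above give type inference a build/apply/tear-down lifecycle: InferTypesStart() placement-news the analysis on the scoped arena, InferTypes() is called per block by a repeating pass and returns whether anything changed, and InferTypesEnd() writes results back and drops the object. A hedged sketch of the same shape using standard ownership instead of the arena:

    #include <memory>

    // Stand-ins for the ART types, just to show the lifecycle shape.
    struct BasicBlock {};
    struct TypeInference {
      bool Apply(BasicBlock*) { return false; }  // true => types changed
      void Finish() {}                           // write results back
    };

    struct GraphSketch {
      std::unique_ptr<TypeInference> ti;

      void InferTypesStart() { ti.reset(new TypeInference()); }
      // Called once per block per sweep; any 'true' makes the driver
      // run another full sweep over the graph.
      bool InferTypes(BasicBlock* bb) { return ti->Apply(bb); }
      void InferTypesEnd() { ti->Finish(); ti.reset(); }
    };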
bool MIRGraph::EliminateClassInitChecksGate() {
@@ -1509,7 +1506,7 @@ void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
if (bb->block_type != kDalvikByteCode) {
return;
}
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
continue;
}
@@ -1540,7 +1537,8 @@ void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
->GenInline(this, bb, mir, target.dex_method_index)) {
if (cu_->verbose || cu_->print_pass) {
LOG(INFO) << "SpecialMethodInliner: Inlined " << method_info.GetInvokeType() << " ("
- << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index, *target.dex_file)
+ << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index,
+ *target.dex_file)
<< "\" from \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
<< "\" @0x" << std::hex << mir->offset;
}
@@ -1564,7 +1562,7 @@ void MIRGraph::DumpCheckStats() {
static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
checkstats_ = stats;
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
CountChecks(bb);
}
if (stats->null_checks > 0) {
@@ -1597,7 +1595,7 @@ bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
bool terminated_by_return = false;
bool do_local_value_numbering = false;
// Visit blocks strictly dominated by this head.
- while (bb != NULL) {
+ while (bb != nullptr) {
bb->visited = true;
terminated_by_return |= bb->terminated_by_return;
do_local_value_numbering |= bb->use_lvn;
@@ -1606,7 +1604,7 @@ bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
if (terminated_by_return || do_local_value_numbering) {
// Do lvn for all blocks in this extended set.
bb = start_bb;
- while (bb != NULL) {
+ while (bb != nullptr) {
bb->use_lvn = do_local_value_numbering;
bb->dominates_return = terminated_by_return;
bb = NextDominatedBlock(bb);
@@ -1629,7 +1627,7 @@ void MIRGraph::BasicBlockOptimization() {
if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
ClearAllVisitedFlags();
PreOrderDfsIterator iter2(this);
- for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+ for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
BuildExtendedBBList(bb);
}
// Perform extended basic block optimizations.
@@ -1638,7 +1636,7 @@ void MIRGraph::BasicBlockOptimization() {
}
} else {
PreOrderDfsIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
BasicBlockOpt(bb);
}
}
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 671bcecfba..8762b53af4 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -68,7 +68,7 @@ class PassDriver {
* @return whether the pass was applied.
*/
virtual bool RunPass(const char* pass_name) {
- // Paranoid: c_unit cannot be nullptr and we need a pass name.
+ // Paranoid: c_unit cannot be null and we need a pass name.
DCHECK(pass_name != nullptr);
DCHECK_NE(pass_name[0], 0);
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
index 94eef225ee..cbe4a02edb 100644
--- a/compiler/dex/pass_driver_me.h
+++ b/compiler/dex/pass_driver_me.h
@@ -88,7 +88,7 @@ class PassDriverME: public PassDriver {
}
bool RunPass(const Pass* pass, bool time_split) OVERRIDE {
- // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name
+ // Paranoid: c_unit and pass cannot be null, and the pass should have a name.
DCHECK(pass != nullptr);
DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
@@ -211,8 +211,9 @@ class PassDriverME: public PassDriver {
* @param settings_to_fill Filled with the mapping from option name to the new
* configuration.
*/
- static void FillOverriddenPassSettings(const PassManagerOptions* options, const char* pass_name,
- SafeMap<const std::string, const OptionContent>& settings_to_fill) {
+ static void FillOverriddenPassSettings(
+ const PassManagerOptions* options, const char* pass_name,
+ SafeMap<const std::string, const OptionContent>& settings_to_fill) {
const std::string& settings = options->GetOverriddenPassOptions();
const size_t settings_len = settings.size();
diff --git a/compiler/dex/pass_driver_me_post_opt.cc b/compiler/dex/pass_driver_me_post_opt.cc
index a8b8a54033..b35bc3d7d3 100644
--- a/compiler/dex/pass_driver_me_post_opt.cc
+++ b/compiler/dex/pass_driver_me_post_opt.cc
@@ -41,7 +41,7 @@ void PassDriverMEPostOpt::SetupPasses(PassManager* pass_manager) {
pass_manager->AddPass(new SSAConversion);
pass_manager->AddPass(new PhiNodeOperands);
pass_manager->AddPass(new PerformInitRegLocations);
- pass_manager->AddPass(new TypeInference);
+ pass_manager->AddPass(new TypeInferencePass);
pass_manager->AddPass(new FinishSSATransformation);
}
diff --git a/compiler/dex/post_opt_passes.h b/compiler/dex/post_opt_passes.h
index 1ab862503b..e9fa0eb578 100644
--- a/compiler/dex/post_opt_passes.h
+++ b/compiler/dex/post_opt_passes.h
@@ -263,12 +263,19 @@ class PerformInitRegLocations : public PassMEMirSsaRep {
};
/**
- * @class TypeInference
+ * @class TypeInferencePass
* @brief Type inference pass.
*/
-class TypeInference : public PassMEMirSsaRep {
+class TypeInferencePass : public PassMEMirSsaRep {
public:
- TypeInference() : PassMEMirSsaRep("TypeInference", kRepeatingPreOrderDFSTraversal) {
+ TypeInferencePass() : PassMEMirSsaRep("TypeInference", kRepeatingPreOrderDFSTraversal) {
+ }
+
+ void Start(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->InferTypesStart();
}
bool Worker(PassDataHolder* data) const {
@@ -280,6 +287,13 @@ class TypeInference : public PassMEMirSsaRep {
DCHECK(bb != nullptr);
return c_unit->mir_graph->InferTypes(bb);
}
+
+ void End(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->InferTypesEnd();
+ }
};
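For orientation, this is roughly how a repeating pass's hooks compose; the loop below is a hypothetical driver, while the real PassDriverME also handles traversal order, instrumentation, and pass options:

    // Hypothetical driver loop for a kRepeatingPreOrderDFSTraversal pass.
    template <typename Pass, typename DataHolder>
    void RunRepeatingPass(const Pass& pass, DataHolder* data, int num_blocks) {
      pass.Start(data);                  // e.g. InferTypesStart()
      bool changed;
      do {
        changed = false;
        for (int i = 0; i != num_blocks; ++i) {
          // The holder is pointed at block i before each call.
          changed |= pass.Worker(data);  // e.g. InferTypes(bb)
        }
      } while (changed);
      pass.End(data);                    // e.g. InferTypesEnd()
    }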
/**
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index c5ac4c1508..df4a9f2048 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1055,7 +1055,7 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
// new_lir replaces orig_lir in the pcrel_fixup list.
void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
prev_lir->u.a.pcrel_next = new_lir;
@@ -1066,7 +1066,7 @@ void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
// new_lir is inserted before orig_lir in the pcrel_fixup list.
void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -1084,7 +1084,7 @@ void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
uint8_t* const write_buffer = write_pos;
- for (; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = (write_pos - write_buffer);
if (!lir->flags.is_nop) {
int opcode = lir->opcode;
@@ -1258,8 +1258,8 @@ void ArmMir2Lir::AssembleLIR() {
generation ^= 1;
// Note: nodes requiring possible fixup linked in ascending order.
lir = first_fixup_;
- prev_lir = NULL;
- while (lir != NULL) {
+ prev_lir = nullptr;
+ while (lir != nullptr) {
/*
* NOTE: the lir being considered here will be encoded following the switch (so long as
* we're not in a retry situation). However, any new non-pc_rel instructions inserted
@@ -1506,7 +1506,7 @@ void ArmMir2Lir::AssembleLIR() {
case kFixupAdr: {
const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[2]);
LIR* target = lir->target;
- int32_t target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
+ int32_t target_disp = (tab_rec != nullptr) ? tab_rec->offset + offset_adjustment
: target->offset + ((target->flags.generation == lir->flags.generation) ? 0 :
offset_adjustment);
int32_t disp = target_disp - ((lir->offset + 4) & ~3);
@@ -1642,7 +1642,7 @@ size_t ArmMir2Lir::GetInsnSize(LIR* lir) {
uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
LIR* end_lir = tail_lir->next;
- LIR* last_fixup = NULL;
+ LIR* last_fixup = nullptr;
for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
if (!lir->flags.is_nop) {
if (lir->flags.fixup != kFixupNone) {
@@ -1658,8 +1658,8 @@ uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offse
}
// Link into the fixup chain.
lir->flags.use_def_invalid = true;
- lir->u.a.pcrel_next = NULL;
- if (first_fixup_ == NULL) {
+ lir->u.a.pcrel_next = nullptr;
+ if (first_fixup_ == nullptr) {
first_fixup_ = lir;
} else {
last_fixup->u.a.pcrel_next = lir;
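ReplaceFixup() and InsertFixupBefore() above maintain an intrusive singly linked list threaded through the LIR nodes via u.a.pcrel_next, with first_fixup_ as the head; the prev_lir == nullptr branches are the usual head-of-list special case. The same two operations on a generic node type, as a self-contained sketch:

    #include <cassert>

    struct Node { Node* next = nullptr; };  // stands in for LIR

    void Replace(Node** head, Node* prev, Node* old_node, Node* new_node) {
      new_node->next = old_node->next;
      if (prev == nullptr) {  // old_node was the list head
        *head = new_node;
      } else {
        prev->next = new_node;
      }
    }

    void InsertBefore(Node** head, Node* prev, Node* pos, Node* new_node) {
      new_node->next = pos;
      if (prev == nullptr) {  // inserting at the front
        *head = new_node;
      } else {
        assert(prev->next == pos);
        prev->next = new_node;
      }
    }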
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 3d18af6169..6ba4016260 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -124,7 +124,7 @@ void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocati
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
@@ -156,7 +156,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
}
}
Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
@@ -165,12 +165,12 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
MarkPossibleNullPointerException(opt_flags);
// Zero out the read barrier bits.
OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, nullptr);
// r1 is zero except for the rb bits here. Copy the read barrier bits into r2.
OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, nullptr);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -238,7 +238,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
}
}
if (!kUseReadBarrier) {
@@ -252,16 +252,16 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
// Zero out except the read barrier bits.
OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, NULL);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, nullptr);
GenMemBarrier(kAnyStore);
LIR* unlock_success_branch;
if (!kUseReadBarrier) {
Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
- unlock_success_branch = OpUnconditionalBranch(NULL);
+ unlock_success_branch = OpUnconditionalBranch(nullptr);
} else {
NewLIR4(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
- unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, NULL);
+ unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, nullptr);
}
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
slow_unlock_branch->target = slow_path_target;
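The monitor-enter/exit sequences above implement the thin-lock fast path while preserving the read barrier state bits of the lock word: enter succeeds only if the word is zero apart from those bits, and exit stores back just those bits. In portable terms the enter path looks roughly like the sketch below; the mask value and memory orderings are illustrative (LockWord defines the real layout) and the generated code uses ldrex/strex rather than a C++ CAS:

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kRbStateMask = 0x30000000u;  // hypothetical rb bits

    // Returns false when the slow path must take over.
    bool ThinLockEnter(std::atomic<uint32_t>* lock_word, uint32_t thread_id) {
      uint32_t old_word = lock_word->load(std::memory_order_relaxed);
      if ((old_word & ~kRbStateMask) != 0u) {
        return false;  // Not unlocked once the rb bits are masked out.
      }
      // Install our thread id while keeping the read barrier bits.
      uint32_t new_word = thread_id | (old_word & kRbStateMask);
      return lock_word->compare_exchange_strong(old_word, new_word,
                                                std::memory_order_acquire);
    }

Exit is symmetric: compare the word minus its rb bits against the thread id, emit the kAnyStore barrier, then store back only the rb bits.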
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index eb1383fcff..94fc4743a4 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -187,7 +187,8 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, Re
return;
}
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(kQuickF2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
return;
case Instruction::LONG_TO_FLOAT: {
rl_src = LoadValueWide(rl_src, kFPReg);
@@ -217,7 +218,8 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, Re
return;
}
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(kQuickD2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
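The FLOAT_TO_LONG and DOUBLE_TO_LONG cases now pass the result register class explicitly: the kQuickF2l/kQuickD2l helpers return int64_t, which arrives in core registers, so the call site no longer infers the class from the destination location. The added CheckEntrypointTypes<>() calls document this statically; a hedged sketch of that kind of check, with an assumed helper signature:

    #include <cstdint>
    #include <type_traits>

    // Assumed signature for illustration; the runtime defines the real one.
    extern "C" int64_t art_quick_f2l(float value);

    // int64_t return => the result lands in core registers (kCoreReg).
    static_assert(std::is_same<decltype(art_quick_f2l(0.0f)), int64_t>::value,
                  "f2l entrypoint must return int64_t");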
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 47669db979..7598e50977 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -138,10 +138,10 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocatio
RegStorage t_reg = AllocTemp();
LoadConstant(t_reg, -1);
OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
- LIR* branch1 = OpCondBranch(kCondLt, NULL);
- LIR* branch2 = OpCondBranch(kCondGt, NULL);
+ LIR* branch1 = OpCondBranch(kCondLt, nullptr);
+ LIR* branch2 = OpCondBranch(kCondGt, nullptr);
OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
- LIR* branch3 = OpCondBranch(kCondEq, NULL);
+ LIR* branch3 = OpCondBranch(kCondEq, nullptr);
LIR* it = OpIT(kCondHi, "E");
NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
@@ -389,7 +389,7 @@ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_va
* generate the long form in an attempt to avoid an extra assembly pass.
* TODO: consider interspersing slowpaths in code following unconditional branches.
*/
- bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
+ bool skip = ((target != nullptr) && (target->opcode == kPseudoThrowTarget));
skip &= ((mir_graph_->GetNumDalvikInsns() - current_dalvik_offset_) > 64);
if (!skip && reg.Low8() && (check_value == 0)) {
if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
@@ -882,7 +882,7 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
RegLocation rl_new_value;
if (!is_long) {
- rl_new_value = LoadValue(rl_src_new_value, LocToRegClass(rl_src_new_value));
+ rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
} else if (load_early) {
rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
}
@@ -905,7 +905,7 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
RegLocation rl_expected;
if (!is_long) {
- rl_expected = LoadValue(rl_src_expected, LocToRegClass(rl_src_new_value));
+ rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
} else if (load_early) {
rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
} else {
@@ -1159,12 +1159,12 @@ void ArmMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
#ifdef ARM_R4_SUSPEND_FLAG
NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
- return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
+ return OpCondBranch((target == nullptr) ? kCondEq : kCondNe, target);
#else
RegStorage t_reg = AllocTemp();
LoadBaseDisp(rs_rARM_SELF, Thread::ThreadFlagsOffset<4>().Int32Value(),
t_reg, kUnsignedHalf, kNotVolatile);
- LIR* cmp_branch = OpCmpImmBranch((target == NULL) ? kCondNe : kCondEq, t_reg,
+ LIR* cmp_branch = OpCmpImmBranch((target == nullptr) ? kCondNe : kCondEq, t_reg,
0, target);
FreeTemp(t_reg);
return cmp_branch;
@@ -1326,11 +1326,6 @@ void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
}
}
- // Now, restore lr to its non-temp status.
- FreeTemp(tmp1);
- Clobber(rs_rARM_LR);
- UnmarkTemp(rs_rARM_LR);
-
if (reg_status != 0) {
// We had manually allocated registers for rl_result.
// Now construct a RegLocation.
@@ -1338,7 +1333,14 @@ void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
}
+ // Free tmp1 but keep LR as temp for StoreValueWide() if needed.
+ FreeTemp(tmp1);
+
StoreValueWide(rl_dest, rl_result);
+
+ // Now, restore lr to its non-temp status.
+ Clobber(rs_rARM_LR);
+ UnmarkTemp(rs_rARM_LR);
}
void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 25ea6941c0..2ef92f851b 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -90,7 +90,7 @@ LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
}
}
LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&literal_list_, value);
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -411,7 +411,7 @@ LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_s
return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
} else {
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
}
@@ -695,7 +695,7 @@ LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
}
LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
- LIR* res = NULL;
+ LIR* res = nullptr;
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
if (r_dest.IsFloat()) {
@@ -721,10 +721,10 @@ LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
}
}
- if (res == NULL) {
+ if (res == nullptr) {
// No short form - load from the literal pool.
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -814,7 +814,7 @@ LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora
LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
- LIR* store = NULL;
+ LIR* store = nullptr;
ArmOpcode opcode = kThumbBkpt;
bool thumb_form = (all_low_regs && (scale == 0));
RegStorage reg_ptr;
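LoadConstantWide() above falls back to the literal pool when no short-form encoding exists: the 64-bit constant is split into halves, the pool is scanned for an existing entry, and a new entry is appended on a miss. The same logic as a self-contained sketch (illustrative containers, not ART's LIR literal list):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct WideLiteral { int32_t lo; int32_t hi; };

    size_t FindOrAddWide(std::vector<WideLiteral>* pool, int64_t value) {
      int32_t val_lo = static_cast<int32_t>(value);  // Low32Bits
      int32_t val_hi =
          static_cast<int32_t>(static_cast<uint64_t>(value) >> 32);  // High32Bits
      for (size_t i = 0; i != pool->size(); ++i) {  // ScanLiteralPoolWide
        if ((*pool)[i].lo == val_lo && (*pool)[i].hi == val_hi) {
          return i;
        }
      }
      pool->push_back({val_lo, val_hi});  // AddWideData
      return pool->size() - 1;
    }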
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 2f1ae66bfc..b78fb80aa0 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -663,7 +663,7 @@ const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
// new_lir replaces orig_lir in the pcrel_fixup list.
void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
prev_lir->u.a.pcrel_next = new_lir;
@@ -674,7 +674,7 @@ void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
// new_lir is inserted before orig_lir in the pcrel_fixup list.
void Arm64Mir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -889,8 +889,8 @@ void Arm64Mir2Lir::AssembleLIR() {
generation ^= 1;
// Note: nodes requiring possible fixup linked in ascending order.
lir = first_fixup_;
- prev_lir = NULL;
- while (lir != NULL) {
+ prev_lir = nullptr;
+ while (lir != nullptr) {
// NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
// the time of insertion. Note that inserted instructions don't need use/def flags, but do
// need size and pc-rel status properly updated.
@@ -1037,7 +1037,7 @@ void Arm64Mir2Lir::AssembleLIR() {
// Check that the instruction preceding the multiply-accumulate is a load or store.
if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
// Insert a NOP between the load/store and the multiply-accumulate.
- LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, NULL);
+ LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, nullptr);
new_lir->offset = lir->offset;
new_lir->flags.fixup = kFixupNone;
new_lir->flags.size = EncodingMap[kA64Nop0].size;
@@ -1108,7 +1108,7 @@ size_t Arm64Mir2Lir::GetInsnSize(LIR* lir) {
uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
LIR* end_lir = tail_lir->next;
- LIR* last_fixup = NULL;
+ LIR* last_fixup = nullptr;
for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
A64Opcode opcode = UNWIDE(lir->opcode);
if (!lir->flags.is_nop) {
@@ -1123,8 +1123,8 @@ uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t off
}
// Link into the fixup chain.
lir->flags.use_def_invalid = true;
- lir->u.a.pcrel_next = NULL;
- if (first_fixup_ == NULL) {
+ lir->u.a.pcrel_next = nullptr;
+ if (first_fixup_ == nullptr) {
first_fixup_ = lir;
} else {
last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 4abbd77d88..9a7c2ade18 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -127,7 +127,7 @@ void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLoca
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, key_reg, size - 1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
@@ -167,7 +167,7 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
}
}
Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -176,12 +176,12 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
MarkPossibleNullPointerException(opt_flags);
// Zero out the read barrier bits.
OpRegRegImm(kOpAnd, rs_w2, rs_w3, LockWord::kReadBarrierStateMaskShiftedToggled);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, nullptr);
// w3 is zero except for the rb bits here. Copy the read barrier bits into w1.
OpRegRegReg(kOpOr, rs_w1, rs_w1, rs_w3);
OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, nullptr);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
not_unlocked_branch->target = slow_path_target;
@@ -220,7 +220,7 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
}
}
Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -235,16 +235,16 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
OpRegRegImm(kOpAnd, rs_w3, rs_w2, LockWord::kReadBarrierStateMaskShiftedToggled);
// Zero out except the read barrier bits.
OpRegRegImm(kOpAnd, rs_w2, rs_w2, LockWord::kReadBarrierStateMaskShifted);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, NULL);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, nullptr);
GenMemBarrier(kAnyStore);
LIR* unlock_success_branch;
if (!kUseReadBarrier) {
Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
- unlock_success_branch = OpUnconditionalBranch(NULL);
+ unlock_success_branch = OpUnconditionalBranch(nullptr);
} else {
OpRegRegImm(kOpAdd, rs_x3, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR3(kA64Stxr3wrX, rw1, rw2, rx3);
- unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, NULL);
+ unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, nullptr);
}
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index b7dbd0a97d..9340d01640 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -803,7 +803,7 @@ bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
- LIR* early_exit = OpCondBranch(kCondNe, NULL);
+ LIR* early_exit = OpCondBranch(kCondNe, nullptr);
NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index e9ad8ba175..483231f931 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -121,7 +121,7 @@ LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
}
LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
// Wide, as we need 8B alignment.
data_target = AddWideData(&literal_list_, value, 0);
}
@@ -148,7 +148,7 @@ LIR* Arm64Mir2Lir::LoadFPConstantValueWide(RegStorage r_dest, int64_t value) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -525,7 +525,7 @@ LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -624,7 +624,7 @@ LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r
}
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
@@ -658,7 +658,7 @@ LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage
}
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
@@ -1190,7 +1190,7 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
*/
LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) {
- LIR* load = NULL;
+ LIR* load = nullptr;
A64Opcode opcode = kA64Brk1d;
A64Opcode alt_opcode = kA64Brk1d;
int scale = 0;
@@ -1286,7 +1286,7 @@ LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage
LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
- LIR* store = NULL;
+ LIR* store = nullptr;
A64Opcode opcode = kA64Brk1d;
A64Opcode alt_opcode = kA64Brk1d;
int scale = 0;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9f4a318cc2..fb68335e6e 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1080,7 +1080,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
reginfo_map_.reserve(RegStorage::kMaxRegs);
pointer_storage_.reserve(128);
slow_paths_.reserve(32);
- // Reserve pointer id 0 for nullptr.
+ // Reserve pointer id 0 for null.
size_t null_idx = WrapPointer<void>(nullptr);
DCHECK_EQ(null_idx, 0U);
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 4ac6c0c5b5..f5e6c09dba 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -368,9 +368,9 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
#define UNSAFE_GET_PUT(type, code, type_flags) \
INTRINSIC(SunMiscUnsafe, Get ## type, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- type_flags & ~kIntrinsicFlagIsObject), \
+ type_flags), \
INTRINSIC(SunMiscUnsafe, Get ## type ## Volatile, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- (type_flags | kIntrinsicFlagIsVolatile) & ~kIntrinsicFlagIsObject), \
+ type_flags | kIntrinsicFlagIsVolatile), \
INTRINSIC(SunMiscUnsafe, Put ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
type_flags), \
INTRINSIC(SunMiscUnsafe, Put ## type ## Volatile, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
@@ -392,7 +392,7 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
DexFileMethodInliner::DexFileMethodInliner()
: lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
- dex_file_(NULL) {
+ dex_file_(nullptr) {
static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
"bad arraysize for kClassCacheNames");
@@ -507,6 +507,7 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
intrinsic.d.data & kIntrinsicFlagIsObject);
case kIntrinsicUnsafeGet:
return backend->GenInlinedUnsafeGet(info, intrinsic.d.data & kIntrinsicFlagIsLong,
+ intrinsic.d.data & kIntrinsicFlagIsObject,
intrinsic.d.data & kIntrinsicFlagIsVolatile);
case kIntrinsicUnsafePut:
return backend->GenInlinedUnsafePut(info, intrinsic.d.data & kIntrinsicFlagIsLong,
@@ -752,6 +753,7 @@ bool DexFileMethodInliner::GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, M
insn->dalvikInsn.opcode = Instruction::CONST;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = method.d.data;
+ insn->meta.method_lowering_info = invoke->meta.method_lowering_info; // Preserve type info.
bb->InsertMIRAfter(move_result, insn);
return true;
}
@@ -790,6 +792,7 @@ bool DexFileMethodInliner::GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* b
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = arg;
+ insn->meta.method_lowering_info = invoke->meta.method_lowering_info; // Preserve type info.
bb->InsertMIRAfter(move_result, insn);
return true;
}
@@ -912,6 +915,7 @@ bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MI
}
move->dalvikInsn.vA = move_result->dalvikInsn.vA;
move->dalvikInsn.vB = return_reg;
+ move->meta.method_lowering_info = invoke->meta.method_lowering_info; // Preserve type info.
bb->InsertMIRAfter(insn, move);
}
return true;
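The UNSAFE_GET_PUT change above stops stripping kIntrinsicFlagIsObject from the Get variants, so GenInlinedUnsafeGet() now receives is_object and can pick the result register class directly instead of calling LocToRegClass(), which relied on the per-MIR fp/ref arrays this change removes. The selection itself reduces to a flag test; sketched with illustrative enum values:

    #include <cstdint>

    enum RegisterClass { kCoreReg, kRefReg, kFPReg };
    enum : uint32_t {
      kIntrinsicFlagIsLong = 1u,
      kIntrinsicFlagIsObject = 2u,
      kIntrinsicFlagIsVolatile = 4u,
    };

    RegisterClass UnsafeGetRegClass(uint32_t data) {
      return (data & kIntrinsicFlagIsObject) != 0u ? kRefReg : kCoreReg;
    }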
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index b132c4cc54..de5e0410fb 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -87,7 +87,7 @@ void Mir2Lir::GenIfNullUseHelperImmMethod(
const RegStorage r_result_;
};
- LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
@@ -113,10 +113,10 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
}
- // r_base now points at static storage (Class*) or nullptr if the type is not yet resolved.
+ // r_base now points at static storage (Class*) or null if the type is not yet resolved.
LIR* unresolved_branch = nullptr;
if (!field_info.IsClassInDexCache() && (opt_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
- // Check if r_base is nullptr.
+ // Check if r_base is null.
unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
}
LIR* uninit_branch = nullptr;
@@ -136,8 +136,8 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
public:
// There are up to two branches to the static field slow path, the "unresolved" when the type
- // entry in the dex cache is nullptr, and the "uninit" when the class is not yet initialized.
- // At least one will be non-nullptr here, otherwise we wouldn't generate the slow path.
+ // entry in the dex cache is null, and the "uninit" when the class is not yet initialized.
+ // At least one will be non-null here, otherwise we wouldn't generate the slow path.
StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
RegStorage r_base_in, RegStorage r_method_in)
: LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
@@ -165,7 +165,7 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
}
private:
- // Second branch to the slow path, or nullptr if there's only one branch.
+ // Second branch to the slow path, or null if there's only one branch.
LIR* const second_branch_;
const int storage_index_;
@@ -173,7 +173,7 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
RegStorage r_method_;
};
- // The slow path is invoked if the r_base is nullptr or the class pointed
+ // The slow path is invoked if r_base is null or the class pointed
// to by it is not initialized.
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
@@ -319,7 +319,7 @@ LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
- return NULL;
+ return nullptr;
}
return GenNullCheck(m_reg);
}
@@ -1188,7 +1188,7 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
DCHECK(!IsSameReg(result_reg, object.reg));
}
LoadConstant(result_reg, 0); // assume false
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
RegStorage check_class = AllocTypedTemp(false, kRefReg);
RegStorage object_class = AllocTypedTemp(false, kRefReg);
@@ -1287,7 +1287,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
// On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
LoadConstant(rl_result.reg, 0);
}
- LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
+ LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, nullptr);
/* load object->klass_ */
RegStorage ref_class_reg = TargetReg(kArg1, kRef); // kArg1 will hold the Class* of ref.
@@ -1295,7 +1295,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
ref_class_reg, kNotVolatile);
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
- LIR* branchover = NULL;
+ LIR* branchover = nullptr;
if (type_known_final) {
// rl_result == ref == class.
GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
@@ -1320,7 +1320,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
if (!type_known_abstract) {
/* Uses branchovers */
LoadConstant(rl_result.reg, 1); // assume true
- branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
+ branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), nullptr);
}
OpRegCopy(TargetReg(kArg0, kRef), class_reg); // .ne case - arg0 <= class
@@ -2088,7 +2088,7 @@ void Mir2Lir::GenConst(RegLocation rl_dest, int value) {
}
void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
- RegLocation rl_src) {
+ RegLocation rl_src, RegisterClass return_reg_class) {
/*
* Don't optimize the register usage since it calls out to support
* functions
@@ -2097,12 +2097,10 @@ void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_d
FlushAllRegs(); /* Send everything to home location */
CallRuntimeHelperRegLocation(trampoline, rl_src, false);
if (rl_dest.wide) {
- RegLocation rl_result;
- rl_result = GetReturnWide(LocToRegClass(rl_dest));
+ RegLocation rl_result = GetReturnWide(return_reg_class);
StoreValueWide(rl_dest, rl_result);
} else {
- RegLocation rl_result;
- rl_result = GetReturn(LocToRegClass(rl_dest));
+ RegLocation rl_result = GetReturn(return_reg_class);
StoreValue(rl_dest, rl_result);
}
}
@@ -2131,7 +2129,7 @@ void Mir2Lir::GenSuspendTest(int opt_flags) {
}
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
FlushAllRegs();
- LIR* branch = OpTestSuspend(NULL);
+ LIR* branch = OpTestSuspend(nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
} else {
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index db7095dafb..1eb3a5f1b5 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -882,8 +882,6 @@ RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
} else {
res = info->result;
- DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
}
return res;
}
@@ -896,8 +894,6 @@ RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
} else {
res = info->result;
- DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
}
return res;
}
@@ -1338,7 +1334,7 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
}
bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
- bool is_long, bool is_volatile) {
+ bool is_long, bool is_object, bool is_volatile) {
if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
// TODO: add Mips and Mips64 implementations.
return false;
@@ -1351,7 +1347,7 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
- RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
+ RegLocation rl_result = EvalLoc(rl_dest, is_object ? kRefReg : kCoreReg, true);
if (is_long) {
if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
|| cu_->instruction_set == kArm64) {
@@ -1411,7 +1407,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
FreeTemp(rl_temp_offset);
}
} else {
- rl_value = LoadValue(rl_src_value, LocToRegClass(rl_src_value));
+ rl_value = LoadValue(rl_src_value, is_object ? kRefReg : kCoreReg);
if (rl_value.ref) {
StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
} else {
@@ -1499,11 +1495,13 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
FreeCallTemps();
if (info->result.location != kLocInvalid) {
// We have a following MOVE_RESULT - do it now.
+ RegisterClass reg_class =
+ ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]);
if (info->result.wide) {
- RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
+ RegLocation ret_loc = GetReturnWide(reg_class);
StoreValueWide(info->result, ret_loc);
} else {
- RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
+ RegLocation ret_loc = GetReturn(reg_class);
StoreValue(info->result, ret_loc);
}
}
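With the LocToRegClass() assertions gone, the return register class comes from the method's shorty, whose first character encodes the return type. A stand-in sketch of that mapping (hypothetical free function; the real ShortyToRegClass is a Mir2Lir member):

    enum RegisterClass { kCoreReg, kRefReg, kFPReg };

    RegisterClass RegClassFromShorty(char shorty0) {
      switch (shorty0) {
        case 'F':
        case 'D': return kFPReg;    // float/double return
        case 'L': return kRefReg;   // reference return
        default:  return kCoreReg;  // integral, including 'J' (long)
      }
    }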
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 54e5742837..4215e8bc50 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -46,7 +46,7 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
if (rl_src.location == kLocPhysReg) {
OpRegCopy(r_dest, rl_src.reg);
} else if (IsInexpensiveConstant(rl_src)) {
- // On 64-bit targets, will sign extend. Make sure constant reference is always NULL.
+ // On 64-bit targets, will sign extend. Make sure constant reference is always null.
DCHECK(!rl_src.ref || (mir_graph_->ConstantValue(rl_src) == 0));
LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
} else {
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 936ff42c8c..f9b9684284 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -613,7 +613,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
LOG(FATAL) << "Unexpected branch kind " << opcode;
UNREACHABLE();
}
- LIR* hop_target = NULL;
+ LIR* hop_target = nullptr;
if (!unconditional) {
hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
@@ -650,7 +650,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success.
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
if (lir->opcode < 0) {
continue;
}
@@ -668,7 +668,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
* (label2 - label1), where label1 is a standard
* kPseudoTargetLabel and is stored in operands[2].
* If operands[3] is null, then label2 is a kPseudoTargetLabel
- * and is found in lir->target. If operands[3] is non-NULL,
+ * and is found in lir->target. If operands[3] is non-null,
* then it is a Switch/Data table.
*/
int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
@@ -863,7 +863,7 @@ int MipsMir2Lir::AssignInsnOffsets() {
LIR* lir;
int offset = 0;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = offset;
if (LIKELY(lir->opcode >= 0)) {
if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 05570e4bde..39b9cc7056 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -112,7 +112,7 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
// Test loop.
RegStorage r_key = AllocTemp();
LIR* loop_label = NewLIR0(kPseudoTargetLabel);
- LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+ LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, nullptr);
Load32Disp(r_base, 0, r_key);
OpRegImm(kOpAdd, r_base, 8);
OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
@@ -188,7 +188,7 @@ void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLoca
tab_rec->anchor = base_label;
// Bounds check - if < 0 or >= size continue following switch.
- LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
+ LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, nullptr);
// Materialize the table base pointer.
RegStorage r_base = AllocPtrSizeTemp();
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1ca8bb618b..9319c64784 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -68,7 +68,7 @@ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocati
NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
- LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, nullptr);
NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
@@ -128,7 +128,7 @@ LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage sr
break;
default:
LOG(FATAL) << "No support for ConditionCode: " << cond;
- return NULL;
+ return nullptr;
}
if (cmp_zero) {
branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
@@ -278,7 +278,7 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
- LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+ LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, nullptr);
LoadConstant(rs_dest, false_val);
LIR* target_label = NewLIR0(kPseudoTargetLabel);
ne_branchover->target = target_label;
@@ -447,7 +447,7 @@ void MipsMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
// Test suspend flag, return target of taken suspend branch.
LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
- return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
+ return OpCmpImmBranch((target == nullptr) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
}
// Decrement register and branch on condition.
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 8ab542270d..95c61cd4ed 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -566,7 +566,7 @@ LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
/* Load value from base + scaled index. */
LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale, OpSize size) {
- LIR *first = NULL;
+ LIR *first = nullptr;
LIR *res;
MipsOpCode opcode = kMipsNop;
bool is64bit = cu_->target64 && r_dest.Is64Bit();
@@ -640,7 +640,7 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
// Store value at base + scaled index.
LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
- LIR *first = NULL;
+ LIR *first = nullptr;
MipsOpCode opcode = kMipsNop;
RegStorage t_reg = AllocTemp();
@@ -696,8 +696,8 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
* rlp and then restore.
*/
LIR *res;
- LIR *load = NULL;
- LIR *load2 = NULL;
+ LIR *load = nullptr;
+ LIR *load2 = nullptr;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
bool is64bit = false;
@@ -857,8 +857,8 @@ LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r
LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
LIR *res;
- LIR *store = NULL;
- LIR *store2 = NULL;
+ LIR *store = nullptr;
+ LIR *store2 = nullptr;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
bool is64bit = false;
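// Illustrative only: the short_form flag above exists because MIPS I-type
// load/store encodings carry a signed 16-bit immediate, so a displacement
// outside that range forces the multi-instruction long form. IS_SIMM16 is a
// check along these lines:
bool SketchIsSimm16(int32_t displacement) {
  return displacement >= -32768 && displacement <= 32767;
}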
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 961cd4f06b..e9e9161a1c 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -104,19 +104,6 @@ RegisterClass Mir2Lir::ShortyToRegClass(char shorty_type) {
return res;
}
-RegisterClass Mir2Lir::LocToRegClass(RegLocation loc) {
- RegisterClass res;
- if (loc.fp) {
- DCHECK(!loc.ref) << "At most, one of ref/fp may be set";
- res = kFPReg;
- } else if (loc.ref) {
- res = kRefReg;
- } else {
- res = kCoreReg;
- }
- return res;
-}
-
void Mir2Lir::LockArg(size_t in_position) {
RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
@@ -560,25 +547,20 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
GenSuspendTest(opt_flags);
}
- DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
- StoreValue(GetReturn(LocToRegClass(rl_src[0])), rl_src[0]);
+ StoreValue(GetReturn(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
break;
case Instruction::RETURN_WIDE:
if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
GenSuspendTest(opt_flags);
}
- DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
- StoreValueWide(GetReturnWide(LocToRegClass(rl_src[0])), rl_src[0]);
- break;
-
- case Instruction::MOVE_RESULT_WIDE:
- StoreValueWide(rl_dest, GetReturnWide(LocToRegClass(rl_dest)));
+ StoreValueWide(GetReturnWide(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
break;
case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_WIDE:
case Instruction::MOVE_RESULT_OBJECT:
- StoreValue(rl_dest, GetReturn(LocToRegClass(rl_dest)));
+ // Already processed with invoke or filled-new-array.
break;
case Instruction::MOVE:
@@ -1237,7 +1219,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
block_label_list_[block_id].flags.fixup = kFixupLabel;
AppendLIR(&block_label_list_[block_id]);
- LIR* head_lir = NULL;
+ LIR* head_lir = nullptr;
// If this is a catch block, export the start address.
if (bb->catch_entry) {
@@ -1263,7 +1245,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
}
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
ResetRegPool();
if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
ClobberAllTemps();
@@ -1287,7 +1269,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
GenPrintLabel(mir);
// Remember the first LIR for this block.
- if (head_lir == NULL) {
+ if (head_lir == nullptr) {
head_lir = &block_label_list_[bb->id];
// Set the first label as a scheduling barrier.
DCHECK(!head_lir->flags.use_def_invalid);
@@ -1327,7 +1309,7 @@ bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
cu_->NewTimingSplit("SpecialMIR2LIR");
// Find the first DalvikByteCode block.
DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
- BasicBlock*bb = NULL;
+ BasicBlock* bb = nullptr;
for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
if (candidate->block_type == kDalvikByteCode) {
@@ -1335,11 +1317,11 @@ bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
break;
}
}
- if (bb == NULL) {
+ if (bb == nullptr) {
return false;
}
DCHECK_EQ(bb->start_offset, 0);
- DCHECK(bb->first_mir_insn != NULL);
+ DCHECK(bb->first_mir_insn != nullptr);
// Get the first instruction.
MIR* mir = bb->first_mir_insn;
@@ -1361,17 +1343,17 @@ void Mir2Lir::MethodMIR2LIR() {
PreOrderDfsIterator iter(mir_graph_);
BasicBlock* curr_bb = iter.Next();
BasicBlock* next_bb = iter.Next();
- while (curr_bb != NULL) {
+ while (curr_bb != nullptr) {
MethodBlockCodeGen(curr_bb);
// If the fall_through block is no longer laid out consecutively, drop in a branch.
BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
- if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+ if ((curr_bb_fall_through != nullptr) && (curr_bb_fall_through != next_bb)) {
OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
}
curr_bb = next_bb;
do {
next_bb = iter.Next();
- } while ((next_bb != NULL) && (next_bb->block_type == kDead));
+ } while ((next_bb != nullptr) && (next_bb->block_type == kDead));
}
HandleSlowPaths();
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index db59714742..8f08a51e95 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -388,7 +388,7 @@ class Mir2Lir {
LIR* DefEnd() { return def_end_; }
void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
void ResetDefBody() { def_start_ = def_end_ = nullptr; }
- // Find member of aliased set matching storage_used; return nullptr if none.
+ // Find member of aliased set matching storage_used; return null if none.
RegisterInfo* FindMatchingView(uint32_t storage_used) {
RegisterInfo* res = Master();
for (; res != nullptr; res = res->GetAliasChain()) {
@@ -605,7 +605,7 @@ class Mir2Lir {
char* ArenaStrdup(const char* str) {
size_t len = strlen(str) + 1;
char* res = arena_->AllocArray<char>(len, kArenaAllocMisc);
- if (res != NULL) {
+ if (res != nullptr) {
strncpy(res, str, len);
}
return res;
@@ -634,7 +634,6 @@ class Mir2Lir {
}
RegisterClass ShortyToRegClass(char shorty_type);
- RegisterClass LocToRegClass(RegLocation loc);
int ComputeFrameSize();
void Materialize();
virtual CompiledMethod* GetCompiledMethod();
@@ -651,7 +650,7 @@ class Mir2Lir {
void DumpPromotionMap();
void CodegenDump();
LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
- int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+ int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = nullptr);
LIR* NewLIR0(int opcode);
LIR* NewLIR1(int opcode, int dest);
LIR* NewLIR2(int opcode, int dest, int src1);
@@ -846,7 +845,8 @@ class Mir2Lir {
RegLocation rl_src, int lit);
virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2, int flags);
- void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
+ void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
+ RegisterClass return_reg_class);
void GenSuspendTest(int opt_flags);
void GenSuspendTestAndBranch(int opt_flags, LIR* target);
@@ -954,7 +954,7 @@ class Mir2Lir {
virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
bool GenInlinedStringCompareTo(CallInfo* info);
virtual bool GenInlinedCurrentThread(CallInfo* info);
- bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
+ bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_object, bool is_volatile);
bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
bool is_volatile, bool is_ordered);
@@ -1120,8 +1120,8 @@ class Mir2Lir {
* @param base_reg The register holding the base address.
* @param offset The offset from the base.
* @param check_value The immediate to compare to.
- * @param target branch target (or nullptr)
- * @param compare output for getting LIR for comparison (or nullptr)
+ * @param target branch target (or null)
+ * @param compare output for getting LIR for comparison (or null)
* @returns The branch instruction that was generated.
*/
virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
@@ -1854,7 +1854,7 @@ class Mir2Lir {
// to deduplicate the masks.
ResourceMaskCache mask_cache_;
- // Record the MIR that generated a given safepoint (nullptr for prologue safepoints).
+ // Record the MIR that generated a given safepoint (null for prologue safepoints).
ArenaVector<std::pair<LIR*, MIR*>> safepoints_;
// The layout of the cu_->dex_file's dex cache arrays for PC-relative addressing.
@@ -1869,7 +1869,7 @@ class Mir2Lir {
// For architectures that don't have true PC-relative addressing (see pc_rel_temp_
// above) and also have a limited range of offsets for loads, it would be useful to
// know the minimum offset into the dex cache arrays, so we calculate that as well
- // if pc_rel_temp_ isn't nullptr.
+ // if pc_rel_temp_ isn't null.
uint32_t dex_cache_arrays_min_offset_;
dwarf::LazyDebugFrameOpCodeWriter cfi_;
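// Sketch of the intent behind the widened signatures above: with
// LocToRegClass() gone, callers state the type explicitly instead of reading
// it off a RegLocation that type inference may not have settled yet.
// Hypothetical call sites:
//   // Object variant of an unsafe get: result must land in a reference reg.
//   GenInlinedUnsafeGet(info, /* is_long */ false, /* is_object */ true,
//                       /* is_volatile */ false);
//   // float-to-long conversion: the runtime helper returns int64_t, so the
//   // result arrives in core registers whatever the destination's later use.
//   GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);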
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index d276457d01..b3c73557a7 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -76,7 +76,7 @@ class QuickCFITest : public CFITest {
isa_features.reset(InstructionSetFeatures::FromVariant(isa, "default", &error));
CompilerDriver driver(&compiler_options, &verification_results, &method_inliner_map,
Compiler::kQuick, isa, isa_features.get(),
- false, 0, 0, 0, false, false, "", 0, -1, "");
+ false, nullptr, nullptr, nullptr, 0, false, false, "", 0, -1, "");
ClassLinker* linker = nullptr;
CompilationUnit cu(&pool, isa, &driver, linker);
DexFile::CodeItem code_item { 0, 0, 0, 0, 0, 0, { 0 } }; // NOLINT
@@ -100,7 +100,7 @@ class QuickCFITest : public CFITest {
}
}
m2l->AdjustSpillMask();
- m2l->GenEntrySequence(NULL, m2l->LocCReturnRef());
+ m2l->GenEntrySequence(nullptr, m2l->LocCReturnRef());
m2l->GenExitSequence();
m2l->HandleSlowPaths();
m2l->AssembleLIR();
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fc3e687469..39eb117e9c 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -102,7 +102,7 @@ static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
"kDisabledOpts unexpected");
-// Supported shorty types per instruction set. nullptr means that all are available.
+// Supported shorty types per instruction set. null means that all are available.
// Z : boolean
// B : byte
// S : short
@@ -422,7 +422,7 @@ static int kInvokeOpcodes[] = {
Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
};
-// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
+// Unsupported opcodes. null can be used when everything is supported. Size of the lists is
// recorded below.
static const int* kUnsupportedOpcodes[] = {
// 0 = kNone.
@@ -515,7 +515,7 @@ bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_fil
for (unsigned int idx = 0; idx < cu->mir_graph->GetNumBlocks(); idx++) {
BasicBlock* bb = cu->mir_graph->GetBasicBlock(idx);
- if (bb == NULL) continue;
+ if (bb == nullptr) continue;
if (bb->block_type == kDead) continue;
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index e779479780..8ec86fa56c 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -935,7 +935,7 @@ bool Mir2Lir::CheckCorePoolSanity() {
RegStorage my_reg = info->GetReg();
RegStorage partner_reg = info->Partner();
RegisterInfo* partner = GetRegInfo(partner_reg);
- DCHECK(partner != NULL);
+ DCHECK(partner != nullptr);
DCHECK(partner->IsWide());
DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
DCHECK(partner->IsLive());
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index af19f5eaed..eb3335798e 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1633,7 +1633,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
AssemblerStatus res = kSuccess; // Assume success
const bool kVerbosePcFixup = false;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
if (IsPseudoLirOp(lir->opcode)) {
continue;
}
@@ -1646,7 +1646,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
switch (lir->opcode) {
case kX86Jcc8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
int delta = 0;
CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1679,7 +1679,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jcc32: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
CodeOffset target = target_lir->offset;
int delta = target - pc;
@@ -1695,7 +1695,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jecxz8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc;
pc = lir->offset + 2; // opcode + rel8
CodeOffset target = target_lir->offset;
@@ -1706,7 +1706,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jmp8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
int delta = 0;
CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1738,7 +1738,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jmp32: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
CodeOffset target = target_lir->offset;
int delta = target - pc;
@@ -1748,7 +1748,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
default:
if (lir->flags.fixup == kFixupLoad) {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset target = target_lir->offset;
// Handle 64 bit RIP addressing.
if (lir->operands[1] == kRIPReg) {
@@ -1950,7 +1950,7 @@ int X86Mir2Lir::AssignInsnOffsets() {
LIR* lir;
int offset = 0;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = offset;
if (LIKELY(!IsPseudoLirOp(lir->opcode))) {
if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d7a5eb04db..e2364d8548 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -80,7 +80,7 @@ void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocat
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size - 1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
RegStorage addr_for_jump;
if (cu_->target64) {
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index cfe0480c54..8e81746db5 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -309,7 +309,8 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
} else {
- GenConversionCall(kQuickF2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
}
return;
case Instruction::DOUBLE_TO_LONG:
@@ -334,7 +335,8 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
} else {
- GenConversionCall(kQuickD2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
}
return;
default:
@@ -482,13 +484,13 @@ void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
} else {
NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
- LIR* branch = NULL;
+ LIR* branch = nullptr;
if (unordered_gt) {
branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
}
// If the result reg can't be byte accessed, use a jump and move instead of a set.
if (!IsByteRegister(rl_result.reg)) {
- LIR* branch2 = NULL;
+ LIR* branch2 = nullptr;
if (unordered_gt) {
branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
@@ -511,7 +513,7 @@ void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
bool is_double) {
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
- LIR* branch = NULL;
+ LIR* branch = nullptr;
RegLocation rl_src1;
RegLocation rl_src2;
if (is_double) {
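// Rough C equivalent of the compare being lowered above (a sketch of Dalvik
// cmpg/cmpl semantics; unordered_gt corresponds to the gt bias). The
// kX86CondPE branch exists because ucomiss/ucomisd set the parity flag only
// when the comparison is unordered, i.e. when an operand is NaN:
int SketchCmpFloat(float a, float b, bool gt_bias) {
  if (a != a || b != b) {             // unordered: at least one NaN
    return gt_bias ? 1 : -1;
  }
  return (a == b) ? 0 : ((a < b) ? -1 : 1);
}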
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 1043815e10..943bfc0300 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1229,7 +1229,7 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
LockTemp(rs_r0);
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
- RegLocation rl_new_value = LoadValue(rl_src_new_value, LocToRegClass(rl_src_new_value));
+ RegLocation rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
// Mark card for object assuming new value is stored.
@@ -1569,7 +1569,7 @@ LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
} else {
OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
}
- return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
+ return OpCondBranch((target == nullptr) ? kCondNe : kCondEq, target);
}
// Decrement register and branch on condition
@@ -3005,7 +3005,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
// Assume that there is no match.
LoadConstant(result_reg, 0);
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
// We will use this register to compare to memory below.
// References are 32 bit in memory, and 64 bit in registers (in 64 bit mode).
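// What GenInstanceofFinal computes, sketched with hypothetical types: a final
// class has no subclasses, so instanceof reduces to a null check plus one
// exact class-pointer compare, which is the preload-0-and-branch-over pattern
// above.
struct SketchClass;
struct SketchObject { const SketchClass* klass; };
bool SketchInstanceOfFinal(const SketchObject* obj, const SketchClass* klass) {
  return obj != nullptr && obj->klass == klass;
}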
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index a16e242d08..b4603793b4 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1281,7 +1281,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
RegLocation rl_return = GetReturn(kCoreReg);
RegLocation rl_dest = InlineTarget(info);
- // Is the string non-NULL?
+ // Is the string non-null?
LoadValueDirectFixed(rl_obj, rs_rDX);
GenNullCheck(rs_rDX, info->opt_flags);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index efcb9eefb5..61a1becac1 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -578,7 +578,7 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
} else if (pc_rel_base_reg_.Valid() || cu_->target64) {
// We will load the value from the literal area.
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -642,8 +642,8 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_dest, OpSize size) {
- LIR *load = NULL;
- LIR *load2 = NULL;
+ LIR *load = nullptr;
+ LIR *load2 = nullptr;
bool is_array = r_index.Valid();
bool pair = r_dest.IsPair();
bool is64bit = ((size == k64) || (size == kDouble));
@@ -763,7 +763,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
}
}
- // Always return first load generated as this might cause a fault if base is nullptr.
+ // Always return first load generated as this might cause a fault if base is null.
return load;
}
@@ -791,8 +791,8 @@ LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_src, OpSize size,
int opt_flags) {
- LIR *store = NULL;
- LIR *store2 = NULL;
+ LIR *store = nullptr;
+ LIR *store2 = nullptr;
bool is_array = r_index.Valid();
bool pair = r_src.IsPair();
bool is64bit = (size == k64) || (size == kDouble);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 197f66d017..939bf40564 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -26,15 +26,15 @@ namespace art {
void MIRGraph::ClearAllVisitedFlags() {
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
bb->visited = false;
}
}
BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
- if (bb != NULL) {
+ if (bb != nullptr) {
if (bb->visited || bb->hidden) {
- bb = NULL;
+ bb = nullptr;
}
}
return bb;
@@ -42,13 +42,13 @@ BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
- if (res == NULL) {
+ if (res == nullptr) {
res = NeedsVisit(GetBasicBlock(bb->taken));
- if (res == NULL) {
+ if (res == nullptr) {
if (bb->successor_block_list_type != kNotUsed) {
for (SuccessorBlockInfo* sbi : bb->successor_blocks) {
res = NeedsVisit(GetBasicBlock(sbi->block));
- if (res != NULL) {
+ if (res != nullptr) {
break;
}
}
@@ -75,7 +75,7 @@ void MIRGraph::RecordDFSOrders(BasicBlock* block) {
while (!succ.empty()) {
BasicBlock* curr = succ.back();
BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
- if (next_successor != NULL) {
+ if (next_successor != nullptr) {
MarkPreOrder(next_successor);
succ.push_back(next_successor);
continue;
@@ -107,7 +107,7 @@ void MIRGraph::ComputeDFSOrders() {
if (num_reachable_blocks_ != GetNumBlocks()) {
// Kill all unreachable blocks.
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
if (!bb->visited) {
bb->Kill(this);
}
@@ -121,7 +121,7 @@ void MIRGraph::ComputeDFSOrders() {
* register idx is defined in BasicBlock bb.
*/
bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
@@ -149,11 +149,11 @@ void MIRGraph::ComputeDefBlockMatrix() {
}
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
FindLocalLiveIn(bb);
}
AllNodesIterator iter2(this);
- for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+ for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
FillDefBlockMatrix(bb);
}
@@ -247,7 +247,7 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
int num_total_blocks = GetBasicBlockListCount();
- if (bb->dominators == NULL) {
+ if (bb->dominators == nullptr) {
bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
true /* expandable */, kBitMapDominators);
bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
@@ -357,7 +357,7 @@ void MIRGraph::ComputeDominators() {
/* Initialize domination-related data structures */
PreOrderDfsIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
InitializeDominationInfo(bb);
}
@@ -376,7 +376,7 @@ void MIRGraph::ComputeDominators() {
/* Compute the immediate dominators */
RepeatingReversePostOrderDfsIterator iter2(this);
bool change = false;
- for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
+ for (BasicBlock* bb = iter2.Next(false); bb != nullptr; bb = iter2.Next(change)) {
change = ComputeblockIDom(bb);
}
@@ -387,19 +387,19 @@ void MIRGraph::ComputeDominators() {
GetEntryBlock()->i_dom = 0;
PreOrderDfsIterator iter3(this);
- for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
+ for (BasicBlock* bb = iter3.Next(); bb != nullptr; bb = iter3.Next()) {
SetDominators(bb);
}
ReversePostOrderDfsIterator iter4(this);
- for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
+ for (BasicBlock* bb = iter4.Next(); bb != nullptr; bb = iter4.Next()) {
ComputeBlockDominators(bb);
}
// Compute the dominance frontier for each block.
ComputeDomPostOrderTraversal(GetEntryBlock());
PostOrderDOMIterator iter5(this);
- for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
+ for (BasicBlock* bb = iter5.Next(); bb != nullptr; bb = iter5.Next()) {
ComputeDominanceFrontier(bb);
}
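// Sketch of one step of the fixed-point loop driven by iter2 above; this is
// the classic iterative dominator algorithm and the helpers are hypothetical:
bool SketchComputeBlockIDom(BasicBlock* bb) {
  BasicBlockId new_idom = FirstProcessedPred(bb);    // some already-visited pred
  for (BasicBlockId pred_id : bb->predecessors) {
    if (pred_id != new_idom && IsProcessed(pred_id)) {
      new_idom = IntersectIDoms(pred_id, new_idom);  // walk both idom chains up
    }
  }
  bool changed = (bb->i_dom != new_idom);
  bb->i_dom = new_idom;  // repeated RPO passes converge quickly in practice
  return changed;
}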
@@ -434,7 +434,7 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
@@ -466,7 +466,7 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
void MIRGraph::FindPhiNodeBlocks() {
RepeatingPostOrderDfsIterator iter(this);
bool change = false;
- for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+ for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(change)) {
change = ComputeBlockLiveIns(bb);
}
@@ -505,7 +505,7 @@ void MIRGraph::FindPhiNodeBlocks() {
*/
bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
/* Phi nodes are at the beginning of each block */
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
return true;
int ssa_reg = mir->ssa_rep->defs[0];
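// Sketch of where the phi operands come from (pruned SSA): each operand is
// simply the SSA name the virtual register holds on exit from the matching
// predecessor block.
void SketchFillPhiOperands(MIRGraph* graph, BasicBlock* bb, MIR* phi, int v_reg) {
  size_t idx = 0;
  for (BasicBlockId pred_id : bb->predecessors) {
    BasicBlock* pred = graph->GetBasicBlock(pred_id);
    // The name v_reg carries at the predecessor's exit becomes operand idx.
    phi->ssa_rep->uses[idx++] = pred->data_flow_info->vreg_to_ssa_map_exit[v_reg];
  }
}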
diff --git a/compiler/dex/type_inference.cc b/compiler/dex/type_inference.cc
new file mode 100644
index 0000000000..19d591ba41
--- /dev/null
+++ b/compiler/dex/type_inference.cc
@@ -0,0 +1,1067 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "type_inference.h"
+
+#include "base/bit_vector-inl.h"
+#include "compiler_ir.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_flags.h"
+#include "dex_file-inl.h"
+#include "driver/dex_compilation_unit.h"
+#include "mir_field_info.h"
+#include "mir_graph.h"
+#include "mir_method_info.h"
+
+namespace art {
+
+inline TypeInference::Type TypeInference::Type::ArrayType(uint32_t array_depth, Type nested_type) {
+ DCHECK_NE(array_depth, 0u);
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (array_depth << kBitArrayDepthStart) |
+ ((nested_type.raw_bits_ & kMaskWideAndType) << kArrayTypeShift));
+}
+
+inline TypeInference::Type TypeInference::Type::ArrayTypeFromComponent(Type component_type) {
+ if (component_type.ArrayDepth() == 0u) {
+ return ArrayType(1u, component_type);
+ }
+ if (UNLIKELY(component_type.ArrayDepth() == kMaxArrayDepth)) {
+ return component_type;
+ }
+ return Type(component_type.raw_bits_ + (1u << kBitArrayDepthStart)); // array_depth + 1u;
+}
+
+TypeInference::Type TypeInference::Type::ShortyType(char shorty) {
+ switch (shorty) {
+ case 'L':
+ return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
+ case 'D':
+ return Type(kFlagLowWord | kFlagWide | kFlagFp);
+ case 'J':
+ return Type(kFlagLowWord | kFlagWide | kFlagCore);
+ case 'F':
+ return Type(kFlagLowWord | kFlagNarrow | kFlagFp);
+ default:
+ DCHECK(shorty == 'I' || shorty == 'S' || shorty == 'C' || shorty == 'B' || shorty == 'Z');
+ return Type(kFlagLowWord | kFlagNarrow | kFlagCore);
+ }
+}
+
+TypeInference::Type TypeInference::Type::DexType(const DexFile* dex_file, uint32_t type_idx) {
+ const char* desc = dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_idx));
+ if (UNLIKELY(desc[0] == 'V')) {
+ return Unknown();
+ } else if (UNLIKELY(desc[0] == '[')) {
+ size_t array_depth = 0u;
+ while (*desc == '[') {
+ ++array_depth;
+ ++desc;
+ }
+ if (UNLIKELY(array_depth > kMaxArrayDepth)) {
+ LOG(WARNING) << "Array depth exceeds " << kMaxArrayDepth << ": " << array_depth
+ << " in dex file " << dex_file->GetLocation() << " type index " << type_idx;
+ array_depth = kMaxArrayDepth;
+ }
+ Type shorty_result = Type::ShortyType(desc[0]);
+ return ArrayType(array_depth, shorty_result);
+ } else {
+ return ShortyType(desc[0]);
+ }
+}
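// Worked sketch of the descriptor scan above (the saturation at kMaxArrayDepth
// is omitted): "[[I" strips two '[' to get depth 2 with component 'I', which
// ShortyType() classifies as a narrow core value.
std::pair<size_t, char> SketchParseDescriptor(const char* desc) {
  size_t depth = 0;
  while (*desc == '[') {
    ++depth;
    ++desc;
  }
  return {depth, *desc};  // component shorty char; 'L' for references
}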
+
+bool TypeInference::Type::MergeArrayConflict(Type src_type) {
+ DCHECK(Ref());
+ DCHECK_NE(ArrayDepth(), src_type.ArrayDepth());
+ DCHECK_GE(std::min(ArrayDepth(), src_type.ArrayDepth()), 1u);
+ bool size_conflict =
+ (ArrayDepth() == 1u && (raw_bits_ & kFlagArrayWide) != 0u) ||
+ (src_type.ArrayDepth() == 1u && (src_type.raw_bits_ & kFlagArrayWide) != 0u);
+ // Mark all three array type bits so that merging any other type bits will not change this type.
+ return Copy(Type((raw_bits_ & kMaskNonArray) |
+ (1u << kBitArrayDepthStart) | kFlagArrayCore | kFlagArrayRef | kFlagArrayFp |
+ kFlagArrayNarrow | (size_conflict ? kFlagArrayWide : 0u)));
+}
+
+bool TypeInference::Type::MergeStrong(Type src_type) {
+ bool changed = MergeNonArrayFlags(src_type);
+ if (src_type.ArrayDepth() != 0u) {
+ if (ArrayDepth() == 0u) {
+ DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
+ DCHECK_NE(src_type.raw_bits_ & kFlagRef, 0u);
+ raw_bits_ |= src_type.raw_bits_ & (~kMaskNonArray | kFlagRef);
+ changed = true;
+ } else if (ArrayDepth() == src_type.ArrayDepth()) {
+ changed |= MergeBits(src_type, kMaskArrayWideAndType);
+ } else if (src_type.ArrayDepth() == 1u &&
+ (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // Source type is [L or [? but current type is at least [[, preserve it.
+ } else if (ArrayDepth() == 1u &&
+ (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // Overwrite [? or [L with the source array type which is at least [[.
+ raw_bits_ = (raw_bits_ & kMaskNonArray) | (src_type.raw_bits_ & ~kMaskNonArray);
+ changed = true;
+ } else {
+ // Mark the array value type with conflict - both ref and fp.
+ changed |= MergeArrayConflict(src_type);
+ }
+ }
+ return changed;
+}
+
+bool TypeInference::Type::MergeWeak(Type src_type) {
+ bool changed = MergeNonArrayFlags(src_type);
+ if (src_type.ArrayDepth() != 0u && src_type.NonNull()) {
+ DCHECK_NE(src_type.ArrayDepth(), 0u);
+ if (ArrayDepth() == 0u) {
+ DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
+ // Preserve current type.
+ } else if (ArrayDepth() == src_type.ArrayDepth()) {
+ changed |= MergeBits(src_type, kMaskArrayWideAndType);
+ } else if (src_type.ArrayDepth() == 1u &&
+ (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // Source type is [L or [? but current type is at least [[, preserve it.
+ } else if (ArrayDepth() == 1u &&
+ (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // We have [? or [L. If it's [?, upgrade to [L as the source array type is at least [[.
+ changed |= MergeBits(ObjectArrayType(), kMaskArrayWideAndType);
+ } else {
+ // Mark the array value type with conflict - both ref and fp.
+ changed |= MergeArrayConflict(src_type);
+ }
+ }
+ return changed;
+}
+
+TypeInference::CheckCastData::CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
+ : mir_graph_(mir_graph),
+ alloc_(alloc),
+ num_blocks_(mir_graph->GetNumBlocks()),
+ num_sregs_(mir_graph->GetNumSSARegs()),
+ check_cast_map_(std::less<MIR*>(), alloc->Adapter()),
+ split_sreg_data_(std::less<int32_t>(), alloc->Adapter()) {
+}
+
+void TypeInference::CheckCastData::AddCheckCast(MIR* check_cast, Type type) {
+ DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
+ type.CheckPureRef();
+ int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
+ num_sregs_ += 1;
+ check_cast_map_.Put(check_cast, CheckCastMapValue{extra_s_reg, type}); // NOLINT
+ int32_t s_reg = check_cast->ssa_rep->uses[0];
+ auto lb = split_sreg_data_.lower_bound(s_reg);
+ if (lb == split_sreg_data_.end() || split_sreg_data_.key_comp()(s_reg, lb->first)) {
+ SplitSRegData split_s_reg_data = {
+ 0,
+ alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
+ alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
+ new (alloc_) ArenaBitVector(alloc_, num_blocks_, false)
+ };
+ std::fill_n(split_s_reg_data.starting_mod_s_reg, num_blocks_, INVALID_SREG);
+ std::fill_n(split_s_reg_data.ending_mod_s_reg, num_blocks_, INVALID_SREG);
+ split_s_reg_data.def_phi_blocks_->ClearAllBits();
+ BasicBlock* def_bb = FindDefBlock(check_cast);
+ split_s_reg_data.ending_mod_s_reg[def_bb->id] = s_reg;
+ split_s_reg_data.def_phi_blocks_->SetBit(def_bb->id);
+ lb = split_sreg_data_.PutBefore(lb, s_reg, split_s_reg_data);
+ }
+ lb->second.ending_mod_s_reg[check_cast->bb] = extra_s_reg;
+ lb->second.def_phi_blocks_->SetBit(check_cast->bb);
+}
+
+void TypeInference::CheckCastData::AddPseudoPhis() {
+ // Look for pseudo-phis where a split SSA reg merges with a differently typed version
+ // and initialize all starting_mod_s_reg.
+ DCHECK(!split_sreg_data_.empty());
+ ArenaBitVector* phi_blocks = new (alloc_) ArenaBitVector(alloc_, num_blocks_, false);
+
+ for (auto& entry : split_sreg_data_) {
+ SplitSRegData& data = entry.second;
+
+ // Find pseudo-phi nodes.
+ phi_blocks->ClearAllBits();
+ ArenaBitVector* input_blocks = data.def_phi_blocks_;
+ do {
+ for (uint32_t idx : input_blocks->Indexes()) {
+ BasicBlock* def_bb = mir_graph_->GetBasicBlock(idx);
+ if (def_bb->dom_frontier != nullptr) {
+ phi_blocks->Union(def_bb->dom_frontier);
+ }
+ }
+ } while (input_blocks->Union(phi_blocks));
+
+ // Find live pseudo-phis. Make sure they're merging the same SSA reg.
+ data.def_phi_blocks_->ClearAllBits();
+ int32_t s_reg = entry.first;
+ int v_reg = mir_graph_->SRegToVReg(s_reg);
+ for (uint32_t phi_bb_id : phi_blocks->Indexes()) {
+ BasicBlock* phi_bb = mir_graph_->GetBasicBlock(phi_bb_id);
+ DCHECK(phi_bb != nullptr);
+ DCHECK(phi_bb->data_flow_info != nullptr);
+ DCHECK(phi_bb->data_flow_info->live_in_v != nullptr);
+ if (IsSRegLiveAtStart(phi_bb, v_reg, s_reg)) {
+ int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
+ num_sregs_ += 1;
+ data.starting_mod_s_reg[phi_bb_id] = extra_s_reg;
+ data.def_phi_blocks_->SetBit(phi_bb_id);
+ }
+ }
+
+ // SSA rename for s_reg.
+ TopologicalSortIterator iter(mir_graph_);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ if (bb->data_flow_info == nullptr || bb->block_type == kEntryBlock) {
+ continue;
+ }
+ BasicBlockId bb_id = bb->id;
+ if (data.def_phi_blocks_->IsBitSet(bb_id)) {
+ DCHECK_NE(data.starting_mod_s_reg[bb_id], INVALID_SREG);
+ } else {
+ DCHECK_EQ(data.starting_mod_s_reg[bb_id], INVALID_SREG);
+ if (IsSRegLiveAtStart(bb, v_reg, s_reg)) {
+ // The earliest predecessor must have been processed already.
+ BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
+ int32_t mod_s_reg = data.ending_mod_s_reg[pred_bb->id];
+ data.starting_mod_s_reg[bb_id] = (mod_s_reg != INVALID_SREG) ? mod_s_reg : s_reg;
+ } else if (data.ending_mod_s_reg[bb_id] != INVALID_SREG) {
+ // Start the original defining block with s_reg.
+ data.starting_mod_s_reg[bb_id] = s_reg;
+ }
+ }
+ if (data.ending_mod_s_reg[bb_id] == INVALID_SREG) {
+ // If the block doesn't define the modified SSA reg, it propagates the starting type.
+ data.ending_mod_s_reg[bb_id] = data.starting_mod_s_reg[bb_id];
+ }
+ }
+ }
+}
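// Why pseudo-phis, as a sketch: a check-cast narrows the type of an existing
// SSA reg without a real definition, so from that point on the reg is tracked
// as a "modified" version. Wherever that version can merge back with the
// original (the iterated dominance frontier of the defs, pruned by liveness),
// a pseudo-phi records the merge so types can later flow both ways, e.g.:
//   v0 = ...             // s_reg A, type unknown
//   if (cond) {
//     check-cast v0, T   // introduces modified s_reg A' of type T
//   }
//   use v0               // pseudo-phi here merges A and A'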
+
+void TypeInference::CheckCastData::InitializeCheckCastSRegs(Type* sregs) const {
+ for (const auto& entry : check_cast_map_) {
+ DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
+ sregs[entry.second.modified_s_reg] = entry.second.type.AsNonNull();
+ }
+}
+
+void TypeInference::CheckCastData::MergeCheckCastConflicts(Type* sregs) const {
+ for (const auto& entry : check_cast_map_) {
+ DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
+ sregs[entry.first->ssa_rep->uses[0]].MergeNonArrayFlags(
+ sregs[entry.second.modified_s_reg].AsNull());
+ }
+}
+
+void TypeInference::CheckCastData::MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const {
+ for (auto& entry : split_sreg_data_) {
+ for (uint32_t bb_id : entry.second.def_phi_blocks_->Indexes()) {
+ bb_df_attrs[bb_id] |= DF_NULL_TRANSFER_N;
+ }
+ }
+}
+
+void TypeInference::CheckCastData::Start(BasicBlock* bb) {
+ for (auto& entry : split_sreg_data_) {
+ entry.second.current_mod_s_reg = entry.second.starting_mod_s_reg[bb->id];
+ }
+}
+
+bool TypeInference::CheckCastData::ProcessPseudoPhis(BasicBlock* bb, Type* sregs) {
+ bool changed = false;
+ for (auto& entry : split_sreg_data_) {
+ DCHECK_EQ(entry.second.current_mod_s_reg, entry.second.starting_mod_s_reg[bb->id]);
+ if (entry.second.def_phi_blocks_->IsBitSet(bb->id)) {
+ int32_t* ending_mod_s_reg = entry.second.ending_mod_s_reg;
+ Type merged_type = sregs[entry.second.current_mod_s_reg];
+ for (BasicBlockId pred_id : bb->predecessors) {
+ DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
+ merged_type.MergeWeak(sregs[ending_mod_s_reg[pred_id]]);
+ }
+ if (UNLIKELY(!merged_type.IsDefined())) {
+ // This can happen during an initial merge of a loop head if the original def is
+ // actually an untyped null. (All other definitions are typed using the check-cast.)
+ } else if (merged_type.Wide()) {
+ // Ignore the pseudo-phi, just remember that there's a size mismatch.
+ sregs[entry.second.current_mod_s_reg].MarkSizeConflict();
+ } else {
+ DCHECK(merged_type.Narrow() && merged_type.LowWord() && !merged_type.HighWord());
+ // Propagate both down (fully) and up (without the "non-null" flag).
+ changed |= sregs[entry.second.current_mod_s_reg].Copy(merged_type);
+ merged_type = merged_type.AsNull();
+ for (BasicBlockId pred_id : bb->predecessors) {
+ DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
+ sregs[ending_mod_s_reg[pred_id]].MergeStrong(merged_type);
+ }
+ }
+ }
+ }
+ return changed;
+}
+
+void TypeInference::CheckCastData::ProcessCheckCast(MIR* mir) {
+ auto mir_it = check_cast_map_.find(mir);
+ DCHECK(mir_it != check_cast_map_.end());
+ auto sreg_it = split_sreg_data_.find(mir->ssa_rep->uses[0]);
+ DCHECK(sreg_it != split_sreg_data_.end());
+ sreg_it->second.current_mod_s_reg = mir_it->second.modified_s_reg;
+}
+
+TypeInference::SplitSRegData* TypeInference::CheckCastData::GetSplitSRegData(int32_t s_reg) {
+ auto it = split_sreg_data_.find(s_reg);
+ return (it == split_sreg_data_.end()) ? nullptr : &it->second;
+}
+
+BasicBlock* TypeInference::CheckCastData::FindDefBlock(MIR* check_cast) {
+ // Find the initial definition of the SSA reg used by the check-cast.
+ DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
+ int32_t s_reg = check_cast->ssa_rep->uses[0];
+ if (mir_graph_->IsInVReg(s_reg)) {
+ return mir_graph_->GetEntryBlock();
+ }
+ int v_reg = mir_graph_->SRegToVReg(s_reg);
+ BasicBlock* bb = mir_graph_->GetBasicBlock(check_cast->bb);
+ DCHECK(bb != nullptr);
+ while (true) {
+ // Find the earliest predecessor in the topological sort order to ensure we don't
+ // go in a loop.
+ BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
+ // The s_reg was not valid at the end of pred_bb, so it must have been defined in bb.
+ return bb;
+ }
+ bb = pred_bb;
+ }
+}
+
+BasicBlock* TypeInference::CheckCastData::FindTopologicallyEarliestPredecessor(BasicBlock* bb) {
+ DCHECK(!bb->predecessors.empty());
+ const auto& indexes = mir_graph_->GetTopologicalSortOrderIndexes();
+ DCHECK_LT(bb->id, indexes.size());
+ size_t best_idx = indexes[bb->id];
+ BasicBlockId best_id = NullBasicBlockId;
+ for (BasicBlockId pred_id : bb->predecessors) {
+ DCHECK_LT(pred_id, indexes.size());
+ if (best_idx > indexes[pred_id]) {
+ best_idx = indexes[pred_id];
+ best_id = pred_id;
+ }
+ }
+ // There must be at least one predecessor earlier than the bb.
+ DCHECK_LT(best_idx, indexes[bb->id]);
+ return mir_graph_->GetBasicBlock(best_id);
+}
+
+bool TypeInference::CheckCastData::IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg) {
+ DCHECK_EQ(v_reg, mir_graph_->SRegToVReg(s_reg));
+ DCHECK(bb != nullptr);
+ DCHECK(bb->data_flow_info != nullptr);
+ DCHECK(bb->data_flow_info->live_in_v != nullptr);
+ if (!bb->data_flow_info->live_in_v->IsBitSet(v_reg)) {
+ return false;
+ }
+ for (BasicBlockId pred_id : bb->predecessors) {
+ BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
+ return false;
+ }
+ }
+ return true;
+}
+
+TypeInference::TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
+ : mir_graph_(mir_graph),
+ cu_(mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()),
+ check_cast_data_(!mir_graph->HasCheckCast() ? nullptr :
+ InitializeCheckCastData(mir_graph, alloc)),
+ num_sregs_(
+ check_cast_data_ != nullptr ? check_cast_data_->NumSRegs() : mir_graph->GetNumSSARegs()),
+ ifields_(mir_graph->GetIFieldLoweringInfoCount() == 0u ? nullptr :
+ PrepareIFieldTypes(cu_->dex_file, mir_graph, alloc)),
+ sfields_(mir_graph->GetSFieldLoweringInfoCount() == 0u ? nullptr :
+ PrepareSFieldTypes(cu_->dex_file, mir_graph, alloc)),
+ signatures_(mir_graph->GetMethodLoweringInfoCount() == 0u ? nullptr :
+ PrepareSignatures(cu_->dex_file, mir_graph, alloc)),
+ current_method_signature_(
+ Signature(cu_->dex_file, cu_->method_idx, (cu_->access_flags & kAccStatic) != 0, alloc)),
+ sregs_(alloc->AllocArray<Type>(num_sregs_, kArenaAllocMisc)),
+ bb_df_attrs_(alloc->AllocArray<uint64_t>(mir_graph->GetNumBlocks(), kArenaAllocDFInfo)) {
+ InitializeSRegs();
+}
+
+bool TypeInference::Apply(BasicBlock* bb) {
+ bool changed = false;
+ uint64_t bb_df_attrs = bb_df_attrs_[bb->id];
+ if (bb_df_attrs != 0u) {
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->Start(bb);
+ if (bb_df_attrs & DF_NULL_TRANSFER_N) {
+ changed |= check_cast_data_->ProcessPseudoPhis(bb, sregs_);
+ }
+ }
+ MIR* mir = bb->first_mir_insn;
+ MIR* main_mirs_end = ((bb_df_attrs & DF_SAME_TYPE_AB) != 0u) ? bb->last_mir_insn : nullptr;
+ for (; mir != main_mirs_end && static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi;
+ mir = mir->next) {
+ // Special-case handling for Phi comes first because we have 2 Phis instead of a wide one.
+ // At least one input must have been previously processed. Look for the first
+ // occurrence of a high_word or low_word flag to determine the type.
+ size_t num_uses = mir->ssa_rep->num_uses;
+ const int32_t* uses = mir->ssa_rep->uses;
+ const int32_t* defs = mir->ssa_rep->defs;
+ DCHECK_EQ(bb->predecessors.size(), num_uses);
+ Type merged_type = sregs_[defs[0]];
+ for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
+ int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
+ merged_type.MergeWeak(sregs_[input_mod_s_reg]);
+ }
+ if (UNLIKELY(!merged_type.IsDefined())) {
+ // No change
+ } else if (merged_type.HighWord()) {
+ // Ignore the high word phi, just remember if there's a size mismatch.
+ if (UNLIKELY(merged_type.LowWord())) {
+ sregs_[defs[0]].MarkSizeConflict();
+ }
+ } else {
+ // Propagate both down (fully) and up (without the "non-null" flag).
+ changed |= sregs_[defs[0]].Copy(merged_type);
+ merged_type = merged_type.AsNull();
+ for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
+ int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
+ changed |= UpdateSRegFromLowWordType(input_mod_s_reg, merged_type);
+ }
+ }
+ }
+
+ // Propagate types with MOVEs and AGETs, process CHECK_CASTs for modified SSA reg tracking.
+ for (; mir != main_mirs_end; mir = mir->next) {
+ uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
+ size_t num_uses = mir->ssa_rep->num_uses;
+ const int32_t* uses = mir->ssa_rep->uses;
+ const int32_t* defs = mir->ssa_rep->defs;
+
+ // Special handling for moves. Propagate type both ways.
+ if ((attrs & DF_IS_MOVE) != 0) {
+ int32_t used_mod_s_reg = ModifiedSReg(uses[0]);
+ int32_t defd_mod_s_reg = defs[0];
+
+ // The "non-null" flag is propagated only downwards from actual definitions and it's
+ // not initially marked for moves, so used sreg must be marked before defined sreg.
+ // The only exception is an inlined move where we know the type from the original invoke.
+ DCHECK(sregs_[used_mod_s_reg].NonNull() || !sregs_[defd_mod_s_reg].NonNull() ||
+ (mir->optimization_flags & MIR_CALLEE) != 0);
+ changed |= UpdateSRegFromLowWordType(used_mod_s_reg, sregs_[defd_mod_s_reg].AsNull());
+
+ // The value is the same, so either both registers are null or no register is.
+ // In any case we can safely propagate the array type down.
+ changed |= UpdateSRegFromLowWordType(defd_mod_s_reg, sregs_[used_mod_s_reg]);
+ if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[used_mod_s_reg].Ref())) {
+ // Mark type conflict: move instead of move-object.
+ sregs_[used_mod_s_reg].MarkTypeConflict();
+ }
+ continue;
+ }
+
+ // Handle AGET/APUT.
+ if ((attrs & DF_HAS_RANGE_CHKS) != 0) {
+ int32_t base_mod_s_reg = ModifiedSReg(uses[num_uses - 2u]);
+ int32_t mod_s_reg = (attrs & DF_DA) != 0 ? defs[0] : ModifiedSReg(uses[0]);
+ DCHECK_NE(sregs_[base_mod_s_reg].ArrayDepth(), 0u);
+ if (!sregs_[base_mod_s_reg].NonNull()) {
+ // If the base is null, don't propagate anything. All that we could determine
+ // has already been merged in the previous stage.
+ } else {
+ changed |= UpdateSRegFromLowWordType(mod_s_reg, sregs_[base_mod_s_reg].ComponentType());
+ Type array_type = Type::ArrayTypeFromComponent(sregs_[mod_s_reg]);
+ if ((attrs & DF_DA) != 0) {
+ changed |= sregs_[base_mod_s_reg].MergeStrong(array_type);
+ } else {
+ changed |= sregs_[base_mod_s_reg].MergeWeak(array_type);
+ }
+ }
+ if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[mod_s_reg].Ref())) {
+ // Mark type conflict: aget/aput instead of aget/aput-object.
+ sregs_[mod_s_reg].MarkTypeConflict();
+ }
+ continue;
+ }
+
+ // Special-case handling for check-cast to advance modified SSA reg.
+ if (UNLIKELY((attrs & DF_CHK_CAST) != 0)) {
+ DCHECK(check_cast_data_ != nullptr);
+ check_cast_data_->ProcessCheckCast(mir);
+ }
+ }
+
+ // Propagate types for IF_cc if present.
+ if (mir != nullptr) {
+ DCHECK(mir == bb->last_mir_insn);
+ DCHECK(mir->next == nullptr);
+ DCHECK_NE(MIRGraph::GetDataFlowAttributes(mir) & DF_SAME_TYPE_AB, 0u);
+ DCHECK_EQ(mir->ssa_rep->num_uses, 2u);
+ const int32_t* uses = mir->ssa_rep->uses;
+ int32_t mod_s_reg0 = ModifiedSReg(uses[0]);
+ int32_t mod_s_reg1 = ModifiedSReg(uses[1]);
+ changed |= sregs_[mod_s_reg0].MergeWeak(sregs_[mod_s_reg1].AsNull());
+ changed |= sregs_[mod_s_reg1].MergeWeak(sregs_[mod_s_reg0].AsNull());
+ }
+ }
+ return changed;
+}
+
+void TypeInference::Finish() {
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->MergeCheckCastConflicts(sregs_);
+ }
+
+ size_t num_sregs = mir_graph_->GetNumSSARegs(); // Without the extra SSA regs.
+ for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
+ if (sregs_[s_reg].SizeConflict()) {
+ /*
+ * The dex bytecode definition does not explicitly outlaw using the same virtual
+ * register in both a 32-bit and a 64-bit pair context. However, dx
+ * does not generate this pattern (at least recently). Further, in the next revision of
+ * dex, we will forbid this. To support the few cases in the wild, detect this pattern
+ * and punt to the interpreter.
+ */
+ LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+ << " has size conflict block for sreg " << s_reg
+ << ", punting to interpreter.";
+ mir_graph_->SetPuntToInterpreter(true);
+ return;
+ }
+ }
+
+ size_t conflict_s_reg = 0;
+ bool type_conflict = false;
+ for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
+ Type type = sregs_[s_reg];
+ RegLocation* loc = &mir_graph_->reg_location_[s_reg];
+ loc->wide = type.Wide();
+ loc->defined = type.IsDefined();
+ loc->fp = type.Fp();
+ loc->core = type.Core();
+ loc->ref = type.Ref();
+ loc->high_word = type.HighWord();
+ if (UNLIKELY(type.TypeConflict())) {
+ type_conflict = true;
+ conflict_s_reg = s_reg;
+ }
+ }
+
+ if (type_conflict) {
+ /*
+ * We don't normally expect to see a Dalvik register definition used both as a
+ * floating point and core value, though technically it could happen with constants.
+ * Until we have proper typing, detect this situation and disable register promotion
+ * (which relies on the distinction between core and fp usages).
+ */
+ LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+ << " has type conflict block for sreg " << conflict_s_reg
+ << ", disabling register promotion.";
+ cu_->disable_opt |= (1 << kPromoteRegs);
+ }
+}
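// Sketch of the contract Finish() establishes for the backend: every SSA reg's
// RegLocation now carries settled wide/defined/fp/core/ref/high_word flags; a
// 32/64-bit size conflict punts the whole method to the interpreter, while a
// core-vs-fp type conflict merely disables register promotion.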
+
+TypeInference::Type TypeInference::FieldType(const DexFile* dex_file, uint32_t field_idx) {
+ uint32_t type_idx = dex_file->GetFieldId(field_idx).type_idx_;
+ Type result = Type::DexType(dex_file, type_idx);
+ return result;
+}
+
+TypeInference::Type* TypeInference::PrepareIFieldTypes(const DexFile* dex_file,
+ MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ size_t count = mir_graph->GetIFieldLoweringInfoCount();
+ Type* ifields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
+ for (uint32_t i = 0u; i != count; ++i) {
+ // NOTE: Quickened field accesses have invalid FieldIndex() but they are always resolved.
+ const MirFieldInfo& info = mir_graph->GetIFieldLoweringInfo(i);
+ const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
+ uint32_t field_idx = info.IsResolved() ? info.DeclaringFieldIndex() : info.FieldIndex();
+ ifields[i] = FieldType(current_dex_file, field_idx);
+ DCHECK_EQ(info.MemAccessType() == kDexMemAccessWide, ifields[i].Wide());
+ DCHECK_EQ(info.MemAccessType() == kDexMemAccessObject, ifields[i].Ref());
+ }
+ return ifields;
+}
+
+TypeInference::Type* TypeInference::PrepareSFieldTypes(const DexFile* dex_file,
+ MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ size_t count = mir_graph->GetSFieldLoweringInfoCount();
+ Type* sfields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
+ for (uint32_t i = 0u; i != count; ++i) {
+ // FieldIndex() is always valid for static fields (no quickened instructions).
+ sfields[i] = FieldType(dex_file, mir_graph->GetSFieldLoweringInfo(i).FieldIndex());
+ }
+ return sfields;
+}
+
+TypeInference::MethodSignature TypeInference::Signature(const DexFile* dex_file,
+ uint32_t method_idx,
+ bool is_static,
+ ScopedArenaAllocator* alloc) {
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
+ const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+ Type return_type = Type::DexType(dex_file, proto_id.return_type_idx_);
+ const DexFile::TypeList* type_list = dex_file->GetProtoParameters(proto_id);
+ size_t this_size = (is_static ? 0u : 1u);
+ size_t param_size = ((type_list != nullptr) ? type_list->Size() : 0u);
+ size_t size = this_size + param_size;
+ Type* param_types = (size != 0u) ? alloc->AllocArray<Type>(size, kArenaAllocDFInfo) : nullptr;
+ if (!is_static) {
+ param_types[0] = Type::DexType(dex_file, method_id.class_idx_);
+ }
+ for (size_t i = 0; i != param_size; ++i) {
+ uint32_t type_idx = type_list->GetTypeItem(i).type_idx_;
+ param_types[this_size + i] = Type::DexType(dex_file, type_idx);
+ }
+ return MethodSignature{ return_type, size, param_types }; // NOLINT
+}
+
+TypeInference::MethodSignature* TypeInference::PrepareSignatures(const DexFile* dex_file,
+ MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ size_t count = mir_graph->GetMethodLoweringInfoCount();
+ MethodSignature* signatures = alloc->AllocArray<MethodSignature>(count, kArenaAllocDFInfo);
+ for (uint32_t i = 0u; i != count; ++i) {
+ // NOTE: Quickened invokes have invalid MethodIndex() but they are always resolved.
+ const MirMethodInfo& info = mir_graph->GetMethodLoweringInfo(i);
+ uint32_t method_idx = info.IsResolved() ? info.DeclaringMethodIndex() : info.MethodIndex();
+ const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
+ signatures[i] = Signature(current_dex_file, method_idx, info.IsStatic(), alloc);
+ }
+ return signatures;
+}
+
+TypeInference::CheckCastData* TypeInference::InitializeCheckCastData(MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ if (!mir_graph->HasCheckCast()) {
+ return nullptr;
+ }
+
+ CheckCastData* data = nullptr;
+ const DexFile* dex_file = nullptr;
+ PreOrderDfsIterator iter(mir_graph);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->dalvikInsn.opcode == Instruction::CHECK_CAST) {
+ if (data == nullptr) {
+ data = new (alloc) CheckCastData(mir_graph, alloc);
+ dex_file = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()->dex_file;
+ }
+ Type type = Type::DexType(dex_file, mir->dalvikInsn.vB);
+ data->AddCheckCast(mir, type);
+ }
+ }
+ }
+ if (data != nullptr) {
+ data->AddPseudoPhis();
+ }
+ return data;
+}
+
+void TypeInference::InitializeSRegs() {
+ std::fill_n(sregs_, num_sregs_, Type::Unknown());
+
+ // Treat ArtMethod* as a normal reference.
+ sregs_[mir_graph_->GetMethodSReg()] = Type::NonArrayRefType();
+
+ // Initialize parameter SSA regs at method entry.
+ int32_t entry_param_s_reg = mir_graph_->GetFirstInVR();
+ for (size_t i = 0, size = current_method_signature_.num_params; i != size; ++i) {
+ Type param_type = current_method_signature_.param_types[i].AsNonNull();
+ sregs_[entry_param_s_reg] = param_type;
+ entry_param_s_reg += param_type.Wide() ? 2 : 1;
+ }
+ DCHECK_EQ(static_cast<uint32_t>(entry_param_s_reg),
+ mir_graph_->GetFirstInVR() + mir_graph_->GetNumOfInVRs());
+
+ // Initialize check-cast types.
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->InitializeCheckCastSRegs(sregs_);
+ }
+
+ // Initialize well-known SSA register definition types. Merge inferred types
+ // upwards where a single merge is enough (INVOKE arguments and return type,
+ // RETURN type, IPUT/SPUT source type).
+ // NOTE: Using topological sort order to make sure the definition comes before
+ // any upward merging. This allows simple assignment of the defined types
+ // instead of MergeStrong().
+ TopologicalSortIterator iter(mir_graph_);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ uint64_t bb_df_attrs = 0u;
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->Start(bb);
+ }
+ // Ignore pseudo-phis; we're not setting types for SSA regs that depend on them in this pass.
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
+ bb_df_attrs |= attrs;
+
+ const uint32_t num_uses = mir->ssa_rep->num_uses;
+ const int32_t* uses = mir->ssa_rep->uses;
+ const int32_t* defs = mir->ssa_rep->defs;
+
+ uint16_t opcode = mir->dalvikInsn.opcode;
+ switch (opcode) {
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ case Instruction::CONST:
+ case Instruction::CONST_HIGH16:
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32:
+ case Instruction::CONST_WIDE:
+ case Instruction::CONST_WIDE_HIGH16:
+ case Instruction::MOVE:
+ case Instruction::MOVE_FROM16:
+ case Instruction::MOVE_16:
+ case Instruction::MOVE_WIDE:
+ case Instruction::MOVE_WIDE_FROM16:
+ case Instruction::MOVE_WIDE_16:
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_OBJECT_FROM16:
+ case Instruction::MOVE_OBJECT_16:
+ if ((mir->optimization_flags & MIR_CALLEE) != 0) {
+ // Inlined const/move keeps method_lowering_info for type inference.
+ DCHECK_LT(mir->meta.method_lowering_info, mir_graph_->GetMethodLoweringInfoCount());
+ Type return_type = signatures_[mir->meta.method_lowering_info].return_type;
+ DCHECK(return_type.IsDefined()); // Method return type can't be void.
+ sregs_[defs[0]] = return_type.AsNonNull();
+ if (return_type.Wide()) {
+ DCHECK_EQ(defs[0] + 1, defs[1]);
+ sregs_[defs[1]] = return_type.ToHighWord();
+ }
+ break;
+ }
+ FALLTHROUGH_INTENDED;
+ case kMirOpPhi:
+ // These cannot be determined in this simple pass and will be processed later.
+ break;
+
+ case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_WIDE:
+ case Instruction::MOVE_RESULT_OBJECT:
+ // Nothing to do, handled with invoke-* or filled-new-array/-range.
+ break;
+ case Instruction::MOVE_EXCEPTION:
+ // NOTE: We can never catch an array.
+ sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+ break;
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO:
+ sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+ break;
+ case Instruction::CONST_CLASS:
+ sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+ break;
+ case Instruction::CHECK_CAST:
+ DCHECK(check_cast_data_ != nullptr);
+ check_cast_data_->ProcessCheckCast(mir);
+ break;
+ case Instruction::ARRAY_LENGTH:
+ sregs_[ModifiedSReg(uses[0])].MergeStrong(Type::UnknownArrayType());
+ break;
+ case Instruction::NEW_INSTANCE:
+ sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB).AsNonNull();
+ DCHECK(sregs_[defs[0]].Ref());
+ DCHECK_EQ(sregs_[defs[0]].ArrayDepth(), 0u);
+ break;
+ case Instruction::NEW_ARRAY:
+ sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vC).AsNonNull();
+ DCHECK(sregs_[defs[0]].Ref());
+ DCHECK_NE(sregs_[defs[0]].ArrayDepth(), 0u);
+ break;
+ case Instruction::FILLED_NEW_ARRAY:
+ case Instruction::FILLED_NEW_ARRAY_RANGE: {
+ Type array_type = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB);
+ array_type.CheckPureRef(); // Previously checked by the method verifier.
+ DCHECK_NE(array_type.ArrayDepth(), 0u);
+ Type component_type = array_type.ComponentType();
+ DCHECK(!component_type.Wide());
+ MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
+ if (move_result_mir != nullptr) {
+ DCHECK_EQ(move_result_mir->dalvikInsn.opcode, Instruction::MOVE_RESULT_OBJECT);
+ sregs_[move_result_mir->ssa_rep->defs[0]] = array_type.AsNonNull();
+ }
+ DCHECK_EQ(num_uses, mir->dalvikInsn.vA);
+ for (size_t next = 0u; next != num_uses; ++next) {
+ int32_t input_mod_s_reg = ModifiedSReg(uses[next]);
+ sregs_[input_mod_s_reg].MergeStrong(component_type);
+ }
+ break;
+ }
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ case Instruction::INVOKE_STATIC_RANGE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_VIRTUAL_QUICK:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+ const MethodSignature* signature = &signatures_[mir->meta.method_lowering_info];
+ MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
+ if (move_result_mir != nullptr) {
+ Type return_type = signature->return_type;
+ sregs_[move_result_mir->ssa_rep->defs[0]] = return_type.AsNonNull();
+ if (return_type.Wide()) {
+ DCHECK_EQ(move_result_mir->ssa_rep->defs[0] + 1, move_result_mir->ssa_rep->defs[1]);
+ sregs_[move_result_mir->ssa_rep->defs[1]] = return_type.ToHighWord();
+ }
+ }
+ size_t next = 0u;
+ for (size_t i = 0, size = signature->num_params; i != size; ++i) {
+ Type param_type = signature->param_types[i];
+ int32_t param_s_reg = ModifiedSReg(uses[next]);
+ DCHECK(!param_type.Wide() || uses[next] + 1 == uses[next + 1]);
+ UpdateSRegFromLowWordType(param_s_reg, param_type);
+ next += param_type.Wide() ? 2 : 1;
+ }
+ DCHECK_EQ(next, num_uses);
+ DCHECK_EQ(next, mir->dalvikInsn.vA);
+ break;
+ }
+
+ case Instruction::RETURN_WIDE:
+ DCHECK(current_method_signature_.return_type.Wide());
+ DCHECK_EQ(uses[0] + 1, uses[1]);
+ DCHECK_EQ(ModifiedSReg(uses[0]), uses[0]);
+ FALLTHROUGH_INTENDED;
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT: {
+ int32_t mod_s_reg = ModifiedSReg(uses[0]);
+ UpdateSRegFromLowWordType(mod_s_reg, current_method_signature_.return_type);
+ break;
+ }
+
+ // NOTE: For AGET/APUT we set only the array type. The operand type is set
+ // below based on the data flow attributes.
+ case Instruction::AGET:
+ case Instruction::APUT:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowArrayType());
+ break;
+ case Instruction::AGET_WIDE:
+ case Instruction::APUT_WIDE:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::WideArrayType());
+ break;
+ case Instruction::AGET_OBJECT:
+ sregs_[defs[0]] = sregs_[defs[0]].AsNonNull();
+ FALLTHROUGH_INTENDED;
+ case Instruction::APUT_OBJECT:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::ObjectArrayType());
+ break;
+ case Instruction::AGET_BOOLEAN:
+ case Instruction::APUT_BOOLEAN:
+ case Instruction::AGET_BYTE:
+ case Instruction::APUT_BYTE:
+ case Instruction::AGET_CHAR:
+ case Instruction::APUT_CHAR:
+ case Instruction::AGET_SHORT:
+ case Instruction::APUT_SHORT:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowCoreArrayType());
+ break;
+
+ case Instruction::IGET_WIDE:
+ case Instruction::IGET_WIDE_QUICK:
+ DCHECK_EQ(defs[0] + 1, defs[1]);
+ DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+ sregs_[defs[1]] = ifields_[mir->meta.ifield_lowering_info].ToHighWord();
+ FALLTHROUGH_INTENDED;
+ case Instruction::IGET:
+ case Instruction::IGET_OBJECT:
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ case Instruction::IGET_CHAR:
+ case Instruction::IGET_SHORT:
+ case Instruction::IGET_QUICK:
+ case Instruction::IGET_OBJECT_QUICK:
+ case Instruction::IGET_BOOLEAN_QUICK:
+ case Instruction::IGET_BYTE_QUICK:
+ case Instruction::IGET_CHAR_QUICK:
+ case Instruction::IGET_SHORT_QUICK:
+ DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+ sregs_[defs[0]] = ifields_[mir->meta.ifield_lowering_info].AsNonNull();
+ break;
+ case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_WIDE_QUICK:
+ DCHECK_EQ(uses[0] + 1, uses[1]);
+ FALLTHROUGH_INTENDED;
+ case Instruction::IPUT:
+ case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_SHORT:
+ case Instruction::IPUT_QUICK:
+ case Instruction::IPUT_OBJECT_QUICK:
+ case Instruction::IPUT_BOOLEAN_QUICK:
+ case Instruction::IPUT_BYTE_QUICK:
+ case Instruction::IPUT_CHAR_QUICK:
+ case Instruction::IPUT_SHORT_QUICK:
+ DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+ UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
+ ifields_[mir->meta.ifield_lowering_info]);
+ break;
+ case Instruction::SGET_WIDE:
+ DCHECK_EQ(defs[0] + 1, defs[1]);
+ DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+ sregs_[defs[1]] = sfields_[mir->meta.sfield_lowering_info].ToHighWord();
+ FALLTHROUGH_INTENDED;
+ case Instruction::SGET:
+ case Instruction::SGET_OBJECT:
+ case Instruction::SGET_BOOLEAN:
+ case Instruction::SGET_BYTE:
+ case Instruction::SGET_CHAR:
+ case Instruction::SGET_SHORT:
+ DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+ sregs_[defs[0]] = sfields_[mir->meta.sfield_lowering_info].AsNonNull();
+ break;
+ case Instruction::SPUT_WIDE:
+ DCHECK_EQ(uses[0] + 1, uses[1]);
+ FALLTHROUGH_INTENDED;
+ case Instruction::SPUT:
+ case Instruction::SPUT_OBJECT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT:
+ DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+ UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
+ sfields_[mir->meta.sfield_lowering_info]);
+ break;
+
+ default:
+ // No invokes or reference definitions here.
+ DCHECK_EQ(attrs & (DF_FORMAT_35C | DF_FORMAT_3RC), 0u);
+ DCHECK_NE(attrs & (DF_DA | DF_REF_A), (DF_DA | DF_REF_A));
+ break;
+ }
+
+ if ((attrs & DF_NULL_TRANSFER_N) != 0) {
+ // Don't process Phis at this stage.
+ continue;
+ }
+
+ // Handle defs
+ if (attrs & DF_DA) {
+ int32_t s_reg = defs[0];
+ sregs_[s_reg].SetLowWord();
+ if (attrs & DF_FP_A) {
+ sregs_[s_reg].SetFp();
+ }
+ if (attrs & DF_CORE_A) {
+ sregs_[s_reg].SetCore();
+ }
+ if (attrs & DF_REF_A) {
+ sregs_[s_reg].SetRef();
+ }
+ if (attrs & DF_A_WIDE) {
+ sregs_[s_reg].SetWide();
+ DCHECK_EQ(s_reg + 1, ModifiedSReg(defs[1]));
+ sregs_[s_reg + 1].MergeHighWord(sregs_[s_reg]);
+ } else {
+ sregs_[s_reg].SetNarrow();
+ }
+ }
+
+ // Handle uses
+ size_t next = 0;
+ #define PROCESS(REG) \
+ if (attrs & DF_U##REG) { \
+ int32_t mod_s_reg = ModifiedSReg(uses[next]); \
+ sregs_[mod_s_reg].SetLowWord(); \
+ if (attrs & DF_FP_##REG) { \
+ sregs_[mod_s_reg].SetFp(); \
+ } \
+ if (attrs & DF_CORE_##REG) { \
+ sregs_[mod_s_reg].SetCore(); \
+ } \
+ if (attrs & DF_REF_##REG) { \
+ sregs_[mod_s_reg].SetRef(); \
+ } \
+ if (attrs & DF_##REG##_WIDE) { \
+ sregs_[mod_s_reg].SetWide(); \
+ DCHECK_EQ(mod_s_reg + 1, ModifiedSReg(uses[next + 1])); \
+ sregs_[mod_s_reg + 1].SetWide(); \
+ sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]); \
+ next += 2; \
+ } else { \
+ sregs_[mod_s_reg].SetNarrow(); \
+ next++; \
+ } \
+ }
+ PROCESS(A)
+ PROCESS(B)
+ PROCESS(C)
+ #undef PROCESS
+ DCHECK(next == mir->ssa_rep->num_uses || (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0);
+ }
+ // Record relevant attributes.
+ bb_df_attrs_[bb->id] = bb_df_attrs &
+ (DF_NULL_TRANSFER_N | DF_CHK_CAST | DF_IS_MOVE | DF_HAS_RANGE_CHKS | DF_SAME_TYPE_AB);
+ }
+
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->MarkPseudoPhiBlocks(bb_df_attrs_);
+ }
+}
+
+int32_t TypeInference::ModifiedSReg(int32_t s_reg) {
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
+ if (UNLIKELY(split_data != nullptr)) {
+ DCHECK_NE(split_data->current_mod_s_reg, INVALID_SREG);
+ return split_data->current_mod_s_reg;
+ }
+ }
+ return s_reg;
+}
+
+int32_t TypeInference::PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx) {
+ DCHECK_LT(pred_idx, bb->predecessors.size());
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
+ if (UNLIKELY(split_data != nullptr)) {
+ return split_data->ending_mod_s_reg[bb->predecessors[pred_idx]];
+ }
+ }
+ return s_reg;
+}
+
+bool TypeInference::UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type) {
+ DCHECK(low_word_type.LowWord());
+ bool changed = sregs_[mod_s_reg].MergeStrong(low_word_type);
+ if (!sregs_[mod_s_reg].Narrow()) { // Wide without conflict with narrow.
+ DCHECK(!low_word_type.Narrow());
+ DCHECK_LT(mod_s_reg, mir_graph_->GetNumSSARegs()); // Original SSA reg.
+ changed |= sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]);
+ }
+ return changed;
+}
+
+} // namespace art
diff --git a/compiler/dex/type_inference.h b/compiler/dex/type_inference.h
new file mode 100644
index 0000000000..c9b29bf7aa
--- /dev/null
+++ b/compiler/dex/type_inference.h
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_TYPE_INFERENCE_H_
+#define ART_COMPILER_DEX_TYPE_INFERENCE_H_
+
+#include "base/logging.h"
+#include "base/arena_object.h"
+#include "base/scoped_arena_containers.h"
+
+namespace art {
+
+class ArenaBitVector;
+class BasicBlock;
+struct CompilationUnit;
+class DexFile;
+class MirFieldInfo;
+class MirMethodInfo;
+class MIR;
+class MIRGraph;
+
+/**
+ * @brief Determine the type of SSA registers.
+ *
+ * @details
+ * Because Dalvik's bytecode is not fully typed, we have to do some work to figure
+ * out the sreg type. For some operations it is clear based on the opcode (i.e.
+ * ADD_FLOAT v0, v1, v2), but for others (MOVE), we may never know the "real" type.
+ *
+ * We perform the type inference operation in two phases:
+ * 1. First, we make one pass over all insns in the topological sort order and
+ * extract known type information from all insns for their defs and uses.
+ * 2. Then we repeatedly go through the graph to process insns that can propagate
+ * types from inputs to outputs and vice versa. These insns are just the MOVEs,
+ * AGET/APUTs, IF_ccs and Phis (including pseudo-Phis, see below).
+ *
+ * Since the main purpose is to determine the basic FP/core/reference type, we don't
+ * need to record the precise reference type; we only record the array type to determine
+ * the result types of agets and the source types of aputs.
+ *
+ * One complication is the check-cast instruction that effectively defines a new
+ * virtual register that has a different type than the original sreg. We need to
+ * track these virtual sregs and insert pseudo-phis where they merge.
+ *
+ * Another problem is with null references. The same zero constant can be used
+ * as a differently typed null and moved around with move-object, which would normally
+ * be an ill-formed assignment. So we need to keep track of values that can be null
+ * and values that cannot.
+ *
+ * Note that the same sreg can end up with multiple defined types because dx
+ * treats constants as untyped bit patterns. We disable register promotion in that case.
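+ *
+ * For example (hypothetical bytecode), dx may emit
+ *   const v0, #0
+ *   add-int/2addr v1, v0
+ *   add-float/2addr v2, v0
+ * where the single definition of v0 is used as both core and fp, so its sreg ends up
+ * with both type bits set.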
+ */
+class TypeInference : public DeletableArenaObject<kArenaAllocMisc> {
+ public:
+ TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
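+  // Apply() is meant to be called for each basic block (repeating over the graph until
+  // no call reports a change); Finish() then resolves remaining conflicts and writes
+  // the inferred types back to the MIRGraph's reg_location_.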
+ bool Apply(BasicBlock* bb);
+ void Finish();
+
+ private:
+ struct Type {
+ static Type Unknown() {
+ return Type(0u);
+ }
+
+ static Type NonArrayRefType() {
+ return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
+ }
+
+ static Type ObjectArrayType() {
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayRef);
+ }
+
+ static Type WideArrayType() {
+ // Core or FP unknown.
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayWide);
+ }
+
+ static Type NarrowArrayType() {
+ // Core or FP unknown.
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayNarrow);
+ }
+
+ static Type NarrowCoreArrayType() {
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayCore);
+ }
+
+ static Type UnknownArrayType() {
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (1u << kBitArrayDepthStart));
+ }
+
+ static Type ArrayType(uint32_t array_depth, Type nested_type);
+ static Type ArrayTypeFromComponent(Type component_type);
+ static Type ShortyType(char shorty);
+ static Type DexType(const DexFile* dex_file, uint32_t type_idx);
+
+ bool IsDefined() const {
+ return raw_bits_ != 0u;
+ }
+
+ bool SizeConflict() const {
+ // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
+ return (Wide() && Narrow()) || (HighWord() && LowWord());
+ }
+
+ bool TypeConflict() const {
+ // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
+ return (raw_bits_ & kMaskType) != 0u && !IsPowerOfTwo(raw_bits_ & kMaskType); // 2+ bits.
+ }
+
+ void MarkSizeConflict() {
+ SetBits(kFlagLowWord | kFlagHighWord);
+ }
+
+ void MarkTypeConflict() {
+ // Mark all three type bits so that merging any other type bits will not change this type.
+ SetBits(kFlagFp | kFlagCore | kFlagRef);
+ }
+
+ void CheckPureRef() const {
+ DCHECK_EQ(raw_bits_ & (kMaskWideAndType | kMaskWord), kFlagNarrow | kFlagRef | kFlagLowWord);
+ }
+
+ // If set on a reference, don't treat it as a possible null and require a precise type.
+ //
+ // References without this flag are allowed to have a type conflict and their
+ // type will not be propagated down. However, for simplicity we allow propagation
+ // of other flags up as it will affect only other null references; should those
+ // references be marked non-null later, we would have to do it anyway.
+ // NOTE: This is a negative "non-null" flag rather than a positive "is-null"
+ // to simplify merging together with other non-array flags.
+ bool NonNull() const {
+ return IsBitSet(kFlagNonNull);
+ }
+
+ bool Wide() const {
+ return IsBitSet(kFlagWide);
+ }
+
+ bool Narrow() const {
+ return IsBitSet(kFlagNarrow);
+ }
+
+ bool Fp() const {
+ return IsBitSet(kFlagFp);
+ }
+
+ bool Core() const {
+ return IsBitSet(kFlagCore);
+ }
+
+ bool Ref() const {
+ return IsBitSet(kFlagRef);
+ }
+
+ bool LowWord() const {
+ return IsBitSet(kFlagLowWord);
+ }
+
+ bool HighWord() const {
+ return IsBitSet(kFlagHighWord);
+ }
+
+ uint32_t ArrayDepth() const {
+ return raw_bits_ >> kBitArrayDepthStart;
+ }
+
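+    // NestedType() strips all array dimensions while ComponentType() strips a single one;
+    // e.g. for a two-dimensional fp array, NestedType() is the narrow fp element type and
+    // ComponentType() is the corresponding one-dimensional array type.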
+ Type NestedType() const {
+ DCHECK_NE(ArrayDepth(), 0u);
+ return Type(kFlagLowWord | ((raw_bits_ & kMaskArrayWideAndType) >> kArrayTypeShift));
+ }
+
+ Type ComponentType() const {
+ DCHECK_NE(ArrayDepth(), 0u);
+ Type temp(raw_bits_ - (1u << kBitArrayDepthStart)); // array_depth - 1u;
+ return (temp.ArrayDepth() != 0u) ? temp.AsNull() : NestedType();
+ }
+
+ void SetWide() {
+ SetBits(kFlagWide);
+ }
+
+ void SetNarrow() {
+ SetBits(kFlagNarrow);
+ }
+
+ void SetFp() {
+ SetBits(kFlagFp);
+ }
+
+ void SetCore() {
+ SetBits(kFlagCore);
+ }
+
+ void SetRef() {
+ SetBits(kFlagRef);
+ }
+
+ void SetLowWord() {
+ SetBits(kFlagLowWord);
+ }
+
+ void SetHighWord() {
+ SetBits(kFlagHighWord);
+ }
+
+ Type ToHighWord() const {
+ DCHECK_EQ(raw_bits_ & (kMaskWide | kMaskWord), kFlagWide | kFlagLowWord);
+ return Type(raw_bits_ ^ (kFlagLowWord | kFlagHighWord));
+ }
+
+ bool MergeHighWord(Type low_word_type) {
+ // NOTE: low_word_type may be also Narrow() or HighWord().
+ DCHECK(low_word_type.Wide() && low_word_type.LowWord());
+ return MergeBits(Type(low_word_type.raw_bits_ | kFlagHighWord),
+ kMaskWideAndType | kFlagHighWord);
+ }
+
+ bool Copy(Type type) {
+ if (raw_bits_ != type.raw_bits_) {
+ raw_bits_ = type.raw_bits_;
+ return true;
+ }
+ return false;
+ }
+
+ // Merge non-array flags.
+ bool MergeNonArrayFlags(Type src_type) {
+ return MergeBits(src_type, kMaskNonArray);
+ }
+
+ // Merge array flags for conflict.
+ bool MergeArrayConflict(Type src_type);
+
+ // Merge all flags.
+ bool MergeStrong(Type src_type);
+
+ // Merge all flags.
+ bool MergeWeak(Type src_type);
+
+ // Get the same type but mark that it should not be treated as null.
+ Type AsNonNull() const {
+ return Type(raw_bits_ | kFlagNonNull);
+ }
+
+ // Get the same type but mark that it can be treated as null.
+ Type AsNull() const {
+ return Type(raw_bits_ & ~kFlagNonNull);
+ }
+
+ private:
+ enum FlagBits {
+ kBitNonNull = 0,
+ kBitWide,
+ kBitNarrow,
+ kBitFp,
+ kBitCore,
+ kBitRef,
+ kBitLowWord,
+ kBitHighWord,
+ kBitArrayWide,
+ kBitArrayNarrow,
+ kBitArrayFp,
+ kBitArrayCore,
+ kBitArrayRef,
+ kBitArrayDepthStart,
+ };
+ static constexpr size_t kArrayDepthBits = sizeof(uint32_t) * 8u - kBitArrayDepthStart;
+
+ static constexpr uint32_t kFlagNonNull = 1u << kBitNonNull;
+ static constexpr uint32_t kFlagWide = 1u << kBitWide;
+ static constexpr uint32_t kFlagNarrow = 1u << kBitNarrow;
+ static constexpr uint32_t kFlagFp = 1u << kBitFp;
+ static constexpr uint32_t kFlagCore = 1u << kBitCore;
+ static constexpr uint32_t kFlagRef = 1u << kBitRef;
+ static constexpr uint32_t kFlagLowWord = 1u << kBitLowWord;
+ static constexpr uint32_t kFlagHighWord = 1u << kBitHighWord;
+ static constexpr uint32_t kFlagArrayWide = 1u << kBitArrayWide;
+ static constexpr uint32_t kFlagArrayNarrow = 1u << kBitArrayNarrow;
+ static constexpr uint32_t kFlagArrayFp = 1u << kBitArrayFp;
+ static constexpr uint32_t kFlagArrayCore = 1u << kBitArrayCore;
+ static constexpr uint32_t kFlagArrayRef = 1u << kBitArrayRef;
+
+ static constexpr uint32_t kMaskWide = kFlagWide | kFlagNarrow;
+ static constexpr uint32_t kMaskType = kFlagFp | kFlagCore | kFlagRef;
+ static constexpr uint32_t kMaskWord = kFlagLowWord | kFlagHighWord;
+ static constexpr uint32_t kMaskArrayWide = kFlagArrayWide | kFlagArrayNarrow;
+ static constexpr uint32_t kMaskArrayType = kFlagArrayFp | kFlagArrayCore | kFlagArrayRef;
+ static constexpr uint32_t kMaskWideAndType = kMaskWide | kMaskType;
+ static constexpr uint32_t kMaskArrayWideAndType = kMaskArrayWide | kMaskArrayType;
+
+ static constexpr size_t kArrayTypeShift = kBitArrayWide - kBitWide;
+ static_assert(kArrayTypeShift == kBitArrayNarrow - kBitNarrow, "shift mismatch");
+ static_assert(kArrayTypeShift == kBitArrayFp - kBitFp, "shift mismatch");
+ static_assert(kArrayTypeShift == kBitArrayCore - kBitCore, "shift mismatch");
+ static_assert(kArrayTypeShift == kBitArrayRef - kBitRef, "shift mismatch");
+ static_assert((kMaskWide << kArrayTypeShift) == kMaskArrayWide, "shift mismatch");
+ static_assert((kMaskType << kArrayTypeShift) == kMaskArrayType, "shift mismatch");
+ static_assert((kMaskWideAndType << kArrayTypeShift) == kMaskArrayWideAndType, "shift mismatch");
+
+ static constexpr uint32_t kMaskArrayDepth = static_cast<uint32_t>(-1) << kBitArrayDepthStart;
+ static constexpr uint32_t kMaskNonArray = ~(kMaskArrayWideAndType | kMaskArrayDepth);
+
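+  // For example, an int[] reference (cf. NarrowCoreArrayType()) is encoded as
+  // kFlagNarrow | kFlagRef | kFlagLowWord | kFlagArrayNarrow | kFlagArrayCore
+  // plus an array depth of 1 in the bits starting at kBitArrayDepthStart.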
+ // The maximum representable array depth. If we exceed the maximum (which can happen
+ // only with an absurdly nested array type in a dex file, which would presumably cause
+ // OOM while being resolved), we may report false conflicts.
+ static constexpr uint32_t kMaxArrayDepth = static_cast<uint32_t>(-1) >> kBitArrayDepthStart;
+
+ explicit Type(uint32_t raw_bits) : raw_bits_(raw_bits) { }
+
+ bool IsBitSet(uint32_t flag) const {
+ return (raw_bits_ & flag) != 0u;
+ }
+
+ void SetBits(uint32_t flags) {
+ raw_bits_ |= flags;
+ }
+
+ bool MergeBits(Type src_type, uint32_t mask) {
+ uint32_t new_bits = raw_bits_ | (src_type.raw_bits_ & mask);
+ if (new_bits != raw_bits_) {
+ raw_bits_ = new_bits;
+ return true;
+ }
+ return false;
+ }
+
+ uint32_t raw_bits_;
+ };
+
+ struct MethodSignature {
+ Type return_type;
+ size_t num_params;
+ Type* param_types;
+ };
+
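+  // Data for an SSA reg that check-cast(s) split into differently typed versions.
+  // Hypothetical example: after "check-cast v0, [I", downstream uses of v0 refer to a
+  // new modified SSA reg (current_mod_s_reg) typed as int[], earlier uses keep the
+  // original SSA reg and type, and pseudo-phis merge the versions where paths join.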
+ struct SplitSRegData {
+ int32_t current_mod_s_reg;
+ int32_t* starting_mod_s_reg; // Indexed by BasicBlock::id.
+ int32_t* ending_mod_s_reg; // Indexed by BasicBlock::id.
+
+ // NOTE: Before AddPseudoPhis(), def_phi_blocks_ marks the blocks
+ // with check-casts and the block with the original SSA reg.
+ // After AddPseudoPhis(), it marks blocks with pseudo-phis.
+ ArenaBitVector* def_phi_blocks_; // Indexed by BasicBlock::id.
+ };
+
+ class CheckCastData : public DeletableArenaObject<kArenaAllocMisc> {
+ public:
+ CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+ size_t NumSRegs() const {
+ return num_sregs_;
+ }
+
+ void AddCheckCast(MIR* check_cast, Type type);
+ void AddPseudoPhis();
+ void InitializeCheckCastSRegs(Type* sregs) const;
+ void MergeCheckCastConflicts(Type* sregs) const;
+ void MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const;
+
+ void Start(BasicBlock* bb);
+ bool ProcessPseudoPhis(BasicBlock* bb, Type* sregs);
+ void ProcessCheckCast(MIR* mir);
+
+ SplitSRegData* GetSplitSRegData(int32_t s_reg);
+
+ private:
+ BasicBlock* FindDefBlock(MIR* check_cast);
+ BasicBlock* FindTopologicallyEarliestPredecessor(BasicBlock* bb);
+ bool IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg);
+
+ MIRGraph* const mir_graph_;
+ ScopedArenaAllocator* const alloc_;
+ const size_t num_blocks_;
+ size_t num_sregs_;
+
+ // Map check-cast mir to special sreg and type.
+ struct CheckCastMapValue {
+ int32_t modified_s_reg;
+ Type type;
+ };
+ ScopedArenaSafeMap<MIR*, CheckCastMapValue> check_cast_map_;
+ ScopedArenaSafeMap<int32_t, SplitSRegData> split_sreg_data_;
+ };
+
+ static Type FieldType(const DexFile* dex_file, uint32_t field_idx);
+ static Type* PrepareIFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc);
+ static Type* PrepareSFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc);
+ static MethodSignature Signature(const DexFile* dex_file, uint32_t method_idx, bool is_static,
+ ScopedArenaAllocator* alloc);
+ static MethodSignature* PrepareSignatures(const DexFile* dex_file, MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc);
+ static CheckCastData* InitializeCheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+ void InitializeSRegs();
+
+ int32_t ModifiedSReg(int32_t s_reg);
+ int32_t PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx);
+
+ bool UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type);
+
+ MIRGraph* const mir_graph_;
+ CompilationUnit* const cu_;
+
+ // Type inference also propagates types backwards, but this must not happen across
+ // a check-cast. So we need to effectively split an SSA reg into two at a check-cast
+ // and keep track of the types separately.
+ std::unique_ptr<CheckCastData> check_cast_data_;
+
+ size_t num_sregs_; // Number of SSA regs or modified SSA regs, see check-cast.
+ const Type* const ifields_; // Indexed by MIR::meta::ifield_lowering_info.
+ const Type* const sfields_; // Indexed by MIR::meta::sfield_lowering_info.
+ const MethodSignature* const signatures_; // Indexed by MIR::meta::method_lowering_info.
+ const MethodSignature current_method_signature_;
+ Type* const sregs_; // Indexed by SSA reg or modified SSA reg, see check-cast.
+ uint64_t* const bb_df_attrs_; // Indexed by BasicBlock::id.
+
+ friend class TypeInferenceTest;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_TYPE_INFERENCE_H_
diff --git a/compiler/dex/type_inference_test.cc b/compiler/dex/type_inference_test.cc
new file mode 100644
index 0000000000..eaa2bfac93
--- /dev/null
+++ b/compiler/dex/type_inference_test.cc
@@ -0,0 +1,2044 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/logging.h"
+#include "compiler_ir.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_flags.h"
+#include "dex/mir_field_info.h"
+#include "dex/mir_graph.h"
+#include "driver/dex_compilation_unit.h"
+#include "gtest/gtest.h"
+#include "type_inference.h"
+#include "utils/test_dex_file_builder.h"
+
+namespace art {
+
+class TypeInferenceTest : public testing::Test {
+ protected:
+ struct TypeDef {
+ const char* descriptor;
+ };
+
+ struct FieldDef {
+ const char* class_descriptor;
+ const char* type;
+ const char* name;
+ };
+
+ struct MethodDef {
+ const char* class_descriptor;
+ const char* signature;
+ const char* name;
+ InvokeType type;
+ };
+
+ struct BBDef {
+ static constexpr size_t kMaxSuccessors = 4;
+ static constexpr size_t kMaxPredecessors = 4;
+
+ BBType type;
+ size_t num_successors;
+ BasicBlockId successors[kMaxSuccessors];
+ size_t num_predecessors;
+ BasicBlockId predecessors[kMaxPredecessors];
+ };
+
+ struct MIRDef {
+ static constexpr size_t kMaxSsaDefs = 2;
+ static constexpr size_t kMaxSsaUses = 4;
+
+ BasicBlockId bbid;
+ Instruction::Code opcode;
+ int64_t value;
+ uint32_t metadata;
+ size_t num_uses;
+ int32_t uses[kMaxSsaUses];
+ size_t num_defs;
+ int32_t defs[kMaxSsaDefs];
+ };
+
+#define DEF_SUCC0() \
+ 0u, { }
+#define DEF_SUCC1(s1) \
+ 1u, { s1 }
+#define DEF_SUCC2(s1, s2) \
+ 2u, { s1, s2 }
+#define DEF_SUCC3(s1, s2, s3) \
+ 3u, { s1, s2, s3 }
+#define DEF_SUCC4(s1, s2, s3, s4) \
+ 4u, { s1, s2, s3, s4 }
+#define DEF_PRED0() \
+ 0u, { }
+#define DEF_PRED1(p1) \
+ 1u, { p1 }
+#define DEF_PRED2(p1, p2) \
+ 2u, { p1, p2 }
+#define DEF_PRED3(p1, p2, p3) \
+ 3u, { p1, p2, p3 }
+#define DEF_PRED4(p1, p2, p3, p4) \
+ 4u, { p1, p2, p3, p4 }
+#define DEF_BB(type, succ, pred) \
+ { type, succ, pred }
+
+#define DEF_CONST(bb, opcode, reg, value) \
+ { bb, opcode, value, 0u, 0, { }, 1, { reg } }
+#define DEF_CONST_WIDE(bb, opcode, reg, value) \
+ { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_CONST_STRING(bb, opcode, reg, index) \
+ { bb, opcode, index, 0u, 0, { }, 1, { reg } }
+#define DEF_IGET(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_AGET(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
+#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
+#define DEF_APUT(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
+#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
+#define DEF_INVOKE0(bb, opcode, method_idx) \
+ { bb, opcode, 0u, method_idx, 0, { }, 0, { } }
+#define DEF_INVOKE1(bb, opcode, reg, method_idx) \
+ { bb, opcode, 0u, method_idx, 1, { reg }, 0, { } }
+#define DEF_INVOKE2(bb, opcode, reg1, reg2, method_idx) \
+ { bb, opcode, 0u, method_idx, 2, { reg1, reg2 }, 0, { } }
+#define DEF_IFZ(bb, opcode, reg) \
+ { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
+#define DEF_MOVE(bb, opcode, reg, src) \
+ { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
+#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
+ { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
+#define DEF_PHI2(bb, reg, src1, src2) \
+ { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
+#define DEF_BINOP(bb, opcode, result, src1, src2) \
+ { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
+#define DEF_UNOP(bb, opcode, result, src) DEF_MOVE(bb, opcode, result, src)
+#define DEF_NULOP(bb, opcode, result) DEF_CONST(bb, opcode, result, 0)
+#define DEF_NULOP_WIDE(bb, opcode, result) DEF_CONST_WIDE(bb, opcode, result, 0)
+#define DEF_CHECK_CAST(bb, opcode, reg, type) \
+ { bb, opcode, 0, type, 1, { reg }, 0, { } }
+#define DEF_NEW_ARRAY(bb, opcode, reg, length, type) \
+ { bb, opcode, 0, type, 1, { length }, 1, { reg } }
+
+ void AddTypes(const TypeDef* defs, size_t count) {
+ for (size_t i = 0; i != count; ++i) {
+ const TypeDef* def = &defs[i];
+ dex_file_builder_.AddType(def->descriptor);
+ }
+ }
+
+ template <size_t count>
+ void PrepareTypes(const TypeDef (&defs)[count]) {
+ type_defs_ = defs;
+ type_count_ = count;
+ AddTypes(defs, count);
+ }
+
+ void AddFields(const FieldDef* defs, size_t count) {
+ for (size_t i = 0; i != count; ++i) {
+ const FieldDef* def = &defs[i];
+ dex_file_builder_.AddField(def->class_descriptor, def->type, def->name);
+ }
+ }
+
+ template <size_t count>
+ void PrepareIFields(const FieldDef (&defs)[count]) {
+ ifield_defs_ = defs;
+ ifield_count_ = count;
+ AddFields(defs, count);
+ }
+
+ template <size_t count>
+ void PrepareSFields(const FieldDef (&defs)[count]) {
+ sfield_defs_ = defs;
+ sfield_count_ = count;
+ AddFields(defs, count);
+ }
+
+ void AddMethods(const MethodDef* defs, size_t count) {
+ for (size_t i = 0; i != count; ++i) {
+ const MethodDef* def = &defs[i];
+ dex_file_builder_.AddMethod(def->class_descriptor, def->signature, def->name);
+ }
+ }
+
+ template <size_t count>
+ void PrepareMethods(const MethodDef (&defs)[count]) {
+ method_defs_ = defs;
+ method_count_ = count;
+ AddMethods(defs, count);
+ }
+
+ DexMemAccessType AccessTypeForDescriptor(const char* descriptor) {
+ switch (descriptor[0]) {
+ case 'I':
+ case 'F':
+ return kDexMemAccessWord;
+ case 'J':
+ case 'D':
+ return kDexMemAccessWide;
+ case '[':
+ case 'L':
+ return kDexMemAccessObject;
+ case 'Z':
+ return kDexMemAccessBoolean;
+ case 'B':
+ return kDexMemAccessByte;
+ case 'C':
+ return kDexMemAccessChar;
+ case 'S':
+ return kDexMemAccessShort;
+ default:
+ LOG(FATAL) << "Bad descriptor: " << descriptor;
+ UNREACHABLE();
+ }
+ }
+
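+  // Count the 'in' vregs for the test method signature; e.g. "(JI)V" with
+  // is_static == false yields 1 (this) + 2 (J) + 1 (I) = 4.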
+ size_t CountIns(const std::string& test_method_signature, bool is_static) {
+ const char* sig = test_method_signature.c_str();
+ CHECK_EQ(sig[0], '(');
+ ++sig;
+ size_t result = is_static ? 0u : 1u;
+ while (*sig != ')') {
+ result += (AccessTypeForDescriptor(sig) == kDexMemAccessWide) ? 2u : 1u;
+ while (*sig == '[') {
+ ++sig;
+ }
+ if (*sig == 'L') {
+ do {
+ ++sig;
+ CHECK(*sig != '\0' && *sig != ')');
+ } while (*sig != ';');
+ }
+ ++sig;
+ }
+ return result;
+ }
+
+ void BuildDexFile(const std::string& test_method_signature, bool is_static) {
+ dex_file_builder_.AddMethod(kClassName, test_method_signature, kMethodName);
+ dex_file_ = dex_file_builder_.Build(kDexLocation);
+ cu_.dex_file = dex_file_.get();
+ cu_.method_idx = dex_file_builder_.GetMethodIdx(kClassName, test_method_signature, kMethodName);
+ cu_.access_flags = is_static ? kAccStatic : 0u;
+ cu_.mir_graph->m_units_.push_back(new (cu_.mir_graph->arena_) DexCompilationUnit(
+ &cu_, cu_.class_loader, cu_.class_linker, *cu_.dex_file, nullptr /* code_item not used */,
+ 0u /* class_def_idx not used */, 0u /* method_index not used */,
+ cu_.access_flags, nullptr /* verified_method not used */));
+ cu_.mir_graph->current_method_ = 0u;
+ code_item_ = static_cast<DexFile::CodeItem*>(
+ cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
+
+ code_item_->ins_size_ = CountIns(test_method_signature, is_static);
+ code_item_->registers_size_ = kLocalVRs + code_item_->ins_size_;
+ cu_.mir_graph->current_code_item_ = code_item_;
+ cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
+
+ cu_.mir_graph->ifield_lowering_infos_.clear();
+ cu_.mir_graph->ifield_lowering_infos_.reserve(ifield_count_);
+ for (size_t i = 0u; i != ifield_count_; ++i) {
+ const FieldDef* def = &ifield_defs_[i];
+ uint32_t field_idx =
+ dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
+ MirIFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type), false);
+ field_info.declaring_dex_file_ = cu_.dex_file;
+ field_info.declaring_field_idx_ = field_idx;
+ cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
+ }
+
+ cu_.mir_graph->sfield_lowering_infos_.clear();
+ cu_.mir_graph->sfield_lowering_infos_.reserve(sfield_count_);
+ for (size_t i = 0u; i != sfield_count_; ++i) {
+ const FieldDef* def = &sfield_defs_[i];
+ uint32_t field_idx =
+ dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
+ MirSFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type));
+ field_info.declaring_dex_file_ = cu_.dex_file;
+ field_info.declaring_field_idx_ = field_idx;
+ cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
+ }
+
+ cu_.mir_graph->method_lowering_infos_.clear();
+ cu_.mir_graph->method_lowering_infos_.reserve(method_count_);
+ for (size_t i = 0u; i != method_count_; ++i) {
+ const MethodDef* def = &method_defs_[i];
+ uint32_t method_idx =
+ dex_file_builder_.GetMethodIdx(def->class_descriptor, def->signature, def->name);
+ MirMethodLoweringInfo method_info(method_idx, def->type, false);
+ method_info.declaring_dex_file_ = cu_.dex_file;
+ method_info.declaring_method_idx_ = method_idx;
+ cu_.mir_graph->method_lowering_infos_.push_back(method_info);
+ }
+ }
+
+ void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
+ cu_.mir_graph->block_id_map_.clear();
+ cu_.mir_graph->block_list_.clear();
+ ASSERT_LT(3u, count); // null, entry, exit and at least one bytecode block.
+ ASSERT_EQ(kNullBlock, defs[0].type);
+ ASSERT_EQ(kEntryBlock, defs[1].type);
+ ASSERT_EQ(kExitBlock, defs[2].type);
+ for (size_t i = 0u; i != count; ++i) {
+ const BBDef* def = &defs[i];
+ BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
+ if (def->num_successors <= 2) {
+ bb->successor_block_list_type = kNotUsed;
+ bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
+ bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
+ } else {
+ bb->successor_block_list_type = kPackedSwitch;
+ bb->fall_through = 0u;
+ bb->taken = 0u;
+ bb->successor_blocks.reserve(def->num_successors);
+ for (size_t j = 0u; j != def->num_successors; ++j) {
+ SuccessorBlockInfo* successor_block_info =
+ static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
+ kArenaAllocSuccessor));
+ successor_block_info->block = def->successors[j];
+ successor_block_info->key = 0u; // Not used by type inference.
+ bb->successor_blocks.push_back(successor_block_info);
+ }
+ }
+ bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
+ if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
+ bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
+ cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+ bb->data_flow_info->live_in_v = live_in_v_;
+ }
+ }
+ ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
+ cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
+ ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
+ cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
+ ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
+ }
+
+ template <size_t count>
+ void PrepareBasicBlocks(const BBDef (&defs)[count]) {
+ DoPrepareBasicBlocks(defs, count);
+ }
+
+ void PrepareSingleBlock() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void PrepareDiamond() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void PrepareLoop() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)), // "taken" loops to self.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void DoPrepareMIRs(const MIRDef* defs, size_t count) {
+ mir_count_ = count;
+ mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
+ ssa_reps_.resize(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const MIRDef* def = &defs[i];
+ MIR* mir = &mirs_[i];
+ ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
+ BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
+ bb->AppendMIR(mir);
+ mir->dalvikInsn.opcode = def->opcode;
+ mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
+ mir->dalvikInsn.vB_wide = def->value;
+ if (IsInstructionIGetOrIPut(def->opcode)) {
+ ASSERT_LT(def->metadata, cu_.mir_graph->ifield_lowering_infos_.size());
+ mir->meta.ifield_lowering_info = def->metadata;
+ ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->metadata].MemAccessType(),
+ IGetOrIPutMemAccessType(def->opcode));
+ cu_.mir_graph->merged_df_flags_ |= DF_IFIELD;
+ } else if (IsInstructionSGetOrSPut(def->opcode)) {
+ ASSERT_LT(def->metadata, cu_.mir_graph->sfield_lowering_infos_.size());
+ mir->meta.sfield_lowering_info = def->metadata;
+ ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->metadata].MemAccessType(),
+ SGetOrSPutMemAccessType(def->opcode));
+ cu_.mir_graph->merged_df_flags_ |= DF_SFIELD;
+ } else if (IsInstructionInvoke(def->opcode)) {
+ ASSERT_LT(def->metadata, cu_.mir_graph->method_lowering_infos_.size());
+ mir->meta.method_lowering_info = def->metadata;
+ mir->dalvikInsn.vA = def->num_uses;
+ cu_.mir_graph->merged_df_flags_ |= DF_FORMAT_35C;
+ } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
+ mir->meta.phi_incoming =
+ allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
+ ASSERT_EQ(def->num_uses, bb->predecessors.size());
+ std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
+ } else if (def->opcode == Instruction::CHECK_CAST) {
+ ASSERT_LT(def->metadata, type_count_);
+ mir->dalvikInsn.vB = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
+ cu_.mir_graph->merged_df_flags_ |= DF_CHK_CAST;
+ } else if (def->opcode == Instruction::NEW_ARRAY) {
+ ASSERT_LT(def->metadata, type_count_);
+ mir->dalvikInsn.vC = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
+ }
+ mir->ssa_rep = &ssa_reps_[i];
+ mir->ssa_rep->num_uses = def->num_uses;
+ mir->ssa_rep->uses = const_cast<int32_t*>(def->uses); // Not modified by type inference.
+ mir->ssa_rep->num_defs = def->num_defs;
+ mir->ssa_rep->defs = const_cast<int32_t*>(def->defs); // Not modified by type inference.
+ mir->dalvikInsn.opcode = def->opcode;
+ mir->offset = i; // Used only for debug output in this test.
+ mir->optimization_flags = 0u;
+ }
+ code_item_->insns_size_in_code_units_ = 2u * count;
+ }
+
+ template <size_t count>
+ void PrepareMIRs(const MIRDef (&defs)[count]) {
+ DoPrepareMIRs(defs, count);
+ }
+
+ // BasicBlockDataFlow::vreg_to_ssa_map_exit is used only for check-casts.
+ void AllocEndingVRegToSRegMaps() {
+ AllNodesIterator iterator(cu_.mir_graph.get());
+ for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+ if (bb->data_flow_info != nullptr) {
+ if (bb->data_flow_info->vreg_to_ssa_map_exit == nullptr) {
+ size_t num_vregs = code_item_->registers_size_;
+ bb->data_flow_info->vreg_to_ssa_map_exit = static_cast<int32_t*>(
+ cu_.arena.AllocArray<int32_t>(num_vregs, kArenaAllocDFInfo));
+ std::fill_n(bb->data_flow_info->vreg_to_ssa_map_exit, num_vregs, INVALID_SREG);
+ }
+ }
+ }
+ }
+
+ template <size_t count>
+ void MapVRegToSReg(int vreg, int32_t sreg, const BasicBlockId (&bb_ids)[count]) {
+ AllocEndingVRegToSRegMaps();
+ for (BasicBlockId bb_id : bb_ids) {
+ BasicBlock* bb = cu_.mir_graph->GetBasicBlock(bb_id);
+ CHECK(bb != nullptr);
+ CHECK(bb->data_flow_info != nullptr);
+ CHECK(bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ bb->data_flow_info->vreg_to_ssa_map_exit[vreg] = sreg;
+ }
+ }
+
+ void PerformTypeInference() {
+ cu_.mir_graph->SSATransformationStart();
+ cu_.mir_graph->ComputeDFSOrders();
+ cu_.mir_graph->ComputeDominators();
+ cu_.mir_graph->ComputeTopologicalSortOrder();
+ cu_.mir_graph->SSATransformationEnd();
+ ASSERT_TRUE(type_inference_ == nullptr);
+ type_inference_.reset(new (allocator_.get()) TypeInference(cu_.mir_graph.get(),
+ allocator_.get()));
+ RepeatingPreOrderDfsIterator iter(cu_.mir_graph.get());
+ bool changed = false;
+ for (BasicBlock* bb = iter.Next(changed); bb != nullptr; bb = iter.Next(changed)) {
+ changed = type_inference_->Apply(bb);
+ }
+ type_inference_->Finish();
+ }
+
+ TypeInferenceTest()
+ : pool_(),
+ cu_(&pool_, kRuntimeISA, nullptr, nullptr),
+ mir_count_(0u),
+ mirs_(nullptr),
+ code_item_(nullptr),
+ ssa_reps_(),
+ allocator_(),
+ live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)),
+ type_defs_(nullptr),
+ type_count_(0u),
+ ifield_defs_(nullptr),
+ ifield_count_(0u),
+ sfield_defs_(nullptr),
+ sfield_count_(0u),
+ method_defs_(nullptr),
+ method_count_(0u),
+ dex_file_builder_(),
+ dex_file_(nullptr) {
+ cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+ allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+ // Bind all possible sregs to live vregs for test purposes.
+ live_in_v_->SetInitialBits(kMaxSsaRegs);
+ cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
+ kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
+ cu_.mir_graph->method_sreg_ = kMaxSsaRegs - 1u;
+ cu_.mir_graph->reg_location_[cu_.mir_graph->GetMethodSReg()].location = kLocCompilerTemp;
+ cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
+ cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
+ for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
+ cu_.mir_graph->ssa_base_vregs_.push_back(i);
+ cu_.mir_graph->ssa_subscripts_.push_back(0);
+ }
+ }
+
+ enum ExpectFlags : uint32_t {
+ kExpectWide = 0x0001u,
+ kExpectNarrow = 0x0002u,
+ kExpectFp = 0x0004u,
+ kExpectCore = 0x0008u,
+ kExpectRef = 0x0010u,
+ kExpectArrayWide = 0x0020u,
+ kExpectArrayNarrow = 0x0040u,
+ kExpectArrayFp = 0x0080u,
+ kExpectArrayCore = 0x0100u,
+ kExpectArrayRef = 0x0200u,
+ kExpectNull = 0x0400u,
+ kExpectHigh = 0x0800u, // Reserved for ExpectSRegType().
+ };
+
+ struct SRegExpectation {
+ uint32_t array_depth;
+ uint32_t flags;
+ };
+
+ void ExpectSRegType(int s_reg, const SRegExpectation& expectation, bool check_loc = true) {
+ uint32_t flags = expectation.flags;
+ uint32_t array_depth = expectation.array_depth;
+ TypeInference::Type type = type_inference_->sregs_[s_reg];
+
+ if (check_loc) {
+ RegLocation loc = cu_.mir_graph->reg_location_[s_reg];
+ EXPECT_EQ((flags & kExpectWide) != 0u, loc.wide) << s_reg;
+ EXPECT_EQ((flags & kExpectFp) != 0u, loc.fp) << s_reg;
+ EXPECT_EQ((flags & kExpectCore) != 0u, loc.core) << s_reg;
+ EXPECT_EQ((flags & kExpectRef) != 0u, loc.ref) << s_reg;
+ EXPECT_EQ((flags & kExpectHigh) != 0u, loc.high_word) << s_reg;
+ }
+
+ EXPECT_EQ((flags & kExpectWide) != 0u, type.Wide()) << s_reg;
+ EXPECT_EQ((flags & kExpectNarrow) != 0u, type.Narrow()) << s_reg;
+ EXPECT_EQ((flags & kExpectFp) != 0u, type.Fp()) << s_reg;
+ EXPECT_EQ((flags & kExpectCore) != 0u, type.Core()) << s_reg;
+ EXPECT_EQ((flags & kExpectRef) != 0u, type.Ref()) << s_reg;
+ EXPECT_EQ((flags & kExpectHigh) == 0u, type.LowWord()) << s_reg;
+ EXPECT_EQ((flags & kExpectHigh) != 0u, type.HighWord()) << s_reg;
+
+ if ((flags & kExpectRef) != 0u) {
+ EXPECT_EQ((flags & kExpectNull) != 0u, !type.NonNull()) << s_reg;
+ } else {
+ // Null should be checked only for references.
+ ASSERT_EQ((flags & kExpectNull), 0u);
+ }
+
+ ASSERT_EQ(array_depth, type.ArrayDepth()) << s_reg;
+ if (array_depth != 0u) {
+ ASSERT_NE((flags & kExpectRef), 0u);
+ TypeInference::Type nested_type = type.NestedType();
+ EXPECT_EQ((flags & kExpectArrayWide) != 0u, nested_type.Wide()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayNarrow) != 0u, nested_type.Narrow()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayFp) != 0u, nested_type.Fp()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayCore) != 0u, nested_type.Core()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayRef) != 0u, nested_type.Ref()) << s_reg;
+ }
+ if (!type.Narrow() && type.LowWord() &&
+ (expectation.flags & (kExpectWide | kExpectNarrow | kExpectHigh)) == kExpectWide) {
+ SRegExpectation high_expectation = { array_depth, flags | kExpectHigh };
+ ExpectSRegType(s_reg + 1, high_expectation);
+ }
+ }
+
+ void ExpectCore(int s_reg, bool core) {
+ EXPECT_EQ(core, type_inference_->sregs_[s_reg].Core());
+ }
+
+ void ExpectRef(int s_reg, bool ref) {
+ EXPECT_EQ(ref, type_inference_->sregs_[s_reg].Ref());
+ }
+
+ void ExpectArrayDepth(int s_reg, uint32_t array_depth) {
+ EXPECT_EQ(array_depth, type_inference_->sregs_[s_reg].ArrayDepth());
+ }
+
+ static constexpr size_t kMaxSsaRegs = 16384u;
+ static constexpr uint16_t kLocalVRs = 1000u;
+
+ static constexpr const char* kDexLocation = "TypeInferenceDexFile;";
+ static constexpr const char* kClassName = "LTypeInferenceTest;";
+ static constexpr const char* kMethodName = "test";
+
+ ArenaPool pool_;
+ CompilationUnit cu_;
+ size_t mir_count_;
+ MIR* mirs_;
+ DexFile::CodeItem* code_item_;
+ std::vector<SSARepresentation> ssa_reps_;
+ std::unique_ptr<ScopedArenaAllocator> allocator_;
+ std::unique_ptr<TypeInference> type_inference_;
+ ArenaBitVector* live_in_v_;
+
+ const TypeDef* type_defs_;
+ size_t type_count_;
+ const FieldDef* ifield_defs_;
+ size_t ifield_count_;
+ const FieldDef* sfield_defs_;
+ size_t sfield_count_;
+ const MethodDef* method_defs_;
+ size_t method_count_;
+
+ TestDexFileBuilder dex_file_builder_;
+ std::unique_ptr<const DexFile> dex_file_;
+};
+
+TEST_F(TypeInferenceTest, IGet) {
+ static const FieldDef ifields[] = {
+ { kClassName, "B", "byteField" },
+ { kClassName, "C", "charField" },
+ { kClassName, "D", "doubleField" },
+ { kClassName, "F", "floatField" },
+ { kClassName, "I", "intField" },
+ { kClassName, "J", "longField" },
+ { kClassName, "S", "shortField" },
+ { kClassName, "Z", "booleanField" },
+ { kClassName, "Ljava/lang/Object;", "objectField" },
+ { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
+ };
+ constexpr uint32_t thiz = kLocalVRs;
+ static const MIRDef mirs[] = {
+ DEF_IGET(3u, Instruction::IGET_BYTE, 0u, thiz, 0u),
+ DEF_IGET(3u, Instruction::IGET_CHAR, 1u, thiz, 1u),
+ DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 2u, thiz, 2u),
+ DEF_IGET(3u, Instruction::IGET, 4u, thiz, 3u),
+ DEF_IGET(3u, Instruction::IGET, 5u, thiz, 4u),
+ DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 6u, thiz, 5u),
+ DEF_IGET(3u, Instruction::IGET_SHORT, 8u, thiz, 6u),
+ DEF_IGET(3u, Instruction::IGET_BOOLEAN, 9u, thiz, 7u),
+ DEF_IGET(3u, Instruction::IGET_OBJECT, 10u, thiz, 8u),
+ DEF_IGET(3u, Instruction::IGET_OBJECT, 11u, thiz, 9u),
+ };
+
+ PrepareIFields(ifields);
+ BuildDexFile("()V", false);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SGet) {
+ static const FieldDef sfields[] = {
+ { kClassName, "B", "staticByteField" },
+ { kClassName, "C", "staticCharField" },
+ { kClassName, "D", "staticDoubleField" },
+ { kClassName, "F", "staticFloatField" },
+ { kClassName, "I", "staticIntField" },
+ { kClassName, "J", "staticLongField" },
+ { kClassName, "S", "staticShortField" },
+ { kClassName, "Z", "staticBooleanField" },
+ { kClassName, "Ljava/lang/Object;", "staticObjectField" },
+ { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_SGET(3u, Instruction::SGET_BYTE, 0u, 0u),
+ DEF_SGET(3u, Instruction::SGET_CHAR, 1u, 1u),
+ DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 2u, 2u),
+ DEF_SGET(3u, Instruction::SGET, 4u, 3u),
+ DEF_SGET(3u, Instruction::SGET, 5u, 4u),
+ DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 6u, 5u),
+ DEF_SGET(3u, Instruction::SGET_SHORT, 8u, 6u),
+ DEF_SGET(3u, Instruction::SGET_BOOLEAN, 9u, 7u),
+ DEF_SGET(3u, Instruction::SGET_OBJECT, 10u, 8u),
+ DEF_SGET(3u, Instruction::SGET_OBJECT, 11u, 9u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, IPut) {
+ static const FieldDef ifields[] = {
+ { kClassName, "B", "byteField" },
+ { kClassName, "C", "charField" },
+ { kClassName, "D", "doubleField" },
+ { kClassName, "F", "floatField" },
+ { kClassName, "I", "intField" },
+ { kClassName, "J", "longField" },
+ { kClassName, "S", "shortField" },
+ { kClassName, "Z", "booleanField" },
+ { kClassName, "Ljava/lang/Object;", "objectField" },
+ { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
+ };
+ constexpr uint32_t thiz = kLocalVRs;
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_BYTE, 0u, thiz, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_CHAR, 1u, thiz, 1u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+ DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 2u, thiz, 2u),
+ DEF_CONST(3u, Instruction::CONST, 4u, 0),
+ DEF_IPUT(3u, Instruction::IPUT, 4u, thiz, 3u),
+ DEF_CONST(3u, Instruction::CONST, 5u, 0),
+ DEF_IPUT(3u, Instruction::IPUT, 5u, thiz, 4u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+ DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 6u, thiz, 5u),
+ DEF_CONST(3u, Instruction::CONST, 8u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_SHORT, 8u, thiz, 6u),
+ DEF_CONST(3u, Instruction::CONST, 9u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_BOOLEAN, 9u, thiz, 7u),
+ DEF_CONST(3u, Instruction::CONST, 10u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_OBJECT, 10u, thiz, 8u),
+ DEF_CONST(3u, Instruction::CONST, 11u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_OBJECT, 11u, thiz, 9u),
+ };
+
+ PrepareIFields(ifields);
+ BuildDexFile("()V", false);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
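+ // For the object fields, the stored const 0 is inferred as the null reference (kExpectNull).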
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SPut) {
+ static const FieldDef sfields[] = {
+ { kClassName, "B", "staticByteField" },
+ { kClassName, "C", "staticCharField" },
+ { kClassName, "D", "staticDoubleField" },
+ { kClassName, "F", "staticFloatField" },
+ { kClassName, "I", "staticIntField" },
+ { kClassName, "J", "staticLongField" },
+ { kClassName, "S", "staticShortField" },
+ { kClassName, "Z", "staticBooleanField" },
+ { kClassName, "Ljava/lang/Object;", "staticObjectField" },
+ { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_BYTE, 0u, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_CHAR, 1u, 1u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+ DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 2u, 2u),
+ DEF_CONST(3u, Instruction::CONST, 4u, 0),
+ DEF_SPUT(3u, Instruction::SPUT, 4u, 3u),
+ DEF_CONST(3u, Instruction::CONST, 5u, 0),
+ DEF_SPUT(3u, Instruction::SPUT, 5u, 4u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+ DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 6u, 5u),
+ DEF_CONST(3u, Instruction::CONST, 8u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_SHORT, 8u, 6u),
+ DEF_CONST(3u, Instruction::CONST, 9u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_BOOLEAN, 9u, 7u),
+ DEF_CONST(3u, Instruction::CONST, 10u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 10u, 8u),
+ DEF_CONST(3u, Instruction::CONST, 11u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 11u, 9u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MethodReturnType) {
+ static const MethodDef methods[] = {
+ { kClassName, "()B", "byteFoo", kStatic },
+ { kClassName, "()C", "charFoo", kStatic },
+ { kClassName, "()D", "doubleFoo", kStatic },
+ { kClassName, "()F", "floatFoo", kStatic },
+ { kClassName, "()I", "intFoo", kStatic },
+ { kClassName, "()J", "longFoo", kStatic },
+ { kClassName, "()S", "shortFoo", kStatic },
+ { kClassName, "()Z", "booleanFoo", kStatic },
+ { kClassName, "()Ljava/lang/Object;", "objectFoo", kStatic },
+ { kClassName, "()[Ljava/lang/Object;", "objectArrayFoo", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 0u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 0u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 1u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 1u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 2u),
+ DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 2u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 3u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 4u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 4u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 5u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 5u),
+ DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 6u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 6u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 8u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 7u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 9u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 8u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 10u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 9u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 11u),
+ };
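+ // Each MOVE_RESULT* is typed from the return type in the invoked method's shorty.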
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i + 1].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i + 1].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MethodArgType) {
+ static const MethodDef methods[] = {
+ { kClassName, "(B)V", "fooByte", kStatic },
+ { kClassName, "(C)V", "fooChar", kStatic },
+ { kClassName, "(D)V", "fooDouble", kStatic },
+ { kClassName, "(F)V", "fooFloat", kStatic },
+ { kClassName, "(I)V", "fooInt", kStatic },
+ { kClassName, "(J)V", "fooLong", kStatic },
+ { kClassName, "(S)V", "fooShort", kStatic },
+ { kClassName, "(Z)V", "fooBoolean", kStatic },
+ { kClassName, "(Ljava/lang/Object;)V", "fooObject", kStatic },
+ { kClassName, "([Ljava/lang/Object;)V", "fooObjectArray", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 0u, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 1u, 1u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 2u, 3u, 2u),
+ DEF_CONST(3u, Instruction::CONST, 4u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 4u, 3u),
+ DEF_CONST(3u, Instruction::CONST, 5u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 5u, 4u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 6u, 7u, 5u),
+ DEF_CONST(3u, Instruction::CONST, 8u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 8u, 6u),
+ DEF_CONST(3u, Instruction::CONST, 9u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 9u, 7u),
+ DEF_CONST(3u, Instruction::CONST, 10u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 10u, 8u),
+ DEF_CONST(3u, Instruction::CONST, 11u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 11u, 9u),
+ };
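+ // Each const is typed from the parameter it is passed as; const 0 passed as a reference becomes null.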
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // value; can't even determine whether core or fp.
+ DEF_CONST(3u, Instruction::CONST, 2u, 0), // index
+ DEF_APUT(3u, Instruction::APUT, 1u, 0u, 2u),
+ };
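+ // The plain APUT only narrows the value; with an untyped array it cannot decide core vs fp (see sreg 1).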
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayNarrow },
+ { 0u, kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut2) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // Object[] value
+ DEF_CONST(3u, Instruction::CONST, 2u, 0), // index
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut3) {
+ static const MIRDef mirs[] = {
+ // Either array1 or array2 could be Object[][] but there is no way to tell from the bytecode.
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // Object[] array1
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // Object[] array2
+ DEF_CONST(3u, Instruction::CONST, 2u, 0), // index
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 0u, 1u, 2u),
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut4) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 3u, 0), // value; can't even determine whether core or fp.
+ DEF_APUT(3u, Instruction::APUT, 3u, 2u, 1u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayNarrow },
+ { 0u, kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut5) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 3u, 0), // Object[] value
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut6) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ // Either array1 or array2 could be Object[][] but there is no way to tell from the bytecode.
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // Object[] array1
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u), // Object[] array2
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 2u, 3u, 1u),
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, TwoNullObjectArraysInLoop) {
+ static const MIRDef mirs[] = {
+ // void foo() {
+ // Object[] array1 = ((Object[])null)[0];
+ // Object[] array2 = ((Object[])null)[0];
+ // for (int i = 0; i != 3; ++i) {
+ // Object[] a1 = null; // One of these could be Object[][] but not both.
+ // Object[] a2 = null; // But they will be deduced as Object[].
+ // try { a1[0] = a2; } catch (Throwable ignored) { }
+ // try { a2[0] = a1; } catch (Throwable ignored) { }
+ // array1 = a1;
+ // array2 = a2;
+ // }
+ // }
+ //
+ // Omitting the try-catch:
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // null
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // array1
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u), // array2
+ DEF_PHI2(4u, 4u, 2u, 8u), // ? + [L -> [? gives [L (see array-length below)
+ DEF_PHI2(4u, 5u, 3u, 9u), // ? + [L -> ? gives ?
+ DEF_AGET(4u, Instruction::AGET_OBJECT, 6u, 0u, 1u), // a1
+ DEF_AGET(4u, Instruction::AGET_OBJECT, 7u, 0u, 1u), // a2
+ DEF_APUT(4u, Instruction::APUT_OBJECT, 6u, 7u, 1u),
+ DEF_APUT(4u, Instruction::APUT_OBJECT, 7u, 6u, 1u),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 8u, 6u),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 9u, 7u),
+ DEF_UNOP(5u, Instruction::ARRAY_LENGTH, 10u, 4u),
+ };
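+ // The ARRAY_LENGTH use is what forces the first phi (sreg 4u) to an array type; sreg 5u stays a plain reference.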
+
+ BuildDexFile("()V", true);
+ PrepareLoop();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayArrayFloat) {
+ static const MethodDef methods[] = {
+ { kClassName, "(F)V", "fooFloat", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ // void foo() {
+ // try {
+ // float[][][] aaaf = null;
+ // float[][] array = aaaf[0]; // Make sure array is treated as properly typed.
+ // array[0][0] = 0.0f; // const + aget-object[1] + aput
+ // fooFloat(array[0][0]); // aget-object[2] + aget + invoke
+ // // invoke: signature => input is F.
+ // // aget: output is F => base is [F (precise)
+ // // aget-object[2]: output is [F => base is [[F (precise)
+ // // aput: unknown input type => base is [?
+ // // aget-object[1]: base is [[F => result is L or [F, merge with [? => result is [F
+ // // aput (again): base is [F => result is F
+ // // const: F determined by the aput reprocessing.
+ // } catch (Throwable ignored) {
+ // }
+ // }
+ //
+ // Omitting the try-catch:
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // 0
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // aaaf
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 1u, 0u), // array = aaaf[0]
+ DEF_CONST(3u, Instruction::CONST, 3u, 0), // 0.0f
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 4u, 2u, 0u), // array[0]
+ DEF_APUT(3u, Instruction::APUT, 3u, 4u, 0u), // array[0][0] = 0.0f
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 5u, 2u, 0u), // array[0]
+ DEF_AGET(3u, Instruction::AGET, 6u, 5u, 0u), // array[0][0]
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 6u, 0u), // fooFloat(array[0][0])
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectFp | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
+ // Pseudo-phi from [I and [I into L infers only L but not [.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v2_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v2_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast2) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
+ // Pseudo-phi from [I and [I into [? infers [I.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v2_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v2_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast3) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+ // Pseudo-phi from [I and [F into L correctly leaves it as L.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v2_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v2_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCastConflict1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v2_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v2_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ // The type conflict in array element wasn't propagated to an SSA reg.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCastConflict2) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ DEF_AGET(6u, Instruction::AGET, 4u, 2u, 1u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v2_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v2_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectFp | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ // Type conflict in an SSA reg, register promotion disabled.
+ EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
+ // Phi from [I and [I infers only L but not [.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi2) {
+ static const TypeDef types[] = {
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
+ // Phi from [F and [F into [? infers [F.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi3) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+ // Phi from [I and [F infers L.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi4) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_CONST(5u, Instruction::CONST, 2u, 0),
+ // Pseudo-phi from [I and null infers L.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, PhiConflict1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ // The type conflict in array element wasn't propagated to an SSA reg.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, PhiConflict2) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ DEF_AGET(6u, Instruction::AGET, 4u, 3u, 0u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectFp | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ // Type conflict in an SSA reg, register promotion disabled.
+ EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Wide1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // long[]
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 3u, 0), // long
+ DEF_APUT_WIDE(3u, Instruction::APUT_WIDE, 3u, 2u, 1u),
+ { 3u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 2u }, 0u, { } },
+ };
+
+ BuildDexFile("()[J", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 0u, kExpectCore | kExpectWide },
+ // NOTE: High word checked implicitly for sreg = 3.
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, WideSizeConflict1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 0u, 0),
+ DEF_MOVE(3u, Instruction::MOVE, 2u, 0u),
+ };
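+ // A narrow MOVE of a wide value is a size mismatch that cannot be typed; the method punts to the interpreter.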
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectNarrow | kExpectWide },
+ { 0u, kExpectNarrow | kExpectWide },
+ };
+ ExpectSRegType(0u, expectations[0], false);
+ ExpectSRegType(2u, expectations[1], false);
+ EXPECT_TRUE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayLongLength) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[J", "arrayLongField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(4u, Instruction::CONST, 0u, 0),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
+ DEF_PHI2(6u, 2u, 0u, 1u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
+ DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
+ };
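+ // The ARRAY_LENGTH uses propagate the [J type back through the phi and onto the null const.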
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayCore | kExpectArrayWide },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayArrayObjectLength) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[[Ljava/lang/Object;", "arrayLongField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(4u, Instruction::CONST, 0u, 0),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
+ DEF_PHI2(6u, 2u, 0u, 1u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
+ DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SGetAdd0SPut) {
+ static const FieldDef sfields[] = {
+ { kClassName, "I", "staticIntField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_SGET(3u, Instruction::SGET, 0u, 0u),
+ DEF_UNOP(3u, Instruction::ADD_INT_LIT8, 1u, 0u), // +0
+ DEF_SPUT(3u, Instruction::SPUT, 1u, 0u),
+ };
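+ // The int field's core type flows through the ADD_INT_LIT8 from the SGET to the SPUT.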
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveObjectNull) {
+ static const MethodDef methods[] = {
+ { kClassName, "([I[D)V", "foo", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_MOVE(3u, Instruction::MOVE_OBJECT, 1u, 0u),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+ };
+ ExpectSRegType(0u, expectation);
+ ExpectSRegType(1u, expectation);
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveNull1) {
+ static const MethodDef methods[] = {
+ { kClassName, "([I[D)V", "foo", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_MOVE(3u, Instruction::MOVE, 1u, 0u),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectCore | kExpectRef | kExpectFp | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+ };
+ ExpectSRegType(0u, expectation);
+ ExpectSRegType(1u, expectation);
+ // Using MOVE instead of MOVE_OBJECT for null creates a core/fp/ref conflict; register promotion is disabled.
+ EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveNull2) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[F", "staticArrayArrayFloatField" },
+ { kClassName, "[I", "staticArrayIntField" },
+ { kClassName, "[[I", "staticArrayArrayIntField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(4u, Instruction::CONST, 0u, 0),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 1u, 0u),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 2u, 1u),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 3u, 0u),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 4u, 1u),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 5u, 2u),
+ DEF_PHI2(6u, 6u, 0u, 3u),
+ DEF_PHI2(6u, 7u, 1u, 4u),
+ DEF_PHI2(6u, 8u, 2u, 5u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 9u, 6u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 10u, 7u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 11u, 8u),
+ { 6u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 8u }, 0u, { } },
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()[[I", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ // Type conflict in array type not propagated to actual register.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ReuseNull1) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[I", "staticArrayLongField" },
+ { kClassName, "[[F", "staticArrayArrayFloatField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow
+ };
+ ExpectSRegType(0u, expectation);
+ // Type conflict in array type not propagated to actual register.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ReuseNull2) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[J", "staticArrayLongField" },
+ { kClassName, "[[F", "staticArrayArrayFloatField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+ };
+ ExpectSRegType(0u, expectation);
+ // Type conflict in array type not propagated to actual register.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArgIsNonNull) {
+ constexpr uint32_t arg = kLocalVRs;  // First (and only) argument vreg; the method is static.
+ static const MIRDef mirs[] = {
+ DEF_MOVE(3u, Instruction::MOVE_OBJECT, 0u, arg),
+ };
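+ // An incoming reference argument is known to be non-null, so no kExpectNull is inferred for it.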
+
+ BuildDexFile("(Ljava/lang/Object;)V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 0u,
+ kExpectRef | kExpectNarrow
+ };
+ ExpectSRegType(0u, expectation);
+ // No type conflict here; register promotion stays enabled.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, IfCc) {
+ static const FieldDef sfields[] = {
+ { kClassName, "I", "intField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_SGET(3u, Instruction::SGET, 0u, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0u),
+ { 3u, Instruction::IF_EQ, 0, 0u, 2, { 0u, 1u }, 0, { } },
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", false);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+} // namespace art
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a4df00e203..c1d5cb7213 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -46,7 +46,7 @@ VerificationResults::~VerificationResults() {
}
bool VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
- DCHECK(method_verifier != NULL);
+ DCHECK(method_verifier != nullptr);
MethodReference ref = method_verifier->GetMethodReference();
bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags());
const VerifiedMethod* verified_method = VerifiedMethod::Create(method_verifier, compile);
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 977757fd3e..7eba515200 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -166,7 +166,7 @@ void VerifiedMethod::VerifyGcMap(verifier::MethodVerifier* method_verifier,
}
}
} else {
- DCHECK(i >= 65536 || reg_bitmap == NULL);
+ DCHECK(i >= 65536 || reg_bitmap == nullptr);
}
}
}
@@ -283,7 +283,7 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier
}
mirror::ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
- if (abstract_method == NULL) {
+ if (abstract_method == nullptr) {
// If the method is not found in the cache this means that it was never found
// by ResolveMethodAndCheckAccess() called when verifying invoke_*.
continue;
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 437ae52437..ad07639b1c 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -59,7 +59,7 @@ class VerifiedMethod {
return safe_cast_set_;
}
- // Returns the devirtualization target method, or nullptr if none.
+ // Returns the devirtualization target method, or null if none.
const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
// Returns the dequicken field / method for a quick invoke / field get. Returns null if there is
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 2b78e38f5a..948ba7b273 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -23,400 +23,6 @@
namespace art {
-bool MIRGraph::SetFp(int index, bool is_fp) {
- bool change = false;
- if (is_fp && !reg_location_[index].fp) {
- reg_location_[index].fp = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetFp(int index) {
- bool change = false;
- if (!reg_location_[index].fp) {
- reg_location_[index].fp = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetCore(int index, bool is_core) {
- bool change = false;
- if (is_core && !reg_location_[index].defined) {
- reg_location_[index].core = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetCore(int index) {
- bool change = false;
- if (!reg_location_[index].defined) {
- reg_location_[index].core = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetRef(int index, bool is_ref) {
- bool change = false;
- if (is_ref && !reg_location_[index].defined) {
- reg_location_[index].ref = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetRef(int index) {
- bool change = false;
- if (!reg_location_[index].defined) {
- reg_location_[index].ref = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetWide(int index, bool is_wide) {
- bool change = false;
- if (is_wide && !reg_location_[index].wide) {
- reg_location_[index].wide = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetWide(int index) {
- bool change = false;
- if (!reg_location_[index].wide) {
- reg_location_[index].wide = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetHigh(int index, bool is_high) {
- bool change = false;
- if (is_high && !reg_location_[index].high_word) {
- reg_location_[index].high_word = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetHigh(int index) {
- bool change = false;
- if (!reg_location_[index].high_word) {
- reg_location_[index].high_word = true;
- change = true;
- }
- return change;
-}
-
-
-/*
- * Infer types and sizes. We don't need to track change on sizes,
- * as it doesn't propagate. We're guaranteed at least one pass through
- * the cfg.
- */
-bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
- SSARepresentation *ssa_rep = mir->ssa_rep;
-
- /*
- * The dex bytecode definition does not explicitly outlaw the definition of the same
- * virtual register to be used in both a 32-bit and 64-bit pair context. However, dx
- * does not generate this pattern (at least recently). Further, in the next revision of
- * dex, we will forbid this. To support the few cases in the wild, detect this pattern
- * and punt to the interpreter.
- */
- bool type_mismatch = false;
-
- if (ssa_rep) {
- uint64_t attrs = GetDataFlowAttributes(mir);
- const int* uses = ssa_rep->uses;
- const int* defs = ssa_rep->defs;
-
- // Handle defs
- if (attrs & DF_DA) {
- if (attrs & DF_CORE_A) {
- changed |= SetCore(defs[0]);
- }
- if (attrs & DF_REF_A) {
- changed |= SetRef(defs[0]);
- }
- if (attrs & DF_A_WIDE) {
- reg_location_[defs[0]].wide = true;
- reg_location_[defs[1]].wide = true;
- reg_location_[defs[1]].high_word = true;
- DCHECK_EQ(SRegToVReg(defs[0])+1,
- SRegToVReg(defs[1]));
- }
- }
-
-
- // Handles uses
- int next = 0;
- if (attrs & DF_UA) {
- if (attrs & DF_CORE_A) {
- changed |= SetCore(uses[next]);
- }
- if (attrs & DF_REF_A) {
- changed |= SetRef(uses[next]);
- }
- if (attrs & DF_A_WIDE) {
- reg_location_[uses[next]].wide = true;
- reg_location_[uses[next + 1]].wide = true;
- reg_location_[uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[next])+1,
- SRegToVReg(uses[next + 1]));
- next += 2;
- } else {
- type_mismatch |= reg_location_[uses[next]].wide;
- next++;
- }
- }
- if (attrs & DF_UB) {
- if (attrs & DF_CORE_B) {
- changed |= SetCore(uses[next]);
- }
- if (attrs & DF_REF_B) {
- changed |= SetRef(uses[next]);
- }
- if (attrs & DF_B_WIDE) {
- reg_location_[uses[next]].wide = true;
- reg_location_[uses[next + 1]].wide = true;
- reg_location_[uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[next])+1,
- SRegToVReg(uses[next + 1]));
- next += 2;
- } else {
- type_mismatch |= reg_location_[uses[next]].wide;
- next++;
- }
- }
- if (attrs & DF_UC) {
- if (attrs & DF_CORE_C) {
- changed |= SetCore(uses[next]);
- }
- if (attrs & DF_REF_C) {
- changed |= SetRef(uses[next]);
- }
- if (attrs & DF_C_WIDE) {
- reg_location_[uses[next]].wide = true;
- reg_location_[uses[next + 1]].wide = true;
- reg_location_[uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[next])+1,
- SRegToVReg(uses[next + 1]));
- } else {
- type_mismatch |= reg_location_[uses[next]].wide;
- }
- }
-
- // Special-case return handling
- if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
- (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
- (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
- switch (cu_->shorty[0]) {
- case 'I':
- type_mismatch |= reg_location_[uses[0]].wide;
- changed |= SetCore(uses[0]);
- break;
- case 'J':
- changed |= SetCore(uses[0]);
- changed |= SetCore(uses[1]);
- reg_location_[uses[0]].wide = true;
- reg_location_[uses[1]].wide = true;
- reg_location_[uses[1]].high_word = true;
- break;
- case 'F':
- type_mismatch |= reg_location_[uses[0]].wide;
- changed |= SetFp(uses[0]);
- break;
- case 'D':
- changed |= SetFp(uses[0]);
- changed |= SetFp(uses[1]);
- reg_location_[uses[0]].wide = true;
- reg_location_[uses[1]].wide = true;
- reg_location_[uses[1]].high_word = true;
- break;
- case 'L':
- type_mismatch |= reg_location_[uses[0]].wide;
- changed |= SetRef(uses[0]);
- break;
- default: break;
- }
- }
-
- // Special-case handling for format 35c/3rc invokes
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- int flags = MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
- 0 : mir->dalvikInsn.FlagsOf();
- if ((flags & Instruction::kInvoke) &&
- (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
- DCHECK_EQ(next, 0);
- const auto& lowering_info = GetMethodLoweringInfo(mir);
- const char* shorty = GetShortyFromMethodReference(lowering_info.GetTargetMethod());
- // Handle result type if floating point
- if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
- MIR* move_result_mir = FindMoveResult(bb, mir);
- // Result might not be used at all, so no move-result
- if (move_result_mir && (move_result_mir->dalvikInsn.opcode !=
- Instruction::MOVE_RESULT_OBJECT)) {
- SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
- DCHECK(tgt_rep != NULL);
- tgt_rep->fp_def[0] = true;
- changed |= SetFp(tgt_rep->defs[0]);
- if (shorty[0] == 'D') {
- tgt_rep->fp_def[1] = true;
- changed |= SetFp(tgt_rep->defs[1]);
- }
- }
- }
- int num_uses = mir->dalvikInsn.vA;
- // If this is a non-static invoke, mark implicit "this"
- if (!IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
- reg_location_[uses[next]].defined = true;
- reg_location_[uses[next]].ref = true;
- type_mismatch |= reg_location_[uses[next]].wide;
- next++;
- }
- uint32_t cpos = 1;
- if (strlen(shorty) > 1) {
- for (int i = next; i < num_uses;) {
- DCHECK_LT(cpos, strlen(shorty));
- switch (shorty[cpos++]) {
- case 'D':
- ssa_rep->fp_use[i] = true;
- ssa_rep->fp_use[i+1] = true;
- reg_location_[uses[i]].wide = true;
- reg_location_[uses[i+1]].wide = true;
- reg_location_[uses[i+1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
- i++;
- break;
- case 'J':
- reg_location_[uses[i]].wide = true;
- reg_location_[uses[i+1]].wide = true;
- reg_location_[uses[i+1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
- changed |= SetCore(uses[i]);
- i++;
- break;
- case 'F':
- type_mismatch |= reg_location_[uses[i]].wide;
- ssa_rep->fp_use[i] = true;
- break;
- case 'L':
- type_mismatch |= reg_location_[uses[i]].wide;
- changed |= SetRef(uses[i]);
- break;
- default:
- type_mismatch |= reg_location_[uses[i]].wide;
- changed |= SetCore(uses[i]);
- break;
- }
- i++;
- }
- }
- }
-
- for (int i = 0; ssa_rep->fp_use && i< ssa_rep->num_uses; i++) {
- if (ssa_rep->fp_use[i]) {
- changed |= SetFp(uses[i]);
- }
- }
- for (int i = 0; ssa_rep->fp_def && i< ssa_rep->num_defs; i++) {
- if (ssa_rep->fp_def[i]) {
- changed |= SetFp(defs[i]);
- }
- }
- // Special-case handling for moves & Phi
- if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
- /*
- * If any of our inputs or outputs is defined, set all.
- * Some ugliness related to Phi nodes and wide values.
- * The Phi set will include all low words or all high
- * words, so we have to treat them specially.
- */
- bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi);
- RegLocation rl_temp = reg_location_[defs[0]];
- bool defined_fp = rl_temp.defined && rl_temp.fp;
- bool defined_core = rl_temp.defined && rl_temp.core;
- bool defined_ref = rl_temp.defined && rl_temp.ref;
- bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
- bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
- for (int i = 0; i < ssa_rep->num_uses; i++) {
- rl_temp = reg_location_[uses[i]];
- defined_fp |= rl_temp.defined && rl_temp.fp;
- defined_core |= rl_temp.defined && rl_temp.core;
- defined_ref |= rl_temp.defined && rl_temp.ref;
- is_wide |= rl_temp.wide;
- is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
- }
- /*
- * We don't normally expect to see a Dalvik register definition used both as a
- * floating point and core value, though technically it could happen with constants.
- * Until we have proper typing, detect this situation and disable register promotion
- * (which relies on the distinction between core a fp usages).
- */
- if ((defined_fp && (defined_core | defined_ref)) &&
- ((cu_->disable_opt & (1 << kPromoteRegs)) == 0)) {
- LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
- << " op at block " << bb->id
- << " has both fp and core/ref uses for same def.";
- cu_->disable_opt |= (1 << kPromoteRegs);
- }
- changed |= SetFp(defs[0], defined_fp);
- changed |= SetCore(defs[0], defined_core);
- changed |= SetRef(defs[0], defined_ref);
- changed |= SetWide(defs[0], is_wide);
- changed |= SetHigh(defs[0], is_high);
- if (attrs & DF_A_WIDE) {
- changed |= SetWide(defs[1]);
- changed |= SetHigh(defs[1]);
- }
-
- bool has_ins = (GetNumOfInVRs() > 0);
-
- for (int i = 0; i < ssa_rep->num_uses; i++) {
- if (has_ins && IsInVReg(uses[i])) {
- // NB: The SSA name for the first def of an in-reg will be the same as
- // the reg's actual name.
- if (!reg_location_[uses[i]].fp && defined_fp) {
- // If we were about to infer that this first def of an in-reg is a float
- // when it wasn't previously (because float/int is set during SSA initialization),
- // do not allow this to happen.
- continue;
- }
- }
- changed |= SetFp(uses[i], defined_fp);
- changed |= SetCore(uses[i], defined_core);
- changed |= SetRef(uses[i], defined_ref);
- changed |= SetWide(uses[i], is_wide);
- changed |= SetHigh(uses[i], is_high);
- }
- if (attrs & DF_A_WIDE) {
- DCHECK_EQ(ssa_rep->num_uses, 2);
- changed |= SetWide(uses[1]);
- changed |= SetHigh(uses[1]);
- }
- }
- }
- if (type_mismatch) {
- LOG(WARNING) << "Deprecated dex type mismatch, interpreting "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file);
- LOG(INFO) << "@ 0x" << std::hex << mir->offset;
- SetPuntToInterpreter(true);
- }
- return changed;
-}
-
static const char* storage_name[] = {" Frame ", "PhysReg", " CompilerTemp "};
void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
@@ -446,66 +52,12 @@ void MIRGraph::InitRegLocations() {
loc[i] = fresh_loc;
loc[i].s_reg_low = i;
loc[i].is_const = false; // Constants will be marked by constant propagation pass later.
- loc[i].wide = false;
}
- /* Treat Method* as a normal reference */
- int method_sreg = GetMethodSReg();
- loc[method_sreg].ref = true;
- loc[method_sreg].location = kLocCompilerTemp;
- loc[method_sreg].defined = true;
+ /* Mark the location of ArtMethod* as temporary */
+ loc[GetMethodSReg()].location = kLocCompilerTemp;
reg_location_ = loc;
-
- int num_regs = GetNumOfCodeVRs();
-
- /* Add types of incoming arguments based on signature */
- int num_ins = GetNumOfInVRs();
- if (num_ins > 0) {
- int s_reg = num_regs - num_ins;
- if ((cu_->access_flags & kAccStatic) == 0) {
- // For non-static, skip past "this"
- reg_location_[s_reg].defined = true;
- reg_location_[s_reg].ref = true;
- s_reg++;
- }
- const char* shorty = cu_->shorty;
- int shorty_len = strlen(shorty);
- for (int i = 1; i < shorty_len; i++) {
- switch (shorty[i]) {
- case 'D':
- reg_location_[s_reg].wide = true;
- reg_location_[s_reg+1].high_word = true;
- reg_location_[s_reg+1].fp = true;
- DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
- reg_location_[s_reg].fp = true;
- reg_location_[s_reg].defined = true;
- s_reg++;
- break;
- case 'J':
- reg_location_[s_reg].wide = true;
- reg_location_[s_reg+1].high_word = true;
- DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
- reg_location_[s_reg].core = true;
- reg_location_[s_reg].defined = true;
- s_reg++;
- break;
- case 'F':
- reg_location_[s_reg].fp = true;
- reg_location_[s_reg].defined = true;
- break;
- case 'L':
- reg_location_[s_reg].ref = true;
- reg_location_[s_reg].defined = true;
- break;
- default:
- reg_location_[s_reg].core = true;
- reg_location_[s_reg].defined = true;
- break;
- }
- s_reg++;
- }
- }
}
/*
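The type inference removed above converges by a classic monotone fixed point: each Set* helper only ever turns a bit on, returns whether it did, and the caller re-walks the CFG until no instruction reports a change, which is what guarantees termination. A minimal sketch of that pattern, using hypothetical names rather than the ART API:

    #include <vector>

    struct Loc { bool wide = false; };

    // Monotone update: the bit can only go from false to true,
    // so the loop below must terminate.
    static bool SetWide(Loc* loc) {
      if (!loc->wide) {
        loc->wide = true;
        return true;  // Lattice moved; run another pass.
      }
      return false;
    }

    static void InferUntilStable(std::vector<Loc>* locs) {
      bool changed = true;
      while (changed) {
        changed = false;
        for (Loc& loc : *locs) {
          changed |= SetWide(&loc);  // Real code or-s in per-MIR results.
        }
      }
    }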
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index b4d46954f1..bad83359d7 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -79,7 +79,7 @@ inline ArtField* CompilerDriver::ResolveFieldWithDexFile(
}
if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
// ClassLinker can return a field of the wrong kind directly from the DexCache.
- // Silently return nullptr on such incompatible class change.
+ // Silently return null on such incompatible class change.
return nullptr;
}
return resolved_field;
@@ -206,7 +206,7 @@ inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
}
if (check_incompatible_class_change &&
UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
- // Silently return nullptr on incompatible class change.
+ // Silently return null on incompatible class change.
return nullptr;
}
return resolved_method;
@@ -302,7 +302,7 @@ inline int CompilerDriver::IsFastInvoke(
target_dex_cache, class_loader,
NullHandle<mirror::ArtMethod>(), kVirtual);
}
- CHECK(called_method != NULL);
+ CHECK(called_method != nullptr);
CHECK(!called_method->IsAbstract());
int stats_flags = kFlagMethodResolved;
GetCodeAndMethodForDirectCall(/*out*/invoke_type,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 641d174935..c858326562 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -76,6 +76,10 @@ static constexpr bool kTimeCompileMethod = !kIsDebugBuild;
// Whether to produce 64-bit ELF files for 64-bit targets. Leave this off for now.
static constexpr bool kProduce64BitELFFiles = false;
+// Whether classes-to-compile and methods-to-compile are only applied to the boot image, or, when
+// given, to all compilations.
+static constexpr bool kRestrictCompilationFiltersToImage = true;
+
static double Percentage(size_t x, size_t y) {
return 100.0 * (static_cast<double>(x)) / (static_cast<double>(x + y));
}
@@ -343,9 +347,10 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
- bool image, std::set<std::string>* image_classes,
- std::set<std::string>* compiled_classes, size_t thread_count,
- bool dump_stats, bool dump_passes,
+ bool image, std::unordered_set<std::string>* image_classes,
+ std::unordered_set<std::string>* compiled_classes,
+ std::unordered_set<std::string>* compiled_methods,
+ size_t thread_count, bool dump_stats, bool dump_passes,
const std::string& dump_cfg_file_name, CumulativeLogger* timer,
int swap_fd, const std::string& profile_file)
: swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)),
@@ -365,6 +370,7 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
image_(image),
image_classes_(image_classes),
classes_to_compile_(compiled_classes),
+ methods_to_compile_(compiled_methods),
had_hard_verifier_failure_(false),
thread_count_(thread_count),
stats_(new AOTCompilationStats),
@@ -489,7 +495,8 @@ void CompilerDriver::CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
- std::unique_ptr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
+ std::unique_ptr<ThreadPool> thread_pool(
+ new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
PreCompile(class_loader, dex_files, thread_pool.get(), timings);
Compile(class_loader, dex_files, thread_pool.get(), timings);
@@ -656,14 +663,27 @@ bool CompilerDriver::IsImageClass(const char* descriptor) const {
}
bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
- if (!IsImage()) {
+ if (kRestrictCompilationFiltersToImage && !IsImage()) {
+ return true;
+ }
+
+ if (classes_to_compile_ == nullptr) {
return true;
- } else {
- if (classes_to_compile_ == nullptr) {
- return true;
- }
- return classes_to_compile_->find(descriptor) != classes_to_compile_->end();
}
+ return classes_to_compile_->find(descriptor) != classes_to_compile_->end();
+}
+
+bool CompilerDriver::IsMethodToCompile(const MethodReference& method_ref) const {
+ if (kRestrictCompilationFiltersToImage && !IsImage()) {
+ return true;
+ }
+
+ if (methods_to_compile_ == nullptr) {
+ return true;
+ }
+
+ std::string tmp = PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file, true);
+ return methods_to_compile_->find(tmp.c_str()) != methods_to_compile_->end();
}
static void ResolveExceptionsForMethod(MutableHandle<mirror::ArtMethod> method_handle,
@@ -723,7 +743,8 @@ static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg)
static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::set<std::string>* image_classes = reinterpret_cast<std::set<std::string>*>(arg);
+ std::unordered_set<std::string>* image_classes =
+ reinterpret_cast<std::unordered_set<std::string>*>(arg);
std::string temp;
image_classes->insert(klass->GetDescriptor(&temp));
return true;
@@ -795,7 +816,8 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings)
CHECK_NE(image_classes_->size(), 0U);
}
-static void MaybeAddToImageClasses(Handle<mirror::Class> c, std::set<std::string>* image_classes)
+static void MaybeAddToImageClasses(Handle<mirror::Class> c,
+ std::unordered_set<std::string>* image_classes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
@@ -804,7 +826,8 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c, std::set<std::string
std::string temp;
while (!klass->IsObjectClass()) {
const char* descriptor = klass->GetDescriptor(&temp);
- std::pair<std::set<std::string>::iterator, bool> result = image_classes->insert(descriptor);
+ std::pair<std::unordered_set<std::string>::iterator, bool> result =
+ image_classes->insert(descriptor);
if (!result.second) { // Previously inserted.
break;
}
@@ -826,8 +849,8 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c, std::set<std::string
// Note: we can use object pointers because we suspend all threads.
class ClinitImageUpdate {
public:
- static ClinitImageUpdate* Create(std::set<std::string>* image_class_descriptors, Thread* self,
- ClassLinker* linker, std::string* error_msg) {
+ static ClinitImageUpdate* Create(std::unordered_set<std::string>* image_class_descriptors,
+ Thread* self, ClassLinker* linker, std::string* error_msg) {
std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(image_class_descriptors, self,
linker));
if (res->art_method_class_ == nullptr) {
@@ -867,7 +890,7 @@ class ClinitImageUpdate {
}
private:
- ClinitImageUpdate(std::set<std::string>* image_class_descriptors, Thread* self,
+ ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self,
ClassLinker* linker)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
image_class_descriptors_(image_class_descriptors), self_(self) {
@@ -933,7 +956,7 @@ class ClinitImageUpdate {
}
mutable std::unordered_set<mirror::Object*> marked_objects_;
- std::set<std::string>* const image_class_descriptors_;
+ std::unordered_set<std::string>* const image_class_descriptors_;
std::vector<mirror::Class*> image_classes_;
const mirror::Class* art_method_class_;
const mirror::Class* dex_cache_class_;
@@ -2079,7 +2102,8 @@ void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFi
VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
}
-void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
+void CompilerDriver::CompileClass(const ParallelCompilationManager* manager,
+ size_t class_def_index) {
ATRACE_CALL();
const DexFile& dex_file = *manager->GetDexFile();
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
@@ -2225,9 +2249,11 @@ void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_i
// Basic checks, e.g., not <clinit>.
verification_results_->IsCandidateForCompilation(method_ref, access_flags) &&
// Did not fail to create VerifiedMethod metadata.
- has_verified_method;
+ has_verified_method &&
+ // Is eligible for compilation by the methods-to-compile filter.
+ IsMethodToCompile(method_ref);
if (compile) {
- // NOTE: if compiler declines to compile this method, it will return nullptr.
+ // NOTE: if compiler declines to compile this method, it will return null.
compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx,
method_idx, class_loader, dex_file);
}
@@ -2381,7 +2407,7 @@ void CompilerDriver::AddRequiresConstructorBarrier(Thread* self, const DexFile*
}
bool CompilerDriver::RequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
- uint16_t class_def_index) {
+ uint16_t class_def_index) const {
ReaderMutexLock mu(self, freezing_constructor_lock_);
return freezing_constructor_classes_.count(ClassReference(dex_file, class_def_index)) != 0;
}
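The methods-to-compile filter added here keys on PrettyMethod() strings, so callers populate the set with fully spelled-out signatures (return type, class, name, parameter list), as the driver test further down does. A rough usage sketch under that assumption:

    #include <string>
    #include <unordered_set>

    // Hypothetical setup; keys must match PrettyMethod() output exactly.
    // Passing nullptr instead leaves every method eligible.
    std::unordered_set<std::string>* compiled_methods =
        new std::unordered_set<std::string>({
            "int StaticLeafMethods.sum(int, int, int)",
        });
    // Handed to the CompilerDriver constructor as the new
    // compiled_methods argument, next to compiled_classes.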
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 1a4ae13176..03c5c5c352 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -19,6 +19,7 @@
#include <set>
#include <string>
+#include <unordered_set>
#include <vector>
#include "arch/instruction_set.h"
@@ -93,7 +94,7 @@ class CompilerDriver {
// Create a compiler targeting the requested "instruction_set".
// "image" should be true if image specific optimizations should be
// enabled. "image_classes" lets the compiler know what classes it
- // can assume will be in the image, with nullptr implying all available
+ // can assume will be in the image, with null implying all available
// classes.
explicit CompilerDriver(const CompilerOptions* compiler_options,
VerificationResults* verification_results,
@@ -101,8 +102,9 @@ class CompilerDriver {
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
- bool image, std::set<std::string>* image_classes,
- std::set<std::string>* compiled_classes,
+ bool image, std::unordered_set<std::string>* image_classes,
+ std::unordered_set<std::string>* compiled_classes,
+ std::unordered_set<std::string>* compiled_methods,
size_t thread_count, bool dump_stats, bool dump_passes,
const std::string& dump_cfg_file_name,
CumulativeLogger* timer, int swap_fd,
@@ -154,7 +156,7 @@ class CompilerDriver {
return image_;
}
- const std::set<std::string>* GetImageClasses() const {
+ const std::unordered_set<std::string>* GetImageClasses() const {
return image_classes_.get();
}
@@ -187,7 +189,8 @@ class CompilerDriver {
void AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
uint16_t class_def_index);
- bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file, uint16_t class_def_index);
+ bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
+ uint16_t class_def_index) const;
// Callbacks from compiler to see what runtime checks must be generated.
@@ -225,7 +228,7 @@ class CompilerDriver {
mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve compiling method's class. Returns nullptr on failure.
+ // Resolve compiling method's class. Returns null on failure.
mirror::Class* ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
@@ -237,7 +240,7 @@ class CompilerDriver {
const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve a field. Returns nullptr on failure, including incompatible class change.
+ // Resolve a field. Returns null on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
ArtField* ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -287,7 +290,7 @@ class CompilerDriver {
ArtField* resolved_field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve a method. Returns nullptr on failure, including incompatible class change.
+ // Resolve a method. Returns null on failure, including incompatible class change.
mirror::ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
@@ -397,6 +400,10 @@ class CompilerDriver {
return thread_count_;
}
+ bool GetDumpStats() const {
+ return dump_stats_;
+ }
+
bool GetDumpPasses() const {
return dump_passes_;
}
@@ -419,9 +426,12 @@ class CompilerDriver {
// Checks if class specified by type_idx is one of the image_classes_
bool IsImageClass(const char* descriptor) const;
- // Checks if the provided class should be compiled, i.e., is in classes_to_compile_.
+ // Checks whether the provided class should be compiled, i.e., is in classes_to_compile_.
bool IsClassToCompile(const char* descriptor) const;
+ // Checks whether the provided method should be compiled, i.e., is in methods_to_compile_.
+ bool IsMethodToCompile(const MethodReference& method_ref) const;
+
void RecordClassStatus(ClassReference ref, mirror::Class::Status status)
LOCKS_EXCLUDED(compiled_classes_lock_);
@@ -582,14 +592,19 @@ class CompilerDriver {
const bool image_;
// If image_ is true, specifies the classes that will be included in
- // the image. Note if image_classes_ is nullptr, all classes are
+ // the image. Note if image_classes_ is null, all classes are
// included in the image.
- std::unique_ptr<std::set<std::string>> image_classes_;
+ std::unique_ptr<std::unordered_set<std::string>> image_classes_;
- // If image_ is true, specifies the classes that will be compiled in
- // the image. Note if classes_to_compile_ is nullptr, all classes are
- // included in the image.
- std::unique_ptr<std::set<std::string>> classes_to_compile_;
+ // Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
+ // all classes are eligible for compilation (duplication filters etc. will still apply).
+ // This option may be restricted to the boot image, depending on a flag in the implementation.
+ std::unique_ptr<std::unordered_set<std::string>> classes_to_compile_;
+
+ // Specifies the methods that will be compiled. Note that if methods_to_compile_ is null,
+ // all methods are eligible for compilation (compilation filters etc. will still apply).
+ // This option may be restricted to the boot image, depending on a flag in the implementation.
+ std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_;
bool had_hard_verifier_failure_;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index e78ff9078b..5085f32aec 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -56,20 +56,20 @@ class CompilerDriverTest : public CommonCompilerTest {
CHECK(started);
env_ = Thread::Current()->GetJniEnv();
class_ = env_->FindClass(class_name);
- CHECK(class_ != NULL) << "Class not found: " << class_name;
+ CHECK(class_ != nullptr) << "Class not found: " << class_name;
if (is_virtual) {
mid_ = env_->GetMethodID(class_, method, signature);
} else {
mid_ = env_->GetStaticMethodID(class_, method, signature);
}
- CHECK(mid_ != NULL) << "Method not found: " << class_name << "." << method << signature;
+ CHECK(mid_ != nullptr) << "Method not found: " << class_name << "." << method << signature;
}
void MakeAllExecutable(jobject class_loader) {
const std::vector<const DexFile*> class_path = GetDexFiles(class_loader);
for (size_t i = 0; i != class_path.size(); ++i) {
const DexFile* dex_file = class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
MakeDexFileExecutable(class_loader, *dex_file);
}
}
@@ -84,7 +84,7 @@ class CompilerDriverTest : public CommonCompilerTest {
Handle<mirror::ClassLoader> loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
- CHECK(c != NULL);
+ CHECK(c != nullptr);
for (size_t j = 0; j < c->NumDirectMethods(); j++) {
MakeExecutable(c->GetDirectMethod(j));
}
@@ -101,39 +101,38 @@ class CompilerDriverTest : public CommonCompilerTest {
// Disabled due to 10 second runtime on host
TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
- CompileAll(NULL);
+ CompileAll(nullptr);
// All libcore references should resolve
ScopedObjectAccess soa(Thread::Current());
- ASSERT_TRUE(java_lang_dex_file_ != NULL);
+ ASSERT_TRUE(java_lang_dex_file_ != nullptr);
const DexFile& dex = *java_lang_dex_file_;
mirror::DexCache* dex_cache = class_linker_->FindDexCache(dex);
EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings());
for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
const mirror::String* string = dex_cache->GetResolvedString(i);
- EXPECT_TRUE(string != NULL) << "string_idx=" << i;
+ EXPECT_TRUE(string != nullptr) << "string_idx=" << i;
}
EXPECT_EQ(dex.NumTypeIds(), dex_cache->NumResolvedTypes());
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
mirror::Class* type = dex_cache->GetResolvedType(i);
- EXPECT_TRUE(type != NULL) << "type_idx=" << i
+ EXPECT_TRUE(type != nullptr) << "type_idx=" << i
<< " " << dex.GetTypeDescriptor(dex.GetTypeId(i));
}
EXPECT_EQ(dex.NumMethodIds(), dex_cache->NumResolvedMethods());
for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
mirror::ArtMethod* method = dex_cache->GetResolvedMethod(i);
- EXPECT_TRUE(method != NULL) << "method_idx=" << i
+ EXPECT_TRUE(method != nullptr) << "method_idx=" << i
<< " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
<< " " << dex.GetMethodName(dex.GetMethodId(i));
- EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != NULL) << "method_idx=" << i
- << " "
- << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
- << " " << dex.GetMethodName(dex.GetMethodId(i));
+ EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr) << "method_idx=" << i
+ << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) << " "
+ << dex.GetMethodName(dex.GetMethodId(i));
}
EXPECT_EQ(dex.NumFieldIds(), dex_cache->NumResolvedFields());
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(i, dex_cache);
- EXPECT_TRUE(field != NULL) << "field_idx=" << i
+ EXPECT_TRUE(field != nullptr) << "field_idx=" << i
<< " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i))
<< " " << dex.GetFieldName(dex.GetFieldId(i));
}
@@ -153,14 +152,14 @@ TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
CompileDirectMethod(NullHandle<mirror::ClassLoader>(), "java.lang.Object", "<init>", "()V");
class_loader = LoadDex("AbstractMethod");
}
- ASSERT_TRUE(class_loader != NULL);
+ ASSERT_TRUE(class_loader != nullptr);
EnsureCompiled(class_loader, "AbstractClass", "foo", "()V", true);
// Create a jobj_ of ConcreteClass, NOT AbstractClass.
jclass c_class = env_->FindClass("ConcreteClass");
jmethodID constructor = env_->GetMethodID(c_class, "<init>", "()V");
jobject jobj_ = env_->NewObject(c_class, constructor);
- ASSERT_TRUE(jobj_ != NULL);
+ ASSERT_TRUE(jobj_ != nullptr);
// Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception.
env_->CallNonvirtualVoidMethod(jobj_, class_, mid_);
@@ -175,6 +174,60 @@ TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
}
}
+class CompilerDriverMethodsTest : public CompilerDriverTest {
+ protected:
+ std::unordered_set<std::string>* GetCompiledMethods() OVERRIDE {
+ return new std::unordered_set<std::string>({
+ "byte StaticLeafMethods.identity(byte)",
+ "int StaticLeafMethods.sum(int, int, int)",
+ "double StaticLeafMethods.sum(double, double, double, double)"
+ });
+ }
+};
+
+TEST_F(CompilerDriverMethodsTest, Selection) {
+ Thread* self = Thread::Current();
+ jobject class_loader;
+ {
+ ScopedObjectAccess soa(self);
+ class_loader = LoadDex("StaticLeafMethods");
+ }
+ ASSERT_NE(class_loader, nullptr);
+
+ // Need to enable dex-file writability. Methods rejected for compilation will run through
+ // the dex-to-dex compiler.
+ for (const DexFile* dex_file : GetDexFiles(class_loader)) {
+ ASSERT_TRUE(dex_file->EnableWrite());
+ }
+
+ CompileAll(class_loader);
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ StackHandleScope<1> hs(self);
+ ScopedObjectAccess soa(self);
+ Handle<mirror::ClassLoader> h_loader(hs.NewHandle(
+ reinterpret_cast<mirror::ClassLoader*>(self->DecodeJObject(class_loader))));
+ mirror::Class* klass = class_linker->FindClass(self, "LStaticLeafMethods;", h_loader);
+ ASSERT_NE(klass, nullptr);
+
+ std::unique_ptr<std::unordered_set<std::string>> expected(GetCompiledMethods());
+
+ for (int32_t i = 0; static_cast<uint32_t>(i) < klass->NumDirectMethods(); i++) {
+ mirror::ArtMethod* m = klass->GetDirectMethod(i);
+ std::string name = PrettyMethod(m, true);
+ const void* code =
+ m->GetEntryPointFromQuickCompiledCodePtrSize(InstructionSetPointerSize(kRuntimeISA));
+ ASSERT_NE(code, nullptr);
+ if (expected->find(name) != expected->end()) {
+ expected->erase(name);
+ EXPECT_FALSE(class_linker->IsQuickToInterpreterBridge(code));
+ } else {
+ EXPECT_TRUE(class_linker->IsQuickToInterpreterBridge(code));
+ }
+ }
+ EXPECT_TRUE(expected->empty());
+}
+
// TODO: need check-cast test (when stub complete & we can throw/catch
} // namespace art
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 03ae489da1..398300699e 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -21,6 +21,7 @@
#include "dex_file.h"
#include "jni.h"
+#include "base/arena_object.h"
namespace art {
namespace mirror {
@@ -31,7 +32,7 @@ class ClassLinker;
struct CompilationUnit;
class VerifiedMethod;
-class DexCompilationUnit {
+class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
public:
explicit DexCompilationUnit(CompilationUnit* cu);
diff --git a/compiler/dwarf/dwarf_constants.h b/compiler/dwarf/dwarf_constants.h
index 8e39ca703b..61a44cdabc 100644
--- a/compiler/dwarf/dwarf_constants.h
+++ b/compiler/dwarf/dwarf_constants.h
@@ -658,6 +658,28 @@ enum CallFrameInstruction : uint8_t {
DW_CFA_hi_user = 0x3f
};
+enum ExceptionHeaderValueFormat : uint8_t {
+ DW_EH_PE_native = 0x00,
+ DW_EH_PE_uleb128 = 0x01,
+ DW_EH_PE_udata2 = 0x02,
+ DW_EH_PE_udata4 = 0x03,
+ DW_EH_PE_udata8 = 0x04,
+ DW_EH_PE_sleb128 = 0x09,
+ DW_EH_PE_sdata2 = 0x0A,
+ DW_EH_PE_sdata4 = 0x0B,
+ DW_EH_PE_sdata8 = 0x0C,
+ DW_EH_PE_omit = 0xFF,
+};
+
+enum ExceptionHeaderValueApplication : uint8_t {
+ DW_EH_PE_absptr = 0x00,
+ DW_EH_PE_pcrel = 0x10,
+ DW_EH_PE_textrel = 0x20,
+ DW_EH_PE_datarel = 0x30,
+ DW_EH_PE_funcrel = 0x40,
+ DW_EH_PE_aligned = 0x50,
+};
+
} // namespace dwarf
} // namespace art
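The two enums above are designed to be or-ed into a single pointer-encoding byte: the low nibble selects the value format, the high nibble how the value is applied. For instance, the encoding GCC commonly emits for .eh_frame pointers on x86, a PC-relative signed 4-byte value, composes as:

    #include <cstdint>

    // 0x10 (DW_EH_PE_pcrel) | 0x0B (DW_EH_PE_sdata4) == 0x1B.
    const uint8_t encoding = DW_EH_PE_pcrel | DW_EH_PE_sdata4;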
diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/dwarf/dwarf_test.cc
index 98f691a7a1..edba00aeaa 100644
--- a/compiler/dwarf/dwarf_test.cc
+++ b/compiler/dwarf/dwarf_test.cc
@@ -16,6 +16,7 @@
#include "dwarf_test.h"
+#include "dwarf/dwarf_constants.h"
#include "dwarf/debug_frame_opcode_writer.h"
#include "dwarf/debug_info_entry_writer.h"
#include "dwarf/debug_line_opcode_writer.h"
@@ -119,7 +120,8 @@ TEST_F(DwarfTest, DebugFrame) {
DW_CHECK_NEXT("DW_CFA_restore: r5 (ebp)");
DebugFrameOpCodeWriter<> initial_opcodes;
- WriteEhFrameCIE(is64bit, Reg(is64bit ? 16 : 8), initial_opcodes, &eh_frame_data_);
+ WriteEhFrameCIE(is64bit, DW_EH_PE_absptr, Reg(is64bit ? 16 : 8),
+ initial_opcodes, &eh_frame_data_);
std::vector<uintptr_t> eh_frame_patches;
std::vector<uintptr_t> expected_patches { 28 }; // NOLINT
WriteEhFrameFDE(is64bit, 0, 0x01000000, 0x01000000, opcodes.data(),
@@ -132,7 +134,8 @@ TEST_F(DwarfTest, DebugFrame) {
TEST_F(DwarfTest, DebugFrame64) {
constexpr bool is64bit = true;
DebugFrameOpCodeWriter<> initial_opcodes;
- WriteEhFrameCIE(is64bit, Reg(16), initial_opcodes, &eh_frame_data_);
+ WriteEhFrameCIE(is64bit, DW_EH_PE_absptr, Reg(16),
+ initial_opcodes, &eh_frame_data_);
DebugFrameOpCodeWriter<> opcodes;
std::vector<uintptr_t> eh_frame_patches;
std::vector<uintptr_t> expected_patches { 32 }; // NOLINT
@@ -170,7 +173,8 @@ TEST_F(DwarfTest, x86_64_RegisterMapping) {
DW_CHECK_NEXT("DW_CFA_offset: r14 (r14)");
DW_CHECK_NEXT("DW_CFA_offset: r15 (r15)");
DebugFrameOpCodeWriter<> initial_opcodes;
- WriteEhFrameCIE(is64bit, Reg(16), initial_opcodes, &eh_frame_data_);
+ WriteEhFrameCIE(is64bit, DW_EH_PE_absptr, Reg(16),
+ initial_opcodes, &eh_frame_data_);
std::vector<uintptr_t> eh_frame_patches;
WriteEhFrameFDE(is64bit, 0, 0x0100000000000000, 0x0200000000000000,
opcodes.data(), &eh_frame_data_, &eh_frame_patches);
diff --git a/compiler/dwarf/dwarf_test.h b/compiler/dwarf/dwarf_test.h
index dd5e0c286e..99b8e793d1 100644
--- a/compiler/dwarf/dwarf_test.h
+++ b/compiler/dwarf/dwarf_test.h
@@ -55,39 +55,8 @@ class DwarfTest : public CommonRuntimeTest {
expected_lines_.push_back(ExpectedLine {substr, next, at_file, at_line});
}
- static std::string GetObjdumpPath() {
- const char* android_build_top = getenv("ANDROID_BUILD_TOP");
- if (android_build_top != nullptr) {
- std::string host_prebuilts = std::string(android_build_top) +
- "/prebuilts/gcc/linux-x86/host/";
- // Read the content of the directory.
- std::set<std::string> entries;
- DIR* dir = opendir(host_prebuilts.c_str());
- if (dir != nullptr) {
- struct dirent* entry;
- while ((entry = readdir(dir)) != nullptr) {
- if (strstr(entry->d_name, "linux-glibc")) {
- entries.insert(host_prebuilts + entry->d_name);
- }
- }
- closedir(dir);
- }
- // Strings are sorted so the last one should be the most recent version.
- if (!entries.empty()) {
- std::string path = *entries.rbegin() + "/x86_64-linux/bin/objdump";
- struct stat st;
- if (stat(path.c_str(), &st) == 0) {
- return path; // File exists.
- }
- }
- }
- ADD_FAILURE() << "Can not find prebuild objdump.";
- return "objdump"; // Use the system objdump as fallback.
- }
-
// Pretty-print the generated DWARF data using objdump.
- template<typename Elf_Word, typename Elf_Sword, typename Elf_Addr, typename Elf_Dyn,
- typename Elf_Sym, typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr>
+ template<typename ElfTypes>
std::vector<std::string> Objdump(bool is64bit, const char* args) {
// Write simple elf file with just the DWARF sections.
class NoCode : public CodeOutput {
@@ -96,42 +65,41 @@ class DwarfTest : public CommonRuntimeTest {
} code;
ScratchFile file;
InstructionSet isa = is64bit ? kX86_64 : kX86;
- ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr> builder(
+ ElfBuilder<ElfTypes> builder(
&code, file.GetFile(), isa, 0, 0, 0, 0, 0, 0, false, false);
- typedef ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> Section;
+ typedef ElfRawSectionBuilder<ElfTypes> Section;
+ Section debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ Section debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ Section debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ Section debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ Section eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
if (!debug_info_data_.empty()) {
- Section debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
debug_info.SetBuffer(debug_info_data_);
- builder.RegisterRawSection(debug_info);
+ builder.RegisterRawSection(&debug_info);
}
if (!debug_abbrev_data_.empty()) {
- Section debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
debug_abbrev.SetBuffer(debug_abbrev_data_);
- builder.RegisterRawSection(debug_abbrev);
+ builder.RegisterRawSection(&debug_abbrev);
}
if (!debug_str_data_.empty()) {
- Section debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
debug_str.SetBuffer(debug_str_data_);
- builder.RegisterRawSection(debug_str);
+ builder.RegisterRawSection(&debug_str);
}
if (!debug_line_data_.empty()) {
- Section debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
debug_line.SetBuffer(debug_line_data_);
- builder.RegisterRawSection(debug_line);
+ builder.RegisterRawSection(&debug_line);
}
if (!eh_frame_data_.empty()) {
- Section eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
eh_frame.SetBuffer(eh_frame_data_);
- builder.RegisterRawSection(eh_frame);
+ builder.RegisterRawSection(&eh_frame);
}
builder.Init();
builder.Write();
// Read the elf file back using objdump.
std::vector<std::string> lines;
- std::string cmd = GetObjdumpPath();
- cmd = cmd + " " + args + " " + file.GetFilename() + " 2>&1";
+ std::string cmd = GetAndroidHostToolsDir();
+ cmd = cmd + "objdump " + args + " " + file.GetFilename() + " 2>&1";
FILE* output = popen(cmd.data(), "r");
char buffer[1024];
const char* line;
@@ -155,11 +123,9 @@ class DwarfTest : public CommonRuntimeTest {
std::vector<std::string> Objdump(bool is64bit, const char* args) {
if (is64bit) {
- return Objdump<Elf64_Word, Elf64_Sword, Elf64_Addr, Elf64_Dyn,
- Elf64_Sym, Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr>(is64bit, args);
+ return Objdump<ElfTypes64>(is64bit, args);
} else {
- return Objdump<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
- Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr>(is64bit, args);
+ return Objdump<ElfTypes32>(is64bit, args);
}
}
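The eight separate Elf32_*/Elf64_* template parameters above collapse into a single ElfTypes traits argument. A sketch of the shape such a traits type must have, inferred from the using-declarations in the elf_builder.h hunks below (not the actual ART header):

    #include <elf.h>

    struct ElfTypes32 {
      using Word  = Elf32_Word;
      using Sword = Elf32_Sword;
      using Addr  = Elf32_Addr;
      using Dyn   = Elf32_Dyn;
      using Sym   = Elf32_Sym;
      using Ehdr  = Elf32_Ehdr;
      using Phdr  = Elf32_Phdr;
      using Shdr  = Elf32_Shdr;
    };
    // ElfTypes64 aliases the Elf64_* counterparts the same way.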
diff --git a/compiler/dwarf/headers.h b/compiler/dwarf/headers.h
index 760f53c6e4..9f64766e18 100644
--- a/compiler/dwarf/headers.h
+++ b/compiler/dwarf/headers.h
@@ -22,6 +22,7 @@
#include "dwarf/debug_frame_opcode_writer.h"
#include "dwarf/debug_info_entry_writer.h"
#include "dwarf/debug_line_opcode_writer.h"
+#include "dwarf/dwarf_constants.h"
#include "dwarf/register.h"
#include "dwarf/writer.h"
@@ -36,7 +37,9 @@ namespace dwarf {
// Write common information entry (CIE) to .eh_frame section.
template<typename Allocator>
-void WriteEhFrameCIE(bool is64bit, Reg return_address_register,
+void WriteEhFrameCIE(bool is64bit,
+ ExceptionHeaderValueApplication address_type,
+ Reg return_address_register,
const DebugFrameOpCodeWriter<Allocator>& opcodes,
std::vector<uint8_t>* eh_frame) {
Writer<> writer(eh_frame);
@@ -50,9 +53,9 @@ void WriteEhFrameCIE(bool is64bit, Reg return_address_register,
writer.PushUleb128(return_address_register.num()); // ubyte in DWARF2.
writer.PushUleb128(1); // z: Augmentation data size.
if (is64bit) {
- writer.PushUint8(0x04); // R: ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata8).
+ writer.PushUint8(address_type | DW_EH_PE_udata8); // R: Pointer encoding.
} else {
- writer.PushUint8(0x03); // R: ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
+ writer.PushUint8(address_type | DW_EH_PE_udata4); // R: Pointer encoding.
}
writer.PushData(opcodes.data());
writer.Pad(is64bit ? 8 : 4);
diff --git a/compiler/dwarf/register.h b/compiler/dwarf/register.h
index fa666dffa9..70452377dd 100644
--- a/compiler/dwarf/register.h
+++ b/compiler/dwarf/register.h
@@ -33,6 +33,7 @@ class Reg {
// There are ways around this in DWARF but they are complex.
// It would be much simpler to always spill whole D registers.
// Arm64 mapping is correct since we already do this there.
+ // libunwind might struggle with the new mapping as well.
static Reg ArmCore(int num) { return Reg(num); }
static Reg ArmFp(int num) { return Reg(64 + num); } // S0–S31.
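Concretely, the S-register mapping above places S0 at DWARF register 64 and S31 at 95; the new comment warns that switching to D-register numbering would shift these and could trip unwinders such as libunwind:

    #include <cassert>

    // In a test body, under the mapping above:
    assert(Reg::ArmFp(0).num() == 64);   // S0
    assert(Reg::ArmFp(31).num() == 95);  // S31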
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 124ed03c21..32c8cce031 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -26,11 +26,14 @@
namespace art {
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
+template <typename ElfTypes>
class ElfSectionBuilder : public ValueObject {
public:
+ using Elf_Word = typename ElfTypes::Word;
+ using Elf_Shdr = typename ElfTypes::Shdr;
+
ElfSectionBuilder(const std::string& sec_name, Elf_Word type, Elf_Word flags,
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> *link, Elf_Word info,
+ const ElfSectionBuilder<ElfTypes> *link, Elf_Word info,
Elf_Word align, Elf_Word entsize)
: section_index_(0), name_(sec_name), link_(link) {
memset(&section_, 0, sizeof(section_));
@@ -75,9 +78,14 @@ class ElfSectionBuilder : public ValueObject {
const ElfSectionBuilder* const link_;
};
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Dyn, typename Elf_Shdr>
-class ElfDynamicBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+template <typename ElfTypes>
+class ElfDynamicBuilder FINAL : public ElfSectionBuilder<ElfTypes> {
public:
+ using Elf_Word = typename ElfTypes::Word;
+ using Elf_Sword = typename ElfTypes::Sword;
+ using Elf_Shdr = typename ElfTypes::Shdr;
+ using Elf_Dyn = typename ElfTypes::Dyn;
+
void AddDynamicTag(Elf_Sword tag, Elf_Word d_un) {
if (tag == DT_NULL) {
return;
@@ -86,7 +94,7 @@ class ElfDynamicBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, El
}
void AddDynamicTag(Elf_Sword tag, Elf_Word d_un,
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section) {
+ const ElfSectionBuilder<ElfTypes>* section) {
if (tag == DT_NULL) {
return;
}
@@ -94,9 +102,9 @@ class ElfDynamicBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, El
}
ElfDynamicBuilder(const std::string& sec_name,
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> *link)
- : ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, SHT_DYNAMIC, SHF_ALLOC | SHF_ALLOC,
- link, 0, kPageSize, sizeof(Elf_Dyn)) {}
+ ElfSectionBuilder<ElfTypes> *link)
+ : ElfSectionBuilder<ElfTypes>(sec_name, SHT_DYNAMIC, SHF_ALLOC | SHF_ALLOC,
+ link, 0, kPageSize, sizeof(Elf_Dyn)) {}
~ElfDynamicBuilder() {}
Elf_Word GetSize() const {
@@ -129,21 +137,22 @@ class ElfDynamicBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, El
private:
struct ElfDynamicState {
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section_;
+ const ElfSectionBuilder<ElfTypes>* section_;
Elf_Sword tag_;
Elf_Word off_;
};
std::vector<ElfDynamicState> dynamics_;
};
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
-class ElfRawSectionBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+template <typename ElfTypes>
+class ElfRawSectionBuilder FINAL : public ElfSectionBuilder<ElfTypes> {
public:
+ using Elf_Word = typename ElfTypes::Word;
+
ElfRawSectionBuilder(const std::string& sec_name, Elf_Word type, Elf_Word flags,
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* link, Elf_Word info,
+ const ElfSectionBuilder<ElfTypes>* link, Elf_Word info,
Elf_Word align, Elf_Word entsize)
- : ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, type, flags, link, info, align,
- entsize) {
+ : ElfSectionBuilder<ElfTypes>(sec_name, type, flags, link, info, align, entsize) {
}
ElfRawSectionBuilder(const ElfRawSectionBuilder&) = default;
@@ -161,13 +170,14 @@ class ElfRawSectionBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword,
std::vector<uint8_t> buf_;
};
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
-class ElfOatSectionBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+template <typename ElfTypes>
+class ElfOatSectionBuilder FINAL : public ElfSectionBuilder<ElfTypes> {
public:
+ using Elf_Word = typename ElfTypes::Word;
+
ElfOatSectionBuilder(const std::string& sec_name, Elf_Word size, Elf_Word offset,
Elf_Word type, Elf_Word flags)
- : ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, type, flags, nullptr, 0, kPageSize,
- 0),
+ : ElfSectionBuilder<ElfTypes>(sec_name, type, flags, nullptr, 0, kPageSize, 0),
offset_(offset), size_(size) {
}
@@ -206,14 +216,17 @@ static inline unsigned elfhash(const char *_name) {
return h;
}
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr, typename Elf_Sym,
- typename Elf_Shdr>
-class ElfSymtabBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+template <typename ElfTypes>
+class ElfSymtabBuilder FINAL : public ElfSectionBuilder<ElfTypes> {
public:
+ using Elf_Addr = typename ElfTypes::Addr;
+ using Elf_Word = typename ElfTypes::Word;
+ using Elf_Sym = typename ElfTypes::Sym;
+
// Add a symbol with given name to this symtab. The symbol refers to
// 'relative_addr' within the given section and has the given attributes.
void AddSymbol(const std::string& name,
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section,
+ const ElfSectionBuilder<ElfTypes>* section,
Elf_Addr addr,
bool is_relative,
Elf_Word size,
@@ -228,14 +241,14 @@ class ElfSymtabBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf
ElfSymtabBuilder(const std::string& sec_name, Elf_Word type,
const std::string& str_name, Elf_Word str_type, bool alloc)
- : ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, type, ((alloc) ? SHF_ALLOC : 0U),
- &strtab_, 0, sizeof(Elf_Word),
- sizeof(Elf_Sym)), str_name_(str_name),
- str_type_(str_type),
- strtab_(str_name,
- str_type,
- ((alloc) ? SHF_ALLOC : 0U),
- nullptr, 0, 1, 1) {
+ : ElfSectionBuilder<ElfTypes>(sec_name, type, ((alloc) ? SHF_ALLOC : 0U),
+ &strtab_, 0, sizeof(Elf_Word),
+ sizeof(Elf_Sym)), str_name_(str_name),
+ str_type_(str_type),
+ strtab_(str_name,
+ str_type,
+ ((alloc) ? SHF_ALLOC : 0U),
+ nullptr, 0, 1, 1) {
}
~ElfSymtabBuilder() {}
@@ -361,18 +374,18 @@ class ElfSymtabBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf
}
Elf_Word GetSize() const {
- // 1 is for the implicit NULL symbol.
+ // 1 is for the implicit null symbol.
return symbols_.size() + 1;
}
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* GetStrTab() {
+ ElfSectionBuilder<ElfTypes>* GetStrTab() {
return &strtab_;
}
private:
struct ElfSymbolState {
const std::string name_;
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section_;
+ const ElfSectionBuilder<ElfTypes>* section_;
Elf_Addr addr_;
Elf_Word size_;
bool is_relative_;
@@ -387,7 +400,7 @@ class ElfSymtabBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf
Elf_Word str_type_;
// The symbols in the same order they will be in the symbol table.
std::vector<ElfSymbolState> symbols_;
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> strtab_;
+ ElfSectionBuilder<ElfTypes> strtab_;
};
template <typename Elf_Word>
@@ -529,10 +542,18 @@ static inline constexpr Elf_Word NextOffset(const Elf_Shdr& cur, const Elf_Shdr&
return RoundUp(prev.sh_size + prev.sh_offset, cur.sh_addralign);
}
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr, typename Elf_Dyn,
- typename Elf_Sym, typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr>
+template <typename ElfTypes>
class ElfBuilder FINAL {
public:
+ using Elf_Addr = typename ElfTypes::Addr;
+ using Elf_Word = typename ElfTypes::Word;
+ using Elf_Sword = typename ElfTypes::Sword;
+ using Elf_Ehdr = typename ElfTypes::Ehdr;
+ using Elf_Shdr = typename ElfTypes::Shdr;
+ using Elf_Sym = typename ElfTypes::Sym;
+ using Elf_Phdr = typename ElfTypes::Phdr;
+ using Elf_Dyn = typename ElfTypes::Dyn;
+
ElfBuilder(CodeOutput* oat_writer,
File* elf_file,
InstructionSet isa,
@@ -557,7 +578,7 @@ class ElfBuilder FINAL {
hash_builder_(".hash", SHT_HASH, SHF_ALLOC, &dynsym_builder_, 0, sizeof(Elf_Word),
sizeof(Elf_Word)),
dynamic_builder_(".dynamic", &dynsym_builder_),
- shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, NULL, 0, 1, 1) {
+ shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, nullptr, 0, 1, 1) {
SetupEhdr();
SetupDynamic();
SetupRequiredSymbols();
@@ -565,11 +586,11 @@ class ElfBuilder FINAL {
}
~ElfBuilder() {}
- const ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>& GetTextBuilder() const {
+ const ElfOatSectionBuilder<ElfTypes>& GetTextBuilder() const {
return text_builder_;
}
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr>* GetSymtabBuilder() {
+ ElfSymtabBuilder<ElfTypes>* GetSymtabBuilder() {
return &symtab_builder_;
}
@@ -584,11 +605,12 @@ class ElfBuilder FINAL {
// | Elf_Ehdr |
// +-------------------------+
// | Elf_Phdr PHDR |
- // | Elf_Phdr LOAD R | .dynsym .dynstr .hash .rodata
+ // | Elf_Phdr LOAD R | .dynsym .dynstr .hash .eh_frame .eh_frame_hdr .rodata
// | Elf_Phdr LOAD R X | .text
// | Elf_Phdr LOAD RW | .bss (Optional)
// | Elf_Phdr LOAD RW | .dynamic
// | Elf_Phdr DYNAMIC | .dynamic
+ // | Elf_Phdr EH_FRAME R | .eh_frame_hdr
// +-------------------------+
// | .dynsym |
// | Elf_Sym STN_UNDEF |
@@ -615,6 +637,10 @@ class ElfBuilder FINAL {
// | ... |
// | Elf_Word chain[c - 1] |
// +-------------------------+
+ // | .eh_frame | (Optional)
+ // +-------------------------+
+ // | .eh_frame_hdr | (Optional)
+ // +-------------------------+
// | .rodata |
// | oatdata..oatexec-4 |
// +-------------------------+
@@ -648,23 +674,22 @@ class ElfBuilder FINAL {
// | .shstrtab\0 |
// | .symtab\0 | (Optional)
// | .strtab\0 | (Optional)
- // | .debug_str\0 | (Optional)
- // | .debug_info\0 | (Optional)
// | .eh_frame\0 | (Optional)
- // | .debug_line\0 | (Optional)
+ // | .eh_frame_hdr\0 | (Optional)
+ // | .debug_info\0 | (Optional)
// | .debug_abbrev\0 | (Optional)
+ // | .debug_str\0 | (Optional)
+ // | .debug_line\0 | (Optional)
// +-------------------------+ (Optional)
// | .debug_info | (Optional)
// +-------------------------+ (Optional)
// | .debug_abbrev | (Optional)
// +-------------------------+ (Optional)
- // | .eh_frame | (Optional)
+ // | .debug_str | (Optional)
// +-------------------------+ (Optional)
// | .debug_line | (Optional)
// +-------------------------+ (Optional)
- // | .debug_str | (Optional)
- // +-------------------------+ (Optional)
- // | Elf_Shdr NULL |
+ // | Elf_Shdr null |
// | Elf_Shdr .dynsym |
// | Elf_Shdr .dynstr |
// | Elf_Shdr .hash |
@@ -673,11 +698,12 @@ class ElfBuilder FINAL {
// | Elf_Shdr .bss | (Optional)
// | Elf_Shdr .dynamic |
// | Elf_Shdr .shstrtab |
+ // | Elf_Shdr .eh_frame | (Optional)
+ // | Elf_Shdr .eh_frame_hdr | (Optional)
// | Elf_Shdr .debug_info | (Optional)
// | Elf_Shdr .debug_abbrev | (Optional)
- // | Elf_Shdr .eh_frame | (Optional)
- // | Elf_Shdr .debug_line | (Optional)
// | Elf_Shdr .debug_str | (Optional)
+ // | Elf_Shdr .debug_line | (Optional)
// +-------------------------+
if (fatal_error_) {
@@ -718,6 +744,9 @@ class ElfBuilder FINAL {
program_headers_[PH_DYNAMIC].p_type = PT_DYNAMIC;
program_headers_[PH_DYNAMIC].p_flags = PF_R | PF_W;
+ program_headers_[PH_EH_FRAME_HDR].p_type = PT_NULL;
+ program_headers_[PH_EH_FRAME_HDR].p_flags = PF_R;
+
// Get the dynstr string.
dynstr_ = dynsym_builder_.GenerateStrtab();
@@ -828,10 +857,37 @@ class ElfBuilder FINAL {
hash_builder_.GetSection()->sh_size = hash_.size() * sizeof(Elf_Word);
hash_builder_.GetSection()->sh_link = hash_builder_.GetLink();
+ // Get the layout of the extra sections with SHF_ALLOC flag.
+ // This will deal with .eh_frame and .eh_frame_hdr.
+ // .eh_frame contains relative pointers to .text which we
+ // want to fixup between the calls to Init() and Write().
+ // Therefore we handle those sections here as opposed to Write().
+ // It also has the nice side effect of including .eh_frame
+ // with the rest of the LOAD_R segment. It must come before .rodata
+ // because .rodata and .text must be next to each other.
+ Elf_Shdr* prev = hash_builder_.GetSection();
+ for (auto* it : other_builders_) {
+ if ((it->GetSection()->sh_flags & SHF_ALLOC) != 0) {
+ it->GetSection()->sh_offset = NextOffset<Elf_Word, Elf_Shdr>(*it->GetSection(), *prev);
+ it->GetSection()->sh_addr = it->GetSection()->sh_offset;
+ it->GetSection()->sh_size = it->GetBuffer()->size();
+ it->GetSection()->sh_link = it->GetLink();
+ prev = it->GetSection();
+ }
+ }
+ // If the sections exist, check that they have been handled.
+ const auto* eh_frame = FindRawSection(".eh_frame");
+ if (eh_frame != nullptr) {
+ DCHECK_NE(eh_frame->GetSection()->sh_offset, 0u);
+ }
+ const auto* eh_frame_hdr = FindRawSection(".eh_frame_hdr");
+ if (eh_frame_hdr != nullptr) {
+ DCHECK_NE(eh_frame_hdr->GetSection()->sh_offset, 0u);
+ }
+
// Get the layout of the rodata section.
rodata_builder_.GetSection()->sh_offset =
- NextOffset<Elf_Word, Elf_Shdr>(*rodata_builder_.GetSection(),
- *hash_builder_.GetSection());
+ NextOffset<Elf_Word, Elf_Shdr>(*rodata_builder_.GetSection(), *prev);
rodata_builder_.GetSection()->sh_addr = rodata_builder_.GetSection()->sh_offset;
rodata_builder_.GetSection()->sh_size = rodata_builder_.GetSize();
rodata_builder_.GetSection()->sh_link = rodata_builder_.GetLink();
@@ -909,9 +965,7 @@ class ElfBuilder FINAL {
}
// Setup all the other sections.
- for (ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> *builder = other_builders_.data(),
- *end = builder + other_builders_.size();
- builder != end; ++builder) {
+ for (auto* builder : other_builders_) {
section_ptrs_.push_back(builder->GetSection());
AssignSectionStr(builder, &shstrtab_);
builder->SetSectionIndex(section_index_);
@@ -958,20 +1012,22 @@ class ElfBuilder FINAL {
}
}
- // Get the layout of the extra sections. (This will deal with the debug
- // sections if they are there)
- for (auto it = other_builders_.begin(); it != other_builders_.end(); ++it) {
- it->GetSection()->sh_offset = NextOffset<Elf_Word, Elf_Shdr>(*it->GetSection(), *prev);
- it->GetSection()->sh_addr = 0;
- it->GetSection()->sh_size = it->GetBuffer()->size();
- it->GetSection()->sh_link = it->GetLink();
-
- // We postpone adding an ElfFilePiece to keep the order in "pieces."
-
- prev = it->GetSection();
- if (debug_logging_) {
- LOG(INFO) << it->GetName() << " off=" << it->GetSection()->sh_offset
- << " size=" << it->GetSection()->sh_size;
+ // Get the layout of the extra sections without SHF_ALLOC flag.
+ // (This will deal with the debug sections if they are there)
+ for (auto* it : other_builders_) {
+ if ((it->GetSection()->sh_flags & SHF_ALLOC) == 0) {
+ it->GetSection()->sh_offset = NextOffset<Elf_Word, Elf_Shdr>(*it->GetSection(), *prev);
+ it->GetSection()->sh_addr = 0;
+ it->GetSection()->sh_size = it->GetBuffer()->size();
+ it->GetSection()->sh_link = it->GetLink();
+
+ // We postpone adding an ElfFilePiece to keep the order in "pieces."
+
+ prev = it->GetSection();
+ if (debug_logging_) {
+ LOG(INFO) << it->GetName() << " off=" << it->GetSection()->sh_offset
+ << " size=" << it->GetSection()->sh_size;
+ }
}
}
@@ -1044,6 +1100,26 @@ class ElfBuilder FINAL {
program_headers_[PH_DYNAMIC].p_memsz = dynamic_builder_.GetSection()->sh_size;
program_headers_[PH_DYNAMIC].p_align = dynamic_builder_.GetSection()->sh_addralign;
+ const auto* eh_frame_hdr = FindRawSection(".eh_frame_hdr");
+ if (eh_frame_hdr != nullptr) {
+ const auto* eh_frame = FindRawSection(".eh_frame");
+ // Check layout:
+ // 1) eh_frame is before eh_frame_hdr.
+ // 2) There's no gap.
+ CHECK(eh_frame != nullptr);
+ CHECK_LE(eh_frame->GetSection()->sh_offset, eh_frame_hdr->GetSection()->sh_offset);
+ CHECK_EQ(eh_frame->GetSection()->sh_offset + eh_frame->GetSection()->sh_size,
+ eh_frame_hdr->GetSection()->sh_offset);
+
+ program_headers_[PH_EH_FRAME_HDR].p_type = PT_GNU_EH_FRAME;
+ program_headers_[PH_EH_FRAME_HDR].p_offset = eh_frame_hdr->GetSection()->sh_offset;
+ program_headers_[PH_EH_FRAME_HDR].p_vaddr = eh_frame_hdr->GetSection()->sh_addr;
+ program_headers_[PH_EH_FRAME_HDR].p_paddr = eh_frame_hdr->GetSection()->sh_addr;
+ program_headers_[PH_EH_FRAME_HDR].p_filesz = eh_frame_hdr->GetSection()->sh_size;
+ program_headers_[PH_EH_FRAME_HDR].p_memsz = eh_frame_hdr->GetSection()->sh_size;
+ program_headers_[PH_EH_FRAME_HDR].p_align = eh_frame_hdr->GetSection()->sh_addralign;
+ }
+
// Finish setup of the Ehdr values.
elf_header_.e_phoff = PHDR_OFFSET;
elf_header_.e_shoff = sections_offset;
@@ -1108,7 +1184,7 @@ class ElfBuilder FINAL {
}
// Postponed debug info.
- for (auto it = other_builders_.begin(); it != other_builders_.end(); ++it) {
+ for (auto* it : other_builders_) {
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(it->GetName(), it->GetSection()->sh_offset,
it->GetBuffer()->data(),
it->GetBuffer()->size()));
@@ -1125,12 +1201,20 @@ class ElfBuilder FINAL {
return true;
}
- // Adds the given raw section to the builder. This will copy it. The caller
- // is responsible for deallocating their copy.
- void RegisterRawSection(ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> bld) {
+ // Adds the given raw section to the builder. It does not take ownership.
+ void RegisterRawSection(ElfRawSectionBuilder<ElfTypes>* bld) {
other_builders_.push_back(bld);
}
+ const ElfRawSectionBuilder<ElfTypes>* FindRawSection(const char* name) {
+ for (const auto* other_builder : other_builders_) {
+ if (other_builder->GetName() == name) {
+ return other_builder;
+ }
+ }
+ return nullptr;
+ }
+
private:
void SetISA(InstructionSet isa) {
switch (isa) {
@@ -1240,8 +1324,7 @@ class ElfBuilder FINAL {
}
}
- void AssignSectionStr(ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* builder,
- std::string* strtab) {
+ void AssignSectionStr(ElfSectionBuilder<ElfTypes>* builder, std::string* strtab) {
builder->GetSection()->sh_name = strtab->size();
*strtab += builder->GetName();
*strtab += '\0';
@@ -1282,7 +1365,8 @@ class ElfBuilder FINAL {
PH_LOAD_RW_BSS = 3,
PH_LOAD_RW_DYNAMIC = 4,
PH_DYNAMIC = 5,
- PH_NUM = 6,
+ PH_EH_FRAME_HDR = 6,
+ PH_NUM = 7,
};
static const uint32_t PHDR_SIZE = sizeof(Elf_Phdr) * PH_NUM;
Elf_Phdr program_headers_[PH_NUM];
@@ -1298,15 +1382,15 @@ class ElfBuilder FINAL {
std::vector<const Elf_Shdr*> section_ptrs_;
std::vector<Elf_Word> hash_;
- ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> text_builder_;
- ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> rodata_builder_;
- ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> bss_builder_;
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> dynsym_builder_;
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> symtab_builder_;
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> hash_builder_;
- ElfDynamicBuilder<Elf_Word, Elf_Sword, Elf_Dyn, Elf_Shdr> dynamic_builder_;
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> shstrtab_builder_;
- std::vector<ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>> other_builders_;
+ ElfOatSectionBuilder<ElfTypes> text_builder_;
+ ElfOatSectionBuilder<ElfTypes> rodata_builder_;
+ ElfOatSectionBuilder<ElfTypes> bss_builder_;
+ ElfSymtabBuilder<ElfTypes> dynsym_builder_;
+ ElfSymtabBuilder<ElfTypes> symtab_builder_;
+ ElfSectionBuilder<ElfTypes> hash_builder_;
+ ElfDynamicBuilder<ElfTypes> dynamic_builder_;
+ ElfSectionBuilder<ElfTypes> shstrtab_builder_;
+ std::vector<ElfRawSectionBuilder<ElfTypes>*> other_builders_;
DISALLOW_COPY_AND_ASSIGN(ElfBuilder);
};
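For readers following the template change: the eight Elf_* parameters collapse into a single ElfTypes tag. The real definitions live elsewhere in the ART tree; the shape the code above relies on (member typedef names taken from the usages in this diff, e.g. typename ElfTypes::Word) is roughly:

    #include <elf.h>

    // Sketch only, not the actual ART definition: one tag type bundles the
    // Elf32_* typedefs; ElfTypes64 mirrors it with the Elf64_* counterparts.
    struct ElfTypes32 {
      typedef Elf32_Word  Word;
      typedef Elf32_Sword Sword;
      typedef Elf32_Addr  Addr;
      typedef Elf32_Dyn   Dyn;
      typedef Elf32_Sym   Sym;
      typedef Elf32_Ehdr  Ehdr;
      typedef Elf32_Phdr  Phdr;
      typedef Elf32_Shdr  Shdr;
    };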
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index 39233ce94a..28e6999472 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -28,7 +28,9 @@
namespace art {
namespace dwarf {
-static void WriteEhFrameCIE(InstructionSet isa, std::vector<uint8_t>* eh_frame) {
+static void WriteEhFrameCIE(InstructionSet isa,
+ ExceptionHeaderValueApplication addr_type,
+ std::vector<uint8_t>* eh_frame) {
// Scratch registers should be marked as undefined. This tells the
// debugger that their values in the previous frame are not recoverable.
bool is64bit = Is64BitInstructionSet(isa);
@@ -53,8 +55,8 @@ static void WriteEhFrameCIE(InstructionSet isa, std::vector<uint8_t>* eh_frame)
opcodes.SameValue(Reg::ArmFp(reg));
}
}
- auto return_address_reg = Reg::ArmCore(14); // R14(LR).
- WriteEhFrameCIE(is64bit, return_address_reg, opcodes, eh_frame);
+ auto return_reg = Reg::ArmCore(14); // R14(LR).
+ WriteEhFrameCIE(is64bit, addr_type, return_reg, opcodes, eh_frame);
return;
}
case kArm64: {
@@ -76,8 +78,8 @@ static void WriteEhFrameCIE(InstructionSet isa, std::vector<uint8_t>* eh_frame)
opcodes.SameValue(Reg::Arm64Fp(reg));
}
}
- auto return_address_reg = Reg::Arm64Core(30); // R30(LR).
- WriteEhFrameCIE(is64bit, return_address_reg, opcodes, eh_frame);
+ auto return_reg = Reg::Arm64Core(30); // R30(LR).
+ WriteEhFrameCIE(is64bit, addr_type, return_reg, opcodes, eh_frame);
return;
}
case kMips:
@@ -92,11 +94,13 @@ static void WriteEhFrameCIE(InstructionSet isa, std::vector<uint8_t>* eh_frame)
opcodes.SameValue(Reg::MipsCore(reg));
}
}
- auto return_address_reg = Reg::MipsCore(31); // R31(RA).
- WriteEhFrameCIE(is64bit, return_address_reg, opcodes, eh_frame);
+ auto return_reg = Reg::MipsCore(31); // R31(RA).
+ WriteEhFrameCIE(is64bit, addr_type, return_reg, opcodes, eh_frame);
return;
}
case kX86: {
+ // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296
+ constexpr bool generate_opcodes_for_x86_fp = false;
DebugFrameOpCodeWriter<> opcodes;
opcodes.DefCFA(Reg::X86Core(4), 4); // R4(ESP).
opcodes.Offset(Reg::X86Core(8), -4); // R8(EIP).
@@ -111,11 +115,13 @@ static void WriteEhFrameCIE(InstructionSet isa, std::vector<uint8_t>* eh_frame)
}
}
// fp registers.
- for (int reg = 0; reg < 8; reg++) {
- opcodes.Undefined(Reg::X86Fp(reg));
+ if (generate_opcodes_for_x86_fp) {
+ for (int reg = 0; reg < 8; reg++) {
+ opcodes.Undefined(Reg::X86Fp(reg));
+ }
}
- auto return_address_reg = Reg::X86Core(8); // R8(EIP).
- WriteEhFrameCIE(is64bit, return_address_reg, opcodes, eh_frame);
+ auto return_reg = Reg::X86Core(8); // R8(EIP).
+ WriteEhFrameCIE(is64bit, addr_type, return_reg, opcodes, eh_frame);
return;
}
case kX86_64: {
@@ -140,8 +146,8 @@ static void WriteEhFrameCIE(InstructionSet isa, std::vector<uint8_t>* eh_frame)
opcodes.SameValue(Reg::X86_64Fp(reg));
}
}
- auto return_address_reg = Reg::X86_64Core(16); // R16(RIP).
- WriteEhFrameCIE(is64bit, return_address_reg, opcodes, eh_frame);
+ auto return_reg = Reg::X86_64Core(16); // R16(RIP).
+ WriteEhFrameCIE(is64bit, addr_type, return_reg, opcodes, eh_frame);
return;
}
case kNone:
@@ -152,22 +158,37 @@ static void WriteEhFrameCIE(InstructionSet isa, std::vector<uint8_t>* eh_frame)
}
void WriteEhFrame(const CompilerDriver* compiler,
- OatWriter* oat_writer,
- uint32_t text_section_offset,
- std::vector<uint8_t>* eh_frame) {
+ const OatWriter* oat_writer,
+ ExceptionHeaderValueApplication address_type,
+ std::vector<uint8_t>* eh_frame,
+ std::vector<uintptr_t>* eh_frame_patches,
+ std::vector<uint8_t>* eh_frame_hdr) {
const auto& method_infos = oat_writer->GetMethodDebugInfo();
const InstructionSet isa = compiler->GetInstructionSet();
+
+ // Write .eh_frame section.
size_t cie_offset = eh_frame->size();
- auto* eh_frame_patches = oat_writer->GetAbsolutePatchLocationsFor(".eh_frame");
- WriteEhFrameCIE(isa, eh_frame);
+ WriteEhFrameCIE(isa, address_type, eh_frame);
for (const OatWriter::DebugInfo& mi : method_infos) {
const SwapVector<uint8_t>* opcodes = mi.compiled_method_->GetCFIInfo();
if (opcodes != nullptr) {
WriteEhFrameFDE(Is64BitInstructionSet(isa), cie_offset,
- text_section_offset + mi.low_pc_, mi.high_pc_ - mi.low_pc_,
+ mi.low_pc_, mi.high_pc_ - mi.low_pc_,
opcodes, eh_frame, eh_frame_patches);
}
}
+
+ // Write .eh_frame_hdr section.
+ Writer<> header(eh_frame_hdr);
+ header.PushUint8(1); // Version.
+ header.PushUint8(DW_EH_PE_pcrel | DW_EH_PE_sdata4); // Encoding of .eh_frame pointer.
+ header.PushUint8(DW_EH_PE_omit); // Encoding of binary search table size.
+ header.PushUint8(DW_EH_PE_omit); // Encoding of binary search table addresses.
+ // The .eh_frame pointer: the .eh_frame_hdr section is emitted after the .eh_frame section,
+ // and we need to encode the pointer relative to this location, as libunwind doesn't honor
+ // datarel for eh_frame_hdr correctly.
+ header.PushInt32(-static_cast<int32_t>(eh_frame->size() + 4U));
+ // Omit binary search table size (number of entries).
+ // Omit binary search table.
}
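The PushInt32 value deserves a worked check: the pointer field sits 4 bytes into .eh_frame_hdr (one version byte plus three encoding bytes), and the layout CHECKs in elf_builder.h guarantee that .eh_frame_hdr starts exactly at the end of .eh_frame, so a DW_EH_PE_pcrel offset of -(eh_frame->size() + 4) lands on the start of .eh_frame. A self-contained verification with hypothetical offsets:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t eh_frame_start = 0x2000;  // hypothetical address
      const uint32_t eh_frame_size  = 0x530;   // hypothetical size
      // The pcrel field lives 4 bytes into .eh_frame_hdr, which itself starts
      // right at the end of .eh_frame (no gap, as the builder CHECKs).
      const uint32_t field_addr = eh_frame_start + eh_frame_size + 4;
      const int32_t encoded = -static_cast<int32_t>(eh_frame_size + 4);
      assert(field_addr + encoded == eh_frame_start);
    }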
/*
@@ -175,17 +196,20 @@ void WriteEhFrame(const CompilerDriver* compiler,
* @param oat_writer The Oat file Writer.
* @param eh_frame Call Frame Information.
* @param debug_info Compilation unit information.
+ * @param debug_info_patches Address locations to be patched.
* @param debug_abbrev Abbreviations used to generate dbg_info.
* @param debug_str Debug strings.
* @param debug_line Line number table.
+ * @param debug_line_patches Address locations to be patched.
*/
void WriteDebugSections(const CompilerDriver* compiler,
- OatWriter* oat_writer,
- uint32_t text_section_offset,
+ const OatWriter* oat_writer,
std::vector<uint8_t>* debug_info,
+ std::vector<uintptr_t>* debug_info_patches,
std::vector<uint8_t>* debug_abbrev,
std::vector<uint8_t>* debug_str,
- std::vector<uint8_t>* debug_line) {
+ std::vector<uint8_t>* debug_line,
+ std::vector<uintptr_t>* debug_line_patches) {
const std::vector<OatWriter::DebugInfo>& method_infos = oat_writer->GetMethodDebugInfo();
const InstructionSet isa = compiler->GetInstructionSet();
@@ -229,8 +253,8 @@ void WriteDebugSections(const CompilerDriver* compiler,
info.StartTag(DW_TAG_compile_unit, DW_CHILDREN_yes);
info.WriteStrp(DW_AT_producer, "Android dex2oat", debug_str);
info.WriteData1(DW_AT_language, DW_LANG_Java);
- info.WriteAddr(DW_AT_low_pc, cunit_low_pc + text_section_offset);
- info.WriteAddr(DW_AT_high_pc, cunit_high_pc + text_section_offset);
+ info.WriteAddr(DW_AT_low_pc, cunit_low_pc);
+ info.WriteAddr(DW_AT_high_pc, cunit_high_pc);
info.WriteData4(DW_AT_stmt_list, debug_line->size());
for (auto method_info : compilation_unit) {
std::string method_name = PrettyMethod(method_info->dex_method_index_,
@@ -240,12 +264,11 @@ void WriteDebugSections(const CompilerDriver* compiler,
}
info.StartTag(DW_TAG_subprogram, DW_CHILDREN_no);
info.WriteStrp(DW_AT_name, method_name.data(), debug_str);
- info.WriteAddr(DW_AT_low_pc, method_info->low_pc_ + text_section_offset);
- info.WriteAddr(DW_AT_high_pc, method_info->high_pc_ + text_section_offset);
+ info.WriteAddr(DW_AT_low_pc, method_info->low_pc_);
+ info.WriteAddr(DW_AT_high_pc, method_info->high_pc_);
info.EndTag(); // DW_TAG_subprogram
}
info.EndTag(); // DW_TAG_compile_unit
- auto* debug_info_patches = oat_writer->GetAbsolutePatchLocationsFor(".debug_info");
WriteDebugInfoCU(debug_abbrev_offset, info, debug_info, debug_info_patches);
// Write .debug_line section.
@@ -272,7 +295,7 @@ void WriteDebugSections(const CompilerDriver* compiler,
break;
}
DebugLineOpCodeWriter<> opcodes(false /* 32bit */, code_factor_bits_);
- opcodes.SetAddress(text_section_offset + cunit_low_pc);
+ opcodes.SetAddress(cunit_low_pc);
if (dwarf_isa != -1) {
opcodes.SetISA(dwarf_isa);
}
@@ -343,7 +366,6 @@ void WriteDebugSections(const CompilerDriver* compiler,
// Generate mapping opcodes from PC to Java lines.
const DefaultSrcMap& dex2line_map = debug_info_callbacks.dex2line_;
- uint32_t low_pc = text_section_offset + mi->low_pc_;
if (file_index != 0 && !dex2line_map.empty()) {
bool first = true;
for (SrcMapElem pc2dex : mi->compiled_method_->GetSrcMappingTable()) {
@@ -359,24 +381,23 @@ void WriteDebugSections(const CompilerDriver* compiler,
int first_line = dex2line_map.front().to_;
// Prologue is not a sensible place for a breakpoint.
opcodes.NegateStmt();
- opcodes.AddRow(low_pc, first_line);
+ opcodes.AddRow(mi->low_pc_, first_line);
opcodes.NegateStmt();
opcodes.SetPrologueEnd();
}
- opcodes.AddRow(low_pc + pc, line);
+ opcodes.AddRow(mi->low_pc_ + pc, line);
} else if (line != opcodes.CurrentLine()) {
- opcodes.AddRow(low_pc + pc, line);
+ opcodes.AddRow(mi->low_pc_ + pc, line);
}
}
}
} else {
// line 0 - instruction cannot be attributed to any source line.
- opcodes.AddRow(low_pc, 0);
+ opcodes.AddRow(mi->low_pc_, 0);
}
}
- opcodes.AdvancePC(text_section_offset + cunit_high_pc);
+ opcodes.AdvancePC(cunit_high_pc);
opcodes.EndSequence();
- auto* debug_line_patches = oat_writer->GetAbsolutePatchLocationsFor(".debug_line");
WriteDebugLineTable(directories, files, opcodes, debug_line, debug_line_patches);
}
}
diff --git a/compiler/elf_writer_debug.h b/compiler/elf_writer_debug.h
index 2c03b98777..5bf484185a 100644
--- a/compiler/elf_writer_debug.h
+++ b/compiler/elf_writer_debug.h
@@ -19,23 +19,27 @@
#include <vector>
+#include "dwarf/dwarf_constants.h"
#include "oat_writer.h"
namespace art {
namespace dwarf {
void WriteEhFrame(const CompilerDriver* compiler,
- OatWriter* oat_writer,
- uint32_t text_section_offset,
- std::vector<uint8_t>* eh_frame);
+ const OatWriter* oat_writer,
+ ExceptionHeaderValueApplication address_type,
+ std::vector<uint8_t>* eh_frame,
+ std::vector<uintptr_t>* eh_frame_patches,
+ std::vector<uint8_t>* eh_frame_hdr);
void WriteDebugSections(const CompilerDriver* compiler,
- OatWriter* oat_writer,
- uint32_t text_section_offset,
+ const OatWriter* oat_writer,
std::vector<uint8_t>* debug_info,
+ std::vector<uintptr_t>* debug_info_patches,
std::vector<uint8_t>* debug_abbrev,
std::vector<uint8_t>* debug_str,
- std::vector<uint8_t>* debug_line);
+ std::vector<uint8_t>* debug_line,
+ std::vector<uintptr_t>* debug_line_patches);
} // namespace dwarf
} // namespace art
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 429cd851e0..3b2ca9451e 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -39,16 +39,13 @@
namespace art {
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
-bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::Create(File* elf_file,
- OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host,
- const CompilerDriver& driver) {
+template <typename ElfTypes>
+bool ElfWriterQuick<ElfTypes>::Create(File* elf_file,
+ OatWriter* oat_writer,
+ const std::vector<const DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host,
+ const CompilerDriver& driver) {
ElfWriterQuick elf_writer(driver, elf_file);
return elf_writer.Write(oat_writer, dex_files, android_root, is_host);
}
@@ -67,21 +64,14 @@ class OatWriterWrapper FINAL : public CodeOutput {
OatWriter* const oat_writer_;
};
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
-static void WriteDebugSymbols(const CompilerDriver* compiler_driver,
- ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>* builder,
- OatWriter* oat_writer);
+template <typename ElfTypes>
+static void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder, OatWriter* oat_writer);
// Encode patch locations in .oat_patches format.
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
-void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn, Elf_Sym, Elf_Ehdr,
- Elf_Phdr, Elf_Shdr>::EncodeOatPatches(const OatWriter::PatchLocationsMap& sections,
- std::vector<uint8_t>* buffer) {
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::EncodeOatPatches(
+ const OatWriter::PatchLocationsMap& sections,
+ std::vector<uint8_t>* buffer) {
for (const auto& section : sections) {
const std::string& name = section.first;
std::vector<uintptr_t>* locations = section.second.get();
@@ -109,79 +99,126 @@ void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn, Elf_Sym, Elf_Ehdr,
buffer->push_back(0); // End of sections.
}
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
-bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::Write(OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files_unused ATTRIBUTE_UNUSED,
- const std::string& android_root_unused ATTRIBUTE_UNUSED,
- bool is_host_unused ATTRIBUTE_UNUSED) {
+template<typename AddressType, bool SubtractPatchLocation = false>
+static void PatchAddresses(const std::vector<uintptr_t>* patch_locations,
+ AddressType delta, std::vector<uint8_t>* buffer) {
+ // Addresses in .debug_* sections are unaligned.
+ typedef __attribute__((__aligned__(1))) AddressType UnalignedAddressType;
+ if (patch_locations != nullptr) {
+ for (uintptr_t patch_location : *patch_locations) {
+ *reinterpret_cast<UnalignedAddressType*>(buffer->data() + patch_location) +=
+ delta - (SubtractPatchLocation ? patch_location : 0);
+ }
+ }
+}
+
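The aligned(1) typedef in PatchAddresses is the load-bearing detail: DWARF buffers pack addresses at arbitrary byte offsets, and dereferencing a plain uint32_t* or uint64_t* at such an offset is undefined behavior (and can fault on strict-alignment targets). A standalone sketch of the same pattern:

    #include <cstdint>
    #include <vector>

    // GCC/Clang extension, as used above: a uint32_t that may live at any
    // byte offset, so the compiler emits alignment-safe accesses.
    typedef __attribute__((__aligned__(1))) uint32_t Unaligned32;

    int main() {
      std::vector<uint8_t> buffer(8, 0);
      // Offset 3 is not 4-byte aligned; with a plain uint32_t* this would be UB.
      *reinterpret_cast<Unaligned32*>(buffer.data() + 3) += 0x1000;
    }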
+template <typename ElfTypes>
+bool ElfWriterQuick<ElfTypes>::Write(
+ OatWriter* oat_writer,
+ const std::vector<const DexFile*>& dex_files_unused ATTRIBUTE_UNUSED,
+ const std::string& android_root_unused ATTRIBUTE_UNUSED,
+ bool is_host_unused ATTRIBUTE_UNUSED) {
constexpr bool debug = false;
const OatHeader& oat_header = oat_writer->GetOatHeader();
- Elf_Word oat_data_size = oat_header.GetExecutableOffset();
+ typename ElfTypes::Word oat_data_size = oat_header.GetExecutableOffset();
uint32_t oat_exec_size = oat_writer->GetSize() - oat_data_size;
uint32_t oat_bss_size = oat_writer->GetBssSize();
OatWriterWrapper wrapper(oat_writer);
- std::unique_ptr<ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr> > builder(
- new ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>(
- &wrapper,
- elf_file_,
- compiler_driver_->GetInstructionSet(),
- 0,
- oat_data_size,
- oat_data_size,
- oat_exec_size,
- RoundUp(oat_data_size + oat_exec_size, kPageSize),
- oat_bss_size,
- compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols(),
- debug));
+ std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(
+ &wrapper,
+ elf_file_,
+ compiler_driver_->GetInstructionSet(),
+ 0,
+ oat_data_size,
+ oat_data_size,
+ oat_exec_size,
+ RoundUp(oat_data_size + oat_exec_size, kPageSize),
+ oat_bss_size,
+ compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols(),
+ debug));
- if (!builder->Init()) {
- return false;
- }
+ InstructionSet isa = compiler_driver_->GetInstructionSet();
+ int alignment = GetInstructionSetPointerSize(isa);
+ typedef ElfRawSectionBuilder<ElfTypes> RawSection;
+ RawSection eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, alignment, 0);
+ RawSection eh_frame_hdr(".eh_frame_hdr", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
+ RawSection debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ RawSection debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ RawSection debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ RawSection debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ RawSection oat_patches(".oat_patches", SHT_OAT_PATCH, 0, nullptr, 0, 1, 0);
+ // Do not add to .oat_patches since we will make the addresses relative.
+ std::vector<uintptr_t> eh_frame_patches;
if (compiler_driver_->GetCompilerOptions().GetIncludeCFI() &&
!oat_writer->GetMethodDebugInfo().empty()) {
- ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> eh_frame(
- ".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
dwarf::WriteEhFrame(compiler_driver_, oat_writer,
- builder->GetTextBuilder().GetSection()->sh_addr,
- eh_frame.GetBuffer());
- builder->RegisterRawSection(eh_frame);
+ dwarf::DW_EH_PE_pcrel,
+ eh_frame.GetBuffer(), &eh_frame_patches,
+ eh_frame_hdr.GetBuffer());
+ builder->RegisterRawSection(&eh_frame);
+ builder->RegisterRawSection(&eh_frame_hdr);
}
+ // Init() must be called after .eh_frame is created, since the section is used in the ELF layout.
+ if (!builder->Init()) {
+ return false;
+ }
+
+ std::vector<uintptr_t>* debug_info_patches = nullptr;
+ std::vector<uintptr_t>* debug_line_patches = nullptr;
if (compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols() &&
!oat_writer->GetMethodDebugInfo().empty()) {
- WriteDebugSymbols(compiler_driver_, builder.get(), oat_writer);
+ // Add methods to .symtab.
+ WriteDebugSymbols(builder.get(), oat_writer);
+ // Generate DWARF .debug_* sections.
+ debug_info_patches = oat_writer->GetAbsolutePatchLocationsFor(".debug_info");
+ debug_line_patches = oat_writer->GetAbsolutePatchLocationsFor(".debug_line");
+ dwarf::WriteDebugSections(compiler_driver_, oat_writer,
+ debug_info.GetBuffer(), debug_info_patches,
+ debug_abbrev.GetBuffer(),
+ debug_str.GetBuffer(),
+ debug_line.GetBuffer(), debug_line_patches);
+ builder->RegisterRawSection(&debug_info);
+ builder->RegisterRawSection(&debug_abbrev);
+ builder->RegisterRawSection(&debug_str);
+ builder->RegisterRawSection(&debug_line);
}
if (compiler_driver_->GetCompilerOptions().GetIncludePatchInformation() ||
// ElfWriter::Fixup will be called regardless and it needs to be able
// to patch debug sections so we have to include patches for them.
compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
- ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> oat_patches(
- ".oat_patches", SHT_OAT_PATCH, 0, NULL, 0, 1, 0);
EncodeOatPatches(oat_writer->GetAbsolutePatchLocations(), oat_patches.GetBuffer());
- builder->RegisterRawSection(oat_patches);
+ builder->RegisterRawSection(&oat_patches);
+ }
+
+ // We know where .text and .eh_frame will be located, so patch the addresses.
+ typename ElfTypes::Addr text_addr = builder->GetTextBuilder().GetSection()->sh_addr;
+ // TODO: Simplify once we use Elf64 - we can use ElfTypes::Addr instead of branching.
+ if (Is64BitInstructionSet(compiler_driver_->GetInstructionSet())) {
+ // relative_address = (text_addr + address) - (eh_frame_addr + patch_location);
+ PatchAddresses<uint64_t, true>(&eh_frame_patches,
+ text_addr - eh_frame.GetSection()->sh_addr, eh_frame.GetBuffer());
+ PatchAddresses<uint64_t>(debug_info_patches, text_addr, debug_info.GetBuffer());
+ PatchAddresses<uint64_t>(debug_line_patches, text_addr, debug_line.GetBuffer());
+ } else {
+ // relative_address = (text_addr + address) - (eh_frame_addr + patch_location);
+ PatchAddresses<uint32_t, true>(&eh_frame_patches,
+ text_addr - eh_frame.GetSection()->sh_addr, eh_frame.GetBuffer());
+ PatchAddresses<uint32_t>(debug_info_patches, text_addr, debug_info.GetBuffer());
+ PatchAddresses<uint32_t>(debug_line_patches, text_addr, debug_line.GetBuffer());
}
return builder->Write();
}
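To see that the PatchAddresses<AddressType, true> call implements the commented formula: the buffer already holds the method's address relative to .text, the delta passed in is text_addr - eh_frame_addr, and SubtractPatchLocation removes the field's own offset. A quick numeric check with hypothetical values:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t text_addr = 0x4000, eh_frame_addr = 0x9000;  // hypothetical
      const uint32_t address = 0x120;        // value already stored in the buffer
      const uint32_t patch_location = 0x40;  // offset of the field within .eh_frame
      uint32_t stored = address;
      // What PatchAddresses<uint32_t, true> does: stored += delta - patch_location.
      stored += (text_addr - eh_frame_addr) - patch_location;
      assert(stored == (text_addr + address) - (eh_frame_addr + patch_location));
    }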
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
+template <typename ElfTypes>
// Do not inline to avoid Clang stack frame problems. b/18738594
NO_INLINE
-static void WriteDebugSymbols(const CompilerDriver* compiler_driver,
- ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>* builder,
- OatWriter* oat_writer) {
+static void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder, OatWriter* oat_writer) {
const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetMethodDebugInfo();
// Find all addresses (low_pc) which contain deduped methods.
@@ -193,8 +230,7 @@ static void WriteDebugSymbols(const CompilerDriver* compiler_driver,
}
}
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr>* symtab =
- builder->GetSymtabBuilder();
+ ElfSymtabBuilder<ElfTypes>* symtab = builder->GetSymtabBuilder();
for (auto it = method_info.begin(); it != method_info.end(); ++it) {
std::string name = PrettyMethod(it->dex_method_index_, *it->dex_file_, true);
if (deduped_addresses.find(it->low_pc_) != deduped_addresses.end()) {
@@ -214,31 +250,10 @@ static void WriteDebugSymbols(const CompilerDriver* compiler_driver,
0, STB_LOCAL, STT_NOTYPE);
}
}
-
- typedef ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> Section;
- Section debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- Section debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- Section debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- Section debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
-
- dwarf::WriteDebugSections(compiler_driver,
- oat_writer,
- builder->GetTextBuilder().GetSection()->sh_addr,
- debug_info.GetBuffer(),
- debug_abbrev.GetBuffer(),
- debug_str.GetBuffer(),
- debug_line.GetBuffer());
-
- builder->RegisterRawSection(debug_info);
- builder->RegisterRawSection(debug_abbrev);
- builder->RegisterRawSection(debug_str);
- builder->RegisterRawSection(debug_line);
}
// Explicit instantiations
-template class ElfWriterQuick<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
- Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr>;
-template class ElfWriterQuick<Elf64_Word, Elf64_Sword, Elf64_Addr, Elf64_Dyn,
- Elf64_Sym, Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr>;
+template class ElfWriterQuick<ElfTypes32>;
+template class ElfWriterQuick<ElfTypes64>;
} // namespace art
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index 811beb44d2..955b5684e7 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -23,9 +23,7 @@
namespace art {
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
+template <typename ElfTypes>
class ElfWriterQuick FINAL : public ElfWriter {
public:
// Write an ELF file. Returns true on success, false on failure.
@@ -57,10 +55,8 @@ class ElfWriterQuick FINAL : public ElfWriter {
};
// Explicitly instantiated in elf_writer_quick.cc
-typedef ElfWriterQuick<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
- Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr> ElfWriterQuick32;
-typedef ElfWriterQuick<Elf64_Word, Elf64_Sword, Elf64_Addr, Elf64_Dyn,
- Elf64_Sym, Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr> ElfWriterQuick64;
+typedef ElfWriterQuick<ElfTypes32> ElfWriterQuick32;
+typedef ElfWriterQuick<ElfTypes64> ElfWriterQuick64;
} // namespace art
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 3e5ad7b9f8..08523d8587 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -55,12 +55,12 @@ TEST_F(ElfWriterTest, dlsym) {
LOG(INFO) << "elf_filename=" << elf_filename;
UnreserveImageSpace();
- void* dl_oatdata = NULL;
- void* dl_oatexec = NULL;
- void* dl_oatlastword = NULL;
+ void* dl_oatdata = nullptr;
+ void* dl_oatexec = nullptr;
+ void* dl_oatlastword = nullptr;
std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
- ASSERT_TRUE(file.get() != NULL);
+ ASSERT_TRUE(file.get() != nullptr);
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), false, false, &error_msg));
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index cfd525cc5c..eaf3489f8f 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -68,7 +68,7 @@ TEST_F(ImageTest, WriteRead) {
// TODO: compile_pic should be a test argument.
{
{
- jobject class_loader = NULL;
+ jobject class_loader = nullptr;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
TimingLogger timings("ImageTest::WriteRead", false, false);
TimingLogger::ScopedTiming t("CompileAll", &timings);
@@ -92,7 +92,7 @@ TEST_F(ImageTest, WriteRead) {
}
// Work around a bug: mcld::Linker::emit closes oat_file, so reopen it as dup_oat.
std::unique_ptr<File> dup_oat(OS::OpenFileReadWrite(oat_file.GetFilename().c_str()));
- ASSERT_TRUE(dup_oat.get() != NULL);
+ ASSERT_TRUE(dup_oat.get() != nullptr);
{
bool success_image =
@@ -107,7 +107,7 @@ TEST_F(ImageTest, WriteRead) {
{
std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
- ASSERT_TRUE(file.get() != NULL);
+ ASSERT_TRUE(file.get() != nullptr);
ImageHeader image_header;
ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
ASSERT_TRUE(image_header.IsValid());
@@ -118,13 +118,13 @@ TEST_F(ImageTest, WriteRead) {
ASSERT_TRUE(!heap->GetContinuousSpaces().empty());
gc::space::ContinuousSpace* space = heap->GetNonMovingSpace();
ASSERT_FALSE(space->IsImageSpace());
- ASSERT_TRUE(space != NULL);
+ ASSERT_TRUE(space != nullptr);
ASSERT_TRUE(space->IsMallocSpace());
ASSERT_GE(sizeof(image_header) + space->Size(), static_cast<size_t>(file->GetLength()));
}
- ASSERT_TRUE(compiler_driver_->GetImageClasses() != NULL);
- std::set<std::string> image_classes(*compiler_driver_->GetImageClasses());
+ ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
+ std::unordered_set<std::string> image_classes(*compiler_driver_->GetImageClasses());
// Need to delete the compiler since it has worker threads which are attached to runtime.
compiler_driver_.reset();
@@ -137,7 +137,7 @@ TEST_F(ImageTest, WriteRead) {
writer.reset(nullptr);
runtime_.reset();
- java_lang_dex_file_ = NULL;
+ java_lang_dex_file_ = nullptr;
MemMap::Init();
std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str()));
@@ -145,7 +145,7 @@ TEST_F(ImageTest, WriteRead) {
RuntimeOptions options;
std::string image("-Ximage:");
image.append(image_location.GetFilename());
- options.push_back(std::make_pair(image.c_str(), reinterpret_cast<void*>(NULL)));
+ options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
// By default the compiler this creates will not include patch information.
options.push_back(std::make_pair("-Xnorelocate", nullptr));
@@ -158,7 +158,7 @@ TEST_F(ImageTest, WriteRead) {
// give it away now and then switch to a more manageable ScopedObjectAccess.
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
ScopedObjectAccess soa(Thread::Current());
- ASSERT_TRUE(runtime_.get() != NULL);
+ ASSERT_TRUE(runtime_.get() != nullptr);
class_linker_ = runtime_->GetClassLinker();
gc::Heap* heap = Runtime::Current()->GetHeap();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 670c897e2d..fc70d8f998 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -129,7 +129,7 @@ bool ImageWriter::Write(const std::string& image_filename,
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
- if (oat_file.get() == NULL) {
+ if (oat_file.get() == nullptr) {
PLOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
return false;
}
@@ -180,7 +180,7 @@ bool ImageWriter::Write(const std::string& image_filename,
std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
- if (image_file.get() == NULL) {
+ if (image_file.get() == nullptr) {
LOG(ERROR) << "Failed to open image file " << image_filename;
return false;
}
@@ -519,7 +519,7 @@ bool ImageWriter::AllocMemory() {
void ImageWriter::ComputeLazyFieldsForImageClasses() {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, NULL);
+ class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr);
}
bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
@@ -675,7 +675,7 @@ void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATT
if (string_id != nullptr) {
// This string occurs in this dex file, assign the dex cache entry.
uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
- if (dex_cache->GetResolvedString(string_idx) == NULL) {
+ if (dex_cache->GetResolvedString(string_idx) == nullptr) {
dex_cache->SetResolvedString(string_idx, string);
}
}
@@ -697,7 +697,7 @@ struct NonImageClasses {
};
void ImageWriter::PruneNonImageClasses() {
- if (compiler_driver_.GetImageClasses() == NULL) {
+ if (compiler_driver_.GetImageClasses() == nullptr) {
return;
}
Runtime* runtime = Runtime::Current();
@@ -712,7 +712,7 @@ void ImageWriter::PruneNonImageClasses() {
// Remove the undesired classes from the class roots.
for (const std::string& it : non_image_classes) {
- bool result = class_linker->RemoveClass(it.c_str(), NULL);
+ bool result = class_linker->RemoveClass(it.c_str(), nullptr);
DCHECK(result);
}
@@ -724,13 +724,13 @@ void ImageWriter::PruneNonImageClasses() {
DexCache* dex_cache = class_linker->GetDexCache(idx);
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
Class* klass = dex_cache->GetResolvedType(i);
- if (klass != NULL && !IsImageClass(klass)) {
- dex_cache->SetResolvedType(i, NULL);
+ if (klass != nullptr && !IsImageClass(klass)) {
+ dex_cache->SetResolvedType(i, nullptr);
}
}
for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
ArtMethod* method = dex_cache->GetResolvedMethod(i);
- if (method != NULL && !IsImageClass(method->GetDeclaringClass())) {
+ if (method != nullptr && !IsImageClass(method->GetDeclaringClass())) {
dex_cache->SetResolvedMethod(i, resolution_method);
}
}
@@ -776,15 +776,15 @@ void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
}
void ImageWriter::DumpImageClasses() {
- const std::set<std::string>* image_classes = compiler_driver_.GetImageClasses();
- CHECK(image_classes != NULL);
+ auto image_classes = compiler_driver_.GetImageClasses();
+ CHECK(image_classes != nullptr);
for (const std::string& image_class : *image_classes) {
LOG(INFO) << " " << image_class;
}
}
void ImageWriter::CalculateObjectBinSlots(Object* obj) {
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
// If it is a string, we want to intern it if it's not already interned.
if (obj->GetClass()->IsStringClass()) {
// we must be an interned string that was forward referenced and already assigned
@@ -856,7 +856,7 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
- CHECK(image_roots->Get(i) != NULL);
+ CHECK(image_roots->Get(i) != nullptr);
}
return image_roots.Get();
}
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index be2c8c61b0..6a085482ff 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -94,7 +94,7 @@ JitCompiler::JitCompiler() : total_time_(0) {
compiler_driver_.reset(new CompilerDriver(
compiler_options_.get(), verification_results_.get(), method_inliner_map_.get(),
Compiler::kQuick, instruction_set, instruction_set_features_.get(), false,
- nullptr, new std::set<std::string>, 1, false, true,
+ nullptr, nullptr, nullptr, 1, false, true,
std::string(), cumulative_logger_.get(), -1, std::string()));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index d25acc74e2..436fc0cfd0 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -47,7 +47,7 @@ ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
@@ -122,7 +122,7 @@ JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synch
return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 2402ea50bf..6f2cb25911 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -152,9 +152,9 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// References need placing in handle scope and the entry value passing
if (ref_param) {
// Compute handle scope entry, note null is placed in the handle scope but its boxed value
- // must be NULL
+ // must be null.
FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
- // Check handle scope offset is within frame and doesn't run into the saved segment state
+ // Check handle scope offset is within frame and doesn't run into the saved segment state.
CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
CHECK_NE(handle_scope_offset.Uint32Value(),
main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
@@ -243,9 +243,9 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// 7. Iterate over arguments placing values from managed calling convention in
// to the convention required for a native call (shuffling). For references
// place an index/pointer to the reference after checking whether it is
- // NULL (which must be encoded as NULL).
+ // null (which must be encoded as null).
// Note: we do this prior to materializing the JNIEnv* and static's jclass to
- // give as many free registers for the shuffle as possible
+ // give as many free registers for the shuffle as possible.
mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
uint32_t args_count = 0;
while (mr_conv->HasNext()) {
@@ -451,7 +451,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
ArrayRef<const LinkerPatch>());
}
-// Copy a single parameter from the managed to the JNI calling convention
+// Copy a single parameter from the managed to the JNI calling convention.
static void CopyParameter(Assembler* jni_asm,
ManagedRuntimeCallingConvention* mr_conv,
JniCallingConvention* jni_conv,
@@ -469,7 +469,7 @@ static void CopyParameter(Assembler* jni_asm,
} else {
CHECK(jni_conv->IsCurrentParamOnStack());
}
- // References need placing in handle scope and the entry address passing
+ // References need placing in handle scope and the entry address passing.
if (ref_param) {
null_allowed = mr_conv->IsCurrentArgPossiblyNull();
// Compute handle scope offset. Note null is placed in the handle scope but the jobject
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 70630f366f..1f7500a6ee 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -45,7 +45,7 @@ class RelativePatcherTest : public testing::Test {
inliner_map_(),
driver_(&compiler_options_, &verification_results_, &inliner_map_,
Compiler::kQuick, instruction_set, nullptr,
- false, nullptr, nullptr, 1u,
+ false, nullptr, nullptr, nullptr, 1u,
false, false, "", nullptr, -1, ""),
error_msg_(),
instruction_set_(instruction_set),
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 989b04fa36..925b507e09 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -93,8 +93,8 @@ TEST_F(OatTest, WriteRead) {
verification_results_.get(),
method_inliner_map_.get(),
compiler_kind, insn_set,
- insn_features.get(), false, nullptr, nullptr, 2, true,
- true, "", timer_.get(), -1, ""));
+ insn_features.get(), false, nullptr, nullptr, nullptr,
+ 2, true, true, "", timer_.get(), -1, ""));
jobject class_loader = nullptr;
if (kCompile) {
TimingLogger timings2("OatTest::WriteRead", false, false);
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5abd204f58..d2d38da49f 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -73,7 +73,7 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
image_file_location_oat_begin_(image_file_location_oat_begin),
image_patch_delta_(image_patch_delta),
key_value_store_(key_value_store),
- oat_header_(NULL),
+ oat_header_(nullptr),
size_dex_file_alignment_(0),
size_executable_offset_alignment_(0),
size_oat_header_(0),
@@ -326,7 +326,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
ClassReference class_ref(dex_file_, class_def_index_);
CompiledClass* compiled_class = writer_->compiler_driver_->GetCompiledClass(class_ref);
mirror::Class::Status status;
- if (compiled_class != NULL) {
+ if (compiled_class != nullptr) {
status = compiled_class->GetStatus();
} else if (writer_->compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) {
status = mirror::Class::kStatusError;
@@ -473,7 +473,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
ClassReference class_ref(dex_file_, class_def_index_);
CompiledClass* compiled_class = compiler_driver->GetCompiledClass(class_ref);
mirror::Class::Status status;
- if (compiled_class != NULL) {
+ if (compiled_class != nullptr) {
status = compiled_class->GetStatus();
} else if (compiler_driver->GetVerificationResults()->IsClassRejected(class_ref)) {
status = mirror::Class::kStatusError;
@@ -690,7 +690,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- if (compiled_method != NULL) { // ie. not an abstract method
+ if (compiled_method != nullptr) { // i.e. not an abstract method
size_t file_offset = file_offset_;
OutputStream* out = out_;
@@ -893,7 +893,7 @@ class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- if (compiled_method != NULL) { // ie. not an abstract method
+ if (compiled_method != nullptr) { // i.e. not an abstract method
size_t file_offset = file_offset_;
OutputStream* out = out_;
@@ -940,7 +940,7 @@ bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
}
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data != NULL) { // ie not an empty class, such as a marker interface
+ if (class_data != nullptr) { // i.e. not an empty class, such as a marker interface
ClassDataItemIterator it(*dex_file, class_data);
while (it.HasNextStaticField()) {
it.Next();
@@ -987,7 +987,7 @@ size_t OatWriter::InitOatDexFiles(size_t offset) {
// Create the OatDexFiles.
for (size_t i = 0; i != dex_files_->size(); ++i) {
const DexFile* dex_file = (*dex_files_)[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
OatDexFile* oat_dex_file = new OatDexFile(offset, *dex_file);
oat_dex_files_.push_back(oat_dex_file);
offset += oat_dex_file->SizeOf();
@@ -1471,13 +1471,13 @@ OatWriter::OatClass::OatClass(size_t offset,
oat_method_offsets_offset_from_oat_class += sizeof(method_bitmap_size_);
oat_method_offsets_offset_from_oat_class += method_bitmap_size_;
} else {
- method_bitmap_ = NULL;
+ method_bitmap_ = nullptr;
method_bitmap_size_ = 0;
}
for (size_t i = 0; i < num_methods; i++) {
CompiledMethod* compiled_method = compiled_methods_[i];
- if (compiled_method == NULL) {
+ if (compiled_method == nullptr) {
oat_method_offsets_offsets_from_oat_class_[i] = 0;
} else {
oat_method_offsets_offsets_from_oat_class_[i] = oat_method_offsets_offset_from_oat_class;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index cc2b39a8eb..8c79b44153 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -235,13 +235,13 @@ class OatWriter {
// used to validate file position when writing.
size_t offset_;
- // CompiledMethods for each class_def_method_index, or NULL if no method is available.
+ // CompiledMethods for each class_def_method_index, or null if no method is available.
std::vector<CompiledMethod*> compiled_methods_;
// Offset from OatClass::offset_ to the OatMethodOffsets for the
// class_def_method_index. If 0, it means the corresponding
// CompiledMethod entry in OatClass::compiled_methods_ should be
- // NULL and that the OatClass::type_ should be kOatClassBitmap.
+ // null and that the OatClass::type_ should be kOatClassBitmap.
std::vector<uint32_t> oat_method_offsets_offsets_from_oat_class_;
// data to write
@@ -258,12 +258,12 @@ class OatWriter {
// OatClassType::type_ is kOatClassBitmap, a set bit indicates the
// method has an OatMethodOffsets in methods_offsets_, otherwise
// the entry was omitted to save space. If OatClassType::type_ is
- // not is kOatClassBitmap, the bitmap will be NULL.
+ // not kOatClassBitmap, the bitmap will be null.
BitVector* method_bitmap_;
// OatMethodOffsets and OatMethodHeaders for each CompiledMethod
// present in the OatClass. Note that some may be missing if
- // OatClass::compiled_methods_ contains NULL values (and
+ // OatClass::compiled_methods_ contains null values (and
// oat_method_offsets_offsets_from_oat_class_ should contain 0
// values in this case).
std::vector<OatMethodOffsets> method_offsets_;
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index 06328f2490..30c89f2d15 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -72,8 +72,8 @@ static HInstruction* GetOppositeCondition(HInstruction* cond) {
return graph->GetIntConstant(0);
}
} else {
- // General case when 'cond' is another instruction of type boolean.
- DCHECK_EQ(cond->GetType(), Primitive::Type::kPrimBoolean);
+ // General case when 'cond' is another instruction of type boolean,
+ // as verified by SSAChecker.
return new (allocator) HBooleanNot(cond);
}
}
@@ -120,8 +120,11 @@ void HBooleanSimplifier::Run() {
phi->ReplaceWith(replacement);
merge_block->RemovePhi(phi);
- // Link the start/end blocks and remove empty branches.
- graph_->MergeEmptyBranches(block, merge_block);
+ // Delete the true branch and merge the resulting chain of blocks
+ // 'block->false_block->merge_block' into one.
+ true_block->DisconnectAndDelete();
+ block->MergeWith(false_block);
+ block->MergeWith(merge_block);
// Remove the original condition if it is now unused.
if (!if_condition->HasUses()) {
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 8a64d81485..818d671b5b 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -520,8 +520,24 @@ void HGraphBuilder::Binop_22b(const Instruction& instruction, bool reverse) {
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
+static bool RequiresConstructorBarrier(const DexCompilationUnit* cu, const CompilerDriver& driver) {
+ // The dex compilation unit is null only when unit testing.
+ if (cu == nullptr) {
+ return false;
+ }
+
+ Thread* self = Thread::Current();
+ return cu->IsConstructor()
+ && driver.RequiresConstructorBarrier(self, cu->GetDexFile(), cu->GetClassDefIndex());
+}
+
void HGraphBuilder::BuildReturn(const Instruction& instruction, Primitive::Type type) {
if (type == Primitive::kPrimVoid) {
+ // Note that we might insert redundant barriers when inlining `super` calls.
+ // TODO: add a data flow analysis to get rid of duplicate barriers.
+ if (RequiresConstructorBarrier(dex_compilation_unit_, *compiler_driver_)) {
+ current_block_->AddInstruction(new (arena_) HMemoryBarrier(kStoreStore));
+ }
current_block_->AddInstruction(new (arena_) HReturnVoid());
} else {
HInstruction* value = LoadLocal(instruction.VRegA(), type);
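The HMemoryBarrier(kStoreStore) inserted before HReturnVoid above gives constructors the publication safety that Java's final-field semantics require. A toy C++ analogue (not ART code, names invented) of the same idea, using a release fence to order the constructor's stores before a later publishing store:

    #include <atomic>

    struct Widget {
      int payload;
      explicit Widget(int v) : payload(v) {
        // Analogue of the kStoreStore barrier: the field writes above cannot
        // be reordered past a subsequent store that publishes the object.
        std::atomic_thread_fence(std::memory_order_release);
      }
    };

    std::atomic<Widget*> g_widget{nullptr};

    int main() {
      // Safe to publish with a relaxed store thanks to the fence in the
      // constructor; a reader would pair this with an acquire load.
      g_widget.store(new Widget(42), std::memory_order_relaxed);
    }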
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8ab759d393..5163395cac 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -612,7 +612,7 @@ void CodeGenerator::BuildVMapTable(std::vector<uint8_t>* data) const {
}
void CodeGenerator::BuildStackMaps(std::vector<uint8_t>* data) {
- uint32_t size = stack_map_stream_.ComputeNeededSize();
+ uint32_t size = stack_map_stream_.PrepareForFillIn();
data->resize(size);
MemoryRegion region(data->data(), size);
stack_map_stream_.FillIn(region);
@@ -654,7 +654,8 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
if (instruction == nullptr) {
// For stack overflow checks.
- stack_map_stream_.AddStackMapEntry(dex_pc, pc_info.native_pc, 0, 0, 0, inlining_depth);
+ stack_map_stream_.BeginStackMapEntry(dex_pc, pc_info.native_pc, 0, 0, 0, inlining_depth);
+ stack_map_stream_.EndStackMapEntry();
return;
}
LocationSummary* locations = instruction->GetLocations();
@@ -672,12 +673,12 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
}
// The register mask must be a subset of callee-save registers.
DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
- stack_map_stream_.AddStackMapEntry(dex_pc,
- pc_info.native_pc,
- register_mask,
- locations->GetStackMask(),
- environment_size,
- inlining_depth);
+ stack_map_stream_.BeginStackMapEntry(dex_pc,
+ pc_info.native_pc,
+ register_mask,
+ locations->GetStackMask(),
+ environment_size,
+ inlining_depth);
// Walk over the environment, and record the location of dex registers.
for (size_t i = 0; i < environment_size; ++i) {
@@ -823,11 +824,14 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
LOG(FATAL) << "Unexpected kind " << location.GetKind();
}
}
+ stack_map_stream_.EndStackMapEntry();
}
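The AddStackMapEntry to Begin/EndStackMapEntry rename reflects that an entry is now built incrementally: it is opened, the dex register locations are recorded while walking the environment, and only then is it sealed. A toy analogue of the bracketed-builder shape (invented names, not ART's real StackMapStream):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    class ToyStackMapStream {
     public:
      void BeginStackMapEntry(uint32_t dex_pc, uint32_t native_pc) {
        assert(!open_);  // entries must not nest
        open_ = true;
        entries_.push_back(Entry{dex_pc, native_pc, 0});
      }
      void AddDexRegisterEntry() {  // called once per environment slot
        assert(open_);
        ++entries_.back().num_dex_registers;
      }
      void EndStackMapEntry() {  // seals the entry opened above
        assert(open_);
        open_ = false;
      }
     private:
      struct Entry { uint32_t dex_pc; uint32_t native_pc; uint32_t num_dex_registers; };
      std::vector<Entry> entries_;
      bool open_ = false;
    };

    int main() {
      ToyStackMapStream stream;
      stream.BeginStackMapEntry(/*dex_pc=*/7, /*native_pc=*/0x40);
      stream.AddDexRegisterEntry();
      stream.EndStackMapEntry();
    }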
bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
- return (first_next_not_move != nullptr) && first_next_not_move->CanDoImplicitNullCheck();
+
+ return (first_next_not_move != nullptr)
+ && first_next_not_move->CanDoImplicitNullCheckOn(null_check->InputAt(0));
}
void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
@@ -842,7 +846,7 @@ void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
return;
}
- if (!instr->CanDoImplicitNullCheck()) {
+ if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) {
return;
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 7e03865b5f..ae1fb537bb 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1214,6 +1214,14 @@ void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant)
UNUSED(constant);
}
+void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ memory_barrier->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+}
+
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
@@ -3890,9 +3898,11 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
SlowPathCodeARM* slow_path = nullptr;
// Return 0 if `obj` is null.
- // TODO: avoid this check if we know obj is not null.
- __ cmp(obj, ShifterOperand(0));
- __ b(&zero, EQ);
+ // Avoid the null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ cmp(obj, ShifterOperand(0));
+ __ b(&zero, EQ);
+ }
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadWord, out, obj, class_offset);
__ cmp(out, ShifterOperand(cls));
@@ -3911,8 +3921,12 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
__ LoadImmediate(out, 1);
__ b(&done);
}
- __ Bind(&zero);
- __ LoadImmediate(out, 0);
+
+ if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+ __ Bind(&zero);
+ __ LoadImmediate(out, 0);
+ }
+
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
}
@@ -3938,9 +3952,11 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
- // TODO: avoid this check if we know obj is not null.
- __ cmp(obj, ShifterOperand(0));
- __ b(slow_path->GetExitLabel(), EQ);
+ // Avoid the null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ cmp(obj, ShifterOperand(0));
+ __ b(slow_path->GetExitLabel(), EQ);
+ }
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadWord, temp, obj, class_offset);
__ cmp(temp, ShifterOperand(cls));
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 06f425ea21..600903621d 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -96,10 +96,10 @@ class InvokeDexCallingConventionVisitor {
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
};
-class ParallelMoveResolverARM : public ParallelMoveResolver {
+class ParallelMoveResolverARM : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverARM(ArenaAllocator* allocator, CodeGeneratorARM* codegen)
- : ParallelMoveResolver(allocator), codegen_(codegen) {}
+ : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
void EmitMove(size_t index) OVERRIDE;
void EmitSwap(size_t index) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4be46126e9..1c6debdded 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -425,30 +425,67 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
CodeGenerator::Finalize(allocator);
}
-void ParallelMoveResolverARM64::EmitMove(size_t index) {
- MoveOperands* move = moves_.Get(index);
- codegen_->MoveLocation(move->GetDestination(), move->GetSource());
-}
-
-void ParallelMoveResolverARM64::EmitSwap(size_t index) {
- MoveOperands* move = moves_.Get(index);
- codegen_->SwapLocations(move->GetDestination(), move->GetSource());
+void ParallelMoveResolverARM64::PrepareForEmitNativeCode() {
+ // Note: There are 6 kinds of moves:
+ // 1. constant -> GPR/FPR (non-cycle)
+ // 2. constant -> stack (non-cycle)
+ // 3. GPR/FPR -> GPR/FPR
+ // 4. GPR/FPR -> stack
+ // 5. stack -> GPR/FPR
+ // 6. stack -> stack (non-cycle)
+ // Cases 1, 2, and 6 should never be included in a dependency cycle on ARM64. For cases 3, 4,
+ // and 5, VIXL uses at most 1 GPR. VIXL has 2 GPR and 1 FPR temps, and there should be no
+ // intersecting cycles on ARM64, so we always have 1 GPR and 1 FPR VIXL temp available to
+ // resolve the dependency.
+ vixl_temps_.Open(GetVIXLAssembler());
+}
+
+void ParallelMoveResolverARM64::FinishEmitNativeCode() {
+ vixl_temps_.Close();
+}
+
+Location ParallelMoveResolverARM64::AllocateScratchLocationFor(Location::Kind kind) {
+ DCHECK(kind == Location::kRegister || kind == Location::kFpuRegister ||
+ kind == Location::kStackSlot || kind == Location::kDoubleStackSlot);
+ kind = (kind == Location::kFpuRegister) ? Location::kFpuRegister : Location::kRegister;
+ Location scratch = GetScratchLocation(kind);
+ if (!scratch.Equals(Location::NoLocation())) {
+ return scratch;
+ }
+ // Allocate from VIXL temp registers.
+ if (kind == Location::kRegister) {
+ scratch = LocationFrom(vixl_temps_.AcquireX());
+ } else {
+ DCHECK(kind == Location::kFpuRegister);
+ scratch = LocationFrom(vixl_temps_.AcquireD());
+ }
+ AddScratchLocation(scratch);
+ return scratch;
}
-void ParallelMoveResolverARM64::RestoreScratch(int reg) {
- __ Pop(Register(VIXLRegCodeFromART(reg), kXRegSize));
+void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) {
+ if (loc.IsRegister()) {
+ vixl_temps_.Release(XRegisterFrom(loc));
+ } else {
+ DCHECK(loc.IsFpuRegister());
+ vixl_temps_.Release(DRegisterFrom(loc));
+ }
+ RemoveScratchLocation(loc);
}
-void ParallelMoveResolverARM64::SpillScratch(int reg) {
- __ Push(Register(VIXLRegCodeFromART(reg), kXRegSize));
+void ParallelMoveResolverARM64::EmitMove(size_t index) {
+ MoveOperands* move = moves_.Get(index);
+ codegen_->MoveLocation(move->GetDestination(), move->GetSource());
}
void CodeGeneratorARM64::GenerateFrameEntry() {
+ MacroAssembler* masm = GetVIXLAssembler();
+ BlockPoolsScope block_pools(masm);
__ Bind(&frame_entry_label_);
bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
if (do_overflow_check) {
- UseScratchRegisterScope temps(GetVIXLAssembler());
+ UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireX();
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
__ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
@@ -474,6 +511,7 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
}
void CodeGeneratorARM64::GenerateFrameExit() {
+ BlockPoolsScope block_pools(GetVIXLAssembler());
GetAssembler()->cfi().RememberState();
if (!HasEmptyFrame()) {
int frame_size = GetFrameSize();
@@ -726,10 +764,10 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
if (destination.IsRegister()) {
__ Mov(Register(dst), RegisterFrom(source, type));
} else {
+ DCHECK(destination.IsFpuRegister());
__ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
}
}
-
} else { // The destination is not a register. It must be a stack slot.
DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
if (source.IsRegister() || source.IsFpuRegister()) {
@@ -772,67 +810,6 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
}
}
-void CodeGeneratorARM64::SwapLocations(Location loc1, Location loc2) {
- DCHECK(!loc1.IsConstant());
- DCHECK(!loc2.IsConstant());
-
- if (loc1.Equals(loc2)) {
- return;
- }
-
- UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);
-
- bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
- bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
- bool is_fp_reg1 = loc1.IsFpuRegister();
- bool is_fp_reg2 = loc2.IsFpuRegister();
-
- if (loc2.IsRegister() && loc1.IsRegister()) {
- Register r1 = XRegisterFrom(loc1);
- Register r2 = XRegisterFrom(loc2);
- Register tmp = temps.AcquireSameSizeAs(r1);
- __ Mov(tmp, r2);
- __ Mov(r2, r1);
- __ Mov(r1, tmp);
- } else if (is_fp_reg2 && is_fp_reg1) {
- FPRegister r1 = DRegisterFrom(loc1);
- FPRegister r2 = DRegisterFrom(loc2);
- FPRegister tmp = temps.AcquireSameSizeAs(r1);
- __ Fmov(tmp, r2);
- __ Fmov(r2, r1);
- __ Fmov(r1, tmp);
- } else if (is_slot1 != is_slot2) {
- MemOperand mem = StackOperandFrom(is_slot1 ? loc1 : loc2);
- Location reg_loc = is_slot1 ? loc2 : loc1;
- CPURegister reg, tmp;
- if (reg_loc.IsFpuRegister()) {
- reg = DRegisterFrom(reg_loc);
- tmp = temps.AcquireD();
- } else {
- reg = XRegisterFrom(reg_loc);
- tmp = temps.AcquireX();
- }
- __ Ldr(tmp, mem);
- __ Str(reg, mem);
- if (reg_loc.IsFpuRegister()) {
- __ Fmov(FPRegister(reg), FPRegister(tmp));
- } else {
- __ Mov(Register(reg), Register(tmp));
- }
- } else if (is_slot1 && is_slot2) {
- MemOperand mem1 = StackOperandFrom(loc1);
- MemOperand mem2 = StackOperandFrom(loc2);
- Register tmp1 = loc1.IsStackSlot() ? temps.AcquireW() : temps.AcquireX();
- Register tmp2 = temps.AcquireSameSizeAs(tmp1);
- __ Ldr(tmp1, mem1);
- __ Ldr(tmp2, mem2);
- __ Str(tmp1, mem2);
- __ Str(tmp2, mem1);
- } else {
- LOG(FATAL) << "Unimplemented";
- }
-}
-
void CodeGeneratorARM64::Load(Primitive::Type type,
CPURegister dst,
const MemOperand& src) {
@@ -865,7 +842,9 @@ void CodeGeneratorARM64::Load(Primitive::Type type,
void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
CPURegister dst,
const MemOperand& src) {
- UseScratchRegisterScope temps(GetVIXLAssembler());
+ MacroAssembler* masm = GetVIXLAssembler();
+ BlockPoolsScope block_pools(masm);
+ UseScratchRegisterScope temps(masm);
Register temp_base = temps.AcquireX();
Primitive::Type type = instruction->GetType();
@@ -995,6 +974,7 @@ void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
+ BlockPoolsScope block_pools(GetVIXLAssembler());
__ Ldr(lr, MemOperand(tr, entry_point_offset));
__ Blr(lr);
if (instruction != nullptr) {
@@ -1130,6 +1110,83 @@ void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
}
}
+void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ locations->SetOut(Location::RequiresFpuRegister());
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
+ const FieldInfo& field_info) {
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+ BlockPoolsScope block_pools(GetVIXLAssembler());
+
+ MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset());
+ bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
+
+ if (field_info.IsVolatile()) {
+ if (use_acquire_release) {
+ // NB: LoadAcquire will record the pc info if needed.
+ codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
+ } else {
+ codegen_->Load(field_info.GetFieldType(), OutputCPURegister(instruction), field);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // For IRIW sequential consistency kLoadAny is not sufficient.
+ GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+ }
+ } else {
+ codegen_->Load(field_info.GetFieldType(), OutputCPURegister(instruction), field);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+}
+
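
For orientation, the two volatile-load shapes selected above reduce to roughly these A64 sequences (a sketch; registers and offsets are placeholders, and the barrier lowering is assumed):

    // Acquire/release variant (LoadAcquire):
    //   add x16, x_obj, #offset   // LDAR has no offset form, so the
    //   ldar w_out, [x16]         // address is materialized first.
    // Barrier variant:
    //   ldr w_out, [x_obj, #offset]
    //   dmb ish                   // kAnyAny; kLoadAny would miss IRIW cases.
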
+void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
+ const FieldInfo& field_info) {
+ DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
+ BlockPoolsScope block_pools(GetVIXLAssembler());
+
+ Register obj = InputRegisterAt(instruction, 0);
+ CPURegister value = InputCPURegisterAt(instruction, 1);
+ Offset offset = field_info.GetFieldOffset();
+ Primitive::Type field_type = field_info.GetFieldType();
+ bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
+
+ if (field_info.IsVolatile()) {
+ if (use_acquire_release) {
+ codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ } else {
+ GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+ codegen_->Store(field_type, value, HeapOperand(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+ }
+ } else {
+ codegen_->Store(field_type, value, HeapOperand(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
+ codegen_->MarkGCCard(obj, Register(value));
+ }
+}
+
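
The volatile-store paths are the mirror image (again a sketch with placeholder registers, assuming kAnyStore/kAnyAny both lower to a full barrier):

    // Acquire/release variant (StoreRelease):
    //   add x16, x_obj, #offset   // STLR, like LDAR, takes no offset.
    //   stlr w_val, [x16]
    // Barrier variant:
    //   dmb ish                   // kAnyStore before the store
    //   str w_val, [x_obj, #offset]
    //   dmb ish                   // kAnyAny after it
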
void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
Primitive::Type type = instr->GetType();
@@ -1264,7 +1321,9 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
Location index = locations->InAt(1);
size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
MemOperand source = HeapOperand(obj);
- UseScratchRegisterScope temps(GetVIXLAssembler());
+ MacroAssembler* masm = GetVIXLAssembler();
+ UseScratchRegisterScope temps(masm);
+ BlockPoolsScope block_pools(masm);
if (index.IsConstant()) {
offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
@@ -1287,22 +1346,23 @@ void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
}
void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
+ BlockPoolsScope block_pools(GetVIXLAssembler());
__ Ldr(OutputRegister(instruction),
HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
- Primitive::Type value_type = instruction->GetComponentType();
- bool is_object = value_type == Primitive::kPrimNot;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
- if (is_object) {
+ if (instruction->NeedsTypeCheck()) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
} else {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
@@ -1315,31 +1375,42 @@ void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
Primitive::Type value_type = instruction->GetComponentType();
- if (value_type == Primitive::kPrimNot) {
+ LocationSummary* locations = instruction->GetLocations();
+ bool needs_runtime_call = locations->WillCall();
+
+ if (needs_runtime_call) {
codegen_->InvokeRuntime(
QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc(), nullptr);
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
} else {
- LocationSummary* locations = instruction->GetLocations();
Register obj = InputRegisterAt(instruction, 0);
CPURegister value = InputCPURegisterAt(instruction, 2);
Location index = locations->InAt(1);
size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
MemOperand destination = HeapOperand(obj);
- UseScratchRegisterScope temps(GetVIXLAssembler());
+ MacroAssembler* masm = GetVIXLAssembler();
+ BlockPoolsScope block_pools(masm);
+ {
+ // We use a block to end the scratch scope before the write barrier, thus
+ // freeing the temporary registers so they can be used in `MarkGCCard`.
+ UseScratchRegisterScope temps(masm);
+
+ if (index.IsConstant()) {
+ offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
+ destination = HeapOperand(obj, offset);
+ } else {
+ Register temp = temps.AcquireSameSizeAs(obj);
+ Register index_reg = InputRegisterAt(instruction, 1);
+ __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
+ destination = HeapOperand(temp, offset);
+ }
- if (index.IsConstant()) {
- offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
- destination = HeapOperand(obj, offset);
- } else {
- Register temp = temps.AcquireSameSizeAs(obj);
- Register index_reg = InputRegisterAt(instruction, 1);
- __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
- destination = HeapOperand(temp, offset);
+ codegen_->Store(value_type, value, destination);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+ if (CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue())) {
+ codegen_->MarkGCCard(obj, value.W());
}
-
- codegen_->Store(value_type, value, destination);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
}
}
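
On the write barrier that motivated the inner scratch scope: MarkGCCard dirties one card-table byte covering `obj`, a single store that still needs a free temp. A standalone sketch of the usual card-table scheme (the shift and dirty value are ART's conventional constants, stated here as assumptions):

    #include <cstdint>
    constexpr uintptr_t kCardShift = 10;  // assumption: 1 KiB cards
    constexpr uint8_t kCardDirty = 0x70;  // assumption: ART's dirty marker
    // One byte per 2^kCardShift bytes of heap; dirtying is one store.
    void MarkCard(uint8_t* card_table, uintptr_t obj_addr) {
      card_table[obj_addr >> kCardShift] = kCardDirty;
    }
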
@@ -1381,8 +1452,10 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
- // TODO: avoid this check if we know obj is not null.
- __ Cbz(obj, slow_path->GetExitLabel());
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ Cbz(obj, slow_path->GetExitLabel());
+ }
// Compare the class of `obj` with `cls`.
__ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
__ Cmp(obj_cls, cls);
@@ -1750,72 +1823,19 @@ void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
- if (Primitive::IsFloatingPointType(instruction->GetType())) {
- locations->SetOut(Location::RequiresFpuRegister());
- } else {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
+ HandleFieldGet(instruction);
}
void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
- MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
- bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
-
- if (instruction->IsVolatile()) {
- if (use_acquire_release) {
- // NB: LoadAcquire will record the pc info if needed.
- codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
- } else {
- codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- // For IRIW sequential consistency kLoadAny is not sufficient.
- GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
- }
- } else {
- codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- }
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
}
void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
- if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
- locations->SetInAt(1, Location::RequiresFpuRegister());
- } else {
- locations->SetInAt(1, Location::RequiresRegister());
- }
+ HandleFieldSet(instruction);
}
void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
- Register obj = InputRegisterAt(instruction, 0);
- CPURegister value = InputCPURegisterAt(instruction, 1);
- Offset offset = instruction->GetFieldOffset();
- Primitive::Type field_type = instruction->GetFieldType();
- bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
-
- if (instruction->IsVolatile()) {
- if (use_acquire_release) {
- codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- } else {
- GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
- codegen_->Store(field_type, value, HeapOperand(obj, offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
- }
- } else {
- codegen_->Store(field_type, value, HeapOperand(obj, offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- }
-
- if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
- codegen_->MarkGCCard(obj, Register(value));
- }
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
}
void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -1837,9 +1857,11 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
vixl::Label done;
// Return 0 if `obj` is null.
- // TODO: Avoid this check if we know `obj` is not null.
- __ Mov(out, 0);
- __ Cbz(obj, &done);
+ // Avoid null check if we know `obj` is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ Mov(out, 0);
+ __ Cbz(obj, &done);
+ }
// Compare the class of `obj` with `cls`.
__ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
@@ -1914,7 +1936,9 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
// The register ip1 is required to be used for the hidden argument in
// art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
- UseScratchRegisterScope scratch_scope(GetVIXLAssembler());
+ MacroAssembler* masm = GetVIXLAssembler();
+ UseScratchRegisterScope scratch_scope(masm);
+ BlockPoolsScope block_pools(masm);
scratch_scope.Exclude(ip1);
__ Mov(ip1, invoke->GetDexMethodIndex());
@@ -2000,6 +2024,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
return;
}
+ BlockPoolsScope block_pools(GetVIXLAssembler());
Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
codegen_->GenerateStaticOrDirectCall(invoke, temp);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -2018,6 +2043,8 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Offset class_offset = mirror::Object::ClassOffset();
Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ BlockPoolsScope block_pools(GetVIXLAssembler());
+
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
__ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
@@ -2318,8 +2345,9 @@ void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instru
if (codegen_->CanMoveNullCheckToUser(instruction)) {
return;
}
- Location obj = instruction->GetLocations()->InAt(0);
+ BlockPoolsScope block_pools(GetVIXLAssembler());
+ Location obj = instruction->GetLocations()->InAt(0);
__ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
@@ -2446,6 +2474,14 @@ void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
}
}
+void LocationsBuilderARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ memory_barrier->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+}
+
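
The kinds GenerateMemoryBarrier can receive map onto A64 barriers roughly as follows (assumed mapping, for orientation only):

    // kAnyAny     -> dmb ish    (full barrier)
    // kAnyStore   -> dmb ish    (conservatively full)
    // kLoadAny    -> dmb ishld  (reads-only barrier)
    // kStoreStore -> dmb ishst  (writes-only barrier)
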
void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
Primitive::Type return_type = instruction->InputAt(0)->GetType();
@@ -2519,67 +2555,19 @@ void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
}
void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
- if (Primitive::IsFloatingPointType(instruction->GetType())) {
- locations->SetOut(Location::RequiresFpuRegister());
- } else {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
+ HandleFieldGet(instruction);
}
void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
- MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
- bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
-
- if (instruction->IsVolatile()) {
- if (use_acquire_release) {
- // NB: LoadAcquire will record the pc info if needed.
- codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
- } else {
- codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
- // For IRIW sequential consistency kLoadAny is not sufficient.
- GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
- }
- } else {
- codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
- }
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
}
void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
- if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
- locations->SetInAt(1, Location::RequiresFpuRegister());
- } else {
- locations->SetInAt(1, Location::RequiresRegister());
- }
+ HandleFieldSet(instruction);
}
void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
- Register cls = InputRegisterAt(instruction, 0);
- CPURegister value = InputCPURegisterAt(instruction, 1);
- Offset offset = instruction->GetFieldOffset();
- Primitive::Type field_type = instruction->GetFieldType();
- bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
-
- if (instruction->IsVolatile()) {
- if (use_acquire_release) {
- codegen_->StoreRelease(field_type, value, HeapOperand(cls, offset));
- } else {
- GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
- codegen_->Store(field_type, value, HeapOperand(cls, offset));
- GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
- }
- } else {
- codegen_->Store(field_type, value, HeapOperand(cls, offset));
- }
-
- if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
- codegen_->MarkGCCard(cls, Register(value));
- }
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
}
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 07c6dd059a..5a358671cc 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -159,6 +159,8 @@ class InstructionCodeGeneratorARM64 : public HGraphVisitor {
void GenerateMemoryBarrier(MemBarrierKind kind);
void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
void HandleBinaryOp(HBinaryOperation* instr);
+ void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
void HandleShift(HBinaryOperation* instr);
void GenerateImplicitNullCheck(HNullCheck* instruction);
void GenerateExplicitNullCheck(HNullCheck* instruction);
@@ -185,8 +187,10 @@ class LocationsBuilderARM64 : public HGraphVisitor {
private:
void HandleBinaryOp(HBinaryOperation* instr);
- void HandleShift(HBinaryOperation* instr);
+ void HandleFieldSet(HInstruction* instruction);
+ void HandleFieldGet(HInstruction* instruction);
void HandleInvoke(HInvoke* instr);
+ void HandleShift(HBinaryOperation* instr);
CodeGeneratorARM64* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -194,15 +198,17 @@ class LocationsBuilderARM64 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM64);
};
-class ParallelMoveResolverARM64 : public ParallelMoveResolver {
+class ParallelMoveResolverARM64 : public ParallelMoveResolverNoSwap {
public:
ParallelMoveResolverARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
- : ParallelMoveResolver(allocator), codegen_(codegen) {}
+ : ParallelMoveResolverNoSwap(allocator), codegen_(codegen), vixl_temps_() {}
+ protected:
+ void PrepareForEmitNativeCode() OVERRIDE;
+ void FinishEmitNativeCode() OVERRIDE;
+ Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE;
+ void FreeScratchLocation(Location loc) OVERRIDE;
void EmitMove(size_t index) OVERRIDE;
- void EmitSwap(size_t index) OVERRIDE;
- void RestoreScratch(int reg) OVERRIDE;
- void SpillScratch(int reg) OVERRIDE;
private:
Arm64Assembler* GetAssembler() const;
@@ -211,6 +217,7 @@ class ParallelMoveResolverARM64 : public ParallelMoveResolver {
}
CodeGeneratorARM64* const codegen_;
+ vixl::UseScratchRegisterScope vixl_temps_;
DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARM64);
};
@@ -318,7 +325,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
// locations, and is used for optimisation and debugging.
void MoveLocation(Location destination, Location source,
Primitive::Type type = Primitive::kPrimVoid);
- void SwapLocations(Location loc_1, Location loc_2);
void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
void LoadCurrentMethod(vixl::Register current_method);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 0f1175563e..c604842d86 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -877,7 +877,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio
if (rhs.IsRegister()) {
__ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>());
} else if (rhs.IsConstant()) {
- int32_t constant = rhs.GetConstant()->AsIntConstant()->GetValue();
+ int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
if (constant == 0) {
__ testl(lhs.AsRegister<Register>(), lhs.AsRegister<Register>());
} else {
@@ -1120,6 +1120,14 @@ void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant)
UNUSED(constant);
}
+void LocationsBuilderX86::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ memory_barrier->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorX86::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+}
+
void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
@@ -1212,6 +1220,7 @@ void InstructionCodeGeneratorX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirec
codegen_->GenerateStaticOrDirectCall(
invoke, invoke->GetLocations()->GetTemp(0).AsRegister<Register>());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -1547,10 +1556,8 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimLong:
// Processing a Dex `long-to-float' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresFpuRegister());
- locations->AddTemp(Location::RequiresFpuRegister());
- locations->AddTemp(Location::RequiresFpuRegister());
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::Any());
break;
case Primitive::kPrimDouble:
@@ -1580,10 +1587,8 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimLong:
// Processing a Dex `long-to-double' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresFpuRegister());
- locations->AddTemp(Location::RequiresFpuRegister());
- locations->AddTemp(Location::RequiresFpuRegister());
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::Any());
break;
case Primitive::kPrimFloat:
@@ -1804,37 +1809,31 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimLong: {
// Processing a Dex `long-to-float' instruction.
- Register low = in.AsRegisterPairLow<Register>();
- Register high = in.AsRegisterPairHigh<Register>();
- XmmRegister result = out.AsFpuRegister<XmmRegister>();
- XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- XmmRegister constant = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
-
- // Operations use doubles for precision reasons (each 32-bit
- // half of a long fits in the 53-bit mantissa of a double,
- // but not in the 24-bit mantissa of a float). This is
- // especially important for the low bits. The result is
- // eventually converted to float.
-
- // low = low - 2^31 (to prevent bit 31 of `low` to be
- // interpreted as a sign bit)
- __ subl(low, Immediate(0x80000000));
- // temp = int-to-double(high)
- __ cvtsi2sd(temp, high);
- // temp = temp * 2^32
- __ LoadLongConstant(constant, k2Pow32EncodingForDouble);
- __ mulsd(temp, constant);
- // result = int-to-double(low)
- __ cvtsi2sd(result, low);
- // result = result + 2^31 (restore the original value of `low`)
- __ LoadLongConstant(constant, k2Pow31EncodingForDouble);
- __ addsd(result, constant);
- // result = result + temp
- __ addsd(result, temp);
- // result = double-to-float(result)
- __ cvtsd2ss(result, result);
- // Restore low.
- __ addl(low, Immediate(0x80000000));
+ size_t adjustment = 0;
+
+ // Create stack space for the call to
+ // InstructionCodeGeneratorX86::PushOntoFPStack and/or X86Assembler::fstps below.
+ // TODO: enhance register allocator to ask for stack temporaries.
+ if (!in.IsDoubleStackSlot() || !out.IsStackSlot()) {
+ adjustment = Primitive::ComponentSize(Primitive::kPrimLong);
+ __ subl(ESP, Immediate(adjustment));
+ }
+
+ // Load the value to the FP stack, using temporaries if needed.
+ PushOntoFPStack(in, 0, adjustment, false, true);
+
+ if (out.IsStackSlot()) {
+ __ fstps(Address(ESP, out.GetStackIndex() + adjustment));
+ } else {
+ __ fstps(Address(ESP, 0));
+ Location stack_temp = Location::StackSlot(0);
+ codegen_->Move32(out, stack_temp);
+ }
+
+ // Remove the temporary stack space we allocated.
+ if (adjustment != 0) {
+ __ addl(ESP, Immediate(adjustment));
+ }
break;
}
@@ -1863,29 +1862,31 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimLong: {
// Processing a Dex `long-to-double' instruction.
- Register low = in.AsRegisterPairLow<Register>();
- Register high = in.AsRegisterPairHigh<Register>();
- XmmRegister result = out.AsFpuRegister<XmmRegister>();
- XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- XmmRegister constant = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
-
- // low = low - 2^31 (to prevent bit 31 of `low` to be
- // interpreted as a sign bit)
- __ subl(low, Immediate(0x80000000));
- // temp = int-to-double(high)
- __ cvtsi2sd(temp, high);
- // temp = temp * 2^32
- __ LoadLongConstant(constant, k2Pow32EncodingForDouble);
- __ mulsd(temp, constant);
- // result = int-to-double(low)
- __ cvtsi2sd(result, low);
- // result = result + 2^31 (restore the original value of `low`)
- __ LoadLongConstant(constant, k2Pow31EncodingForDouble);
- __ addsd(result, constant);
- // result = result + temp
- __ addsd(result, temp);
- // Restore low.
- __ addl(low, Immediate(0x80000000));
+ size_t adjustment = 0;
+
+ // Create stack space for the call to
+ // InstructionCodeGeneratorX86::PushOntoFPStack and/or X86Assembler::fstpl below.
+ // TODO: enhance register allocator to ask for stack temporaries.
+ if (!in.IsDoubleStackSlot() || !out.IsDoubleStackSlot()) {
+ adjustment = Primitive::ComponentSize(Primitive::kPrimLong);
+ __ subl(ESP, Immediate(adjustment));
+ }
+
+ // Load the value to the FP stack, using temporaries if needed.
+ PushOntoFPStack(in, 0, adjustment, false, true);
+
+ if (out.IsDoubleStackSlot()) {
+ __ fstpl(Address(ESP, out.GetStackIndex() + adjustment));
+ } else {
+ __ fstpl(Address(ESP, 0));
+ Location stack_temp = Location::DoubleStackSlot(0);
+ codegen_->Move64(out, stack_temp);
+ }
+
+ // Remove the temporary stack space we allocated.
+ if (adjustment != 0) {
+ __ addl(ESP, Immediate(adjustment));
+ }
break;
}
@@ -2225,24 +2226,43 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) {
}
}
-void InstructionCodeGeneratorX86::PushOntoFPStack(Location source, uint32_t temp_offset,
- uint32_t stack_adjustment, bool is_float) {
+void InstructionCodeGeneratorX86::PushOntoFPStack(Location source,
+ uint32_t temp_offset,
+ uint32_t stack_adjustment,
+ bool is_fp,
+ bool is_wide) {
if (source.IsStackSlot()) {
- DCHECK(is_float);
- __ flds(Address(ESP, source.GetStackIndex() + stack_adjustment));
+ DCHECK(!is_wide);
+ if (is_fp) {
+ __ flds(Address(ESP, source.GetStackIndex() + stack_adjustment));
+ } else {
+ __ filds(Address(ESP, source.GetStackIndex() + stack_adjustment));
+ }
} else if (source.IsDoubleStackSlot()) {
- DCHECK(!is_float);
- __ fldl(Address(ESP, source.GetStackIndex() + stack_adjustment));
+ DCHECK(is_wide);
+ if (is_fp) {
+ __ fldl(Address(ESP, source.GetStackIndex() + stack_adjustment));
+ } else {
+ __ fildl(Address(ESP, source.GetStackIndex() + stack_adjustment));
+ }
} else {
// Write the value to the temporary location on the stack and load to FP stack.
- if (is_float) {
+ if (!is_wide) {
Location stack_temp = Location::StackSlot(temp_offset);
codegen_->Move32(stack_temp, source);
- __ flds(Address(ESP, temp_offset));
+ if (is_fp) {
+ __ flds(Address(ESP, temp_offset));
+ } else {
+ __ filds(Address(ESP, temp_offset));
+ }
} else {
Location stack_temp = Location::DoubleStackSlot(temp_offset);
codegen_->Move64(stack_temp, source);
- __ fldl(Address(ESP, temp_offset));
+ if (is_fp) {
+ __ fldl(Address(ESP, temp_offset));
+ } else {
+ __ fildl(Address(ESP, temp_offset));
+ }
}
}
}
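
The `is_fp` split is what lets the long-to-float/double conversions above run through x87: `fild` loads a 64-bit integer exactly (the FP stack is 80 bits wide), and the single rounding happens at `fstps`/`fstpl`. The same computation in isolation (a sketch in GCC/Clang extended asm, x86 only; the function name is hypothetical):

    #include <cstdint>
    static float LongToFloatViaX87(int64_t v) {
      float out;
      // fildll: push the 64-bit integer onto the x87 stack (exact).
      // fstps: pop and store as single precision (one rounding step).
      __asm__("fildll %1\n\tfstps %0" : "=m"(out) : "m"(v));
      return out;
    }
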
@@ -2261,8 +2281,9 @@ void InstructionCodeGeneratorX86::GenerateRemFP(HRem *rem) {
__ subl(ESP, Immediate(2 * elem_size));
// Load the values to the FP stack in reverse order, using temporaries if needed.
- PushOntoFPStack(second, elem_size, 2 * elem_size, is_float);
- PushOntoFPStack(first, 0, 2 * elem_size, is_float);
+ const bool is_wide = !is_float;
+ PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp */ true, is_wide);
+ PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp */ true, is_wide);
// Loop doing FPREM until we stabilize.
Label retry;
@@ -3098,7 +3119,6 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
}
DCHECK(!IsLeafMethod());
- RecordPcInfo(invoke, invoke->GetDexPc());
}
void CodeGeneratorX86::MarkGCCard(Register temp, Register card, Register object, Register value) {
@@ -4230,9 +4250,11 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
SlowPathCodeX86* slow_path = nullptr;
// Return 0 if `obj` is null.
- // TODO: avoid this check if we know obj is not null.
- __ testl(obj, obj);
- __ j(kEqual, &zero);
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, &zero);
+ }
__ movl(out, Address(obj, class_offset));
// Compare the class of `obj` with `cls`.
if (cls.IsRegister()) {
@@ -4257,8 +4279,12 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ movl(out, Immediate(1));
__ jmp(&done);
}
- __ Bind(&zero);
- __ movl(out, Immediate(0));
+
+ if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+ __ Bind(&zero);
+ __ movl(out, Immediate(0));
+ }
+
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
}
@@ -4283,11 +4309,13 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
- // TODO: avoid this check if we know obj is not null.
- __ testl(obj, obj);
- __ j(kEqual, slow_path->GetExitLabel());
- __ movl(temp, Address(obj, class_offset));
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ }
+ __ movl(temp, Address(obj, class_offset));
// Compare the class of `obj` with `cls`.
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<Register>());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 368ae0fb0e..8bd3cd3585 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -93,10 +93,10 @@ class InvokeDexCallingConventionVisitor {
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
};
-class ParallelMoveResolverX86 : public ParallelMoveResolver {
+class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
- : ParallelMoveResolver(allocator), codegen_(codegen) {}
+ : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
void EmitMove(size_t index) OVERRIDE;
void EmitSwap(size_t index) OVERRIDE;
@@ -174,8 +174,10 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
void GenerateMemoryBarrier(MemBarrierKind kind);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+ // Push value to FPU stack. `is_fp` specifies whether the value is floating point or not.
+ // `is_wide` specifies whether it is long/double or not.
void PushOntoFPStack(Location source, uint32_t temp_offset,
- uint32_t stack_adjustment, bool is_float);
+ uint32_t stack_adjustment, bool is_fp, bool is_wide);
void GenerateImplicitNullCheck(HNullCheck* instruction);
void GenerateExplicitNullCheck(HNullCheck* instruction);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5b681fa62b..47425fb9ae 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1023,14 +1023,14 @@ void LocationsBuilderX86_64::VisitCompare(HCompare* compare) {
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrInt32LongConstant(compare->InputAt(1)));
+ locations->SetInAt(1, Location::Any());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::Any());
locations->SetOut(Location::RequiresRegister());
break;
}
@@ -1052,24 +1052,46 @@ void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) {
CpuRegister left_reg = left.AsRegister<CpuRegister>();
if (right.IsConstant()) {
int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
- DCHECK(IsInt<32>(value));
- if (value == 0) {
- __ testq(left_reg, left_reg);
+ if (IsInt<32>(value)) {
+ if (value == 0) {
+ __ testq(left_reg, left_reg);
+ } else {
+ __ cmpq(left_reg, Immediate(static_cast<int32_t>(value)));
+ }
} else {
- __ cmpq(left_reg, Immediate(static_cast<int32_t>(value)));
+ // Value won't fit in a 32-bit immediate.
+ __ cmpq(left_reg, codegen_->LiteralInt64Address(value));
}
+ } else if (right.IsDoubleStackSlot()) {
+ __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
} else {
__ cmpq(left_reg, right.AsRegister<CpuRegister>());
}
break;
}
case Primitive::kPrimFloat: {
- __ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+ XmmRegister left_reg = left.AsFpuRegister<XmmRegister>();
+ if (right.IsConstant()) {
+ float value = right.GetConstant()->AsFloatConstant()->GetValue();
+ __ ucomiss(left_reg, codegen_->LiteralFloatAddress(value));
+ } else if (right.IsStackSlot()) {
+ __ ucomiss(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
+ } else {
+ __ ucomiss(left_reg, right.AsFpuRegister<XmmRegister>());
+ }
__ j(kUnordered, compare->IsGtBias() ? &greater : &less);
break;
}
case Primitive::kPrimDouble: {
- __ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+ XmmRegister left_reg = left.AsFpuRegister<XmmRegister>();
+ if (right.IsConstant()) {
+ double value = right.GetConstant()->AsDoubleConstant()->GetValue();
+ __ ucomisd(left_reg, codegen_->LiteralDoubleAddress(value));
+ } else if (right.IsDoubleStackSlot()) {
+ __ ucomisd(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
+ } else {
+ __ ucomisd(left_reg, right.AsFpuRegister<XmmRegister>());
+ }
__ j(kUnordered, compare->IsGtBias() ? &greater : &less);
break;
}
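
On the `kUnordered` jumps above: `ucomiss`/`ucomisd` report NaN operands as unordered, and the Dex bytecode that produced the compare decides the sign of the result:

    // cmpg-float/cmpg-double: NaN => +1 (IsGtBias() true,  jump to &greater)
    // cmpl-float/cmpl-double: NaN => -1 (IsGtBias() false, jump to &less)
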
@@ -1145,6 +1167,14 @@ void InstructionCodeGeneratorX86_64::VisitDoubleConstant(HDoubleConstant* consta
UNUSED(constant);
}
+void LocationsBuilderX86_64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ memory_barrier->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorX86_64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+}
+
void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
@@ -1170,8 +1200,7 @@ void LocationsBuilderX86_64::VisitReturn(HReturn* ret) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- locations->SetInAt(0,
- Location::FpuRegisterLocation(XMM0));
+ locations->SetInAt(0, Location::FpuRegisterLocation(XMM0));
break;
default:
@@ -1411,7 +1440,6 @@ void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
case Primitive::kPrimDouble:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
- locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
break;
@@ -1439,26 +1467,22 @@ void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) {
case Primitive::kPrimFloat: {
DCHECK(in.Equals(out));
- CpuRegister constant = locations->GetTemp(0).AsRegister<CpuRegister>();
- XmmRegister mask = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
// Implement float negation with an exclusive or with value
// 0x80000000 (mask for bit 31, representing the sign of a
// single-precision floating-point number).
- __ movq(constant, Immediate(INT64_C(0x80000000)));
- __ movd(mask, constant);
+ __ movss(mask, codegen_->LiteralInt32Address(0x80000000));
__ xorps(out.AsFpuRegister<XmmRegister>(), mask);
break;
}
case Primitive::kPrimDouble: {
DCHECK(in.Equals(out));
- CpuRegister constant = locations->GetTemp(0).AsRegister<CpuRegister>();
- XmmRegister mask = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
// Implement double negation with an exclusive or with value
// 0x8000000000000000 (mask for bit 63, representing the sign of
// a double-precision floating-point number).
- __ movq(constant, Immediate(INT64_C(0x8000000000000000)));
- __ movd(mask, constant);
+ __ movsd(mask, codegen_->LiteralInt64Address(INT64_C(0x8000000000000000)));
__ xorpd(out.AsFpuRegister<XmmRegister>(), mask);
break;
}
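
The XOR-with-sign-mask idiom is exact for every input, including NaN and -0.0, which an arithmetic `0 - x` would not be (it yields +0.0 for x == +0.0). The same transform as plain C++, for reference:

    #include <cstdint>
    #include <cstring>
    float NegateViaSignBit(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));  // mirror xorps with 0x80000000
      bits ^= UINT32_C(0x80000000);          // flip bit 31 only
      std::memcpy(&f, &bits, sizeof(f));
      return f;
    }
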
@@ -1605,19 +1629,19 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-float' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case Primitive::kPrimLong:
// Processing a Dex `long-to-float' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-float' instruction.
- locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
@@ -1636,19 +1660,19 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-double' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case Primitive::kPrimLong:
// Processing a Dex `long-to-double' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case Primitive::kPrimFloat:
// Processing a Dex `float-to-double' instruction.
- locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
@@ -1902,17 +1926,56 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-float' instruction.
- __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
+ if (in.IsRegister()) {
+ __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
+ } else if (in.IsConstant()) {
+ int32_t v = in.GetConstant()->AsIntConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (v == 0) {
+ __ xorps(dest, dest);
+ } else {
+ __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v)));
+ }
+ } else {
+ __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()), false);
+ }
break;
case Primitive::kPrimLong:
// Processing a Dex `long-to-float' instruction.
- __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
+ if (in.IsRegister()) {
+ __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
+ } else if (in.IsConstant()) {
+ int64_t v = in.GetConstant()->AsLongConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (v == 0) {
+ __ xorps(dest, dest);
+ } else {
+ __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v)));
+ }
+ } else {
+ __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()), true);
+ }
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-float' instruction.
- __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
+ if (in.IsFpuRegister()) {
+ __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
+ } else if (in.IsConstant()) {
+ double v = in.GetConstant()->AsDoubleConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (bit_cast<int64_t, double>(v) == 0) {
+ __ xorps(dest, dest);
+ } else {
+ __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v)));
+ }
+ } else {
+ __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ }
break;
default:
@@ -1930,17 +1993,56 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-double' instruction.
- __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
+ if (in.IsRegister()) {
+ __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
+ } else if (in.IsConstant()) {
+ int32_t v = in.GetConstant()->AsIntConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (v == 0) {
+ __ xorpd(dest, dest);
+ } else {
+ __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v)));
+ }
+ } else {
+ __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()), false);
+ }
break;
case Primitive::kPrimLong:
// Processing a Dex `long-to-double' instruction.
- __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
+ if (in.IsRegister()) {
+ __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
+ } else if (in.IsConstant()) {
+ int64_t v = in.GetConstant()->AsLongConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (v == 0) {
+ __ xorpd(dest, dest);
+ } else {
+ __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v)));
+ }
+ } else {
+ __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()), true);
+ }
break;
case Primitive::kPrimFloat:
// Processing a Dex `float-to-double' instruction.
- __ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
+ if (in.IsFpuRegister()) {
+ __ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
+ } else if (in.IsConstant()) {
+ float v = in.GetConstant()->AsFloatConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (bit_cast<int32_t, float>(v) == 0) {
+ __ xorpd(dest, dest);
+ } else {
+ __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v)));
+ }
+ } else {
+ __ cvtss2sd(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ }
break;
default:
@@ -3120,7 +3222,7 @@ void LocationsBuilderX86_64::HandleFieldSet(HInstruction* instruction,
if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
locations->SetInAt(1, Location::RequiresFpuRegister());
} else {
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(instruction->InputAt(1)));
}
if (needs_write_barrier) {
// Temporary registers for the write barrier.
@@ -3147,24 +3249,46 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction,
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- __ movb(Address(base, offset), value.AsRegister<CpuRegister>());
+ if (value.IsConstant()) {
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ __ movb(Address(base, offset), Immediate(v));
+ } else {
+ __ movb(Address(base, offset), value.AsRegister<CpuRegister>());
+ }
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- __ movw(Address(base, offset), value.AsRegister<CpuRegister>());
+ if (value.IsConstant()) {
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ __ movw(Address(base, offset), Immediate(v));
+ } else {
+ __ movw(Address(base, offset), value.AsRegister<CpuRegister>());
+ }
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- __ movl(Address(base, offset), value.AsRegister<CpuRegister>());
+ if (value.IsConstant()) {
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ __ movl(Address(base, offset), Immediate(v));
+ } else {
+ __ movl(Address(base, offset), value.AsRegister<CpuRegister>());
+ }
break;
}
case Primitive::kPrimLong: {
- __ movq(Address(base, offset), value.AsRegister<CpuRegister>());
+ if (value.IsConstant()) {
+ int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(v));
+ int32_t v_32 = v;
+ __ movq(Address(base, offset), Immediate(v_32));
+ } else {
+ __ movq(Address(base, offset), value.AsRegister<CpuRegister>());
+ }
break;
}
@@ -3283,8 +3407,7 @@ void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(
- 1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (Primitive::IsFloatingPointType(instruction->GetType())) {
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
} else {
@@ -3423,7 +3546,7 @@ void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) {
1, Location::RegisterOrConstant(instruction->InputAt(1)));
locations->SetInAt(2, Location::RequiresRegister());
if (value_type == Primitive::kPrimLong) {
- locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RegisterOrInt32LongConstant(instruction->InputAt(2)));
} else if (value_type == Primitive::kPrimFloat || value_type == Primitive::kPrimDouble) {
locations->SetInAt(2, Location::RequiresFpuRegister());
} else {
@@ -3511,8 +3634,8 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
__ movl(Address(obj, offset), value.AsRegister<CpuRegister>());
} else {
DCHECK(value.IsConstant()) << value;
- __ movl(Address(obj, offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ __ movl(Address(obj, offset), Immediate(v));
}
} else {
DCHECK(index.IsRegister()) << index;
@@ -3521,8 +3644,9 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
value.AsRegister<CpuRegister>());
} else {
DCHECK(value.IsConstant()) << value;
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
__ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ Immediate(v));
}
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -3546,12 +3670,25 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- DCHECK(value.IsRegister());
- __ movq(Address(obj, offset), value.AsRegister<CpuRegister>());
+ if (value.IsRegister()) {
+ __ movq(Address(obj, offset), value.AsRegister<CpuRegister>());
+ } else {
+ int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(v));
+ int32_t v_32 = v;
+ __ movq(Address(obj, offset), Immediate(v_32));
+ }
} else {
- DCHECK(value.IsRegister());
- __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
- value.AsRegister<CpuRegister>());
+ if (value.IsRegister()) {
+ __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
+ value.AsRegister<CpuRegister>());
+ } else {
+ int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(v));
+ int32_t v_32 = v;
+ __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
+ Immediate(v_32));
+ }
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
@@ -4044,9 +4181,11 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
SlowPathCodeX86_64* slow_path = nullptr;
// Return 0 if `obj` is null.
- // TODO: avoid this check if we know obj is not null.
- __ testl(obj, obj);
- __ j(kEqual, &zero);
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, &zero);
+ }
// Compare the class of `obj` with `cls`.
__ movl(out, Address(obj, class_offset));
if (cls.IsRegister()) {
@@ -4070,8 +4209,12 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ movl(out, Immediate(1));
__ jmp(&done);
}
- __ Bind(&zero);
- __ movl(out, Immediate(0));
+
+ if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+ __ Bind(&zero);
+ __ movl(out, Immediate(0));
+ }
+
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
}
@@ -4096,9 +4239,11 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
- // TODO: avoid this check if we know obj is not null.
- __ testl(obj, obj);
- __ j(kEqual, slow_path->GetExitLabel());
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ }
// Compare the class of `obj` with `cls`.
__ movl(temp, Address(obj, class_offset));
if (cls.IsRegister()) {
@@ -4137,13 +4282,7 @@ void LocationsBuilderX86_64::HandleBitwiseOperation(HBinaryOperation* instructio
DCHECK(instruction->GetResultType() == Primitive::kPrimInt
|| instruction->GetResultType() == Primitive::kPrimLong);
locations->SetInAt(0, Location::RequiresRegister());
- if (instruction->GetType() == Primitive::kPrimInt) {
- locations->SetInAt(1, Location::Any());
- } else {
- // We can handle 32 bit constants.
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrInt32LongConstant(instruction->InputAt(1)));
- }
+ locations->SetInAt(1, Location::Any());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4204,25 +4343,43 @@ void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* in
if (second.IsConstant()) {
second_is_constant = true;
value = second.GetConstant()->AsLongConstant()->GetValue();
- DCHECK(IsInt<32>(value));
}
+ bool is_int32_value = IsInt<32>(value);
if (instruction->IsAnd()) {
if (second_is_constant) {
- __ andq(first_reg, Immediate(static_cast<int32_t>(value)));
+ if (is_int32_value) {
+ __ andq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ andq(first_reg, codegen_->LiteralInt64Address(value));
+ }
+ } else if (second.IsDoubleStackSlot()) {
+ __ andq(first_reg, Address(CpuRegister(RSP), second.GetStackIndex()));
} else {
__ andq(first_reg, second.AsRegister<CpuRegister>());
}
} else if (instruction->IsOr()) {
if (second_is_constant) {
- __ orq(first_reg, Immediate(static_cast<int32_t>(value)));
+ if (is_int32_value) {
+ __ orq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ orq(first_reg, codegen_->LiteralInt64Address(value));
+ }
+ } else if (second.IsDoubleStackSlot()) {
+ __ orq(first_reg, Address(CpuRegister(RSP), second.GetStackIndex()));
} else {
__ orq(first_reg, second.AsRegister<CpuRegister>());
}
} else {
DCHECK(instruction->IsXor());
if (second_is_constant) {
- __ xorq(first_reg, Immediate(static_cast<int32_t>(value)));
+ if (is_int32_value) {
+ __ xorq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ xorq(first_reg, codegen_->LiteralInt64Address(value));
+ }
+ } else if (second.IsDoubleStackSlot()) {
+ __ xorq(first_reg, Address(CpuRegister(RSP), second.GetStackIndex()));
} else {
__ xorq(first_reg, second.AsRegister<CpuRegister>());
}
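
The `is_int32_value` split exists because x86-64 ALU instructions accept only sign-extended 32-bit immediates; wider constants must come from a register or from memory, which the RIP-relative literal pool provides. The two emitted shapes, side by side:

    // IsInt<32>(value): andq $imm32, %rax         // immediate form
    // otherwise:        andq literal(%rip), %rax  // constant pooled near code
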
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index b4876ef161..6cdc82262c 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -102,10 +102,10 @@ class SlowPathCodeX86_64 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(SlowPathCodeX86_64);
};
-class ParallelMoveResolverX86_64 : public ParallelMoveResolver {
+class ParallelMoveResolverX86_64 : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
- : ParallelMoveResolver(allocator), codegen_(codegen) {}
+ : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
void EmitMove(size_t index) OVERRIDE;
void EmitSwap(size_t index) OVERRIDE;
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index fc3dd01ef5..91cd60acce 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -20,10 +20,78 @@
namespace art {
-void HDeadCodeElimination::Run() {
+static void MarkReachableBlocks(HBasicBlock* block, ArenaBitVector* visited) {
+ int block_id = block->GetBlockId();
+ if (visited->IsBitSet(block_id)) {
+ return;
+ }
+ visited->SetBit(block_id);
+
+ HInstruction* last_instruction = block->GetLastInstruction();
+ if (last_instruction->IsIf()) {
+ HIf* if_instruction = last_instruction->AsIf();
+ HInstruction* condition = if_instruction->InputAt(0);
+ if (!condition->IsIntConstant()) {
+ MarkReachableBlocks(if_instruction->IfTrueSuccessor(), visited);
+ MarkReachableBlocks(if_instruction->IfFalseSuccessor(), visited);
+ } else if (condition->AsIntConstant()->IsOne()) {
+ MarkReachableBlocks(if_instruction->IfTrueSuccessor(), visited);
+ } else {
+ DCHECK(condition->AsIntConstant()->IsZero());
+ MarkReachableBlocks(if_instruction->IfFalseSuccessor(), visited);
+ }
+ } else {
+ for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) {
+ MarkReachableBlocks(block->GetSuccessors().Get(i), visited);
+ }
+ }
+}
+
+void HDeadCodeElimination::MaybeRecordDeadBlock(HBasicBlock* block) {
+ if (stats_ != nullptr) {
+ stats_->RecordStat(MethodCompilationStat::kRemovedDeadInstruction,
+ block->GetPhis().CountSize() + block->GetInstructions().CountSize());
+ }
+}
+
+void HDeadCodeElimination::RemoveDeadBlocks() {
+ // Classify blocks as reachable/unreachable.
+ ArenaAllocator* allocator = graph_->GetArena();
+ ArenaBitVector live_blocks(allocator, graph_->GetBlocks().Size(), false);
+ MarkReachableBlocks(graph_->GetEntryBlock(), &live_blocks);
+
+ // Remove all dead blocks. Process blocks in post-order, because removal needs
+ // the block's chain of dominators.
+ for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ if (live_blocks.IsBitSet(block->GetBlockId())) {
+ continue;
+ }
+ MaybeRecordDeadBlock(block);
+ block->DisconnectAndDelete();
+ }
+
+ // Connect successive blocks created by dead branches. Order does not matter.
+ for (HReversePostOrderIterator it(*graph_); !it.Done();) {
+ HBasicBlock* block = it.Current();
+ if (block->IsEntryBlock() || block->GetSuccessors().Size() != 1u) {
+ it.Advance();
+ continue;
+ }
+ HBasicBlock* successor = block->GetSuccessors().Get(0);
+ if (successor->IsExitBlock() || successor->GetPredecessors().Size() != 1u) {
+ it.Advance();
+ continue;
+ }
+ block->MergeWith(successor);
+
+ // Reiterate on this block in case it can be merged with its new successor.
+ }
+}
+
+void HDeadCodeElimination::RemoveDeadInstructions() {
// Process basic blocks in post-order in the dominator tree, so that
- // a dead instruction depending on another dead instruction is
- // removed.
+ // a dead instruction depending on another dead instruction is removed.
for (HPostOrderIterator b(*graph_); !b.Done(); b.Advance()) {
HBasicBlock* block = b.Current();
// Traverse this block's instructions in backward order and remove
@@ -38,11 +106,18 @@ void HDeadCodeElimination::Run() {
if (!inst->HasSideEffects()
&& !inst->CanThrow()
&& !inst->IsSuspendCheck()
+ && !inst->IsMemoryBarrier() // If we added an explicit barrier then we should keep it.
&& !inst->HasUses()) {
block->RemoveInstruction(inst);
+ MaybeRecordStat(MethodCompilationStat::kRemovedDeadInstruction);
}
}
}
}
+void HDeadCodeElimination::Run() {
+ RemoveDeadBlocks();
+ RemoveDeadInstructions();
+}
+
} // namespace art
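As a standalone illustration of MarkReachableBlocks above: the pass walks the CFG from the entry block, and when an If's condition has been folded to a constant it follows only the taken successor, so the untaken branch is never marked and RemoveDeadBlocks deletes it. A simplified sketch with a hypothetical Block type and a successors[0]/successors[1] true/false convention, rather than ART's HBasicBlock/HIf:

#include <vector>

struct Block {
  int id = 0;
  std::vector<Block*> successors;
  // -1: condition is not a constant; 0/1: condition folded to false/true.
  int constant_condition = -1;
};

void MarkReachable(Block* block, std::vector<bool>* visited) {
  if ((*visited)[block->id]) {
    return;
  }
  (*visited)[block->id] = true;
  if (block->constant_condition == 1) {
    MarkReachable(block->successors[0], visited);  // only the true successor
  } else if (block->constant_condition == 0) {
    MarkReachable(block->successors[1], visited);  // only the false successor
  } else {
    for (Block* s : block->successors) {
      MarkReachable(s, visited);
    }
  }
}
// Blocks whose bit stays false are unreachable and can be disconnected and deleted.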
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 3db2c3ff3f..0bea0fc1c2 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -19,6 +19,7 @@
#include "nodes.h"
#include "optimization.h"
+#include "optimizing_compiler_stats.h"
namespace art {
@@ -28,8 +29,10 @@ namespace art {
*/
class HDeadCodeElimination : public HOptimization {
public:
- explicit HDeadCodeElimination(HGraph* graph)
- : HOptimization(graph, true, kDeadCodeEliminationPassName) {}
+ HDeadCodeElimination(HGraph* graph,
+ OptimizingCompilerStats* stats = nullptr,
+ const char* name = kDeadCodeEliminationPassName)
+ : HOptimization(graph, true, name, stats) {}
void Run() OVERRIDE;
@@ -37,6 +40,10 @@ class HDeadCodeElimination : public HOptimization {
"dead_code_elimination";
private:
+ void MaybeRecordDeadBlock(HBasicBlock* block);
+ void RemoveDeadBlocks();
+ void RemoveDeadInstructions();
+
DISALLOW_COPY_AND_ASSIGN(HDeadCodeElimination);
};
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 3a56c6c68f..e1649fd3cd 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -88,23 +88,36 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
// Visit this block's list of phis.
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
// Ensure this block's list of phis contains only phis.
- if (!it.Current()->IsPhi()) {
+ if (!current->IsPhi()) {
AddError(StringPrintf("Block %d has a non-phi in its phi list.",
current_block_->GetBlockId()));
}
- it.Current()->Accept(this);
+ if (current->GetNext() == nullptr && current != block->GetLastPhi()) {
+ AddError(StringPrintf("The recorded last phi of block %d does not match "
+ "the actual last phi %d.",
+ current_block_->GetBlockId(),
+ current->GetId()));
+ }
+ current->Accept(this);
}
// Visit this block's list of instructions.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done();
- it.Advance()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
    // Ensure this block's list of instructions does not contain phis.
- if (it.Current()->IsPhi()) {
+ if (current->IsPhi()) {
AddError(StringPrintf("Block %d has a phi in its non-phi list.",
current_block_->GetBlockId()));
}
- it.Current()->Accept(this);
+ if (current->GetNext() == nullptr && current != block->GetLastInstruction()) {
+ AddError(StringPrintf("The recorded last instruction of block %d does not match "
+ "the actual last instruction %d.",
+ current_block_->GetBlockId(),
+ current->GetId()));
+ }
+ current->Accept(this);
}
}
@@ -251,6 +264,8 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
}
}
+ const ArenaBitVector& loop_blocks = loop_header->GetLoopInformation()->GetBlocks();
+
// Ensure there is only one back edge per loop.
size_t num_back_edges =
loop_header->GetLoopInformation()->GetBackEdges().Size();
@@ -263,19 +278,41 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
"Loop defined by header %d has several back edges: %zu.",
id,
num_back_edges));
+ } else {
+ DCHECK_EQ(num_back_edges, 1u);
+ int back_edge_id = loop_header->GetLoopInformation()->GetBackEdges().Get(0)->GetBlockId();
+ if (!loop_blocks.IsBitSet(back_edge_id)) {
+ AddError(StringPrintf(
+ "Loop defined by header %d has an invalid back edge %d.",
+ id,
+ back_edge_id));
+ }
}
- // Ensure all blocks in the loop are dominated by the loop header.
- const ArenaBitVector& loop_blocks =
- loop_header->GetLoopInformation()->GetBlocks();
+ // Ensure all blocks in the loop are live and dominated by the loop header.
for (uint32_t i : loop_blocks.Indexes()) {
HBasicBlock* loop_block = GetGraph()->GetBlocks().Get(i);
- if (!loop_header->Dominates(loop_block)) {
+ if (loop_block == nullptr) {
+ AddError(StringPrintf("Loop defined by header %d contains a previously removed block %d.",
+ id,
+ i));
+ } else if (!loop_header->Dominates(loop_block)) {
AddError(StringPrintf("Loop block %d not dominated by loop header %d.",
- loop_block->GetBlockId(),
+ i,
id));
}
}
+
+ // If this is a nested loop, ensure the outer loops contain a superset of the blocks.
+ for (HLoopInformationOutwardIterator it(*loop_header); !it.Done(); it.Advance()) {
+ HLoopInformation* outer_info = it.Current();
+ if (!loop_blocks.IsSubsetOf(&outer_info->GetBlocks())) {
+ AddError(StringPrintf("Blocks of loop defined by header %d are not a subset of blocks of "
+ "an outer loop defined by header %d.",
+ id,
+ outer_info->GetHeader()->GetBlockId()));
+ }
+ }
}
void SSAChecker::VisitInstruction(HInstruction* instruction) {
@@ -393,8 +430,10 @@ void SSAChecker::HandleBooleanInput(HInstruction* instruction, size_t input_inde
static_cast<int>(input_index),
value));
}
- } else if (input->GetType() == Primitive::kPrimInt && input->IsPhi()) {
- // TODO: We need a data-flow analysis which determines if the Phi is boolean.
+ } else if (input->GetType() == Primitive::kPrimInt
+ && (input->IsPhi() || input->IsAnd() || input->IsOr() || input->IsXor())) {
+ // TODO: We need a data-flow analysis to determine if the Phi or
+ // binary operation is actually Boolean. Allow for now.
} else if (input->GetType() != Primitive::kPrimBoolean) {
AddError(StringPrintf(
"%s instruction %d has a non-Boolean input %d whose type is: %s.",
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 4c283788b5..ca9cbc3d01 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -192,6 +192,10 @@ class HGraphVisualizerPrinter : public HGraphVisitor {
output_ << " " << phi->GetRegNumber();
}
+ void VisitMemoryBarrier(HMemoryBarrier* barrier) OVERRIDE {
+ output_ << " " << barrier->GetBarrierKind();
+ }
+
bool IsPass(const char* name) {
return strcmp(pass_name_, name) == 0;
}
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 74848d5d96..708733e28c 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -55,7 +55,7 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
buckets_owned_(allocator, num_buckets_, false),
num_entries_(to_copy.num_entries_) {
// ArenaAllocator returns zeroed memory, so entries of buckets_ and
- // buckets_owned_ are initialized to nullptr and false, respectively.
+ // buckets_owned_ are initialized to null and false, respectively.
DCHECK(IsPowerOfTwo(num_buckets_));
if (num_buckets_ == to_copy.num_buckets_) {
// Hash table remains the same size. We copy the bucket pointers and leave
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 6d2a8d77e2..bffd639e83 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -190,7 +190,7 @@ bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
}
// Run simple optimizations on the graph.
- HDeadCodeElimination dce(callee_graph);
+ HDeadCodeElimination dce(callee_graph, stats_);
HConstantFolding fold(callee_graph);
InstructionSimplifier simplify(callee_graph, stats_);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index afbc490150..2df7c166d8 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -43,6 +43,8 @@ class InstructionSimplifierVisitor : public HGraphVisitor {
void VisitSuspendCheck(HSuspendCheck* check) OVERRIDE;
void VisitEqual(HEqual* equal) OVERRIDE;
+ void VisitNotEqual(HNotEqual* equal) OVERRIDE;
+ void VisitBooleanNot(HBooleanNot* bool_not) OVERRIDE;
void VisitArraySet(HArraySet* equal) OVERRIDE;
void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
void VisitNullCheck(HNullCheck* instruction) OVERRIDE;
@@ -60,6 +62,7 @@ class InstructionSimplifierVisitor : public HGraphVisitor {
void VisitSub(HSub* instruction) OVERRIDE;
void VisitUShr(HUShr* instruction) OVERRIDE;
void VisitXor(HXor* instruction) OVERRIDE;
+ void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
OptimizingCompilerStats* stats_;
bool simplification_occurred_ = false;
@@ -87,10 +90,6 @@ void InstructionSimplifierVisitor::Run() {
// current index, so don't advance the iterator.
continue;
}
- if (simplifications_at_current_position_ >= kMaxSamePositionSimplifications) {
- LOG(WARNING) << "Too many simplifications (" << simplifications_at_current_position_
- << ") occurred at the current position.";
- }
simplifications_at_current_position_ = 0;
it.Advance();
}
@@ -161,6 +160,10 @@ void InstructionSimplifierVisitor::VisitNullCheck(HNullCheck* null_check) {
void InstructionSimplifierVisitor::VisitCheckCast(HCheckCast* check_cast) {
HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
+ if (!check_cast->InputAt(0)->CanBeNull()) {
+ check_cast->ClearMustDoNullCheck();
+ }
+
if (!load_class->IsResolved()) {
    // If the class couldn't be resolved, it's not safe to compare against it. Its
    // default type would be Top, which might be wider than the actual class type
@@ -178,6 +181,12 @@ void InstructionSimplifierVisitor::VisitCheckCast(HCheckCast* check_cast) {
}
}
+void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) {
+ if (!instruction->InputAt(0)->CanBeNull()) {
+ instruction->ClearMustDoNullCheck();
+ }
+}
+
void InstructionSimplifierVisitor::VisitSuspendCheck(HSuspendCheck* check) {
HBasicBlock* block = check->GetBlock();
// Currently always keep the suspend check at entry.
@@ -195,21 +204,62 @@ void InstructionSimplifierVisitor::VisitSuspendCheck(HSuspendCheck* check) {
}
void InstructionSimplifierVisitor::VisitEqual(HEqual* equal) {
- HInstruction* input1 = equal->InputAt(0);
- HInstruction* input2 = equal->InputAt(1);
- if (input1->GetType() == Primitive::kPrimBoolean && input2->IsIntConstant()) {
- if (input2->AsIntConstant()->GetValue() == 1) {
- // Replace (bool_value == 1) with bool_value
- equal->ReplaceWith(equal->InputAt(0));
- equal->GetBlock()->RemoveInstruction(equal);
- } else {
- // We should replace (bool_value == 0) with !bool_value, but we unfortunately
- // do not have such instruction.
- DCHECK_EQ(input2->AsIntConstant()->GetValue(), 0);
+ HInstruction* input_const = equal->GetConstantRight();
+ if (input_const != nullptr) {
+ HInstruction* input_value = equal->GetLeastConstantLeft();
+ if (input_value->GetType() == Primitive::kPrimBoolean && input_const->IsIntConstant()) {
+ HBasicBlock* block = equal->GetBlock();
+ if (input_const->AsIntConstant()->IsOne()) {
+ // Replace (bool_value == true) with bool_value
+ equal->ReplaceWith(input_value);
+ block->RemoveInstruction(equal);
+ RecordSimplification();
+ } else {
+ // Replace (bool_value == false) with !bool_value
+ DCHECK(input_const->AsIntConstant()->IsZero());
+ block->ReplaceAndRemoveInstructionWith(
+ equal, new (block->GetGraph()->GetArena()) HBooleanNot(input_value));
+ RecordSimplification();
+ }
+ }
+ }
+}
+
+void InstructionSimplifierVisitor::VisitNotEqual(HNotEqual* not_equal) {
+ HInstruction* input_const = not_equal->GetConstantRight();
+ if (input_const != nullptr) {
+ HInstruction* input_value = not_equal->GetLeastConstantLeft();
+ if (input_value->GetType() == Primitive::kPrimBoolean && input_const->IsIntConstant()) {
+ HBasicBlock* block = not_equal->GetBlock();
+ if (input_const->AsIntConstant()->IsOne()) {
+ // Replace (bool_value != true) with !bool_value
+ block->ReplaceAndRemoveInstructionWith(
+ not_equal, new (block->GetGraph()->GetArena()) HBooleanNot(input_value));
+ RecordSimplification();
+ } else {
+ // Replace (bool_value != false) with bool_value
+ DCHECK(input_const->AsIntConstant()->IsZero());
+ not_equal->ReplaceWith(input_value);
+ block->RemoveInstruction(not_equal);
+ RecordSimplification();
+ }
}
}
}
+void InstructionSimplifierVisitor::VisitBooleanNot(HBooleanNot* bool_not) {
+ HInstruction* parent = bool_not->InputAt(0);
+ if (parent->IsBooleanNot()) {
+ HInstruction* value = parent->InputAt(0);
+ // Replace (!(!bool_value)) with bool_value
+ bool_not->ReplaceWith(value);
+ bool_not->GetBlock()->RemoveInstruction(bool_not);
+ // It is possible that `parent` is dead at this point but we leave
+ // its removal to DCE for simplicity.
+ RecordSimplification();
+ }
+}
+
void InstructionSimplifierVisitor::VisitArrayLength(HArrayLength* instruction) {
HInstruction* input = instruction->InputAt(0);
// If the array is a NewArray with constant size, replace the array length
@@ -388,9 +438,16 @@ void InstructionSimplifierVisitor::VisitMul(HMul* instruction) {
if (Primitive::IsIntOrLongType(type)) {
int64_t factor = Int64FromConstant(input_cst);
- // We expect the `0` case to have been handled in the constant folding pass.
- DCHECK_NE(factor, 0);
- if (IsPowerOfTwo(factor)) {
+    // Even though constant propagation also takes care of the zero case, other
+    // optimizations can lead to a multiplication by zero.
+ if (factor == 0) {
+ // Replace code looking like
+ // MUL dst, src, 0
+ // with
+ // 0
+ instruction->ReplaceWith(input_cst);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ } else if (IsPowerOfTwo(factor)) {
// Replace code looking like
// MUL dst, src, pow_of_2
// with
@@ -424,7 +481,8 @@ void InstructionSimplifierVisitor::VisitNeg(HNeg* instruction) {
return;
}
- if (input->IsSub() && input->HasOnlyOneNonEnvironmentUse()) {
+ if (input->IsSub() && input->HasOnlyOneNonEnvironmentUse() &&
+ !Primitive::IsFloatingPointType(input->GetType())) {
// Replace code looking like
// SUB tmp, a, b
// NEG dst, tmp
@@ -435,6 +493,7 @@ void InstructionSimplifierVisitor::VisitNeg(HNeg* instruction) {
// worse code. In particular, we do not want the live ranges of `a` and `b`
// to be extended if we are not sure the initial 'SUB' instruction can be
// removed.
+    // We do not perform this optimization for floating-point types because we
+    // could lose the sign of zero.
HSub* sub = input->AsSub();
HSub* new_sub =
new (GetGraph()->GetArena()) HSub(instruction->GetType(), sub->GetRight(), sub->GetLeft());
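The Boolean rewrites added above (VisitEqual, VisitNotEqual, VisitBooleanNot) all preserve semantics for values restricted to {0, 1}. A throwaway standalone check, not ART code, that enumerates both cases:

#include <cassert>

// (b == 1) -> b, (b == 0) -> !b
int SimplifyEqual(int b, int c)    { return c == 1 ? b : !b; }
// (b != 1) -> !b, (b != 0) -> b
int SimplifyNotEqual(int b, int c) { return c == 1 ? !b : b; }

int main() {
  for (int b = 0; b <= 1; ++b) {
    assert(SimplifyEqual(b, 1) == (b == 1));
    assert(SimplifyEqual(b, 0) == (b == 0));
    assert(SimplifyNotEqual(b, 1) == (b != 1));
    assert(SimplifyNotEqual(b, 0) == (b != 0));
    assert(!!b == b);  // !(!b) -> b, the VisitBooleanNot rewrite
  }
  return 0;
}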
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 9a6062fedf..932192e4fd 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -863,7 +863,7 @@ void IntrinsicCodeGeneratorARM::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
// Note that the null check must have been done earlier.
- DCHECK(!invoke->CanDoImplicitNullCheck());
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
Register argument = locations->InAt(1).AsRegister<Register>();
__ cmp(argument, ShifterOperand(0));
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index d3a4e6ca15..117d6a4279 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1007,7 +1007,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
// Note that the null check must have been done earlier.
- DCHECK(!invoke->CanDoImplicitNullCheck());
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
Register argument = WRegisterFrom(locations->InAt(1));
__ Cmp(argument, 0);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 3c7a2660db..a8e2cdf1f6 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -828,7 +828,7 @@ void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
return;
@@ -962,7 +962,7 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
// Note that the null check must have been done earlier.
- DCHECK(!invoke->CanDoImplicitNullCheck());
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
Register argument = locations->InAt(1).AsRegister<Register>();
__ testl(argument, argument);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index d9a1c31c77..5d24d1fbfb 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -704,7 +704,6 @@ static void CreateSSE41FPToIntLocations(ArenaAllocator* arena,
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
- locations->AddTemp(Location::RequiresFpuRegister());
return;
}
@@ -732,14 +731,12 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
// Implement RoundFloat as t1 = floor(input + 0.5f); convert to int.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- XmmRegister maxInt = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
Label done, nan;
X86_64Assembler* assembler = GetAssembler();
- // Generate 0.5 into inPlusPointFive.
- __ movl(out, Immediate(bit_cast<int32_t, float>(0.5f)));
- __ movd(inPlusPointFive, out, false);
+ // Load 0.5 into inPlusPointFive.
+ __ movss(inPlusPointFive, codegen_->LiteralFloatAddress(0.5f));
// Add in the input.
__ addss(inPlusPointFive, in);
@@ -747,12 +744,8 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
// And truncate to an integer.
__ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));
- __ movl(out, Immediate(kPrimIntMax));
- // maxInt = int-to-float(out)
- __ cvtsi2ss(maxInt, out);
-
// if inPlusPointFive >= maxInt goto done
- __ comiss(inPlusPointFive, maxInt);
+ __ comiss(inPlusPointFive, codegen_->LiteralFloatAddress(static_cast<float>(kPrimIntMax)));
__ j(kAboveEqual, &done);
// if input == NaN goto nan
@@ -782,14 +775,12 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
// Implement RoundDouble as t1 = floor(input + 0.5); convert to long.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- XmmRegister maxLong = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
Label done, nan;
X86_64Assembler* assembler = GetAssembler();
- // Generate 0.5 into inPlusPointFive.
- __ movq(out, Immediate(bit_cast<int64_t, double>(0.5)));
- __ movd(inPlusPointFive, out, true);
+ // Load 0.5 into inPlusPointFive.
+ __ movsd(inPlusPointFive, codegen_->LiteralDoubleAddress(0.5));
// Add in the input.
__ addsd(inPlusPointFive, in);
@@ -797,12 +788,8 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
// And truncate to an integer.
__ roundsd(inPlusPointFive, inPlusPointFive, Immediate(1));
- __ movq(out, Immediate(kPrimLongMax));
- // maxLong = long-to-double(out)
- __ cvtsi2sd(maxLong, out, true);
-
// if inPlusPointFive >= maxLong goto done
- __ comisd(inPlusPointFive, maxLong);
+ __ comisd(inPlusPointFive, codegen_->LiteralDoubleAddress(static_cast<double>(kPrimLongMax)));
__ j(kAboveEqual, &done);
// if input == NaN goto nan
@@ -886,7 +873,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
// Note that the null check must have been done earlier.
- DCHECK(!invoke->CanDoImplicitNullCheck());
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
CpuRegister argument = locations->InAt(1).AsRegister<CpuRegister>();
__ testl(argument, argument);
@@ -960,26 +947,48 @@ static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke)
LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(invoke->InputAt(1)));
}
static void GenPoke(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) {
CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>();
- CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
+ Location value = locations->InAt(1);
// x86 allows unaligned access. We do not have to check the input or use specific instructions
// to avoid a SIGBUS.
switch (size) {
case Primitive::kPrimByte:
- __ movb(Address(address, 0), value);
+ if (value.IsConstant()) {
+ __ movb(Address(address, 0),
+ Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant())));
+ } else {
+ __ movb(Address(address, 0), value.AsRegister<CpuRegister>());
+ }
break;
case Primitive::kPrimShort:
- __ movw(Address(address, 0), value);
+ if (value.IsConstant()) {
+ __ movw(Address(address, 0),
+ Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant())));
+ } else {
+ __ movw(Address(address, 0), value.AsRegister<CpuRegister>());
+ }
break;
case Primitive::kPrimInt:
- __ movl(Address(address, 0), value);
+ if (value.IsConstant()) {
+ __ movl(Address(address, 0),
+ Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant())));
+ } else {
+ __ movl(Address(address, 0), value.AsRegister<CpuRegister>());
+ }
break;
case Primitive::kPrimLong:
- __ movq(Address(address, 0), value);
+ if (value.IsConstant()) {
+ int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(v));
+ int32_t v_32 = v;
+ __ movq(Address(address, 0), Immediate(v_32));
+ } else {
+ __ movq(Address(address, 0), value.AsRegister<CpuRegister>());
+ }
break;
default:
LOG(FATAL) << "Type not recognized for poke: " << size;
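A note on the long case above: x86-64 has no form of movq that stores a full 64-bit immediate to memory; the 8-byte store encodes a 32-bit immediate that the CPU sign-extends. The DCHECK(IsInt<32>(v)) holds because the locations builder only admits constants through RegisterOrInt32LongConstant. The guard in isolation, as a sketch with a hypothetical helper name:

#include <cassert>
#include <cstdint>

// True when `v` can be stored by the 64-bit move with a 32-bit immediate,
// which the CPU sign-extends to 64 bits.
bool FitsMovqStoreImmediate(int64_t v) {
  return v == static_cast<int64_t>(static_cast<int32_t>(v));
}

int main() {
  assert(FitsMovqStoreImmediate(-1));             // all ones sign-extends fine
  assert(FitsMovqStoreImmediate(0x7FFFFFFF));     // INT32_MAX
  assert(!FitsMovqStoreImmediate(0x80000000LL));  // would need zero-extension
  assert(!FitsMovqStoreImmediate(1LL << 40));
  return 0;
}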
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index de876be9ab..c3a99150c4 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -285,17 +285,26 @@ class Location : public ValueObject {
bool Contains(Location other) const {
if (Equals(other)) {
return true;
- } else if (IsFpuRegisterPair() && other.IsFpuRegister()) {
- return other.reg() == low() || other.reg() == high();
- } else if (IsRegisterPair() && other.IsRegister()) {
- return other.reg() == low() || other.reg() == high();
- } else if (IsDoubleStackSlot() && other.IsStackSlot()) {
- return (GetStackIndex() == other.GetStackIndex())
- || (GetStackIndex() + 4 == other.GetStackIndex());
+ } else if (IsPair() || IsDoubleStackSlot()) {
+ return ToLow().Equals(other) || ToHigh().Equals(other);
}
return false;
}
+ bool OverlapsWith(Location other) const {
+    // Only check the overlapping cases that can happen with our register allocation algorithm.
+ bool overlap = Contains(other) || other.Contains(*this);
+ if (kIsDebugBuild && !overlap) {
+      // Note: These are also overlapping cases. But we are not able to handle them in
+      // ParallelMoveResolverWithSwap. Make sure that we do not meet such cases with our compiler.
+ if ((IsPair() && other.IsPair()) || (IsDoubleStackSlot() && other.IsDoubleStackSlot())) {
+ DCHECK(!Contains(other.ToLow()));
+ DCHECK(!Contains(other.ToHigh()));
+ }
+ }
+ return overlap;
+ }
+
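The overlap relation added above generalizes the old special cases: a pair location contains each of its halves, and a double stack slot contains both of its word slots, so OverlapsWith only needs containment in either direction. A simplified standalone model (not ART's Location, which packs kinds and payloads into a single word):

struct Loc {
  int low = -1;
  int high = -1;  // -1 means a single-slot/single-register location

  bool IsPair() const { return high != -1; }

  bool Contains(const Loc& other) const {
    if (low == other.low && high == other.high) {
      return true;  // equal locations
    }
    // A pair contains each of its halves.
    return IsPair() && !other.IsPair() &&
           (other.low == low || other.low == high);
  }

  bool OverlapsWith(const Loc& other) const {
    return Contains(other) || other.Contains(*this);
  }
};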
const char* DebugString() const {
switch (GetKind()) {
case kInvalid: return "I";
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 5fca4fab22..3205b5e991 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -416,26 +416,6 @@ static void UpdateInputsUsers(HInstruction* instruction) {
DCHECK(!instruction->HasEnvironment());
}
-void HBasicBlock::InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor) {
- DCHECK(!cursor->IsPhi());
- DCHECK(!instruction->IsPhi());
- DCHECK_EQ(instruction->GetId(), -1);
- DCHECK_NE(cursor->GetId(), -1);
- DCHECK_EQ(cursor->GetBlock(), this);
- DCHECK(!instruction->IsControlFlow());
- instruction->next_ = cursor;
- instruction->previous_ = cursor->previous_;
- cursor->previous_ = instruction;
- if (GetFirstInstruction() == cursor) {
- instructions_.first_instruction_ = instruction;
- } else {
- instruction->previous_->next_ = instruction;
- }
- instruction->SetBlock(this);
- instruction->SetId(GetGraph()->GetNextInstructionId());
- UpdateInputsUsers(instruction);
-}
-
void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement) {
DCHECK(initial->GetBlock() == this);
@@ -463,23 +443,27 @@ void HBasicBlock::AddPhi(HPhi* phi) {
Add(&phis_, this, phi);
}
+void HBasicBlock::InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor) {
+ DCHECK(!cursor->IsPhi());
+ DCHECK(!instruction->IsPhi());
+ DCHECK_EQ(instruction->GetId(), -1);
+ DCHECK_NE(cursor->GetId(), -1);
+ DCHECK_EQ(cursor->GetBlock(), this);
+ DCHECK(!instruction->IsControlFlow());
+ instruction->SetBlock(this);
+ instruction->SetId(GetGraph()->GetNextInstructionId());
+ UpdateInputsUsers(instruction);
+ instructions_.InsertInstructionBefore(instruction, cursor);
+}
+
void HBasicBlock::InsertPhiAfter(HPhi* phi, HPhi* cursor) {
DCHECK_EQ(phi->GetId(), -1);
DCHECK_NE(cursor->GetId(), -1);
DCHECK_EQ(cursor->GetBlock(), this);
- if (cursor->next_ == nullptr) {
- cursor->next_ = phi;
- phi->previous_ = cursor;
- DCHECK(phi->next_ == nullptr);
- } else {
- phi->next_ = cursor->next_;
- phi->previous_ = cursor;
- cursor->next_ = phi;
- phi->next_->previous_ = phi;
- }
phi->SetBlock(this);
phi->SetId(GetGraph()->GetNextInstructionId());
UpdateInputsUsers(phi);
+ phis_.InsertInstructionAfter(phi, cursor);
}
static void Remove(HInstructionList* instruction_list,
@@ -546,6 +530,34 @@ void HInstructionList::AddInstruction(HInstruction* instruction) {
}
}
+void HInstructionList::InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor) {
+ DCHECK(Contains(cursor));
+ if (cursor == first_instruction_) {
+ cursor->previous_ = instruction;
+ instruction->next_ = cursor;
+ first_instruction_ = instruction;
+ } else {
+ instruction->previous_ = cursor->previous_;
+ instruction->next_ = cursor;
+ cursor->previous_ = instruction;
+ instruction->previous_->next_ = instruction;
+ }
+}
+
+void HInstructionList::InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor) {
+ DCHECK(Contains(cursor));
+ if (cursor == last_instruction_) {
+ cursor->next_ = instruction;
+ instruction->previous_ = cursor;
+ last_instruction_ = instruction;
+ } else {
+ instruction->next_ = cursor->next_;
+ instruction->previous_ = cursor;
+ cursor->next_ = instruction;
+ instruction->next_->previous_ = instruction;
+ }
+}
+
void HInstructionList::RemoveInstruction(HInstruction* instruction) {
if (instruction->previous_ != nullptr) {
instruction->previous_->next_ = instruction->next_;
@@ -660,6 +672,11 @@ void HPhi::AddInput(HInstruction* input) {
input->AddUseAt(this, inputs_.Size() - 1);
}
+void HPhi::RemoveInputAt(size_t index) {
+ RemoveAsUserOfInput(index);
+ inputs_.DeleteAt(index);
+}
+
#define DEFINE_ACCEPT(name, super) \
void H##name::Accept(HGraphVisitor* visitor) { \
visitor->Visit##name(this); \
@@ -702,7 +719,7 @@ HConstant* HUnaryOperation::TryStaticEvaluation() const {
// TODO: Implement static evaluation of long unary operations.
//
// Do not exit with a fatal condition here. Instead, simply
- // return `nullptr' to notify the caller that this instruction
+ // return `null' to notify the caller that this instruction
// cannot (yet) be statically evaluated.
return nullptr;
}
@@ -738,7 +755,7 @@ HConstant* HBinaryOperation::GetConstantRight() const {
}
// If `GetConstantRight()` returns one of the input, this returns the other
-// one. Otherwise it returns nullptr.
+// one. Otherwise it returns null.
HInstruction* HBinaryOperation::GetLeastConstantLeft() const {
HInstruction* most_constant_right = GetConstantRight();
if (most_constant_right == nullptr) {
@@ -855,6 +872,15 @@ bool HBasicBlock::HasSinglePhi() const {
return !GetPhis().IsEmpty() && GetFirstPhi()->GetNext() == nullptr;
}
+size_t HInstructionList::CountSize() const {
+ size_t size = 0;
+ HInstruction* current = first_instruction_;
+ for (; current != nullptr; current = current->GetNext()) {
+ size++;
+ }
+ return size;
+}
+
void HInstructionList::SetBlockOfInstructions(HBasicBlock* block) const {
for (HInstruction* current = first_instruction_;
current != nullptr;
@@ -886,40 +912,167 @@ void HInstructionList::Add(const HInstructionList& instruction_list) {
}
}
-void HBasicBlock::DisconnectFromAll() {
- DCHECK(dominated_blocks_.IsEmpty()) << "Unimplemented scenario";
+void HBasicBlock::DisconnectAndDelete() {
+ // Dominators must be removed after all the blocks they dominate. This way
+ // a loop header is removed last, a requirement for correct loop information
+ // iteration.
+ DCHECK(dominated_blocks_.IsEmpty());
+
+ // Remove the block from all loops it is included in.
+ for (HLoopInformationOutwardIterator it(*this); !it.Done(); it.Advance()) {
+ HLoopInformation* loop_info = it.Current();
+ loop_info->Remove(this);
+ if (loop_info->IsBackEdge(*this)) {
+ // This deliberately leaves the loop in an inconsistent state and will
+ // fail SSAChecker unless the entire loop is removed during the pass.
+ loop_info->RemoveBackEdge(this);
+ }
+ }
+ // Disconnect the block from its predecessors and update their control-flow
+ // instructions.
for (size_t i = 0, e = predecessors_.Size(); i < e; ++i) {
- predecessors_.Get(i)->successors_.Delete(this);
+ HBasicBlock* predecessor = predecessors_.Get(i);
+ HInstruction* last_instruction = predecessor->GetLastInstruction();
+ predecessor->RemoveInstruction(last_instruction);
+ predecessor->RemoveSuccessor(this);
+ if (predecessor->GetSuccessors().Size() == 1u) {
+ DCHECK(last_instruction->IsIf());
+ predecessor->AddInstruction(new (graph_->GetArena()) HGoto());
+ } else {
+ // The predecessor has no remaining successors and therefore must be dead.
+      // We deliberately leave it without a control-flow instruction so that
+      // the SSAChecker fails unless it, too, is removed during the pass.
+ DCHECK_EQ(predecessor->GetSuccessors().Size(), 0u);
+ }
}
+ predecessors_.Reset();
+
+ // Disconnect the block from its successors and update their dominators
+ // and phis.
for (size_t i = 0, e = successors_.Size(); i < e; ++i) {
- successors_.Get(i)->predecessors_.Delete(this);
- }
- dominator_->dominated_blocks_.Delete(this);
+ HBasicBlock* successor = successors_.Get(i);
+ // Delete this block from the list of predecessors.
+ size_t this_index = successor->GetPredecessorIndexOf(this);
+ successor->predecessors_.DeleteAt(this_index);
+
+ // Check that `successor` has other predecessors, otherwise `this` is the
+ // dominator of `successor` which violates the order DCHECKed at the top.
+ DCHECK(!successor->predecessors_.IsEmpty());
+
+ // Recompute the successor's dominator.
+ HBasicBlock* old_dominator = successor->GetDominator();
+ HBasicBlock* new_dominator = successor->predecessors_.Get(0);
+ for (size_t j = 1, f = successor->predecessors_.Size(); j < f; ++j) {
+ new_dominator = graph_->FindCommonDominator(
+ new_dominator, successor->predecessors_.Get(j));
+ }
+ if (old_dominator != new_dominator) {
+ successor->SetDominator(new_dominator);
+ old_dominator->RemoveDominatedBlock(successor);
+ new_dominator->AddDominatedBlock(successor);
+ }
- predecessors_.Reset();
+ // Remove this block's entries in the successor's phis.
+ if (successor->predecessors_.Size() == 1u) {
+ // The successor has just one predecessor left. Replace phis with the only
+ // remaining input.
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ HPhi* phi = phi_it.Current()->AsPhi();
+ phi->ReplaceWith(phi->InputAt(1 - this_index));
+ successor->RemovePhi(phi);
+ }
+ } else {
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ phi_it.Current()->AsPhi()->RemoveInputAt(this_index);
+ }
+ }
+ }
successors_.Reset();
- dominator_ = nullptr;
- graph_ = nullptr;
+
+ // Disconnect from the dominator.
+ dominator_->RemoveDominatedBlock(this);
+ SetDominator(nullptr);
+
+ // Delete from the graph. The function safely deletes remaining instructions
+ // and updates the reverse post order.
+ graph_->DeleteDeadBlock(this);
+ SetGraph(nullptr);
}
void HBasicBlock::MergeWith(HBasicBlock* other) {
- DCHECK(successors_.IsEmpty()) << "Unimplemented block merge scenario";
- DCHECK(dominated_blocks_.IsEmpty()
- || (dominated_blocks_.Size() == 1 && dominated_blocks_.Get(0) == other))
- << "Unimplemented block merge scenario";
+ DCHECK_EQ(GetGraph(), other->GetGraph());
+ DCHECK(GetDominatedBlocks().Contains(other));
+ DCHECK_EQ(GetSuccessors().Size(), 1u);
+ DCHECK_EQ(GetSuccessors().Get(0), other);
+ DCHECK_EQ(other->GetPredecessors().Size(), 1u);
+ DCHECK_EQ(other->GetPredecessors().Get(0), this);
DCHECK(other->GetPhis().IsEmpty());
+ // Move instructions from `other` to `this`.
+ DCHECK(EndsWithControlFlowInstruction());
+ RemoveInstruction(GetLastInstruction());
+ instructions_.Add(other->GetInstructions());
+ other->instructions_.SetBlockOfInstructions(this);
+ other->instructions_.Clear();
+
+ // Remove `other` from the loops it is included in.
+ for (HLoopInformationOutwardIterator it(*other); !it.Done(); it.Advance()) {
+ HLoopInformation* loop_info = it.Current();
+ loop_info->Remove(other);
+ if (loop_info->IsBackEdge(*other)) {
+ loop_info->ClearBackEdges();
+ loop_info->AddBackEdge(this);
+ }
+ }
+
+ // Update links to the successors of `other`.
successors_.Reset();
- dominated_blocks_.Reset();
+ while (!other->successors_.IsEmpty()) {
+ HBasicBlock* successor = other->successors_.Get(0);
+ successor->ReplacePredecessor(other, this);
+ }
+
+ // Update the dominator tree.
+ dominated_blocks_.Delete(other);
+ for (size_t i = 0, e = other->GetDominatedBlocks().Size(); i < e; ++i) {
+ HBasicBlock* dominated = other->GetDominatedBlocks().Get(i);
+ dominated_blocks_.Add(dominated);
+ dominated->SetDominator(this);
+ }
+ other->dominated_blocks_.Reset();
+ other->dominator_ = nullptr;
+
+ // Clear the list of predecessors of `other` in preparation of deleting it.
+ other->predecessors_.Reset();
+
+ // Delete `other` from the graph. The function updates reverse post order.
+ graph_->DeleteDeadBlock(other);
+ other->SetGraph(nullptr);
+}
+
+void HBasicBlock::MergeWithInlined(HBasicBlock* other) {
+ DCHECK_NE(GetGraph(), other->GetGraph());
+ DCHECK(GetDominatedBlocks().IsEmpty());
+ DCHECK(GetSuccessors().IsEmpty());
+ DCHECK(!EndsWithControlFlowInstruction());
+ DCHECK_EQ(other->GetPredecessors().Size(), 1u);
+ DCHECK(other->GetPredecessors().Get(0)->IsEntryBlock());
+ DCHECK(other->GetPhis().IsEmpty());
+ DCHECK(!other->IsInLoop());
+
+ // Move instructions from `other` to `this`.
instructions_.Add(other->GetInstructions());
- other->GetInstructions().SetBlockOfInstructions(this);
+ other->instructions_.SetBlockOfInstructions(this);
- while (!other->GetSuccessors().IsEmpty()) {
- HBasicBlock* successor = other->GetSuccessors().Get(0);
+ // Update links to the successors of `other`.
+ successors_.Reset();
+ while (!other->successors_.IsEmpty()) {
+ HBasicBlock* successor = other->successors_.Get(0);
successor->ReplacePredecessor(other, this);
}
+ // Update the dominator tree.
for (size_t i = 0, e = other->GetDominatedBlocks().Size(); i < e; ++i) {
HBasicBlock* dominated = other->GetDominatedBlocks().Get(i);
dominated_blocks_.Add(dominated);
@@ -961,6 +1114,24 @@ static void MakeRoomFor(GrowableArray<HBasicBlock*>* blocks,
}
}
+void HGraph::DeleteDeadBlock(HBasicBlock* block) {
+ DCHECK_EQ(block->GetGraph(), this);
+ DCHECK(block->GetSuccessors().IsEmpty());
+ DCHECK(block->GetPredecessors().IsEmpty());
+ DCHECK(block->GetDominatedBlocks().IsEmpty());
+ DCHECK(block->GetDominator() == nullptr);
+
+ for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ block->RemoveInstruction(it.Current());
+ }
+ for (HBackwardInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ block->RemovePhi(it.Current()->AsPhi());
+ }
+
+ reverse_post_order_.Delete(block);
+ blocks_.Put(block->GetBlockId(), nullptr);
+}
+
void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
if (GetBlocks().Size() == 3) {
// Simple case of an entry block, a body block, and an exit block.
@@ -993,7 +1164,7 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
HBasicBlock* first = entry_block_->GetSuccessors().Get(0);
DCHECK(!first->IsInLoop());
- at->MergeWith(first);
+ at->MergeWithInlined(first);
exit_block_->ReplaceWith(to);
// Update all predecessors of the exit block (now the `to` block)
@@ -1064,8 +1235,10 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
outer_graph->AddBlock(current);
outer_graph->reverse_post_order_.Put(++index_of_at, current);
if (info != nullptr) {
- info->Add(current);
current->SetLoopInformation(info);
+ for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) {
+ loop_it.Current()->Add(current);
+ }
}
}
}
@@ -1075,8 +1248,10 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
outer_graph->AddBlock(to);
outer_graph->reverse_post_order_.Put(++index_of_at, to);
if (info != nullptr) {
- info->Add(to);
to->SetLoopInformation(info);
+ for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) {
+ loop_it.Current()->Add(to);
+ }
if (info->IsBackEdge(*at)) {
// Only `at` can become a back edge, as the inlined blocks
// are predecessors of `at`.
@@ -1121,53 +1296,6 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
invoke->GetBlock()->RemoveInstruction(invoke);
}
-void HGraph::MergeEmptyBranches(HBasicBlock* start_block, HBasicBlock* end_block) {
- // Find the two branches of an If.
- DCHECK_EQ(start_block->GetSuccessors().Size(), 2u);
- HBasicBlock* left_branch = start_block->GetSuccessors().Get(0);
- HBasicBlock* right_branch = start_block->GetSuccessors().Get(1);
-
- // Make sure this is a diamond control-flow path.
- DCHECK_EQ(left_branch->GetSuccessors().Get(0), end_block);
- DCHECK_EQ(right_branch->GetSuccessors().Get(0), end_block);
- DCHECK_EQ(end_block->GetPredecessors().Size(), 2u);
- DCHECK_EQ(start_block, end_block->GetDominator());
-
- // Disconnect the branches and merge the two blocks. This will move
- // all instructions from 'end_block' to 'start_block'.
- DCHECK(left_branch->IsSingleGoto());
- DCHECK(right_branch->IsSingleGoto());
- left_branch->DisconnectFromAll();
- right_branch->DisconnectFromAll();
- start_block->RemoveInstruction(start_block->GetLastInstruction());
- start_block->MergeWith(end_block);
-
- // Delete the now redundant blocks from the graph.
- blocks_.Put(left_branch->GetBlockId(), nullptr);
- blocks_.Put(right_branch->GetBlockId(), nullptr);
- blocks_.Put(end_block->GetBlockId(), nullptr);
-
- // Update reverse post order.
- reverse_post_order_.Delete(left_branch);
- reverse_post_order_.Delete(right_branch);
- reverse_post_order_.Delete(end_block);
-
- // Update loops which contain the code.
- for (HLoopInformationOutwardIterator it(*start_block); !it.Done(); it.Advance()) {
- HLoopInformation* loop_info = it.Current();
- DCHECK(loop_info->Contains(*left_branch));
- DCHECK(loop_info->Contains(*right_branch));
- DCHECK(loop_info->Contains(*end_block));
- loop_info->Remove(left_branch);
- loop_info->Remove(right_branch);
- loop_info->Remove(end_block);
- if (loop_info->IsBackEdge(*end_block)) {
- loop_info->RemoveBackEdge(end_block);
- loop_info->AddBackEdge(start_block);
- }
- }
-}
-
std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs) {
ScopedObjectAccess soa(Thread::Current());
os << "["
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 649038b532..18a8225f55 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -19,6 +19,7 @@
#include "base/arena_containers.h"
#include "base/arena_object.h"
+#include "dex/compiler_enums.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "handle.h"
#include "handle_scope.h"
@@ -74,6 +75,10 @@ class HInstructionList {
void AddInstruction(HInstruction* instruction);
void RemoveInstruction(HInstruction* instruction);
+ // Insert `instruction` before/after an existing instruction `cursor`.
+ void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
+ void InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor);
+
// Return true if this list contains `instruction`.
bool Contains(HInstruction* instruction) const;
@@ -92,6 +97,9 @@ class HInstructionList {
void AddAfter(HInstruction* cursor, const HInstructionList& instruction_list);
void Add(const HInstructionList& instruction_list);
+ // Return the number of instructions in the list. This is an expensive operation.
+ size_t CountSize() const;
+
private:
HInstruction* first_instruction_;
HInstruction* last_instruction_;
@@ -163,7 +171,8 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
// Inline this graph in `outer_graph`, replacing the given `invoke` instruction.
void InlineInto(HGraph* outer_graph, HInvoke* invoke);
- void MergeEmptyBranches(HBasicBlock* start_block, HBasicBlock* end_block);
+ // Removes `block` from the graph.
+ void DeleteDeadBlock(HBasicBlock* block);
void SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor);
void SimplifyLoop(HBasicBlock* header);
@@ -243,8 +252,9 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
return CreateConstant(value, &cached_long_constants_);
}
- private:
HBasicBlock* FindCommonDominator(HBasicBlock* first, HBasicBlock* second) const;
+
+ private:
void VisitBlockForDominatorTree(HBasicBlock* block,
HBasicBlock* predecessor,
GrowableArray<size_t>* visits);
@@ -446,6 +456,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
HBasicBlock* GetDominator() const { return dominator_; }
void SetDominator(HBasicBlock* dominator) { dominator_ = dominator; }
void AddDominatedBlock(HBasicBlock* block) { dominated_blocks_.Add(block); }
+ void RemoveDominatedBlock(HBasicBlock* block) { dominated_blocks_.Delete(block); }
void ReplaceDominatedBlock(HBasicBlock* existing, HBasicBlock* new_block) {
for (size_t i = 0, e = dominated_blocks_.Size(); i < e; ++i) {
if (dominated_blocks_.Get(i) == existing) {
@@ -466,8 +477,9 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
HInstruction* GetFirstInstruction() const { return instructions_.first_instruction_; }
HInstruction* GetLastInstruction() const { return instructions_.last_instruction_; }
const HInstructionList& GetInstructions() const { return instructions_; }
- const HInstructionList& GetPhis() const { return phis_; }
HInstruction* GetFirstPhi() const { return phis_.first_instruction_; }
+ HInstruction* GetLastPhi() const { return phis_.last_instruction_; }
+ const HInstructionList& GetPhis() const { return phis_; }
void AddSuccessor(HBasicBlock* block) {
successors_.Add(block);
@@ -544,7 +556,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
// that this method does not update the graph, reverse post order, loop
// information, nor make sure the blocks are consistent (for example ending
// with a control flow instruction).
- void MergeWith(HBasicBlock* other);
+ void MergeWithInlined(HBasicBlock* other);
// Replace `this` with `other`. Predecessors, successors, and dominated blocks
// of `this` are moved to `other`.
@@ -553,12 +565,17 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
// with a control flow instruction).
void ReplaceWith(HBasicBlock* other);
- // Disconnects `this` from all its predecessors, successors and the dominator.
- // It assumes that `this` does not dominate any blocks.
- // Note that this method does not update the graph, reverse post order, loop
- // information, nor make sure the blocks are consistent (for example ending
- // with a control flow instruction).
- void DisconnectFromAll();
+ // Merge `other` at the end of `this`. This method updates loops, reverse post
+ // order, links to predecessors, successors, dominators and deletes the block
+  // from the graph. The two blocks must be successive, i.e. `this` must be
+  // the only predecessor of `other` and `other` the only successor of `this`.
+ void MergeWith(HBasicBlock* other);
+
+ // Disconnects `this` from all its predecessors, successors and dominator,
+  // removes it from all loops it is included in and, finally, from the graph.
+ // The block must not dominate any other block. Predecessors and successors
+ // are safely updated.
+ void DisconnectAndDelete();
void AddInstruction(HInstruction* instruction);
void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
@@ -718,6 +735,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(LoadString, Instruction) \
M(Local, Instruction) \
M(LongConstant, Constant) \
+ M(MemoryBarrier, Instruction) \
M(MonitorOperation, Instruction) \
M(Mul, BinaryOperation) \
M(Neg, UnaryOperation) \
@@ -908,6 +926,12 @@ class HUserRecord : public ValueObject {
HUseListNode<T>* use_node_;
};
+// TODO: Add better documentation to this class and maybe refactor with more suggestive names.
+// - Has(All)SideEffects suggests that all the side effects are present but only the
+//   ChangesSomething flag is considered.
+// - DependsOn suggests that there is a real dependency between side effects but it only
+//   checks the DependsOnSomething flag.
+//
// Represents the side effects an instruction may have.
class SideEffects : public ValueObject {
public:
@@ -1141,8 +1165,6 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
virtual bool CanThrow() const { return false; }
bool HasSideEffects() const { return side_effects_.HasSideEffects(); }
- virtual bool ActAsNullConstant() const { return false; }
-
// Does not apply for all instructions, but having this at top level greatly
// simplifies the null check elimination.
virtual bool CanBeNull() const {
@@ -1150,7 +1172,10 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
return true;
}
- virtual bool CanDoImplicitNullCheck() const { return false; }
+ virtual bool CanDoImplicitNullCheckOn(HInstruction* obj) const {
+ UNUSED(obj);
+ return false;
+ }
void SetReferenceTypeInfo(ReferenceTypeInfo reference_type_info) {
DCHECK_EQ(GetType(), Primitive::kPrimNot);
@@ -1618,7 +1643,7 @@ class HUnaryOperation : public HExpression<1> {
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
- // be evaluated as a constant, return nullptr.
+ // be evaluated as a constant, return null.
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x`.
@@ -1686,7 +1711,7 @@ class HBinaryOperation : public HExpression<2> {
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
- // be evaluated as a constant, return nullptr.
+ // be evaluated as a constant, return null.
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x` and `y`.
@@ -1694,11 +1719,11 @@ class HBinaryOperation : public HExpression<2> {
virtual int64_t Evaluate(int64_t x, int64_t y) const = 0;
// Returns an input that can legally be used as the right input and is
- // constant, or nullptr.
+ // constant, or null.
HConstant* GetConstantRight() const;
// If `GetConstantRight()` returns one of the input, this returns the other
- // one. Otherwise it returns nullptr.
+ // one. Otherwise it returns null.
HInstruction* GetLeastConstantLeft() const;
DECLARE_INSTRUCTION(BinaryOperation);
@@ -2064,8 +2089,6 @@ class HNullConstant : public HConstant {
size_t ComputeHashCode() const OVERRIDE { return 0; }
- bool ActAsNullConstant() const OVERRIDE { return true; }
-
DECLARE_INSTRUCTION(NullConstant);
private:
@@ -2087,11 +2110,6 @@ class HIntConstant : public HConstant {
size_t ComputeHashCode() const OVERRIDE { return GetValue(); }
- // TODO: Null is represented by the `0` constant. In most cases we replace it
- // with a HNullConstant but we don't do it when comparing (a != null). This
- // method is an workaround until we fix the above.
- bool ActAsNullConstant() const OVERRIDE { return value_ == 0; }
-
bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
bool IsZero() const OVERRIDE { return GetValue() == 0; }
bool IsOne() const OVERRIDE { return GetValue() == 1; }
@@ -2105,7 +2123,7 @@ class HIntConstant : public HConstant {
friend class HGraph;
ART_FRIEND_TEST(GraphTest, InsertInstructionBefore);
- ART_FRIEND_TEST(ParallelMoveTest, ConstantLast);
+ ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast);
DISALLOW_COPY_AND_ASSIGN(HIntConstant);
};
@@ -2162,7 +2180,7 @@ class HInvoke : public HInstruction {
uint32_t GetDexMethodIndex() const { return dex_method_index_; }
- Intrinsics GetIntrinsic() {
+ Intrinsics GetIntrinsic() const {
return intrinsic_;
}
@@ -2217,7 +2235,8 @@ class HInvokeStaticOrDirect : public HInvoke {
invoke_type_(invoke_type),
is_recursive_(is_recursive) {}
- bool CanDoImplicitNullCheck() const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ UNUSED(obj);
// We access the method via the dex cache so we can't do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
return false;
@@ -2249,9 +2268,9 @@ class HInvokeVirtual : public HInvoke {
: HInvoke(arena, number_of_arguments, return_type, dex_pc, dex_method_index),
vtable_index_(vtable_index) {}
- bool CanDoImplicitNullCheck() const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
// TODO: Add implicit null checks in intrinsics.
- return !GetLocations()->Intrinsified();
+ return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
}
uint32_t GetVTableIndex() const { return vtable_index_; }
@@ -2275,9 +2294,9 @@ class HInvokeInterface : public HInvoke {
: HInvoke(arena, number_of_arguments, return_type, dex_pc, dex_method_index),
imt_index_(imt_index) {}
- bool CanDoImplicitNullCheck() const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
// TODO: Add implicit null checks in intrinsics.
- return !GetLocations()->Intrinsified();
+ return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
}
uint32_t GetImtIndex() const { return imt_index_; }
@@ -2738,6 +2757,7 @@ class HPhi : public HInstruction {
size_t InputCount() const OVERRIDE { return inputs_.Size(); }
void AddInput(HInstruction* input);
+ void RemoveInputAt(size_t index);
Primitive::Type GetType() const OVERRIDE { return type_; }
void SetType(Primitive::Type type) { type_ = type; }
@@ -2847,8 +2867,8 @@ class HInstanceFieldGet : public HExpression<1> {
return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
}
- bool CanDoImplicitNullCheck() const OVERRIDE {
- return GetFieldOffset().Uint32Value() < kPageSize;
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ return (obj == InputAt(0)) && GetFieldOffset().Uint32Value() < kPageSize;
}
size_t ComputeHashCode() const OVERRIDE {
@@ -2881,8 +2901,8 @@ class HInstanceFieldSet : public HTemplateInstruction<2> {
SetRawInputAt(1, value);
}
- bool CanDoImplicitNullCheck() const OVERRIDE {
- return GetFieldOffset().Uint32Value() < kPageSize;
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ return (obj == InputAt(0)) && GetFieldOffset().Uint32Value() < kPageSize;
}
const FieldInfo& GetFieldInfo() const { return field_info_; }
@@ -2912,7 +2932,8 @@ class HArrayGet : public HExpression<2> {
UNUSED(other);
return true;
}
- bool CanDoImplicitNullCheck() const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ UNUSED(obj);
// TODO: We can be smarter here.
// Currently, the array access is always preceded by an ArrayLength or a NullCheck
// which generates the implicit null check. There are cases when these can be removed
@@ -2954,7 +2975,8 @@ class HArraySet : public HTemplateInstruction<3> {
return needs_type_check_;
}
- bool CanDoImplicitNullCheck() const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ UNUSED(obj);
// TODO: Same as for ArrayGet.
return false;
}
@@ -3006,7 +3028,9 @@ class HArrayLength : public HExpression<1> {
UNUSED(other);
return true;
}
- bool CanDoImplicitNullCheck() const OVERRIDE { return true; }
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ return obj == InputAt(0);
+ }
DECLARE_INSTRUCTION(ArrayLength);
@@ -3343,6 +3367,7 @@ class HInstanceOf : public HExpression<2> {
uint32_t dex_pc)
: HExpression(Primitive::kPrimBoolean, SideEffects::None()),
class_is_final_(class_is_final),
+ must_do_null_check_(true),
dex_pc_(dex_pc) {
SetRawInputAt(0, object);
SetRawInputAt(1, constant);
@@ -3362,10 +3387,15 @@ class HInstanceOf : public HExpression<2> {
bool IsClassFinal() const { return class_is_final_; }
+ // Used only in code generation.
+ bool MustDoNullCheck() const { return must_do_null_check_; }
+ void ClearMustDoNullCheck() { must_do_null_check_ = false; }
+
DECLARE_INSTRUCTION(InstanceOf);
private:
const bool class_is_final_;
+ bool must_do_null_check_;
const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
@@ -3406,6 +3436,7 @@ class HCheckCast : public HTemplateInstruction<2> {
uint32_t dex_pc)
: HTemplateInstruction(SideEffects::None()),
class_is_final_(class_is_final),
+ must_do_null_check_(true),
dex_pc_(dex_pc) {
SetRawInputAt(0, object);
SetRawInputAt(1, constant);
@@ -3424,6 +3455,9 @@ class HCheckCast : public HTemplateInstruction<2> {
bool CanThrow() const OVERRIDE { return true; }
+ bool MustDoNullCheck() const { return must_do_null_check_; }
+ void ClearMustDoNullCheck() { must_do_null_check_ = false; }
+
uint32_t GetDexPc() const { return dex_pc_; }
bool IsClassFinal() const { return class_is_final_; }
@@ -3432,11 +3466,28 @@ class HCheckCast : public HTemplateInstruction<2> {
private:
const bool class_is_final_;
+ bool must_do_null_check_;
const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(HCheckCast);
};
+class HMemoryBarrier : public HTemplateInstruction<0> {
+ public:
+ explicit HMemoryBarrier(MemBarrierKind barrier_kind)
+ : HTemplateInstruction(SideEffects::None()),
+ barrier_kind_(barrier_kind) {}
+
+ MemBarrierKind GetBarrierKind() { return barrier_kind_; }
+
+ DECLARE_INSTRUCTION(MemoryBarrier);
+
+ private:
+ const MemBarrierKind barrier_kind_;
+
+ DISALLOW_COPY_AND_ASSIGN(HMemoryBarrier);
+};
+
class HMonitorOperation : public HTemplateInstruction<1> {
public:
enum OperationKind {
@@ -3502,7 +3553,7 @@ class MoveOperands : public ArenaObject<kArenaAllocMisc> {
// True if this blocks a move from the given location.
bool Blocks(Location loc) const {
- return !IsEliminated() && (source_.Contains(loc) || loc.Contains(source_));
+ return !IsEliminated() && source_.OverlapsWith(loc);
}
// A move is redundant if it's been eliminated, if its source and
@@ -3571,8 +3622,8 @@ class HParallelMove : public HTemplateInstruction<0> {
}
}
for (size_t i = 0, e = moves_.Size(); i < e; ++i) {
- DCHECK(!destination.Equals(moves_.Get(i).GetDestination()))
- << "Same destination for two moves in a parallel move.";
+ DCHECK(!destination.OverlapsWith(moves_.Get(i).GetDestination()))
+ << "Overlapped destination for two moves in a parallel move.";
}
}
moves_.Add(MoveOperands(source, destination, type, instruction));
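
The rename from CanDoImplicitNullCheck() to CanDoImplicitNullCheckOn(HInstruction* obj) makes the query object-aware: an instruction may claim an implicit null check only for the object it actually dereferences, and (for field accesses) only while the offset stays within the guard page. A minimal standalone sketch of the predicate, using a hypothetical stand-in type rather than ART's HInstruction:

    #include <cstdint>

    // Stand-in sketch: an access carries the implicit null check only for the
    // object it dereferences, and only while the offset stays in the guard page.
    constexpr uint32_t kPageSize = 4096;

    struct Access {
      const void* base;   // the object this access dereferences (InputAt(0))
      uint32_t offset;    // field offset from base

      bool CanDoImplicitNullCheckOn(const void* obj) const {
        return obj == base && offset < kPageSize;
      }
    };

    int main() {
      char object;                        // any non-null object
      Access near{&object, 8};            // low offset: inside the guard page
      Access far{&object, kPageSize};     // offset past the guard page
      return (near.CanDoImplicitNullCheckOn(&object) &&
              !far.CanDoImplicitNullCheckOn(&object) &&
              !near.CanDoImplicitNullCheckOn(nullptr)) ? 0 : 1;
    }
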
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index b13e07eb22..c46a21955c 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -21,9 +21,9 @@
namespace art {
-void HOptimization::MaybeRecordStat(MethodCompilationStat compilation_stat) const {
+void HOptimization::MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count) const {
if (stats_ != nullptr) {
- stats_->RecordStat(compilation_stat);
+ stats_->RecordStat(compilation_stat, count);
}
}
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 8b2028177b..ccf8de9f6a 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -48,7 +48,7 @@ class HOptimization : public ValueObject {
void Check();
protected:
- void MaybeRecordStat(MethodCompilationStat compilation_stat) const;
+ void MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count = 1) const;
HGraph* const graph_;
// Used to record stats about the optimization.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a95696a468..05451bcaa6 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -208,6 +208,12 @@ class OptimizingCompiler FINAL : public Compiler {
void UnInit() const OVERRIDE;
+ void MaybeRecordStat(MethodCompilationStat compilation_stat) const {
+ if (compilation_stats_.get() != nullptr) {
+ compilation_stats_->RecordStat(compilation_stat);
+ }
+ }
+
private:
// Whether we should run any optimization or register allocation. If false, will
// just run the code generation after the graph was built.
@@ -226,7 +232,7 @@ class OptimizingCompiler FINAL : public Compiler {
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit) const;
- mutable OptimizingCompilerStats compilation_stats_;
+ std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
std::unique_ptr<std::ostream> visualizer_output_;
@@ -243,7 +249,6 @@ OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
run_optimizations_(
(driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime)
&& !driver->GetCompilerOptions().GetDebuggable()),
- compilation_stats_(),
delegate_(Create(driver, Compiler::Kind::kQuick)) {}
void OptimizingCompiler::Init() {
@@ -258,6 +263,9 @@ void OptimizingCompiler::Init() {
<< "Invoke the compiler with '-j1'.";
visualizer_output_.reset(new std::ofstream(cfg_file_name));
}
+ if (driver->GetDumpStats()) {
+ compilation_stats_.reset(new OptimizingCompilerStats());
+ }
}
void OptimizingCompiler::UnInit() const {
@@ -265,7 +273,9 @@ void OptimizingCompiler::UnInit() const {
}
OptimizingCompiler::~OptimizingCompiler() {
- compilation_stats_.Log();
+ if (compilation_stats_.get() != nullptr) {
+ compilation_stats_->Log();
+ }
}
void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu) const {
@@ -310,10 +320,11 @@ static void RunOptimizations(HGraph* graph,
const DexCompilationUnit& dex_compilation_unit,
PassInfoPrinter* pass_info_printer,
StackHandleScopeCollection* handles) {
- HDeadCodeElimination dce(graph);
+ HDeadCodeElimination dce1(graph, stats);
+ HDeadCodeElimination dce2(graph, stats, "dead_code_elimination_final");
HConstantFolding fold1(graph);
InstructionSimplifier simplify1(graph, stats);
- HBooleanSimplifier boolean_not(graph);
+ HBooleanSimplifier boolean_simplify(graph);
HInliner inliner(graph, dex_compilation_unit, dex_compilation_unit, driver, stats);
@@ -329,20 +340,21 @@ static void RunOptimizations(HGraph* graph,
HOptimization* optimizations[] = {
&intrinsics,
- &dce,
+ &dce1,
&fold1,
&simplify1,
+ &inliner,
// BooleanSimplifier depends on the InstructionSimplifier removing redundant
// suspend checks to recognize empty blocks.
- &boolean_not,
- &inliner,
+ &boolean_simplify,
&fold2,
&side_effects,
&gvn,
&licm,
&bce,
&type_propagation,
- &simplify2
+ &simplify2,
+ &dce2,
};
RunOptimizations(optimizations, arraysize(optimizations), pass_info_printer);
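
In the reordered pass list above, position is the schedule: the inliner now runs before BooleanSimplifier, and a second, named DCE pass runs at the end. A standalone sketch of the array-of-passes idiom, with stand-in pass types:

    #include <cstdio>

    // Stand-in sketch of the pipeline idiom above: an ordered array of pass
    // pointers, executed front to back, so position in the array is the schedule.
    struct Pass {
      const char* name;
      void Run() const { std::printf("running %s\n", name); }
    };

    int main() {
      Pass dce1{"dead_code_elimination"};
      Pass inliner{"inliner"};
      Pass dce2{"dead_code_elimination_final"};
      const Pass* pipeline[] = { &dce1, &inliner, &dce2 };
      for (const Pass* pass : pipeline) {
        pass->Run();
      }
      return 0;
    }
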
@@ -381,7 +393,7 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
const DexCompilationUnit& dex_compilation_unit,
PassInfoPrinter* pass_info_printer) const {
StackHandleScopeCollection handles(Thread::Current());
- RunOptimizations(graph, compiler_driver, &compilation_stats_,
+ RunOptimizations(graph, compiler_driver, compilation_stats_.get(),
dex_file, dex_compilation_unit, pass_info_printer, &handles);
AllocateRegisters(graph, codegen, pass_info_printer);
@@ -397,7 +409,7 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
std::vector<uint8_t> stack_map;
codegen->BuildStackMaps(&stack_map);
- compilation_stats_.RecordStat(MethodCompilationStat::kCompiledOptimized);
+ MaybeRecordStat(MethodCompilationStat::kCompiledOptimized);
return CompiledMethod::SwapAllocCompiledMethod(
compiler_driver,
@@ -435,7 +447,7 @@ CompiledMethod* OptimizingCompiler::CompileBaseline(
std::vector<uint8_t> gc_map;
codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);
- compilation_stats_.RecordStat(MethodCompilationStat::kCompiledBaseline);
+ MaybeRecordStat(MethodCompilationStat::kCompiledBaseline);
return CompiledMethod::SwapAllocCompiledMethod(
compiler_driver,
codegen->GetInstructionSet(),
@@ -463,7 +475,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
const DexFile& dex_file) const {
UNUSED(invoke_type);
std::string method_name = PrettyMethod(method_idx, dex_file);
- compilation_stats_.RecordStat(MethodCompilationStat::kAttemptCompilation);
+ MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
InstructionSet instruction_set = compiler_driver->GetInstructionSet();
// Always use the thumb2 assembler: some runtime functionality (like implicit stack
@@ -474,12 +486,12 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
// Do not attempt to compile on architectures we do not support.
if (!IsInstructionSetSupported(instruction_set)) {
- compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
return nullptr;
}
if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
- compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledPathological);
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledPathological);
return nullptr;
}
@@ -489,7 +501,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
if ((compiler_options.GetCompilerFilter() == CompilerOptions::kSpace)
&& (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
- compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledSpaceFilter);
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledSpaceFilter);
return nullptr;
}
@@ -514,7 +526,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
compiler_driver->GetCompilerOptions()));
if (codegen.get() == nullptr) {
CHECK(!shouldCompile) << "Could not find code generator for optimizing compiler";
- compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
return nullptr;
}
codegen->GetAssembler()->cfi().SetEnabled(
@@ -531,7 +543,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
&dex_compilation_unit,
&dex_file,
compiler_driver,
- &compilation_stats_);
+ compilation_stats_.get());
VLOG(compiler) << "Building " << method_name;
@@ -558,7 +570,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
if (!graph->TryBuildingSsa()) {
// We could not transform the graph to SSA, bailout.
LOG(INFO) << "Skipping compilation of " << method_name << ": it contains a non natural loop";
- compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA);
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA);
return nullptr;
}
}
@@ -576,11 +588,11 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
VLOG(compiler) << "Compile baseline " << method_name;
if (!run_optimizations_) {
- compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedDisabled);
+ MaybeRecordStat(MethodCompilationStat::kNotOptimizedDisabled);
} else if (!can_optimize) {
- compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedTryCatch);
+ MaybeRecordStat(MethodCompilationStat::kNotOptimizedTryCatch);
} else if (!can_allocate_registers) {
- compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
+ MaybeRecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
}
return CompileBaseline(codegen.get(), compiler_driver, dex_compilation_unit);
@@ -603,9 +615,9 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
method_idx, jclass_loader, dex_file);
} else {
if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
- compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
} else {
- compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
}
}
@@ -616,7 +628,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
jclass_loader, dex_file);
if (method != nullptr) {
- compilation_stats_.RecordStat(MethodCompilationStat::kCompiledQuick);
+ MaybeRecordStat(MethodCompilationStat::kCompiledQuick);
}
return method;
}
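
With the switch to std::unique_ptr, the stats object exists only when stat dumping is requested, and the MaybeRecordStat() wrapper makes every call site null-safe. A standalone sketch of the pattern, with stand-in types:

    #include <memory>

    // Stand-in sketch of the lazily allocated stats pattern above.
    struct Stats {
      int attempts = 0;
      void Record() { ++attempts; }
    };

    class Compiler {
     public:
      explicit Compiler(bool dump_stats)
          : stats_(dump_stats ? new Stats() : nullptr) {}

      // Null-safe wrapper: call sites never check the flag themselves.
      void MaybeRecord() {
        if (stats_ != nullptr) {
          stats_->Record();
        }
      }

     private:
      std::unique_ptr<Stats> stats_;  // allocated only when dumping is enabled
    };

    int main() {
      Compiler quiet(false);
      Compiler verbose(true);
      quiet.MaybeRecord();    // no-op: no stats object was ever allocated
      verbose.MaybeRecord();  // counted
      return 0;
    }
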
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index d4a936d1c3..65c84e6942 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -29,6 +29,7 @@ enum MethodCompilationStat {
kCompiledBaseline,
kCompiledOptimized,
kCompiledQuick,
+ kInstructionSimplifications,
kInlinedInvoke,
kNotCompiledUnsupportedIsa,
kNotCompiledPathological,
@@ -48,8 +49,8 @@ enum MethodCompilationStat {
kNotCompiledVerifyAtRuntime,
kNotCompiledClassNotVerified,
kRemovedCheckedCast,
+ kRemovedDeadInstruction,
kRemovedNullCheck,
- kInstructionSimplifications,
kLastStat
};
@@ -57,8 +58,8 @@ class OptimizingCompilerStats {
public:
OptimizingCompilerStats() {}
- void RecordStat(MethodCompilationStat stat) {
- compile_stats_[stat]++;
+ void RecordStat(MethodCompilationStat stat, size_t count = 1) {
+ compile_stats_[stat] += count;
}
void Log() const {
@@ -82,7 +83,7 @@ class OptimizingCompilerStats {
for (int i = 0; i < kLastStat; i++) {
if (compile_stats_[i] != 0) {
- VLOG(compiler) << PrintMethodCompilationStat(i) << ": " << compile_stats_[i];
+ LOG(INFO) << PrintMethodCompilationStat(i) << ": " << compile_stats_[i];
}
}
}
@@ -96,6 +97,7 @@ class OptimizingCompilerStats {
case kCompiledOptimized : return "kCompiledOptimized";
case kCompiledQuick : return "kCompiledQuick";
case kInlinedInvoke : return "kInlinedInvoke";
+ case kInstructionSimplifications: return "kInstructionSimplifications";
case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
case kNotCompiledPathological : return "kNotCompiledPathological";
case kNotCompiledHugeMethod : return "kNotCompiledHugeMethod";
@@ -114,8 +116,8 @@ class OptimizingCompilerStats {
case kNotCompiledVerifyAtRuntime : return "kNotCompiledVerifyAtRuntime";
case kNotCompiledClassNotVerified : return "kNotCompiledClassNotVerified";
case kRemovedCheckedCast: return "kRemovedCheckedCast";
+ case kRemovedDeadInstruction: return "kRemovedDeadInstruction";
case kRemovedNullCheck: return "kRemovedNullCheck";
- case kInstructionSimplifications: return "kInstructionSimplifications";
default: LOG(FATAL) << "invalid stat";
}
return "";
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index ad92ca59a1..54ea6f19d4 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -17,11 +17,23 @@
#include "parallel_move_resolver.h"
#include "nodes.h"
-#include "locations.h"
namespace art {
-void ParallelMoveResolver::EmitNativeCode(HParallelMove* parallel_move) {
+void ParallelMoveResolver::BuildInitialMoveList(HParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ for (size_t i = 0; i < parallel_move->NumMoves(); ++i) {
+ MoveOperands* move = parallel_move->MoveOperandsAt(i);
+ if (!move->IsRedundant()) {
+ moves_.Add(move);
+ }
+ }
+}
+
+void ParallelMoveResolverWithSwap::EmitNativeCode(HParallelMove* parallel_move) {
DCHECK(moves_.IsEmpty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
@@ -50,20 +62,6 @@ void ParallelMoveResolver::EmitNativeCode(HParallelMove* parallel_move) {
moves_.Reset();
}
-
-void ParallelMoveResolver::BuildInitialMoveList(HParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- for (size_t i = 0; i < parallel_move->NumMoves(); ++i) {
- MoveOperands* move = parallel_move->MoveOperandsAt(i);
- if (!move->IsRedundant()) {
- moves_.Add(move);
- }
- }
-}
-
Location LowOf(Location location) {
if (location.IsRegisterPair()) {
return Location::RegisterLocation(location.low());
@@ -103,7 +101,7 @@ static void UpdateSourceOf(MoveOperands* move, Location updated_location, Locati
}
}
-MoveOperands* ParallelMoveResolver::PerformMove(size_t index) {
+MoveOperands* ParallelMoveResolverWithSwap::PerformMove(size_t index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
@@ -229,7 +227,7 @@ MoveOperands* ParallelMoveResolver::PerformMove(size_t index) {
}
}
-bool ParallelMoveResolver::IsScratchLocation(Location loc) {
+bool ParallelMoveResolverWithSwap::IsScratchLocation(Location loc) {
for (size_t i = 0; i < moves_.Size(); ++i) {
if (moves_.Get(i)->Blocks(loc)) {
return false;
@@ -245,10 +243,10 @@ bool ParallelMoveResolver::IsScratchLocation(Location loc) {
return false;
}
-int ParallelMoveResolver::AllocateScratchRegister(int blocked,
- int register_count,
- int if_scratch,
- bool* spilled) {
+int ParallelMoveResolverWithSwap::AllocateScratchRegister(int blocked,
+ int register_count,
+ int if_scratch,
+ bool* spilled) {
DCHECK_NE(blocked, if_scratch);
int scratch = -1;
for (int reg = 0; reg < register_count; ++reg) {
@@ -269,8 +267,8 @@ int ParallelMoveResolver::AllocateScratchRegister(int blocked,
}
-ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope(
- ParallelMoveResolver* resolver, int blocked, int if_scratch, int number_of_registers)
+ParallelMoveResolverWithSwap::ScratchRegisterScope::ScratchRegisterScope(
+ ParallelMoveResolverWithSwap* resolver, int blocked, int if_scratch, int number_of_registers)
: resolver_(resolver),
reg_(kNoRegister),
spilled_(false) {
@@ -282,10 +280,271 @@ ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope(
}
-ParallelMoveResolver::ScratchRegisterScope::~ScratchRegisterScope() {
+ParallelMoveResolverWithSwap::ScratchRegisterScope::~ScratchRegisterScope() {
if (spilled_) {
resolver_->RestoreScratch(reg_);
}
}
+void ParallelMoveResolverNoSwap::EmitNativeCode(HParallelMove* parallel_move) {
+ DCHECK_EQ(GetNumberOfPendingMoves(), 0u);
+ DCHECK(moves_.IsEmpty());
+ DCHECK(scratches_.IsEmpty());
+
+ // Backend dependent initialization.
+ PrepareForEmitNativeCode();
+
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (size_t i = 0; i < moves_.Size(); ++i) {
+ const MoveOperands& move = *moves_.Get(i);
+ // Skip constants to perform them last. They don't block other moves and
+ // skipping such moves with register destinations keeps those registers
+ // free for the whole algorithm.
+ if (!move.IsEliminated() && !move.GetSource().IsConstant()) {
+ PerformMove(i);
+ }
+ }
+
+ // Perform the moves with constant sources and register destinations with UpdateMoveSource()
+ // to reduce the number of literal loads. Stack destinations are skipped since we won't benefit
+ // from changing the constant sources to stack locations.
+ for (size_t i = 0; i < moves_.Size(); ++i) {
+ MoveOperands* move = moves_.Get(i);
+ Location destination = move->GetDestination();
+ if (!move->IsEliminated() && !destination.IsStackSlot() && !destination.IsDoubleStackSlot()) {
+ Location source = move->GetSource();
+ EmitMove(i);
+ move->Eliminate();
+ // This may introduce additional instruction dependencies, but reduces the
+ // number of moves and possible literal loads. For example,
+ // Original moves:
+ // 1234.5678 -> D0
+ // 1234.5678 -> D1
+ // Updated moves:
+ // 1234.5678 -> D0
+ // D0 -> D1
+ UpdateMoveSource(source, destination);
+ }
+ }
+
+ // Perform the rest of the moves.
+ for (size_t i = 0; i < moves_.Size(); ++i) {
+ MoveOperands* move = moves_.Get(i);
+ if (!move->IsEliminated()) {
+ EmitMove(i);
+ move->Eliminate();
+ }
+ }
+
+ // All pending moves that we added to resolve cycles should have been performed.
+ DCHECK_EQ(GetNumberOfPendingMoves(), 0u);
+
+ // Backend dependent cleanup.
+ FinishEmitNativeCode();
+
+ moves_.Reset();
+ scratches_.Reset();
+}
+
+Location ParallelMoveResolverNoSwap::GetScratchLocation(Location::Kind kind) {
+ for (size_t i = 0; i < scratches_.Size(); ++i) {
+ Location loc = scratches_.Get(i);
+ if (loc.GetKind() == kind && !IsBlockedByMoves(loc)) {
+ return loc;
+ }
+ }
+ for (size_t i = 0; i < moves_.Size(); ++i) {
+ Location loc = moves_.Get(i)->GetDestination();
+ if (loc.GetKind() == kind && !IsBlockedByMoves(loc)) {
+ return loc;
+ }
+ }
+ return Location::NoLocation();
+}
+
+void ParallelMoveResolverNoSwap::AddScratchLocation(Location loc) {
+ if (kIsDebugBuild) {
+ for (size_t i = 0; i < scratches_.Size(); ++i) {
+ DCHECK(!loc.Equals(scratches_.Get(i)));
+ }
+ }
+ scratches_.Add(loc);
+}
+
+void ParallelMoveResolverNoSwap::RemoveScratchLocation(Location loc) {
+ DCHECK(!IsBlockedByMoves(loc));
+ for (size_t i = 0; i < scratches_.Size(); ++i) {
+ if (loc.Equals(scratches_.Get(i))) {
+ scratches_.DeleteAt(i);
+ break;
+ }
+ }
+}
+
+void ParallelMoveResolverNoSwap::PerformMove(size_t index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We mark
+ // a move as "pending" on entry to PerformMove in order to detect cycles
+ // in the move graph. We use a scratch location to resolve cycles, which
+ // may add further pending moves. After a move has been performed, we
+ // update matching source operands in the move graph to reduce the
+ // remaining dependencies.
+
+ MoveOperands* move = moves_.Get(index);
+ DCHECK(!move->IsPending());
+ DCHECK(!move->IsEliminated());
+ if (move->IsRedundant()) {
+ // Previous operations on the list of moves have caused this particular move
+ // to become a no-op, so we can safely eliminate it. Consider for example
+ // (0 -> 1) (1 -> 0) (1 -> 2). There is a cycle (0 -> 1) (1 -> 0), that we will
+ // resolve as (1 -> scratch) (0 -> 1) (scratch -> 0). If, by chance, '2' is
+ // used as the scratch location, the move (1 -> 2) will occur while resolving
+ // the cycle. When that move is emitted, the code will update moves with a '1'
+ // as their source to use '2' instead (see `UpdateMoveSource()`). In our example
+ // the initial move (1 -> 2) would then become the no-op (2 -> 2) that can be
+ // eliminated here.
+ move->Eliminate();
+ return;
+ }
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack-allocated local. Recursion may allow
+ // multiple moves to be pending.
+ DCHECK(!move->GetSource().IsInvalid());
+ Location destination = move->MarkPending();
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one, so we recursively perform
+ // all such moves.
+ for (size_t i = 0; i < moves_.Size(); ++i) {
+ const MoveOperands& other_move = *moves_.Get(i);
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ move->ClearPending(destination);
+
+ // No one else should write to the move destination while it is pending.
+ DCHECK(!move->IsRedundant());
+
+ Location source = move->GetSource();
+ // The move may be blocked on several pending moves, in case we have a cycle.
+ if (IsBlockedByMoves(destination)) {
+ // For a cycle like (A -> B) (B -> C) (C -> A), we change it to the following
+ // sequence:
+ // (C -> scratch) # Emit right now.
+ // (A -> B) (B -> C) # Unblocked.
+ // (scratch -> A) # Add to pending_moves_, blocked by (A -> B).
+ Location::Kind kind = source.GetKind();
+ DCHECK_NE(kind, Location::kConstant);
+ Location scratch = AllocateScratchLocationFor(kind);
+ // We only care about the move size.
+ Primitive::Type type = move->Is64BitMove() ? Primitive::kPrimLong : Primitive::kPrimInt;
+ // Perform (C -> scratch)
+ move->SetDestination(scratch);
+ EmitMove(index);
+ move->Eliminate();
+ UpdateMoveSource(source, scratch);
+ // Add (scratch -> A).
+ AddPendingMove(scratch, destination, type);
+ } else {
+ // This move is not blocked.
+ EmitMove(index);
+ move->Eliminate();
+ UpdateMoveSource(source, destination);
+ }
+
+ // Moves in the pending list should not block any other moves. But performing
+ // unblocked moves in the pending list can free scratch registers, so we do this
+ // as early as possible.
+ MoveOperands* pending_move;
+ while ((pending_move = GetUnblockedPendingMove(source)) != nullptr) {
+ Location pending_source = pending_move->GetSource();
+ Location pending_destination = pending_move->GetDestination();
+ // We do not depend on the pending move index, so just delete the move
+ // instead of eliminating it to keep the pending list clean.
+ DeletePendingMove(pending_move);
+ move->SetSource(pending_source);
+ move->SetDestination(pending_destination);
+ EmitMove(index);
+ move->Eliminate();
+ UpdateMoveSource(pending_source, pending_destination);
+ // Free any unblocked locations in the scratch location list.
+ for (size_t i = 0; i < scratches_.Size(); ++i) {
+ Location scratch = scratches_.Get(i);
+ // Only a scratch location overlapping with the performed move's source can be unblocked.
+ if (scratch.OverlapsWith(pending_source) && !IsBlockedByMoves(scratch)) {
+ FreeScratchLocation(pending_source);
+ }
+ }
+ }
+}
+
+void ParallelMoveResolverNoSwap::UpdateMoveSource(Location from, Location to) {
+ // This function is used to reduce the dependencies in the graph after
+ // (from -> to) has been performed. Since we ensure there is no move with the same
+ // destination, (to -> X) cannot be blocked while (from -> X) might still be
+ // blocked. Consider for example the moves (0 -> 1) (1 -> 2) (1 -> 3). After
+ // (1 -> 2) has been performed, the moves left are (0 -> 1) and (1 -> 3). There is
+ // a dependency between the two. If we update the source location from 1 to 2, we
+ // will get (0 -> 1) and (2 -> 3). There is no dependency between the two.
+ //
+ // This is not something we must do, but we can use fewer scratch locations with
+ // this trick. For example, we can avoid using additional scratch locations for
+ // moves (0 -> 1), (1 -> 2), (1 -> 0).
+ for (size_t i = 0; i < moves_.Size(); ++i) {
+ MoveOperands* move = moves_.Get(i);
+ if (move->GetSource().Equals(from)) {
+ move->SetSource(to);
+ }
+ }
+}
+
+void ParallelMoveResolverNoSwap::AddPendingMove(Location source,
+ Location destination, Primitive::Type type) {
+ pending_moves_.Add(new (allocator_) MoveOperands(source, destination, type, nullptr));
+}
+
+void ParallelMoveResolverNoSwap::DeletePendingMove(MoveOperands* move) {
+ pending_moves_.Delete(move);
+}
+
+MoveOperands* ParallelMoveResolverNoSwap::GetUnblockedPendingMove(Location loc) {
+ for (size_t i = 0; i < pending_moves_.Size(); ++i) {
+ MoveOperands* move = pending_moves_.Get(i);
+ Location destination = move->GetDestination();
+ // Only a move whose destination overlaps with the input loc can be unblocked.
+ if (destination.OverlapsWith(loc) && !IsBlockedByMoves(destination)) {
+ return move;
+ }
+ }
+ return nullptr;
+}
+
+bool ParallelMoveResolverNoSwap::IsBlockedByMoves(Location loc) {
+ for (size_t i = 0; i < pending_moves_.Size(); ++i) {
+ if (pending_moves_.Get(i)->Blocks(loc)) {
+ return true;
+ }
+ }
+ for (size_t i = 0; i < moves_.Size(); ++i) {
+ if (moves_.Get(i)->Blocks(loc)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// So far it is only used for debugging purposes to make sure all pending moves
+// have been performed.
+size_t ParallelMoveResolverNoSwap::GetNumberOfPendingMoves() {
+ return pending_moves_.Size();
+}
+
} // namespace art
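
The no-swap resolver breaks a cycle by parking one value in a scratch location, emitting the now-unblocked chain, and finishing with the pending scratch-to-destination move. The same dance on plain integers, as a standalone sketch:

    // Standalone sketch of breaking the cycle (A -> B) (B -> C) (C -> A)
    // with one scratch location, mirroring PerformMove() above.
    int main() {
      int a = 1, b = 2, c = 3;
      int scratch = c;  // (C -> scratch): emitted right away, frees C
      c = b;            // (B -> C): unblocked
      b = a;            // (A -> B): unblocked
      a = scratch;      // (scratch -> A): the pending move, performed last
      return (a == 3 && b == 1 && c == 2) ? 0 : 1;  // exit code 0 on success
    }
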
diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h
index 95f8ad5b74..e89417df7d 100644
--- a/compiler/optimizing/parallel_move_resolver.h
+++ b/compiler/optimizing/parallel_move_resolver.h
@@ -19,30 +19,47 @@
#include "base/value_object.h"
#include "utils/growable_array.h"
+#include "locations.h"
namespace art {
class HParallelMove;
-class Location;
class MoveOperands;
-/**
- * Helper class to resolve a set of parallel moves. Architecture dependent code
- * generator must have their own subclass that implements the `EmitMove` and `EmitSwap`
- * operations.
- */
+// Helper classes to resolve a set of parallel moves. Architecture dependent code generators must
+// have their own subclass that implements the corresponding virtual functions.
class ParallelMoveResolver : public ValueObject {
public:
explicit ParallelMoveResolver(ArenaAllocator* allocator) : moves_(allocator, 32) {}
virtual ~ParallelMoveResolver() {}
// Resolve a set of parallel moves, emitting assembler instructions.
- void EmitNativeCode(HParallelMove* parallel_move);
+ virtual void EmitNativeCode(HParallelMove* parallel_move) = 0;
+
+ protected:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(HParallelMove* parallel_move);
+
+ GrowableArray<MoveOperands*> moves_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolver);
+};
+
+// This helper class uses swaps to resolve dependencies and may emit swap operations.
+class ParallelMoveResolverWithSwap : public ParallelMoveResolver {
+ public:
+ explicit ParallelMoveResolverWithSwap(ArenaAllocator* allocator)
+ : ParallelMoveResolver(allocator) {}
+ virtual ~ParallelMoveResolverWithSwap() {}
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;
protected:
class ScratchRegisterScope : public ValueObject {
public:
- ScratchRegisterScope(ParallelMoveResolver* resolver,
+ ScratchRegisterScope(ParallelMoveResolverWithSwap* resolver,
int blocked,
int if_scratch,
int number_of_registers);
@@ -52,11 +69,12 @@ class ParallelMoveResolver : public ValueObject {
bool IsSpilled() const { return spilled_; }
private:
- ParallelMoveResolver* resolver_;
+ ParallelMoveResolverWithSwap* resolver_;
int reg_;
bool spilled_;
};
+ // Return true if the location can be used as a scratch location.
bool IsScratchLocation(Location loc);
// Allocate a scratch register for performing a move. The method will try to use
@@ -72,15 +90,9 @@ class ParallelMoveResolver : public ValueObject {
virtual void SpillScratch(int reg) = 0;
virtual void RestoreScratch(int reg) = 0;
- // List of moves not yet resolved.
- GrowableArray<MoveOperands*> moves_;
-
static constexpr int kNoRegister = -1;
private:
- // Build the initial list of moves.
- void BuildInitialMoveList(HParallelMove* parallel_move);
-
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
//
@@ -99,7 +111,83 @@ class ParallelMoveResolver : public ValueObject {
// the right value.
MoveOperands* PerformMove(size_t index);
- DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolver);
+ DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverWithSwap);
+};
+
+// This helper class uses additional scratch registers to resolve dependencies. It supports all
+// kinds of dependency cycles and does not care about the register layout.
+class ParallelMoveResolverNoSwap : public ParallelMoveResolver {
+ public:
+ explicit ParallelMoveResolverNoSwap(ArenaAllocator* allocator)
+ : ParallelMoveResolver(allocator), scratches_(allocator, 32),
+ pending_moves_(allocator, 8), allocator_(allocator) {}
+ virtual ~ParallelMoveResolverNoSwap() {}
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;
+
+ protected:
+ // Called at the beginning of EmitNativeCode(). A subclass may put some architecture dependent
+ // initialization here.
+ virtual void PrepareForEmitNativeCode() = 0;
+
+ // Called at the end of EmitNativeCode(). A subclass may put some architecture dependent cleanup
+ // here. All scratch locations will be removed after this call.
+ virtual void FinishEmitNativeCode() = 0;
+
+ // Allocate a scratch location to perform a move of the given location kind. A subclass should
+ // implement this to return the best-fit location. If there is no suitable physical register, it can
+ // also return a stack slot.
+ virtual Location AllocateScratchLocationFor(Location::Kind kind) = 0;
+
+ // Called after a move which takes a scratch location as source. A subclass can defer the cleanup
+ // to FinishEmitNativeCode().
+ virtual void FreeScratchLocation(Location loc) = 0;
+
+ // Emit a move.
+ virtual void EmitMove(size_t index) = 0;
+
+ // Return a scratch location, taken from the scratch list or from the move
+ // destinations, which exactly matches the kind.
+ // Return Location::NoLocation() if no matching scratch location can be found.
+ Location GetScratchLocation(Location::Kind kind);
+
+ // Add a location to the scratch list which can be returned from GetScratchLocation() to resolve
+ // dependency cycles.
+ void AddScratchLocation(Location loc);
+
+ // Remove a location from the scratch list.
+ void RemoveScratchLocation(Location loc);
+
+ // List of scratch locations.
+ GrowableArray<Location> scratches_;
+
+ private:
+ // Perform the move at the given index in `moves_` (possibly requiring other moves to satisfy
+ // dependencies).
+ void PerformMove(size_t index);
+
+ void UpdateMoveSource(Location from, Location to);
+
+ void AddPendingMove(Location source, Location destination, Primitive::Type type);
+
+ void DeletePendingMove(MoveOperands* move);
+
+ // Find a move that may be unblocked after (loc -> XXX) is performed.
+ MoveOperands* GetUnblockedPendingMove(Location loc);
+
+ // Return true if the location is blocked by outstanding moves.
+ bool IsBlockedByMoves(Location loc);
+
+ // Return the number of pending moves.
+ size_t GetNumberOfPendingMoves();
+
+ // Additional pending moves which might be added to resolve dependency cycles.
+ GrowableArray<MoveOperands*> pending_moves_;
+
+ // Used to allocate pending MoveOperands.
+ ArenaAllocator* const allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverNoSwap);
};
} // namespace art
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 95cca5172b..f8f70105cf 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -19,27 +19,41 @@
#include "parallel_move_resolver.h"
#include "gtest/gtest.h"
+#include "gtest/gtest-typed-test.h"
namespace art {
-class TestParallelMoveResolver : public ParallelMoveResolver {
- public:
- explicit TestParallelMoveResolver(ArenaAllocator* allocator) : ParallelMoveResolver(allocator) {}
-
- void Dump(Location location) {
- if (location.IsConstant()) {
- message_ << "C";
- } else if (location.IsPair()) {
- message_ << location.low() << "," << location.high();
- } else if (location.IsRegister()) {
- message_ << location.reg();
- } else if (location.IsStackSlot()) {
- message_ << location.GetStackIndex() << "(sp)";
- } else {
- message_ << "2x" << location.GetStackIndex() << "(sp)";
- DCHECK(location.IsDoubleStackSlot()) << location;
- }
+constexpr int kScratchRegisterStartIndexForTest = 100;
+
+static void DumpRegisterForTest(std::ostream& os, int reg) {
+ if (reg >= kScratchRegisterStartIndexForTest) {
+ os << "T" << reg - kScratchRegisterStartIndexForTest;
+ } else {
+ os << reg;
}
+}
+
+static void DumpLocationForTest(std::ostream& os, Location location) {
+ if (location.IsConstant()) {
+ os << "C";
+ } else if (location.IsPair()) {
+ DumpRegisterForTest(os, location.low());
+ os << ",";
+ DumpRegisterForTest(os, location.high());
+ } else if (location.IsRegister()) {
+ DumpRegisterForTest(os, location.reg());
+ } else if (location.IsStackSlot()) {
+ os << location.GetStackIndex() << "(sp)";
+ } else {
+ DCHECK(location.IsDoubleStackSlot()) << location;
+ os << "2x" << location.GetStackIndex() << "(sp)";
+ }
+}
+
+class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap {
+ public:
+ explicit TestParallelMoveResolverWithSwap(ArenaAllocator* allocator)
+ : ParallelMoveResolverWithSwap(allocator) {}
void EmitMove(size_t index) OVERRIDE {
MoveOperands* move = moves_.Get(index);
@@ -47,9 +61,9 @@ class TestParallelMoveResolver : public ParallelMoveResolver {
message_ << " ";
}
message_ << "(";
- Dump(move->GetSource());
+ DumpLocationForTest(message_, move->GetSource());
message_ << " -> ";
- Dump(move->GetDestination());
+ DumpLocationForTest(message_, move->GetDestination());
message_ << ")";
}
@@ -59,9 +73,9 @@ class TestParallelMoveResolver : public ParallelMoveResolver {
message_ << " ";
}
message_ << "(";
- Dump(move->GetSource());
+ DumpLocationForTest(message_, move->GetSource());
message_ << " <-> ";
- Dump(move->GetDestination());
+ DumpLocationForTest(message_, move->GetDestination());
message_ << ")";
}
@@ -76,7 +90,64 @@ class TestParallelMoveResolver : public ParallelMoveResolver {
std::ostringstream message_;
- DISALLOW_COPY_AND_ASSIGN(TestParallelMoveResolver);
+ DISALLOW_COPY_AND_ASSIGN(TestParallelMoveResolverWithSwap);
+};
+
+class TestParallelMoveResolverNoSwap : public ParallelMoveResolverNoSwap {
+ public:
+ explicit TestParallelMoveResolverNoSwap(ArenaAllocator* allocator)
+ : ParallelMoveResolverNoSwap(allocator), scratch_index_(kScratchRegisterStartIndexForTest) {}
+
+ void PrepareForEmitNativeCode() OVERRIDE {
+ scratch_index_ = kScratchRegisterStartIndexForTest;
+ }
+
+ void FinishEmitNativeCode() OVERRIDE {}
+
+ Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE {
+ if (kind == Location::kStackSlot || kind == Location::kFpuRegister ||
+ kind == Location::kRegister) {
+ kind = Location::kRegister;
+ } else {
+ // Allocate a register pair for a double stack slot, which simulates a 32-bit backend's behavior.
+ kind = Location::kRegisterPair;
+ }
+ Location scratch = GetScratchLocation(kind);
+ if (scratch.Equals(Location::NoLocation())) {
+ AddScratchLocation(Location::RegisterLocation(scratch_index_));
+ AddScratchLocation(Location::RegisterLocation(scratch_index_ + 1));
+ AddScratchLocation(Location::RegisterPairLocation(scratch_index_, scratch_index_ + 1));
+ scratch = (kind == Location::kRegister) ? Location::RegisterLocation(scratch_index_)
+ : Location::RegisterPairLocation(scratch_index_, scratch_index_ + 1);
+ scratch_index_ += 2;
+ }
+ return scratch;
+ }
+
+ void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) OVERRIDE {}
+
+ void EmitMove(size_t index) OVERRIDE {
+ MoveOperands* move = moves_.Get(index);
+ if (!message_.str().empty()) {
+ message_ << " ";
+ }
+ message_ << "(";
+ DumpLocationForTest(message_, move->GetSource());
+ message_ << " -> ";
+ DumpLocationForTest(message_, move->GetDestination());
+ message_ << ")";
+ }
+
+ std::string GetMessage() const {
+ return message_.str();
+ }
+
+ private:
+ std::ostringstream message_;
+
+ int scratch_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestParallelMoveResolverNoSwap);
};
static HParallelMove* BuildParallelMove(ArenaAllocator* allocator,
@@ -93,55 +164,102 @@ static HParallelMove* BuildParallelMove(ArenaAllocator* allocator,
return moves;
}
-TEST(ParallelMoveTest, Dependency) {
+template <typename T>
+class ParallelMoveTest : public ::testing::Test {
+ public:
+ static const bool has_swap;
+};
+
+template<> const bool ParallelMoveTest<TestParallelMoveResolverWithSwap>::has_swap = true;
+template<> const bool ParallelMoveTest<TestParallelMoveResolverNoSwap>::has_swap = false;
+
+typedef ::testing::Types<TestParallelMoveResolverWithSwap, TestParallelMoveResolverNoSwap>
+ ParallelMoveResolverTestTypes;
+
+TYPED_TEST_CASE(ParallelMoveTest, ParallelMoveResolverTestTypes);
+
+TYPED_TEST(ParallelMoveTest, Dependency) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
static constexpr size_t moves[][2] = {{0, 1}, {1, 2}};
resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
- ASSERT_STREQ("(1 -> 2) (0 -> 1)", resolver.GetMessage().c_str());
+ // Both resolvers emit the same sequence for a simple dependency chain.
+ ASSERT_STREQ("(1 -> 2) (0 -> 1)", resolver.GetMessage().c_str());
}
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
static constexpr size_t moves[][2] = {{0, 1}, {1, 2}, {2, 3}, {1, 4}};
resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
- ASSERT_STREQ("(2 -> 3) (1 -> 2) (1 -> 4) (0 -> 1)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(2 -> 3) (1 -> 2) (1 -> 4) (0 -> 1)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(2 -> 3) (1 -> 2) (0 -> 1) (2 -> 4)", resolver.GetMessage().c_str());
+ }
}
}
-TEST(ParallelMoveTest, Swap) {
+TYPED_TEST(ParallelMoveTest, Cycle) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
static constexpr size_t moves[][2] = {{0, 1}, {1, 0}};
resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
- ASSERT_STREQ("(1 <-> 0)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(1 <-> 0)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(1 -> T0) (0 -> 1) (T0 -> 0)", resolver.GetMessage().c_str());
+ }
}
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
static constexpr size_t moves[][2] = {{0, 1}, {1, 2}, {1, 0}};
resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
- ASSERT_STREQ("(1 -> 2) (1 <-> 0)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(1 -> 2) (1 <-> 0)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(1 -> 2) (0 -> 1) (2 -> 0)", resolver.GetMessage().c_str());
+ }
+ }
+
+ {
+ TypeParam resolver(&allocator);
+ static constexpr size_t moves[][2] = {{0, 1}, {1, 0}, {0, 2}};
+ resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(0 -> 2) (1 <-> 0)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(0 -> 2) (1 -> 0) (2 -> 1)", resolver.GetMessage().c_str());
+ }
}
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
static constexpr size_t moves[][2] = {{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 0}};
resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
- ASSERT_STREQ("(4 <-> 0) (3 <-> 4) (2 <-> 3) (1 <-> 2)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(4 <-> 0) (3 <-> 4) (2 <-> 3) (1 <-> 2)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(4 -> T0) (3 -> 4) (2 -> 3) (1 -> 2) (0 -> 1) (T0 -> 0)",
+ resolver.GetMessage().c_str());
+ }
}
}
-TEST(ParallelMoveTest, ConstantLast) {
+TYPED_TEST(ParallelMoveTest, ConstantLast) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::ConstantLocation(new (&allocator) HIntConstant(0)),
@@ -157,12 +275,12 @@ TEST(ParallelMoveTest, ConstantLast) {
ASSERT_STREQ("(1 -> 2) (C -> 0)", resolver.GetMessage().c_str());
}
-TEST(ParallelMoveTest, Pairs) {
+TYPED_TEST(ParallelMoveTest, Pairs) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::RegisterLocation(2),
@@ -179,7 +297,7 @@ TEST(ParallelMoveTest, Pairs) {
}
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::RegisterPairLocation(0, 1),
@@ -196,7 +314,7 @@ TEST(ParallelMoveTest, Pairs) {
}
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::RegisterPairLocation(0, 1),
@@ -209,10 +327,14 @@ TEST(ParallelMoveTest, Pairs) {
Primitive::kPrimInt,
nullptr);
resolver.EmitNativeCode(moves);
- ASSERT_STREQ("(0,1 <-> 2,3)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(0,1 <-> 2,3)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(2 -> T0) (0,1 -> 2,3) (T0 -> 0)", resolver.GetMessage().c_str());
+ }
}
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::RegisterLocation(2),
@@ -230,10 +352,15 @@ TEST(ParallelMoveTest, Pairs) {
Primitive::kPrimLong,
nullptr);
resolver.EmitNativeCode(moves);
- ASSERT_STREQ("(0,1 <-> 2,3) (7 -> 1) (0 -> 7)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(0,1 <-> 2,3) (7 -> 1) (0 -> 7)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(0,1 -> T0,T1) (7 -> 1) (2 -> 7) (T0,T1 -> 2,3)",
+ resolver.GetMessage().c_str());
+ }
}
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::RegisterLocation(2),
@@ -251,10 +378,15 @@ TEST(ParallelMoveTest, Pairs) {
Primitive::kPrimInt,
nullptr);
resolver.EmitNativeCode(moves);
- ASSERT_STREQ("(0,1 <-> 2,3) (7 -> 1) (0 -> 7)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(0,1 <-> 2,3) (7 -> 1) (0 -> 7)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(0,1 -> T0,T1) (7 -> 1) (2 -> 7) (T0,T1 -> 2,3)",
+ resolver.GetMessage().c_str());
+ }
}
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::RegisterPairLocation(0, 1),
@@ -272,10 +404,14 @@ TEST(ParallelMoveTest, Pairs) {
Primitive::kPrimInt,
nullptr);
resolver.EmitNativeCode(moves);
- ASSERT_STREQ("(0,1 <-> 2,3) (7 -> 1) (0 -> 7)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(0,1 <-> 2,3) (7 -> 1) (0 -> 7)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(7 -> T0) (2 -> 7) (0,1 -> 2,3) (T0 -> 1)", resolver.GetMessage().c_str());
+ }
}
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::RegisterPairLocation(0, 1),
@@ -288,10 +424,14 @@ TEST(ParallelMoveTest, Pairs) {
Primitive::kPrimLong,
nullptr);
resolver.EmitNativeCode(moves);
- ASSERT_STREQ("(2,3 <-> 0,1)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(2,3 <-> 0,1)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(2,3 -> T0,T1) (0,1 -> 2,3) (T0,T1 -> 0,1)", resolver.GetMessage().c_str());
+ }
}
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::RegisterPairLocation(2, 3),
@@ -304,12 +444,85 @@ TEST(ParallelMoveTest, Pairs) {
Primitive::kPrimLong,
nullptr);
resolver.EmitNativeCode(moves);
- ASSERT_STREQ("(0,1 <-> 2,3)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(0,1 <-> 2,3)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(0,1 -> T0,T1) (2,3 -> 0,1) (T0,T1 -> 2,3)", resolver.GetMessage().c_str());
+ }
+ }
+}
+
+TYPED_TEST(ParallelMoveTest, MultiCycles) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+
+ {
+ TypeParam resolver(&allocator);
+ static constexpr size_t moves[][2] = {{0, 1}, {1, 0}, {2, 3}, {3, 2}};
+ resolver.EmitNativeCode(BuildParallelMove(&allocator, moves, arraysize(moves)));
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(1 <-> 0) (3 <-> 2)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(1 -> T0) (0 -> 1) (T0 -> 0) (3 -> T0) (2 -> 3) (T0 -> 2)",
+ resolver.GetMessage().c_str());
+ }
+ }
+ {
+ TypeParam resolver(&allocator);
+ HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
+ moves->AddMove(
+ Location::RegisterPairLocation(0, 1),
+ Location::RegisterPairLocation(2, 3),
+ Primitive::kPrimLong,
+ nullptr);
+ moves->AddMove(
+ Location::RegisterLocation(2),
+ Location::RegisterLocation(0),
+ Primitive::kPrimInt,
+ nullptr);
+ moves->AddMove(
+ Location::RegisterLocation(3),
+ Location::RegisterLocation(1),
+ Primitive::kPrimInt,
+ nullptr);
+ resolver.EmitNativeCode(moves);
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(0,1 <-> 2,3)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(2 -> T0) (3 -> T1) (0,1 -> 2,3) (T0 -> 0) (T1 -> 1)",
+ resolver.GetMessage().c_str());
+ }
+ }
+ {
+ TypeParam resolver(&allocator);
+ HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
+ moves->AddMove(
+ Location::RegisterLocation(2),
+ Location::RegisterLocation(0),
+ Primitive::kPrimInt,
+ nullptr);
+ moves->AddMove(
+ Location::RegisterLocation(3),
+ Location::RegisterLocation(1),
+ Primitive::kPrimInt,
+ nullptr);
+ moves->AddMove(
+ Location::RegisterPairLocation(0, 1),
+ Location::RegisterPairLocation(2, 3),
+ Primitive::kPrimLong,
+ nullptr);
+ resolver.EmitNativeCode(moves);
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(0,1 <-> 2,3)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(3 -> T0) (0,1 -> T2,T3) (T0 -> 1) (2 -> 0) (T2,T3 -> 2,3)",
+ resolver.GetMessage().c_str());
+ }
}
{
// Test involving registers used in single context and pair context.
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::RegisterLocation(10),
@@ -327,17 +540,22 @@ TEST(ParallelMoveTest, Pairs) {
Primitive::kPrimLong,
nullptr);
resolver.EmitNativeCode(moves);
- ASSERT_STREQ("(2x32(sp) <-> 10,11) (4,5 <-> 2x32(sp)) (4 -> 5)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(2x32(sp) <-> 10,11) (4,5 <-> 2x32(sp)) (4 -> 5)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(2x32(sp) -> T0,T1) (4,5 -> 2x32(sp)) (10 -> 5) (T0,T1 -> 10,11)",
+ resolver.GetMessage().c_str());
+ }
}
}
// Test that we do 64-bit moves before 32-bit moves.
-TEST(ParallelMoveTest, CyclesWith64BitsMoves) {
+TYPED_TEST(ParallelMoveTest, CyclesWith64BitsMoves) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::RegisterLocation(0),
@@ -355,11 +573,16 @@ TEST(ParallelMoveTest, CyclesWith64BitsMoves) {
Primitive::kPrimInt,
nullptr);
resolver.EmitNativeCode(moves);
- ASSERT_STREQ("(0 <-> 1) (48(sp) <-> 0)", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(0 <-> 1) (48(sp) <-> 0)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(48(sp) -> T0) (1 -> 48(sp)) (0 -> 1) (T0 -> 0)",
+ resolver.GetMessage().c_str());
+ }
}
{
- TestParallelMoveResolver resolver(&allocator);
+ TypeParam resolver(&allocator);
HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
moves->AddMove(
Location::RegisterPairLocation(0, 1),
@@ -377,7 +600,12 @@ TEST(ParallelMoveTest, CyclesWith64BitsMoves) {
Primitive::kPrimLong,
nullptr);
resolver.EmitNativeCode(moves);
- ASSERT_STREQ("(2x32(sp) <-> 0,1) (2,3 <-> 2x32(sp))", resolver.GetMessage().c_str());
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(2x32(sp) <-> 0,1) (2,3 <-> 2x32(sp))", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(2x32(sp) -> T0,T1) (2,3 -> 2x32(sp)) (0,1 -> 2,3) (T0,T1 -> 0,1)",
+ resolver.GetMessage().c_str());
+ }
}
}
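
The typed-test conversion runs every test body once per resolver type, selecting per-type expectations through a static fixture constant, just as TestFixture::has_swap does above. A standalone sketch of the mechanism, with stand-in fixture types:

    #include "gtest/gtest.h"

    // Stand-in sketch of the typed-test pattern used above: the fixture carries
    // a per-type constant, and each TYPED_TEST instantiation branches on it.
    struct Fast {};
    struct Slow {};

    template <typename T>
    class SpeedTest : public ::testing::Test {
     public:
      static const bool is_fast;
    };

    template <> const bool SpeedTest<Fast>::is_fast = true;
    template <> const bool SpeedTest<Slow>::is_fast = false;

    typedef ::testing::Types<Fast, Slow> SpeedTypes;
    TYPED_TEST_CASE(SpeedTest, SpeedTypes);  // one instantiation per type

    TYPED_TEST(SpeedTest, PicksExpectationPerType) {
      if (TestFixture::is_fast) {
        EXPECT_TRUE(TestFixture::is_fast);
      } else {
        EXPECT_FALSE(TestFixture::is_fast);
      }
    }
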
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 479b87fea0..12b1c2b9bd 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -58,36 +58,40 @@ void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
}
void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
- HInstruction* lastInstruction = block->GetLastInstruction();
- if (!lastInstruction->IsIf()) {
+ HIf* ifInstruction = block->GetLastInstruction()->AsIf();
+ if (ifInstruction == nullptr) {
return;
}
- HInstruction* ifInput = lastInstruction->InputAt(0);
+ HInstruction* ifInput = ifInstruction->InputAt(0);
if (!ifInput->IsNotEqual() && !ifInput->IsEqual()) {
return;
}
HInstruction* input0 = ifInput->InputAt(0);
HInstruction* input1 = ifInput->InputAt(1);
- HInstruction* obj;
+ HInstruction* obj = nullptr;
- if ((input0->GetType() == Primitive::kPrimNot) && input1->ActAsNullConstant()) {
+ if (input1->IsNullConstant()) {
obj = input0;
- } else if ((input1->GetType() == Primitive::kPrimNot) && input0->ActAsNullConstant()) {
+ } else if (input0->IsNullConstant()) {
obj = input1;
} else {
return;
}
- HBoundType* bound_type =
- new (graph_->GetArena()) HBoundType(obj, ReferenceTypeInfo::CreateTop(false));
-
- block->InsertInstructionBefore(bound_type, lastInstruction);
+ // We only need to bound the type if we have uses in the relevant block.
+ // So start with null and create the HBoundType lazily, only if it's needed.
+ HBoundType* bound_type = nullptr;
HBasicBlock* notNullBlock = ifInput->IsNotEqual()
- ? lastInstruction->AsIf()->IfTrueSuccessor()
- : lastInstruction->AsIf()->IfFalseSuccessor();
+ ? ifInstruction->IfTrueSuccessor()
+ : ifInstruction->IfFalseSuccessor();
+
for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) {
HInstruction* user = it.Current()->GetUser();
if (notNullBlock->Dominates(user->GetBlock())) {
+ if (bound_type == nullptr) {
+ bound_type = new (graph_->GetArena()) HBoundType(obj, ReferenceTypeInfo::CreateTop(false));
+ notNullBlock->InsertInstructionBefore(bound_type, notNullBlock->GetFirstInstruction());
+ }
user->ReplaceInput(bound_type, it.Current()->GetIndex());
}
}
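
Creating the HBoundType lazily means nothing is allocated or inserted unless some dominated use actually needs it; the allocation happens at most once, on the first such use. The shape of that pattern, as a standalone sketch over stand-in data:

    #include <cstdio>
    #include <vector>

    // Stand-in sketch of the lazy-creation pattern above: materialize the
    // replacement only when the first qualifying use is found.
    int main() {
      std::vector<bool> use_is_dominated = {false, true, false, true};
      static const int replacement = 42;   // plays the role of the HBoundType
      const int* bound = nullptr;
      int rewrites = 0;
      for (bool dominated : use_is_dominated) {
        if (!dominated) continue;
        if (bound == nullptr) {
          bound = &replacement;  // "created and inserted" exactly once
        }
        ++rewrites;              // analogue of user->ReplaceInput(bound, ...)
      }
      std::printf("rewrites=%d created=%d\n", rewrites, bound != nullptr);
      return 0;
    }
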
@@ -98,49 +102,58 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
// If that's the case insert an HBoundType instruction to bound the type of `x`
// to `ClassX` in the scope of the dominated blocks.
void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
- HInstruction* lastInstruction = block->GetLastInstruction();
- if (!lastInstruction->IsIf()) {
- return;
- }
- HInstruction* ifInput = lastInstruction->InputAt(0);
- // TODO: Handle more patterns here: HIf(bool) HIf(HNotEqual).
- if (!ifInput->IsEqual()) {
+ HIf* ifInstruction = block->GetLastInstruction()->AsIf();
+ if (ifInstruction == nullptr) {
return;
}
- HInstruction* instanceOf = ifInput->InputAt(0);
- HInstruction* comp_value = ifInput->InputAt(1);
- if (!instanceOf->IsInstanceOf() || !comp_value->IsIntConstant()) {
+ HInstruction* ifInput = ifInstruction->InputAt(0);
+ HInstruction* instanceOf = nullptr;
+ HBasicBlock* instanceOfTrueBlock = nullptr;
+
+ // The instruction simplifier has transformed:
+ // - `if (a instanceof A)` into an HIf with an HInstanceOf input
+ // - `if (!(a instanceof A))` into an HIf with an HBooleanNot input (which in turn
+ // has an HInstanceOf input)
+ // So we should not see the usual HEqual here.
+ if (ifInput->IsInstanceOf()) {
+ instanceOf = ifInput;
+ instanceOfTrueBlock = ifInstruction->IfTrueSuccessor();
+ } else if (ifInput->IsBooleanNot() && ifInput->InputAt(0)->IsInstanceOf()) {
+ instanceOf = ifInput->InputAt(0);
+ instanceOfTrueBlock = ifInstruction->IfFalseSuccessor();
+ } else {
return;
}
- HInstruction* obj = instanceOf->InputAt(0);
- HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass();
-
- ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
- ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
- HBoundType* bound_type = new (graph_->GetArena()) HBoundType(obj, class_rti);
-
- // Narrow the type as much as possible.
- {
- ScopedObjectAccess soa(Thread::Current());
- if (!load_class->IsResolved() || class_rti.IsSupertypeOf(obj_rti)) {
- bound_type->SetReferenceTypeInfo(obj_rti);
- } else {
- bound_type->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
- }
- }
-
- block->InsertInstructionBefore(bound_type, lastInstruction);
- // Pick the right successor based on the value we compare against.
- HIntConstant* comp_value_int = comp_value->AsIntConstant();
- HBasicBlock* instanceOfTrueBlock = comp_value_int->GetValue() == 0
- ? lastInstruction->AsIf()->IfFalseSuccessor()
- : lastInstruction->AsIf()->IfTrueSuccessor();
+ // We only need to bound the type if we have uses in the relevant block.
+ // So start with null and create the HBoundType lazily, only if it's needed.
+ HBoundType* bound_type = nullptr;
+ HInstruction* obj = instanceOf->InputAt(0);
for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) {
HInstruction* user = it.Current()->GetUser();
if (instanceOfTrueBlock->Dominates(user->GetBlock())) {
+ if (bound_type == nullptr) {
+ HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass();
+
+ ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ bound_type = new (graph_->GetArena()) HBoundType(obj, class_rti);
+
+ // Narrow the type as much as possible.
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (!load_class->IsResolved() || class_rti.IsSupertypeOf(obj_rti)) {
+ bound_type->SetReferenceTypeInfo(obj_rti);
+ } else {
+ bound_type->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
+ }
+ }
+
+ instanceOfTrueBlock->InsertInstructionBefore(
+ bound_type, instanceOfTrueBlock->GetFirstInstruction());
+ }
user->ReplaceInput(bound_type, it.Current()->GetIndex());
}
}
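
Both hunks above share the same structure: the replacement instruction is created lazily, on the first use that turns out to be dominated, so methods without such uses allocate nothing in the arena. A toy C++ sketch of that pattern (standard library only; the names are made up, not ART types):

    #include <cstdio>
    #include <vector>

    struct Use { int index; bool dominated; };

    int main() {
      std::vector<Use> uses = {{0, false}, {1, true}, {2, true}};
      const char* bound_type = nullptr;  // created lazily, like the HBoundType above
      for (const Use& use : uses) {
        if (!use.dominated) continue;
        if (bound_type == nullptr) {
          bound_type = "bound";  // allocate once, on the first dominated use
          std::printf("created replacement\n");
        }
        std::printf("replacing input %d\n", use.index);
      }
      return 0;
    }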
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 6350b35ca1..0fdf051957 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -378,7 +378,7 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
// Split just before first register use.
size_t first_register_use = current->FirstRegisterUse();
if (first_register_use != kNoLifetime) {
- LiveInterval* split = Split(current, first_register_use - 1);
+ LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
// Don't add directly to `unhandled`, it needs to be sorted and the start
// of this new interval might be after intervals already in the list.
AddSorted(&unhandled, split);
@@ -903,6 +903,10 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
return false;
}
+ // We use the first use to compare with other intervals. If this interval
+ // is used after all active intervals, we will spill this interval.
+ size_t first_use = current->FirstUseAfter(current->GetStart());
+
// First set all registers as not being used.
size_t* next_use = registers_array_;
for (size_t i = 0; i < number_of_registers_; ++i) {
@@ -917,7 +921,7 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
if (active->IsFixed()) {
next_use[active->GetRegister()] = current->GetStart();
} else {
- size_t use = active->FirstRegisterUseAfter(current->GetStart());
+ size_t use = active->FirstUseAfter(current->GetStart());
if (use != kNoLifetime) {
next_use[active->GetRegister()] = use;
}
@@ -945,7 +949,7 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
next_use[inactive->GetRegister()] =
std::min(next_intersection, next_use[inactive->GetRegister()]);
} else {
- size_t use = inactive->FirstRegisterUseAfter(current->GetStart());
+ size_t use = inactive->FirstUseAfter(current->GetStart());
if (use != kNoLifetime) {
next_use[inactive->GetRegister()] = std::min(use, next_use[inactive->GetRegister()]);
}
@@ -959,16 +963,16 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
DCHECK(current->IsHighInterval());
reg = current->GetRegister();
// When allocating the low part, we made sure the high register was available.
- DCHECK_LT(first_register_use, next_use[reg]);
+ DCHECK_LT(first_use, next_use[reg]);
} else if (current->IsLowInterval()) {
reg = FindAvailableRegisterPair(next_use, first_register_use);
// We should spill if both registers are not available.
- should_spill = (first_register_use >= next_use[reg])
- || (first_register_use >= next_use[GetHighForLowRegister(reg)]);
+ should_spill = (first_use >= next_use[reg])
+ || (first_use >= next_use[GetHighForLowRegister(reg)]);
} else {
DCHECK(!current->IsHighInterval());
reg = FindAvailableRegister(next_use);
- should_spill = (first_register_use >= next_use[reg]);
+ should_spill = (first_use >= next_use[reg]);
}
DCHECK_NE(reg, kNoRegister);
@@ -993,15 +997,17 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
// If the first use of that instruction is after the last use of the found
// register, we split this interval just before its first register use.
AllocateSpillSlotFor(current);
- LiveInterval* split = Split(current, first_register_use - 1);
+ LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
if (current == split) {
DumpInterval(std::cerr, current);
DumpAllIntervals(std::cerr);
// This situation has the potential to infinite loop, so we make it a non-debug CHECK.
+ HInstruction* at = liveness_.GetInstructionFromPosition(first_register_use / 2);
CHECK(false) << "There is not enough registers available for "
<< split->GetParent()->GetDefinedBy()->DebugName() << " "
<< split->GetParent()->GetDefinedBy()->GetId()
- << " at " << first_register_use - 1;
+ << " at " << first_register_use - 1 << " "
+ << (at == nullptr ? "" : at->DebugName());
}
AddSorted(unhandled_, split);
}
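
The hunks above switch the blocked-register heuristic from FirstRegisterUseAfter to FirstUseAfter: `current`'s first use is compared against the next use recorded for each register, and `current` is spilled when its first use comes no earlier than the next use of the best candidate register. A toy sketch of that decision, with made-up positions:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      size_t first_use = 30;                        // current->FirstUseAfter(start)
      std::vector<size_t> next_use = {12, 50, 27};  // next-use position per register
      // Pick the register whose next use is furthest away, as FindAvailableRegister does.
      size_t reg = std::max_element(next_use.begin(), next_use.end()) - next_use.begin();
      bool should_spill = first_use >= next_use[reg];
      std::printf("reg=%zu should_spill=%d\n", reg, should_spill);  // reg=1 should_spill=0
      return 0;
    }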
@@ -1094,6 +1100,31 @@ void RegisterAllocator::AddSorted(GrowableArray<LiveInterval*>* array, LiveInter
}
}
+LiveInterval* RegisterAllocator::SplitBetween(LiveInterval* interval, size_t from, size_t to) {
+ HBasicBlock* block_from = liveness_.GetBlockFromPosition(from);
+ HBasicBlock* block_to = liveness_.GetBlockFromPosition(to);
+ DCHECK(block_from != nullptr);
+ DCHECK(block_to != nullptr);
+
+ // Both locations are in the same block. We split at the given location.
+ if (block_from == block_to) {
+ return Split(interval, to);
+ }
+
+ // If `to` is in a loop, find the outermost loop header which does not contain `from`.
+ for (HLoopInformationOutwardIterator it(*block_to); !it.Done(); it.Advance()) {
+ HBasicBlock* header = it.Current()->GetHeader();
+ if (block_from->GetLifetimeStart() >= header->GetLifetimeStart()) {
+ break;
+ }
+ block_to = header;
+ }
+
+ // Split at the start of the found block, to piggyback on existing moves
+ // due to resolution of non-linear control flow (see `ConnectSplitSiblings`).
+ return Split(interval, block_to->GetLifetimeStart());
+}
+
LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position) {
DCHECK_GE(position, interval->GetStart());
DCHECK(!interval->IsDeadAt(position));
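
The new SplitBetween walks the loop headers enclosing `to` outward and hoists the split above every loop that does not also contain `from`, so the split position lands on a block boundary where resolution moves already exist. A toy sketch of the walk, with assumed lifetime-start numbers:

    #include <cstdio>
    #include <vector>

    int main() {
      size_t from_start = 10;
      // Lifetime starts of the loop headers enclosing `to`, innermost first (assumed).
      std::vector<size_t> header_starts = {40, 24, 4};
      size_t split_at = 50;  // default: split at `to`, inside its own block
      for (size_t header_start : header_starts) {
        if (from_start >= header_start) break;  // this loop also contains `from`
        split_at = header_start;                // hoist the split above the loop
      }
      std::printf("split at %zu\n", split_at);  // prints 24
      return 0;
    }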
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 717be75533..dc9c708eea 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -86,8 +86,12 @@ class RegisterAllocator {
// Add `interval` in the given sorted list.
static void AddSorted(GrowableArray<LiveInterval*>* array, LiveInterval* interval);
- // Split `interval` at the position `at`. The new interval starts at `at`.
- LiveInterval* Split(LiveInterval* interval, size_t at);
+ // Split `interval` at the position `position`. The new interval starts at `position`.
+ LiveInterval* Split(LiveInterval* interval, size_t position);
+
+ // Split `interval` at a position between `from` and `to`. The method will try
+ // to find an optimal split position.
+ LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
// Returns whether `reg` is blocked by the code generator.
bool IsBlocked(int reg) const;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 182cd0e833..8c6d904a4c 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -854,6 +854,10 @@ TEST(RegisterAllocatorTest, SpillInactive) {
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
SsaLivenessAnalysis liveness(graph, &codegen);
+ // Populate the instructions in the liveness object, to please the register allocator.
+ for (size_t i = 0; i < 32; ++i) {
+ liveness.instructions_from_lifetime_position_.Add(user);
+ }
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.unhandled_core_intervals_.Add(fourth);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 8eb98a186b..97254edb5e 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -131,6 +131,9 @@ class UsePosition : public ArenaObject<kArenaAllocMisc> {
void Dump(std::ostream& stream) const {
stream << position_;
+ if (is_environment_) {
+ stream << " (env)";
+ }
}
UsePosition* Dup(ArenaAllocator* allocator) const {
@@ -330,7 +333,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
}
if (after_loop == nullptr) {
// Uses are only in the loop.
- first_range_ = last_range_ = range_search_start_ = new (allocator_) LiveRange(start, end, nullptr);
+ first_range_ = last_range_ = range_search_start_ =
+ new (allocator_) LiveRange(start, end, nullptr);
} else if (after_loop->GetStart() <= end) {
first_range_ = range_search_start_ = after_loop;
// There are uses after the loop.
@@ -366,6 +370,10 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
LiveInterval* GetParent() const { return parent_; }
+ // Returns whether this interval is the parent interval, that is, the interval
+ // that starts where the HInstruction is defined.
+ bool IsParent() const { return parent_ == this; }
+
LiveRange* GetFirstRange() const { return first_range_; }
LiveRange* GetLastRange() const { return last_range_; }
@@ -442,7 +450,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
if (is_temp_) {
return position == GetStart() ? position : kNoLifetime;
}
- if (position == GetStart() && defined_by_ != nullptr) {
+ if (position == GetStart() && IsParent()) {
LocationSummary* locations = defined_by_->GetLocations();
Location location = locations->Out();
// This interval is the first interval of the instruction. If the output
@@ -491,12 +499,19 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
return position == GetStart() ? position : kNoLifetime;
}
+ if (position == GetStart() && IsParent()) {
+ if (defined_by_->GetLocations()->Out().IsValid()) {
+ return position;
+ }
+ }
+
UsePosition* use = first_use_;
size_t end = GetEnd();
while (use != nullptr && use->GetPosition() <= end) {
if (!use->GetIsEnvironment()) {
+ Location location = use->GetUser()->GetLocations()->InAt(use->GetInputIndex());
size_t use_position = use->GetPosition();
- if (use_position > position) {
+ if (use_position > position && location.IsValid()) {
return use_position;
}
}
@@ -582,7 +597,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
previous->next_ = nullptr;
new_interval->first_range_ = current;
if (range_search_start_ != nullptr && range_search_start_->GetEnd() >= current->GetEnd()) {
- // Search start point is inside `new_interval`. Change it to nullptr
+ // Search start point is inside `new_interval`. Change it to null
// (i.e. the end of the interval) in the original interval.
range_search_start_ = nullptr;
}
@@ -725,7 +740,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
}
void AddHighInterval(bool is_temp = false) {
- DCHECK_EQ(GetParent(), this);
+ DCHECK(IsParent());
DCHECK(!HasHighInterval());
DCHECK(!HasLowInterval());
high_or_low_interval_ = new (allocator_) LiveInterval(
@@ -849,7 +864,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
defined_by_(defined_by) {}
// Searches for a LiveRange that either covers the given position or is the
- // first next LiveRange. Returns nullptr if no such LiveRange exists. Ranges
+ // first next LiveRange. Returns null if no such LiveRange exists. Ranges
// known to end before `position` can be skipped with `search_start`.
LiveRange* FindRangeAtOrAfter(size_t position, LiveRange* search_start) const {
if (kIsDebugBuild) {
@@ -983,6 +998,15 @@ class SsaLivenessAnalysis : public ValueObject {
return instructions_from_lifetime_position_.Get(index);
}
+ HBasicBlock* GetBlockFromPosition(size_t index) const {
+ HInstruction* instruction = GetInstructionFromPosition(index / 2);
+ if (instruction == nullptr) {
+ // If we are at a block boundary, get the following block.
+ instruction = GetInstructionFromPosition((index / 2) + 1);
+ }
+ return instruction->GetBlock();
+ }
+
HInstruction* GetTempUser(LiveInterval* temp) const {
// A temporary shares the same lifetime start as the instruction that requires it.
DCHECK(temp->IsTemp());
@@ -1053,6 +1077,8 @@ class SsaLivenessAnalysis : public ValueObject {
GrowableArray<HInstruction*> instructions_from_lifetime_position_;
size_t number_of_ssa_values_;
+ ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
+
DISALLOW_COPY_AND_ASSIGN(SsaLivenessAnalysis);
};
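
GetBlockFromPosition above leans on the liveness numbering assigning two lifetime positions per instruction, so the instruction table is indexed by position / 2 and a null slot marks a block boundary. A toy sketch of that lookup (assumed data, no ART types):

    #include <cstdio>
    #include <vector>

    int main() {
      // Instruction table indexed by position / 2; nullptr marks a block boundary.
      std::vector<const char*> instructions = {"entry", nullptr, "add", "ret"};
      size_t position = 2;  // falls on the boundary slot
      const char* instruction = instructions[position / 2];
      if (instruction == nullptr) {
        instruction = instructions[(position / 2) + 1];  // take the following block
      }
      std::printf("resolved to %s\n", instruction);  // prints "add"
      return 0;
    }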
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
new file mode 100644
index 0000000000..8344fc3237
--- /dev/null
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stack_map_stream.h"
+
+namespace art {
+
+void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
+ uint32_t native_pc_offset,
+ uint32_t register_mask,
+ BitVector* sp_mask,
+ uint32_t num_dex_registers,
+ uint8_t inlining_depth) {
+ DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
+ current_entry_.dex_pc = dex_pc;
+ current_entry_.native_pc_offset = native_pc_offset;
+ current_entry_.register_mask = register_mask;
+ current_entry_.sp_mask = sp_mask;
+ current_entry_.num_dex_registers = num_dex_registers;
+ current_entry_.inlining_depth = inlining_depth;
+ current_entry_.dex_register_locations_start_index = dex_register_locations_.Size();
+ current_entry_.inline_infos_start_index = inline_infos_.Size();
+ current_entry_.dex_register_map_hash = 0;
+ current_entry_.same_dex_register_map_as_ = kNoSameDexMapFound;
+ if (num_dex_registers != 0) {
+ current_entry_.live_dex_registers_mask =
+ new (allocator_) ArenaBitVector(allocator_, num_dex_registers, true);
+ } else {
+ current_entry_.live_dex_registers_mask = nullptr;
+ }
+
+ if (sp_mask != nullptr) {
+ stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
+ }
+ if (inlining_depth > 0) {
+ number_of_stack_maps_with_inline_info_++;
+ }
+
+ dex_pc_max_ = std::max(dex_pc_max_, dex_pc);
+ native_pc_offset_max_ = std::max(native_pc_offset_max_, native_pc_offset);
+ register_mask_max_ = std::max(register_mask_max_, register_mask);
+}
+
+void StackMapStream::EndStackMapEntry() {
+ current_entry_.same_dex_register_map_as_ = FindEntryWithTheSameDexMap();
+ stack_maps_.Add(current_entry_);
+ current_entry_ = StackMapEntry();
+}
+
+void StackMapStream::AddDexRegisterEntry(uint16_t dex_register,
+ DexRegisterLocation::Kind kind,
+ int32_t value) {
+ DCHECK_LT(dex_register, current_entry_.num_dex_registers);
+
+ if (kind != DexRegisterLocation::Kind::kNone) {
+ // Ensure we only use non-compressed location kind at this stage.
+ DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
+ << DexRegisterLocation::PrettyDescriptor(kind);
+ DexRegisterLocation location(kind, value);
+
+ // Look for Dex register `location` in the location catalog (using the
+ // companion hash map of locations to indices). Use its index if it
+ // is already in the location catalog. If not, insert it (in the
+ // location catalog and the hash map) and use the newly created index.
+ auto it = location_catalog_entries_indices_.Find(location);
+ if (it != location_catalog_entries_indices_.end()) {
+ // Retrieve the index from the hash map.
+ dex_register_locations_.Add(it->second);
+ } else {
+ // Create a new entry in the location catalog and the hash map.
+ size_t index = location_catalog_entries_.Size();
+ location_catalog_entries_.Add(location);
+ dex_register_locations_.Add(index);
+ location_catalog_entries_indices_.Insert(std::make_pair(location, index));
+ }
+
+ current_entry_.live_dex_registers_mask->SetBit(dex_register);
+ current_entry_.dex_register_map_hash +=
+ (1 << (dex_register % (sizeof(current_entry_.dex_register_map_hash) * kBitsPerByte)));
+ current_entry_.dex_register_map_hash += static_cast<uint32_t>(value);
+ current_entry_.dex_register_map_hash += static_cast<uint32_t>(kind);
+ }
+}
+
+void StackMapStream::AddInlineInfoEntry(uint32_t method_index) {
+ InlineInfoEntry entry;
+ entry.method_index = method_index;
+ inline_infos_.Add(entry);
+}
+
+size_t StackMapStream::PrepareForFillIn() {
+ int stack_mask_number_of_bits = stack_mask_max_ + 1; // Need room for max element too.
+ stack_mask_size_ = RoundUp(stack_mask_number_of_bits, kBitsPerByte) / kBitsPerByte;
+ inline_info_size_ = ComputeInlineInfoSize();
+ dex_register_maps_size_ = ComputeDexRegisterMapsSize();
+ stack_maps_size_ = stack_maps_.Size()
+ * StackMap::ComputeStackMapSize(stack_mask_size_,
+ inline_info_size_,
+ dex_register_maps_size_,
+ dex_pc_max_,
+ native_pc_offset_max_,
+ register_mask_max_);
+ dex_register_location_catalog_size_ = ComputeDexRegisterLocationCatalogSize();
+
+ // Note: use RoundUp to word-size here if you want CodeInfo objects to be word aligned.
+ needed_size_ = CodeInfo::kFixedSize
+ + dex_register_location_catalog_size_
+ + stack_maps_size_
+ + dex_register_maps_size_
+ + inline_info_size_;
+
+ dex_register_location_catalog_start_ = CodeInfo::kFixedSize;
+ stack_maps_start_ = dex_register_location_catalog_start_ + dex_register_location_catalog_size_;
+ dex_register_maps_start_ = stack_maps_start_ + stack_maps_size_;
+ inline_infos_start_ = dex_register_maps_start_ + dex_register_maps_size_;
+
+ return needed_size_;
+}
+
+size_t StackMapStream::ComputeDexRegisterLocationCatalogSize() const {
+ size_t size = DexRegisterLocationCatalog::kFixedSize;
+ for (size_t location_catalog_entry_index = 0;
+ location_catalog_entry_index < location_catalog_entries_.Size();
+ ++location_catalog_entry_index) {
+ DexRegisterLocation dex_register_location =
+ location_catalog_entries_.Get(location_catalog_entry_index);
+ size += DexRegisterLocationCatalog::EntrySize(dex_register_location);
+ }
+ return size;
+}
+
+size_t StackMapStream::ComputeDexRegisterMapSize(const StackMapEntry& entry) const {
+ // Size of the map in bytes.
+ size_t size = DexRegisterMap::kFixedSize;
+ // Add the live bit mask for the Dex register liveness.
+ size += DexRegisterMap::GetLiveBitMaskSize(entry.num_dex_registers);
+ // Compute the size of the set of live Dex register entries.
+ size_t number_of_live_dex_registers = 0;
+ for (size_t dex_register_number = 0;
+ dex_register_number < entry.num_dex_registers;
+ ++dex_register_number) {
+ if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
+ ++number_of_live_dex_registers;
+ }
+ }
+ size_t map_entries_size_in_bits =
+ DexRegisterMap::SingleEntrySizeInBits(location_catalog_entries_.Size())
+ * number_of_live_dex_registers;
+ size_t map_entries_size_in_bytes =
+ RoundUp(map_entries_size_in_bits, kBitsPerByte) / kBitsPerByte;
+ size += map_entries_size_in_bytes;
+ return size;
+}
+
+size_t StackMapStream::ComputeDexRegisterMapsSize() const {
+ size_t size = 0;
+ for (size_t i = 0; i < stack_maps_.Size(); ++i) {
+ StackMapEntry entry = stack_maps_.Get(i);
+ if (entry.same_dex_register_map_as_ == kNoSameDexMapFound) {
+ // Entries with the same dex map will have the same offset.
+ size += ComputeDexRegisterMapSize(entry);
+ }
+ }
+ return size;
+}
+
+size_t StackMapStream::ComputeInlineInfoSize() const {
+ return inline_infos_.Size() * InlineInfo::SingleEntrySize()
+ // For encoding the depth.
+ + (number_of_stack_maps_with_inline_info_ * InlineInfo::kFixedSize);
+}
+
+void StackMapStream::FillIn(MemoryRegion region) {
+ DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
+ DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before FillIn";
+
+ CodeInfo code_info(region);
+ DCHECK_EQ(region.size(), needed_size_);
+ code_info.SetOverallSize(region.size());
+
+ MemoryRegion dex_register_locations_region = region.Subregion(
+ dex_register_maps_start_, dex_register_maps_size_);
+
+ MemoryRegion inline_infos_region = region.Subregion(
+ inline_infos_start_, inline_info_size_);
+
+ code_info.SetEncoding(inline_info_size_,
+ dex_register_maps_size_,
+ dex_pc_max_,
+ native_pc_offset_max_,
+ register_mask_max_);
+ code_info.SetNumberOfStackMaps(stack_maps_.Size());
+ code_info.SetStackMaskSize(stack_mask_size_);
+ DCHECK_EQ(code_info.GetStackMapsSize(), stack_maps_size_);
+
+ // Set the Dex register location catalog.
+ code_info.SetNumberOfDexRegisterLocationCatalogEntries(location_catalog_entries_.Size());
+ MemoryRegion dex_register_location_catalog_region = region.Subregion(
+ dex_register_location_catalog_start_, dex_register_location_catalog_size_);
+ DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
+ // Offset in `dex_register_location_catalog` where to store the next
+ // register location.
+ size_t location_catalog_offset = DexRegisterLocationCatalog::kFixedSize;
+ for (size_t i = 0, e = location_catalog_entries_.Size(); i < e; ++i) {
+ DexRegisterLocation dex_register_location = location_catalog_entries_.Get(i);
+ dex_register_location_catalog.SetRegisterInfo(location_catalog_offset, dex_register_location);
+ location_catalog_offset += DexRegisterLocationCatalog::EntrySize(dex_register_location);
+ }
+ // Ensure we reached the end of the Dex register location catalog.
+ DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());
+
+ uintptr_t next_dex_register_map_offset = 0;
+ uintptr_t next_inline_info_offset = 0;
+ for (size_t i = 0, e = stack_maps_.Size(); i < e; ++i) {
+ StackMap stack_map = code_info.GetStackMapAt(i);
+ StackMapEntry entry = stack_maps_.Get(i);
+
+ stack_map.SetDexPc(code_info, entry.dex_pc);
+ stack_map.SetNativePcOffset(code_info, entry.native_pc_offset);
+ stack_map.SetRegisterMask(code_info, entry.register_mask);
+ if (entry.sp_mask != nullptr) {
+ stack_map.SetStackMask(code_info, *entry.sp_mask);
+ }
+
+ if (entry.num_dex_registers == 0) {
+ // No dex map available.
+ stack_map.SetDexRegisterMapOffset(code_info, StackMap::kNoDexRegisterMap);
+ } else {
+ // Search for an entry with the same dex map.
+ if (entry.same_dex_register_map_as_ != kNoSameDexMapFound) {
+ // If we have a hit, reuse the offset.
+ stack_map.SetDexRegisterMapOffset(code_info,
+ code_info.GetStackMapAt(entry.same_dex_register_map_as_)
+ .GetDexRegisterMapOffset(code_info));
+ } else {
+ // New dex register maps should be added to the stack map.
+ MemoryRegion register_region =
+ dex_register_locations_region.Subregion(
+ next_dex_register_map_offset,
+ ComputeDexRegisterMapSize(entry));
+ next_dex_register_map_offset += register_region.size();
+ DexRegisterMap dex_register_map(register_region);
+ stack_map.SetDexRegisterMapOffset(
+ code_info, register_region.start() - dex_register_locations_region.start());
+
+ // Set the live bit mask.
+ dex_register_map.SetLiveBitMask(entry.num_dex_registers, *entry.live_dex_registers_mask);
+
+ // Set the dex register location mapping data.
+ for (size_t dex_register_number = 0, index_in_dex_register_locations = 0;
+ dex_register_number < entry.num_dex_registers;
+ ++dex_register_number) {
+ if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
+ size_t location_catalog_entry_index =
+ dex_register_locations_.Get(entry.dex_register_locations_start_index
+ + index_in_dex_register_locations);
+ dex_register_map.SetLocationCatalogEntryIndex(
+ index_in_dex_register_locations,
+ location_catalog_entry_index,
+ entry.num_dex_registers,
+ location_catalog_entries_.Size());
+ ++index_in_dex_register_locations;
+ }
+ }
+ }
+ }
+
+ // Set the inlining info.
+ if (entry.inlining_depth != 0) {
+ MemoryRegion inline_region = inline_infos_region.Subregion(
+ next_inline_info_offset,
+ InlineInfo::kFixedSize + entry.inlining_depth * InlineInfo::SingleEntrySize());
+ next_inline_info_offset += inline_region.size();
+ InlineInfo inline_info(inline_region);
+
+ // Currently relative to the dex register map.
+ stack_map.SetInlineDescriptorOffset(
+ code_info, inline_region.start() - dex_register_locations_region.start());
+
+ inline_info.SetDepth(entry.inlining_depth);
+ for (size_t j = 0; j < entry.inlining_depth; ++j) {
+ InlineInfoEntry inline_entry = inline_infos_.Get(j + entry.inline_infos_start_index);
+ inline_info.SetMethodReferenceIndexAtDepth(j, inline_entry.method_index);
+ }
+ } else {
+ if (inline_info_size_ != 0) {
+ stack_map.SetInlineDescriptorOffset(code_info, StackMap::kNoInlineInfo);
+ }
+ }
+ }
+}
+
+size_t StackMapStream::FindEntryWithTheSameDexMap() {
+ size_t current_entry_index = stack_maps_.Size();
+ auto entries_it = dex_map_hash_to_stack_map_indices_.find(current_entry_.dex_register_map_hash);
+ if (entries_it == dex_map_hash_to_stack_map_indices_.end()) {
+ // We don't have a perfect hash function, so we need a list to collect all stack maps
+ // which might have the same dex register map.
+ GrowableArray<uint32_t> stack_map_indices(allocator_, 1);
+ stack_map_indices.Add(current_entry_index);
+ dex_map_hash_to_stack_map_indices_.Put(current_entry_.dex_register_map_hash, stack_map_indices);
+ return kNoSameDexMapFound;
+ }
+
+ // We might have collisions, so we need to check whether or not we really have a match.
+ for (size_t i = 0; i < entries_it->second.Size(); i++) {
+ size_t test_entry_index = entries_it->second.Get(i);
+ if (HaveTheSameDexMaps(stack_maps_.Get(test_entry_index), current_entry_)) {
+ return test_entry_index;
+ }
+ }
+ entries_it->second.Add(current_entry_index);
+ return kNoSameDexMapFound;
+}
+
+bool StackMapStream::HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const {
+ if (a.live_dex_registers_mask == nullptr && b.live_dex_registers_mask == nullptr) {
+ return true;
+ }
+ if (a.live_dex_registers_mask == nullptr || b.live_dex_registers_mask == nullptr) {
+ return false;
+ }
+ if (a.num_dex_registers != b.num_dex_registers) {
+ return false;
+ }
+
+ int index_in_dex_register_locations = 0;
+ for (uint32_t i = 0; i < a.num_dex_registers; i++) {
+ if (a.live_dex_registers_mask->IsBitSet(i) != b.live_dex_registers_mask->IsBitSet(i)) {
+ return false;
+ }
+ if (a.live_dex_registers_mask->IsBitSet(i)) {
+ size_t a_loc = dex_register_locations_.Get(
+ a.dex_register_locations_start_index + index_in_dex_register_locations);
+ size_t b_loc = dex_register_locations_.Get(
+ b.dex_register_locations_start_index + index_in_dex_register_locations);
+ if (a_loc != b_loc) {
+ return false;
+ }
+ ++index_in_dex_register_locations;
+ }
+ }
+ return true;
+}
+
+} // namespace art
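
The calling protocol for the rewritten stream is exercised by the tests further down; as a quick reference, a minimal usage sketch assembled from them (construction boilerplate assumed from the usual ART test setup; this only compiles inside the ART tree):

    ArenaPool pool;
    ArenaAllocator arena(&pool);
    StackMapStream stream(&arena);

    ArenaBitVector sp_mask(&arena, 0, false);
    stream.BeginStackMapEntry(/* dex_pc */ 0, /* native_pc_offset */ 64,
                              /* register_mask */ 0x3, &sp_mask,
                              /* num_dex_registers */ 2, /* inlining_depth */ 0);
    stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInStack, 0);
    stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
    stream.EndStackMapEntry();

    // Sizing is now a separate phase: PrepareForFillIn must precede FillIn.
    size_t size = stream.PrepareForFillIn();
    void* memory = arena.Alloc(size, kArenaAllocMisc);
    MemoryRegion region(memory, size);
    stream.FillIn(region);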
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 9a9e068a9b..0c626be89f 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -70,13 +70,18 @@ class StackMapStream : public ValueObject {
native_pc_offset_max_(0),
register_mask_max_(0),
number_of_stack_maps_with_inline_info_(0),
- dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(), allocator->Adapter()) {}
-
- // Compute bytes needed to encode a mask with the given maximum element.
- static uint32_t StackMaskEncodingSize(int max_element) {
- int number_of_bits = max_element + 1; // Need room for max element too.
- return RoundUp(number_of_bits, kBitsPerByte) / kBitsPerByte;
- }
+ dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(), allocator->Adapter()),
+ current_entry_(),
+ stack_mask_size_(0),
+ inline_info_size_(0),
+ dex_register_maps_size_(0),
+ stack_maps_size_(0),
+ dex_register_location_catalog_size_(0),
+ dex_register_location_catalog_start_(0),
+ stack_maps_start_(0),
+ dex_register_maps_start_(0),
+ inline_infos_start_(0),
+ needed_size_(0) {}
// See runtime/stack_map.h to know what these fields contain.
struct StackMapEntry {
@@ -90,380 +95,42 @@ class StackMapStream : public ValueObject {
size_t inline_infos_start_index;
BitVector* live_dex_registers_mask;
uint32_t dex_register_map_hash;
+ size_t same_dex_register_map_as_;
};
struct InlineInfoEntry {
uint32_t method_index;
};
- void AddStackMapEntry(uint32_t dex_pc,
- uint32_t native_pc_offset,
- uint32_t register_mask,
- BitVector* sp_mask,
- uint32_t num_dex_registers,
- uint8_t inlining_depth) {
- StackMapEntry entry;
- entry.dex_pc = dex_pc;
- entry.native_pc_offset = native_pc_offset;
- entry.register_mask = register_mask;
- entry.sp_mask = sp_mask;
- entry.num_dex_registers = num_dex_registers;
- entry.inlining_depth = inlining_depth;
- entry.dex_register_locations_start_index = dex_register_locations_.Size();
- entry.inline_infos_start_index = inline_infos_.Size();
- entry.dex_register_map_hash = 0;
- if (num_dex_registers != 0) {
- entry.live_dex_registers_mask =
- new (allocator_) ArenaBitVector(allocator_, num_dex_registers, true);
- } else {
- entry.live_dex_registers_mask = nullptr;
- }
- stack_maps_.Add(entry);
-
- if (sp_mask != nullptr) {
- stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
- }
- if (inlining_depth > 0) {
- number_of_stack_maps_with_inline_info_++;
- }
-
- dex_pc_max_ = std::max(dex_pc_max_, dex_pc);
- native_pc_offset_max_ = std::max(native_pc_offset_max_, native_pc_offset);
- register_mask_max_ = std::max(register_mask_max_, register_mask);
- }
-
- void AddInlineInfoEntry(uint32_t method_index) {
- InlineInfoEntry entry;
- entry.method_index = method_index;
- inline_infos_.Add(entry);
- }
-
- size_t ComputeNeededSize() {
- size_t size = CodeInfo::kFixedSize
- + ComputeDexRegisterLocationCatalogSize()
- + ComputeStackMapsSize()
- + ComputeDexRegisterMapsSize()
- + ComputeInlineInfoSize();
- // Note: use RoundUp to word-size here if you want CodeInfo objects to be word aligned.
- return size;
- }
-
- size_t ComputeStackMaskSize() const {
- return StackMaskEncodingSize(stack_mask_max_);
- }
-
- size_t ComputeStackMapsSize() {
- return stack_maps_.Size() * StackMap::ComputeStackMapSize(
- ComputeStackMaskSize(),
- ComputeInlineInfoSize(),
- ComputeDexRegisterMapsSize(),
- dex_pc_max_,
- native_pc_offset_max_,
- register_mask_max_);
- }
-
- // Compute the size of the Dex register location catalog of `entry`.
- size_t ComputeDexRegisterLocationCatalogSize() const {
- size_t size = DexRegisterLocationCatalog::kFixedSize;
- for (size_t location_catalog_entry_index = 0;
- location_catalog_entry_index < location_catalog_entries_.Size();
- ++location_catalog_entry_index) {
- DexRegisterLocation dex_register_location =
- location_catalog_entries_.Get(location_catalog_entry_index);
- size += DexRegisterLocationCatalog::EntrySize(dex_register_location);
- }
- return size;
- }
-
- size_t ComputeDexRegisterMapSize(const StackMapEntry& entry) const {
- // Size of the map in bytes.
- size_t size = DexRegisterMap::kFixedSize;
- // Add the live bit mask for the Dex register liveness.
- size += DexRegisterMap::GetLiveBitMaskSize(entry.num_dex_registers);
- // Compute the size of the set of live Dex register entries.
- size_t number_of_live_dex_registers = 0;
- for (size_t dex_register_number = 0;
- dex_register_number < entry.num_dex_registers;
- ++dex_register_number) {
- if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
- ++number_of_live_dex_registers;
- }
- }
- size_t map_entries_size_in_bits =
- DexRegisterMap::SingleEntrySizeInBits(location_catalog_entries_.Size())
- * number_of_live_dex_registers;
- size_t map_entries_size_in_bytes =
- RoundUp(map_entries_size_in_bits, kBitsPerByte) / kBitsPerByte;
- size += map_entries_size_in_bytes;
- return size;
- }
-
- // Compute the size of all the Dex register maps.
- size_t ComputeDexRegisterMapsSize() {
- size_t size = 0;
- for (size_t i = 0; i < stack_maps_.Size(); ++i) {
- if (FindEntryWithTheSameDexMap(i) == kNoSameDexMapFound) {
- // Entries with the same dex map will have the same offset.
- size += ComputeDexRegisterMapSize(stack_maps_.Get(i));
- }
- }
- return size;
- }
-
- // Compute the size of all the inline information pieces.
- size_t ComputeInlineInfoSize() const {
- return inline_infos_.Size() * InlineInfo::SingleEntrySize()
- // For encoding the depth.
- + (number_of_stack_maps_with_inline_info_ * InlineInfo::kFixedSize);
- }
+ void BeginStackMapEntry(uint32_t dex_pc,
+ uint32_t native_pc_offset,
+ uint32_t register_mask,
+ BitVector* sp_mask,
+ uint32_t num_dex_registers,
+ uint8_t inlining_depth);
+ void EndStackMapEntry();
- size_t ComputeDexRegisterLocationCatalogStart() const {
- return CodeInfo::kFixedSize;
- }
-
- size_t ComputeStackMapsStart() const {
- return ComputeDexRegisterLocationCatalogStart() + ComputeDexRegisterLocationCatalogSize();
- }
-
- size_t ComputeDexRegisterMapsStart() {
- return ComputeStackMapsStart() + ComputeStackMapsSize();
- }
-
- size_t ComputeInlineInfoStart() {
- return ComputeDexRegisterMapsStart() + ComputeDexRegisterMapsSize();
- }
+ void AddDexRegisterEntry(uint16_t dex_register,
+ DexRegisterLocation::Kind kind,
+ int32_t value);
- void FillIn(MemoryRegion region) {
- CodeInfo code_info(region);
- DCHECK_EQ(region.size(), ComputeNeededSize());
- code_info.SetOverallSize(region.size());
+ void AddInlineInfoEntry(uint32_t method_index);
- size_t stack_mask_size = ComputeStackMaskSize();
-
- size_t dex_register_map_size = ComputeDexRegisterMapsSize();
- size_t inline_info_size = ComputeInlineInfoSize();
-
- MemoryRegion dex_register_locations_region = region.Subregion(
- ComputeDexRegisterMapsStart(),
- dex_register_map_size);
-
- MemoryRegion inline_infos_region = region.Subregion(
- ComputeInlineInfoStart(),
- inline_info_size);
-
- code_info.SetEncoding(inline_info_size,
- dex_register_map_size,
- dex_pc_max_,
- native_pc_offset_max_,
- register_mask_max_);
- code_info.SetNumberOfStackMaps(stack_maps_.Size());
- code_info.SetStackMaskSize(stack_mask_size);
- DCHECK_EQ(code_info.GetStackMapsSize(), ComputeStackMapsSize());
-
- // Set the Dex register location catalog.
- code_info.SetNumberOfDexRegisterLocationCatalogEntries(
- location_catalog_entries_.Size());
- MemoryRegion dex_register_location_catalog_region = region.Subregion(
- ComputeDexRegisterLocationCatalogStart(),
- ComputeDexRegisterLocationCatalogSize());
- DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
- // Offset in `dex_register_location_catalog` where to store the next
- // register location.
- size_t location_catalog_offset = DexRegisterLocationCatalog::kFixedSize;
- for (size_t i = 0, e = location_catalog_entries_.Size(); i < e; ++i) {
- DexRegisterLocation dex_register_location = location_catalog_entries_.Get(i);
- dex_register_location_catalog.SetRegisterInfo(location_catalog_offset, dex_register_location);
- location_catalog_offset += DexRegisterLocationCatalog::EntrySize(dex_register_location);
- }
- // Ensure we reached the end of the Dex registers location_catalog.
- DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());
-
- uintptr_t next_dex_register_map_offset = 0;
- uintptr_t next_inline_info_offset = 0;
- for (size_t i = 0, e = stack_maps_.Size(); i < e; ++i) {
- StackMap stack_map = code_info.GetStackMapAt(i);
- StackMapEntry entry = stack_maps_.Get(i);
-
- stack_map.SetDexPc(code_info, entry.dex_pc);
- stack_map.SetNativePcOffset(code_info, entry.native_pc_offset);
- stack_map.SetRegisterMask(code_info, entry.register_mask);
- if (entry.sp_mask != nullptr) {
- stack_map.SetStackMask(code_info, *entry.sp_mask);
- }
-
- if (entry.num_dex_registers == 0) {
- // No dex map available.
- stack_map.SetDexRegisterMapOffset(code_info, StackMap::kNoDexRegisterMap);
- } else {
- // Search for an entry with the same dex map.
- size_t entry_with_same_map = FindEntryWithTheSameDexMap(i);
- if (entry_with_same_map != kNoSameDexMapFound) {
- // If we have a hit reuse the offset.
- stack_map.SetDexRegisterMapOffset(code_info,
- code_info.GetStackMapAt(entry_with_same_map).GetDexRegisterMapOffset(code_info));
- } else {
- // New dex registers maps should be added to the stack map.
- MemoryRegion register_region =
- dex_register_locations_region.Subregion(
- next_dex_register_map_offset,
- ComputeDexRegisterMapSize(entry));
- next_dex_register_map_offset += register_region.size();
- DexRegisterMap dex_register_map(register_region);
- stack_map.SetDexRegisterMapOffset(
- code_info, register_region.start() - dex_register_locations_region.start());
-
- // Set the live bit mask.
- dex_register_map.SetLiveBitMask(entry.num_dex_registers, *entry.live_dex_registers_mask);
-
- // Set the dex register location mapping data.
- for (size_t dex_register_number = 0, index_in_dex_register_locations = 0;
- dex_register_number < entry.num_dex_registers;
- ++dex_register_number) {
- if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
- size_t location_catalog_entry_index =
- dex_register_locations_.Get(entry.dex_register_locations_start_index
- + index_in_dex_register_locations);
- dex_register_map.SetLocationCatalogEntryIndex(
- index_in_dex_register_locations,
- location_catalog_entry_index,
- entry.num_dex_registers,
- location_catalog_entries_.Size());
- ++index_in_dex_register_locations;
- }
- }
- }
- }
-
- // Set the inlining info.
- if (entry.inlining_depth != 0) {
- MemoryRegion inline_region = inline_infos_region.Subregion(
- next_inline_info_offset,
- InlineInfo::kFixedSize + entry.inlining_depth * InlineInfo::SingleEntrySize());
- next_inline_info_offset += inline_region.size();
- InlineInfo inline_info(inline_region);
-
- // Currently relative to the dex register map.
- stack_map.SetInlineDescriptorOffset(
- code_info, inline_region.start() - dex_register_locations_region.start());
-
- inline_info.SetDepth(entry.inlining_depth);
- for (size_t j = 0; j < entry.inlining_depth; ++j) {
- InlineInfoEntry inline_entry = inline_infos_.Get(j + entry.inline_infos_start_index);
- inline_info.SetMethodReferenceIndexAtDepth(j, inline_entry.method_index);
- }
- } else {
- if (inline_info_size != 0) {
- stack_map.SetInlineDescriptorOffset(code_info, StackMap::kNoInlineInfo);
- }
- }
- }
- }
-
- void AddDexRegisterEntry(uint16_t dex_register, DexRegisterLocation::Kind kind, int32_t value) {
- StackMapEntry entry = stack_maps_.Get(stack_maps_.Size() - 1);
- DCHECK_LT(dex_register, entry.num_dex_registers);
-
- if (kind != DexRegisterLocation::Kind::kNone) {
- // Ensure we only use non-compressed location kind at this stage.
- DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
- << DexRegisterLocation::PrettyDescriptor(kind);
- DexRegisterLocation location(kind, value);
-
- // Look for Dex register `location` in the location catalog (using the
- // companion hash map of locations to indices). Use its index if it
- // is already in the location catalog. If not, insert it (in the
- // location catalog and the hash map) and use the newly created index.
- auto it = location_catalog_entries_indices_.Find(location);
- if (it != location_catalog_entries_indices_.end()) {
- // Retrieve the index from the hash map.
- dex_register_locations_.Add(it->second);
- } else {
- // Create a new entry in the location catalog and the hash map.
- size_t index = location_catalog_entries_.Size();
- location_catalog_entries_.Add(location);
- dex_register_locations_.Add(index);
- location_catalog_entries_indices_.Insert(std::make_pair(location, index));
- }
-
- entry.live_dex_registers_mask->SetBit(dex_register);
- entry.dex_register_map_hash +=
- (1 << (dex_register % (sizeof(entry.dex_register_map_hash) * kBitsPerByte)));
- entry.dex_register_map_hash += static_cast<uint32_t>(value);
- entry.dex_register_map_hash += static_cast<uint32_t>(kind);
- stack_maps_.Put(stack_maps_.Size() - 1, entry);
- }
- }
+ // Prepares the stream to fill in a memory region. Must be called before FillIn.
+ // Returns the size (in bytes) needed to store this stream.
+ size_t PrepareForFillIn();
+ void FillIn(MemoryRegion region);
private:
- // Returns the index of an entry with the same dex register map
- // or kNoSameDexMapFound if no such entry exists.
- size_t FindEntryWithTheSameDexMap(size_t entry_index) {
- StackMapEntry entry = stack_maps_.Get(entry_index);
- auto entries_it = dex_map_hash_to_stack_map_indices_.find(entry.dex_register_map_hash);
- if (entries_it == dex_map_hash_to_stack_map_indices_.end()) {
- // We don't have a perfect hash functions so we need a list to collect all stack maps
- // which might have the same dex register map.
- GrowableArray<uint32_t> stack_map_indices(allocator_, 1);
- stack_map_indices.Add(entry_index);
- dex_map_hash_to_stack_map_indices_.Put(entry.dex_register_map_hash, stack_map_indices);
- return kNoSameDexMapFound;
- }
-
- // TODO: We don't need to add ourselves to the map if we can guarantee that
- // FindEntryWithTheSameDexMap is called just once per stack map entry.
- // A good way to do this is to cache the offset in the stack map entry. This
- // is easier to do if we add markers when the stack map constructions begins
- // and when it ends.
+ size_t ComputeDexRegisterLocationCatalogSize() const;
+ size_t ComputeDexRegisterMapSize(const StackMapEntry& entry) const;
+ size_t ComputeDexRegisterMapsSize() const;
+ size_t ComputeInlineInfoSize() const;
- // We might have collisions, so we need to check whether or not we should
- // add the entry to the map. `needs_to_be_added` keeps track of this.
- bool needs_to_be_added = true;
- size_t result = kNoSameDexMapFound;
- for (size_t i = 0; i < entries_it->second.Size(); i++) {
- size_t test_entry_index = entries_it->second.Get(i);
- if (test_entry_index == entry_index) {
- needs_to_be_added = false;
- } else if (HaveTheSameDexMaps(stack_maps_.Get(test_entry_index), entry)) {
- result = test_entry_index;
- needs_to_be_added = false;
- break;
- }
- }
- if (needs_to_be_added) {
- entries_it->second.Add(entry_index);
- }
- return result;
- }
-
- bool HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const {
- if (a.live_dex_registers_mask == nullptr && b.live_dex_registers_mask == nullptr) {
- return true;
- }
- if (a.live_dex_registers_mask == nullptr || b.live_dex_registers_mask == nullptr) {
- return false;
- }
- if (a.num_dex_registers != b.num_dex_registers) {
- return false;
- }
-
- int index_in_dex_register_locations = 0;
- for (uint32_t i = 0; i < a.num_dex_registers; i++) {
- if (a.live_dex_registers_mask->IsBitSet(i) != b.live_dex_registers_mask->IsBitSet(i)) {
- return false;
- }
- if (a.live_dex_registers_mask->IsBitSet(i)) {
- size_t a_loc = dex_register_locations_.Get(
- a.dex_register_locations_start_index + index_in_dex_register_locations);
- size_t b_loc = dex_register_locations_.Get(
- b.dex_register_locations_start_index + index_in_dex_register_locations);
- if (a_loc != b_loc) {
- return false;
- }
- ++index_in_dex_register_locations;
- }
- }
- return true;
- }
+ // Returns the index of an entry with the same dex register map as `current_entry_`,
+ // or kNoSameDexMapFound if no such entry exists.
+ size_t FindEntryWithTheSameDexMap();
+ bool HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const;
ArenaAllocator* allocator_;
GrowableArray<StackMapEntry> stack_maps_;
@@ -476,8 +143,7 @@ class StackMapStream : public ValueObject {
DexRegisterLocationHashFn> LocationCatalogEntriesIndices;
LocationCatalogEntriesIndices location_catalog_entries_indices_;
- // A set of concatenated maps of Dex register locations indices to
- // `location_catalog_entries_`.
+ // A set of concatenated maps of Dex register location indices to `location_catalog_entries_`.
GrowableArray<size_t> dex_register_locations_;
GrowableArray<InlineInfoEntry> inline_infos_;
int stack_mask_max_;
@@ -488,6 +154,18 @@ class StackMapStream : public ValueObject {
ArenaSafeMap<uint32_t, GrowableArray<uint32_t>> dex_map_hash_to_stack_map_indices_;
+ StackMapEntry current_entry_;
+ size_t stack_mask_size_;
+ size_t inline_info_size_;
+ size_t dex_register_maps_size_;
+ size_t stack_maps_size_;
+ size_t dex_register_location_catalog_size_;
+ size_t dex_register_location_catalog_start_;
+ size_t stack_maps_start_;
+ size_t dex_register_maps_start_;
+ size_t inline_infos_start_;
+ size_t needed_size_;
+
static constexpr uint32_t kNoSameDexMapFound = -1;
DISALLOW_COPY_AND_ASSIGN(StackMapStream);
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 8d160bc81e..3291a77021 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -40,11 +40,12 @@ TEST(StackMapTest, Test1) {
ArenaBitVector sp_mask(&arena, 0, false);
size_t number_of_dex_registers = 2;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.AddDexRegisterEntry(0, Kind::kInStack, 0); // Short location.
stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Short location.
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
@@ -123,20 +124,22 @@ TEST(StackMapTest, Test2) {
sp_mask1.SetBit(2);
sp_mask1.SetBit(4);
size_t number_of_dex_registers = 2;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
stream.AddDexRegisterEntry(0, Kind::kInStack, 0); // Short location.
stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
stream.AddInlineInfoEntry(42);
stream.AddInlineInfoEntry(82);
+ stream.EndStackMapEntry();
ArenaBitVector sp_mask2(&arena, 0, true);
sp_mask2.SetBit(3);
sp_mask1.SetBit(8);
- stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
stream.AddDexRegisterEntry(0, Kind::kInRegister, 18); // Short location.
stream.AddDexRegisterEntry(1, Kind::kInFpuRegister, 3); // Short location.
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
@@ -273,11 +276,12 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 2;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.AddDexRegisterEntry(0, Kind::kNone, 0); // No location.
stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
@@ -353,7 +357,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 1024;
// Create the first stack map (and its Dex register map).
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
uint32_t number_of_dex_live_registers_in_dex_register_map_0 = number_of_dex_registers - 8;
for (uint32_t i = 0; i < number_of_dex_live_registers_in_dex_register_map_0; ++i) {
// Use two different Dex register locations to populate this map,
@@ -362,13 +366,15 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
// art::DexRegisterMap::SingleEntrySizeInBits).
stream.AddDexRegisterEntry(i, Kind::kConstant, i % 2); // Short location.
}
+ stream.EndStackMapEntry();
// Create the second stack map (and its Dex register map).
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
for (uint32_t i = 0; i < number_of_dex_registers; ++i) {
stream.AddDexRegisterEntry(i, Kind::kConstant, 0); // Short location.
}
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
@@ -413,19 +419,22 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 2;
// First stack map.
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.AddDexRegisterEntry(0, Kind::kInRegister, 0); // Short location.
stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
+ stream.EndStackMapEntry();
// Second stack map, which should share the same dex register map.
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.AddDexRegisterEntry(0, Kind::kInRegister, 0); // Short location.
stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
+ stream.EndStackMapEntry();
// Third stack map (doesn't share the dex register map).
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.AddDexRegisterEntry(0, Kind::kInRegister, 2); // Short location.
stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
@@ -462,9 +471,10 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 0;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
diff --git a/compiler/output_stream_test.cc b/compiler/output_stream_test.cc
index bba98926b3..fbc9d0d8fc 100644
--- a/compiler/output_stream_test.cc
+++ b/compiler/output_stream_test.cc
@@ -66,7 +66,7 @@ TEST_F(OutputStreamTest, File) {
SetOutputStream(output_stream);
GenerateTestOutput();
std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
- EXPECT_TRUE(in.get() != NULL);
+ EXPECT_TRUE(in.get() != nullptr);
std::vector<uint8_t> actual(in->GetLength());
bool readSuccess = in->ReadFully(&actual[0], actual.size());
EXPECT_TRUE(readSuccess);
@@ -76,12 +76,12 @@ TEST_F(OutputStreamTest, File) {
TEST_F(OutputStreamTest, Buffered) {
ScratchFile tmp;
std::unique_ptr<FileOutputStream> file_output_stream(new FileOutputStream(tmp.GetFile()));
- CHECK(file_output_stream.get() != NULL);
+ CHECK(file_output_stream.get() != nullptr);
BufferedOutputStream buffered_output_stream(file_output_stream.release());
SetOutputStream(buffered_output_stream);
GenerateTestOutput();
std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
- EXPECT_TRUE(in.get() != NULL);
+ EXPECT_TRUE(in.get() != nullptr);
std::vector<uint8_t> actual(in->GetLength());
bool readSuccess = in->ReadFully(&actual[0], actual.size());
EXPECT_TRUE(readSuccess);
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index dd0dba2df4..313f365df6 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -739,17 +739,17 @@ class ArmAssembler : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index b7715af6c4..e47b5314fd 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -149,14 +149,14 @@ class Arm64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 36342c61c5..b016e74aba 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -41,8 +41,8 @@ AssemblerBuffer::AssemblerBuffer() {
contents_ = NewContents(kInitialBufferCapacity);
cursor_ = contents_;
limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
- fixup_ = NULL;
- slow_path_ = NULL;
+ fixup_ = nullptr;
+ slow_path_ = nullptr;
#ifndef NDEBUG
has_ensured_capacity_ = false;
fixups_processed_ = false;
@@ -61,7 +61,7 @@ AssemblerBuffer::~AssemblerBuffer() {
void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
AssemblerFixup* fixup = fixup_;
- while (fixup != NULL) {
+ while (fixup != nullptr) {
fixup->Process(region, fixup->position());
fixup = fixup->previous();
}
@@ -127,7 +127,7 @@ Assembler* Assembler::Create(InstructionSet instruction_set) {
return new x86_64::X86_64Assembler();
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index ebafd3dd1e..2e3a47bb91 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -156,7 +156,7 @@ class AssemblerFixup {
// Parent of all queued slow paths, emitted during finalization
class SlowPath {
public:
- SlowPath() : next_(NULL) {}
+ SlowPath() : next_(nullptr) {}
virtual ~SlowPath() {}
Label* Continuation() { return &continuation_; }
@@ -216,20 +216,20 @@ class AssemblerBuffer {
}
void EnqueueSlowPath(SlowPath* slowpath) {
- if (slow_path_ == NULL) {
+ if (slow_path_ == nullptr) {
slow_path_ = slowpath;
} else {
SlowPath* cur = slow_path_;
- for ( ; cur->next_ != NULL ; cur = cur->next_) {}
+ for ( ; cur->next_ != nullptr ; cur = cur->next_) {}
cur->next_ = slowpath;
}
}
void EmitSlowPaths(Assembler* sp_asm) {
SlowPath* cur = slow_path_;
- SlowPath* next = NULL;
- slow_path_ = NULL;
- for ( ; cur != NULL ; cur = next) {
+ SlowPath* next = nullptr;
+ slow_path_ = nullptr;
+ for ( ; cur != nullptr ; cur = next) {
cur->Emit(sp_asm);
next = cur->next_;
delete cur;
@@ -489,14 +489,14 @@ class Assembler {
virtual void GetCurrentThread(FrameOffset dest_offset,
ManagedRegister scratch) = 0;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) = 0;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) = 0;
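EnqueueSlowPath and EmitSlowPaths above implement an intrusive singly linked FIFO: append by walking to the tail, then drain in insertion order while deleting each node. A standalone sketch of the same pattern (hypothetical Node type, not ART's SlowPath):

#include <cstdio>

struct Node {
  Node* next = nullptr;
  virtual void Emit() { std::printf("emit\n"); }
  virtual ~Node() {}
};

// Append at the tail, as EnqueueSlowPath does (O(n) walk, fine for short lists).
static void Enqueue(Node** head, Node* n) {
  if (*head == nullptr) {
    *head = n;
  } else {
    Node* cur = *head;
    for (; cur->next != nullptr; cur = cur->next) {}
    cur->next = n;
  }
}

// Drain in insertion order and delete each node, mirroring EmitSlowPaths.
static void Drain(Node** head) {
  Node* next = nullptr;
  for (Node* cur = *head; cur != nullptr; cur = next) {
    next = cur->next;
    cur->Emit();
    delete cur;
  }
  *head = nullptr;
}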
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index a171e59d98..772fa9aa4b 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -43,8 +43,6 @@ namespace arm {
static constexpr bool kPrintResults = false;
#endif
-static const char* TOOL_PREFIX = "arm-linux-androideabi-";
-
void SetAndroidData() {
const char* data = getenv("ANDROID_DATA");
if (data == nullptr) {
@@ -65,87 +63,6 @@ int CompareIgnoringSpace(const char* s1, const char* s2) {
return *s1 - *s2;
}
-std::string GetAndroidToolsDir() {
- std::string root;
- const char* android_build_top = getenv("ANDROID_BUILD_TOP");
- if (android_build_top != nullptr) {
- root += android_build_top;
- } else {
- // Not set by build server, so default to current directory
- char* cwd = getcwd(nullptr, 0);
- setenv("ANDROID_BUILD_TOP", cwd, 1);
- root += cwd;
- free(cwd);
- }
-
- // Look for "prebuilts"
- std::string toolsdir = root;
- struct stat st;
- while (toolsdir != "") {
- std::string prebuilts = toolsdir + "/prebuilts";
- if (stat(prebuilts.c_str(), &st) == 0) {
- // Found prebuilts.
- toolsdir += "/prebuilts/gcc/linux-x86/arm";
- break;
- }
- // Not present, move up one dir.
- size_t slash = toolsdir.rfind('/');
- if (slash == std::string::npos) {
- toolsdir = "";
- } else {
- toolsdir = toolsdir.substr(0, slash-1);
- }
- }
- bool statok = stat(toolsdir.c_str(), &st) == 0;
- if (!statok) {
- return ""; // Use path.
- }
-
- DIR* dir = opendir(toolsdir.c_str());
- if (dir == nullptr) {
- return ""; // Use path.
- }
-
- struct dirent* entry;
- std::string founddir;
- double maxversion = 0;
-
- // Find the latest version of the arm-eabi tools (biggest version number).
- // Suffix on toolsdir will be something like "arm-eabi-4.8"
- while ((entry = readdir(dir)) != nullptr) {
- std::string subdir = toolsdir + std::string("/") + std::string(entry->d_name);
- size_t eabi = subdir.find(TOOL_PREFIX);
- if (eabi != std::string::npos) {
- // Check if "bin/{as,objcopy,objdump}" exist under this folder.
- struct stat exec_st;
- std::string exec_path;
- exec_path = subdir + "/bin/" + TOOL_PREFIX + "as";
- if (stat(exec_path.c_str(), &exec_st) != 0)
- continue;
- exec_path = subdir + "/bin/" + TOOL_PREFIX + "objcopy";
- if (stat(exec_path.c_str(), &exec_st) != 0)
- continue;
- exec_path = subdir + "/bin/" + TOOL_PREFIX + "objdump";
- if (stat(exec_path.c_str(), &exec_st) != 0)
- continue;
-
- std::string suffix = subdir.substr(eabi + strlen(TOOL_PREFIX));
- double version = strtod(suffix.c_str(), nullptr);
- if (version > maxversion) {
- maxversion = version;
- founddir = subdir;
- }
- }
- }
- closedir(dir);
- bool found = founddir != "";
- if (!found) {
- return ""; // Use path.
- }
-
- return founddir + "/bin/";
-}
-
void dump(std::vector<uint8_t>& code, const char* testname) {
// This will only work on the host. There is no as, objcopy or objdump on the
// device.
@@ -155,7 +72,7 @@ void dump(std::vector<uint8_t>& code, const char* testname) {
if (!results_ok) {
setup_results();
- toolsdir = GetAndroidToolsDir();
+ toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2);
SetAndroidData();
results_ok = true;
}
@@ -187,19 +104,18 @@ void dump(std::vector<uint8_t>& code, const char* testname) {
char cmd[1024];
// Assemble the .S
- snprintf(cmd, sizeof(cmd), "%s%sas %s -o %s.o", toolsdir.c_str(), TOOL_PREFIX, filename, filename);
+ snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
system(cmd);
// Remove the $d symbols to prevent the disassembler dumping the instructions
// as .word
- snprintf(cmd, sizeof(cmd), "%s%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), TOOL_PREFIX,
- filename, filename);
+ snprintf(cmd, sizeof(cmd), "%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), filename, filename);
system(cmd);
// Disassemble.
- snprintf(cmd, sizeof(cmd), "%s%sobjdump -d %s.oo | grep '^ *[0-9a-f][0-9a-f]*:'",
- toolsdir.c_str(), TOOL_PREFIX, filename);
+ snprintf(cmd, sizeof(cmd), "%sobjdump -d %s.oo | grep '^ *[0-9a-f][0-9a-f]*:'",
+ toolsdir.c_str(), filename);
if (kPrintResults) {
// Print the results only, don't check. This is used to generate new output for inserting
// into the .inc file.
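The commands above can drop TOOL_PREFIX because the string returned by CommonRuntimeTest::GetAndroidTargetToolsDir is expected to already end in the per-target tool prefix, so "%sas" concatenates to the full assembler name. A small sketch with an illustrative (not real) path:

#include <cstdio>
#include <string>

int main() {
  // Illustrative value only; the real one comes from GetAndroidTargetToolsDir.
  std::string toolsdir = "/prebuilts/gcc/linux-x86/arm/bin/arm-linux-androideabi-";
  const char* filename = "test.S";
  char cmd[1024];
  std::snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
  std::printf("%s\n", cmd);  // ...arm-linux-androideabi-as test.S -o test.S.o
  return 0;
}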
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index b062a2aa86..a9a5781093 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -40,8 +40,8 @@ class DedupeSet {
struct HashedKey {
StoreKey* store_ptr;
union {
- HashType store_hash; // Valid if store_ptr != nullptr.
- const HashedInKey* in_key; // Valid if store_ptr == nullptr.
+ HashType store_hash; // Valid if store_ptr != null.
+ const HashedInKey* in_key; // Valid if store_ptr == null.
};
};
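The HashedKey union above uses store_ptr as the discriminant, letting the stored hash and the lookup key share storage. A minimal standalone illustration of the pattern (types are hypothetical stand-ins for DedupeSet's template parameters):

#include <cstddef>
#include <string>

struct InKey { size_t hash; const std::string* data; };

struct TaggedKey {
  const std::string* store_ptr;  // Discriminant: non-null means "stored key".
  union {
    size_t store_hash;    // Valid only when store_ptr != nullptr.
    const InKey* in_key;  // Valid only when store_ptr == nullptr.
  };
};

// Read whichever member the discriminant marks as active.
static size_t HashOf(const TaggedKey& k) {
  return (k.store_ptr != nullptr) ? k.store_hash : k.in_key->hash;
}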
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index 821e28b4a0..e4b1e7d0e9 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -46,6 +46,14 @@ class GrowableArray : public ArenaObject<kArenaAllocGrowableArray> {
}
}
+ bool Contains(T value) const {
+ for (size_t i = 0; i < num_used_; ++i) {
+ if (elem_list_[i] == value) {
+ return true;
+ }
+ }
+ return false;
+ }
// Expand the list size to at least new length.
void Resize(size_t new_length) {
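The new Contains() is a plain linear scan, which is appropriate for the small arena-backed arrays this class holds. A self-contained usage sketch (MiniArray is a stand-in, since constructing a real GrowableArray needs an arena allocator):

#include <cassert>
#include <cstddef>

template <typename T>
struct MiniArray {
  T elems[16];
  size_t num_used = 0;
  void Insert(T v) { elems[num_used++] = v; }
  bool Contains(T value) const {  // Same linear scan the patch adds.
    for (size_t i = 0; i < num_used; ++i) {
      if (elems[i] == value) return true;
    }
    return false;
  }
};

int main() {
  MiniArray<int> a;
  a.Insert(7);
  assert(a.Contains(7) && !a.Contains(8));
  return 0;
}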
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 216cb4164e..d4acf03dc9 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -238,17 +238,17 @@ class MipsAssembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister mscratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister mscratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 36e74d7cb2..b7f6a9e83a 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -235,14 +235,14 @@ class Mips64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
mscratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/test_dex_file_builder.h b/compiler/utils/test_dex_file_builder.h
new file mode 100644
index 0000000000..ab039aa215
--- /dev/null
+++ b/compiler/utils/test_dex_file_builder.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
+#define ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
+
+#include <cstring>
+#include <map>
+#include <set>
+#include <vector>
+
+#include "dex_file.h"
+#include "utils.h"
+
+namespace art {
+
+class TestDexFileBuilder {
+ public:
+ TestDexFileBuilder()
+      : strings_(), types_(), fields_(), protos_(), methods_(), dex_file_data_() {
+ }
+
+ void AddString(const std::string& str) {
+ CHECK(dex_file_data_.empty());
+ auto it = strings_.emplace(str, IdxAndDataOffset()).first;
+ CHECK_LT(it->first.length(), 128u); // Don't allow multi-byte length in uleb128.
+ }
+
+ void AddType(const std::string& descriptor) {
+ CHECK(dex_file_data_.empty());
+ AddString(descriptor);
+ types_.emplace(descriptor, 0u);
+ }
+
+ void AddField(const std::string& class_descriptor, const std::string& type,
+ const std::string& name) {
+ CHECK(dex_file_data_.empty());
+ AddType(class_descriptor);
+ AddType(type);
+ AddString(name);
+ FieldKey key = { class_descriptor, type, name };
+ fields_.emplace(key, 0u);
+ }
+
+ void AddMethod(const std::string& class_descriptor, const std::string& signature,
+ const std::string& name) {
+ CHECK(dex_file_data_.empty());
+ AddType(class_descriptor);
+ AddString(name);
+
+ ProtoKey proto_key = CreateProtoKey(signature);
+ AddString(proto_key.shorty);
+ AddType(proto_key.return_type);
+ for (const auto& arg_type : proto_key.args) {
+ AddType(arg_type);
+ }
+ auto it = protos_.emplace(proto_key, IdxAndDataOffset()).first;
+ const ProtoKey* proto = &it->first; // Valid as long as the element remains in protos_.
+
+ MethodKey method_key = {
+ class_descriptor, name, proto
+ };
+ methods_.emplace(method_key, 0u);
+ }
+
+ // NOTE: The builder holds the actual data, so it must live as long as the dex file.
+ std::unique_ptr<const DexFile> Build(const std::string& dex_location) {
+ CHECK(dex_file_data_.empty());
+ union {
+ uint8_t data[sizeof(DexFile::Header)];
+ uint64_t force_alignment;
+ } header_data;
+ std::memset(header_data.data, 0, sizeof(header_data.data));
+ DexFile::Header* header = reinterpret_cast<DexFile::Header*>(&header_data.data);
+ std::copy_n(DexFile::kDexMagic, 4u, header->magic_);
+ std::copy_n(DexFile::kDexMagicVersion, 4u, header->magic_ + 4u);
+    header->header_size_ = sizeof(DexFile::Header);
+ header->endian_tag_ = DexFile::kDexEndianConstant;
+ header->link_size_ = 0u; // Unused.
+ header->link_off_ = 0u; // Unused.
+ header->map_off_ = 0u; // Unused.
+
+ uint32_t data_section_size = 0u;
+
+ uint32_t string_ids_offset = sizeof(DexFile::Header);
+ uint32_t string_idx = 0u;
+ for (auto& entry : strings_) {
+ entry.second.idx = string_idx;
+ string_idx += 1u;
+ entry.second.data_offset = data_section_size;
+ data_section_size += entry.first.length() + 1u /* length */ + 1u /* null-terminator */;
+ }
+ header->string_ids_size_ = strings_.size();
+ header->string_ids_off_ = strings_.empty() ? 0u : string_ids_offset;
+
+ uint32_t type_ids_offset = string_ids_offset + strings_.size() * sizeof(DexFile::StringId);
+ uint32_t type_idx = 0u;
+ for (auto& entry : types_) {
+ entry.second = type_idx;
+ type_idx += 1u;
+ }
+ header->type_ids_size_ = types_.size();
+ header->type_ids_off_ = types_.empty() ? 0u : type_ids_offset;
+
+ uint32_t proto_ids_offset = type_ids_offset + types_.size() * sizeof(DexFile::TypeId);
+ uint32_t proto_idx = 0u;
+ for (auto& entry : protos_) {
+ entry.second.idx = proto_idx;
+ proto_idx += 1u;
+ size_t num_args = entry.first.args.size();
+ if (num_args != 0u) {
+ entry.second.data_offset = RoundUp(data_section_size, 4u);
+ data_section_size = entry.second.data_offset + 4u + num_args * sizeof(DexFile::TypeItem);
+ } else {
+ entry.second.data_offset = 0u;
+ }
+ }
+ header->proto_ids_size_ = protos_.size();
+ header->proto_ids_off_ = protos_.empty() ? 0u : proto_ids_offset;
+
+ uint32_t field_ids_offset = proto_ids_offset + protos_.size() * sizeof(DexFile::ProtoId);
+ uint32_t field_idx = 0u;
+ for (auto& entry : fields_) {
+ entry.second = field_idx;
+ field_idx += 1u;
+ }
+ header->field_ids_size_ = fields_.size();
+ header->field_ids_off_ = fields_.empty() ? 0u : field_ids_offset;
+
+ uint32_t method_ids_offset = field_ids_offset + fields_.size() * sizeof(DexFile::FieldId);
+ uint32_t method_idx = 0u;
+ for (auto& entry : methods_) {
+ entry.second = method_idx;
+ method_idx += 1u;
+ }
+ header->method_ids_size_ = methods_.size();
+ header->method_ids_off_ = methods_.empty() ? 0u : method_ids_offset;
+
+ // No class defs.
+ header->class_defs_size_ = 0u;
+ header->class_defs_off_ = 0u;
+
+ uint32_t data_section_offset = method_ids_offset + methods_.size() * sizeof(DexFile::MethodId);
+ header->data_size_ = data_section_size;
+ header->data_off_ = (data_section_size != 0u) ? data_section_offset : 0u;
+
+ uint32_t total_size = data_section_offset + data_section_size;
+
+ dex_file_data_.resize(total_size);
+ std::memcpy(&dex_file_data_[0], header_data.data, sizeof(DexFile::Header));
+
+ for (const auto& entry : strings_) {
+ CHECK_LT(entry.first.size(), 128u);
+ uint32_t raw_offset = data_section_offset + entry.second.data_offset;
+ dex_file_data_[raw_offset] = static_cast<uint8_t>(entry.first.size());
+ std::memcpy(&dex_file_data_[raw_offset + 1], entry.first.c_str(), entry.first.size() + 1);
+ Write32(string_ids_offset + entry.second.idx * sizeof(DexFile::StringId), raw_offset);
+ }
+
+ for (const auto& entry : types_) {
+ Write32(type_ids_offset + entry.second * sizeof(DexFile::TypeId), GetStringIdx(entry.first));
+ }
+
+ for (const auto& entry : protos_) {
+ size_t num_args = entry.first.args.size();
+ uint32_t type_list_offset =
+ (num_args != 0u) ? data_section_offset + entry.second.data_offset : 0u;
+ uint32_t raw_offset = proto_ids_offset + entry.second.idx * sizeof(DexFile::ProtoId);
+ Write32(raw_offset + 0u, GetStringIdx(entry.first.shorty));
+ Write16(raw_offset + 4u, GetTypeIdx(entry.first.return_type));
+ Write32(raw_offset + 8u, type_list_offset);
+ if (num_args != 0u) {
+ CHECK_NE(entry.second.data_offset, 0u);
+ Write32(type_list_offset, num_args);
+ for (size_t i = 0; i != num_args; ++i) {
+ Write16(type_list_offset + 4u + i * sizeof(DexFile::TypeItem),
+ GetTypeIdx(entry.first.args[i]));
+ }
+ }
+ }
+
+ for (const auto& entry : fields_) {
+ uint32_t raw_offset = field_ids_offset + entry.second * sizeof(DexFile::FieldId);
+ Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
+ Write16(raw_offset + 2u, GetTypeIdx(entry.first.type));
+ Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
+ }
+
+ for (const auto& entry : methods_) {
+ uint32_t raw_offset = method_ids_offset + entry.second * sizeof(DexFile::MethodId);
+ Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
+ auto it = protos_.find(*entry.first.proto);
+ CHECK(it != protos_.end());
+ Write16(raw_offset + 2u, it->second.idx);
+ Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
+ }
+
+ // Leave checksum and signature as zeros.
+
+ std::string error_msg;
+ std::unique_ptr<const DexFile> dex_file(DexFile::Open(
+ &dex_file_data_[0], dex_file_data_.size(), dex_location, 0u, nullptr, &error_msg));
+ CHECK(dex_file != nullptr) << error_msg;
+    return dex_file;
+ }
+
+ uint32_t GetStringIdx(const std::string& type) {
+ auto it = strings_.find(type);
+ CHECK(it != strings_.end());
+ return it->second.idx;
+ }
+
+ uint32_t GetTypeIdx(const std::string& type) {
+ auto it = types_.find(type);
+ CHECK(it != types_.end());
+ return it->second;
+ }
+
+ uint32_t GetFieldIdx(const std::string& class_descriptor, const std::string& type,
+ const std::string& name) {
+ FieldKey key = { class_descriptor, type, name };
+ auto it = fields_.find(key);
+ CHECK(it != fields_.end());
+ return it->second;
+ }
+
+ uint32_t GetMethodIdx(const std::string& class_descriptor, const std::string& signature,
+ const std::string& name) {
+ ProtoKey proto_key = CreateProtoKey(signature);
+ MethodKey method_key = { class_descriptor, name, &proto_key };
+ auto it = methods_.find(method_key);
+ CHECK(it != methods_.end());
+ return it->second;
+ }
+
+ private:
+ struct IdxAndDataOffset {
+ uint32_t idx;
+ uint32_t data_offset;
+ };
+
+ struct FieldKey {
+ const std::string class_descriptor;
+ const std::string type;
+ const std::string name;
+ };
+ struct FieldKeyComparator {
+ bool operator()(const FieldKey& lhs, const FieldKey& rhs) const {
+ if (lhs.class_descriptor != rhs.class_descriptor) {
+ return lhs.class_descriptor < rhs.class_descriptor;
+ }
+ if (lhs.name != rhs.name) {
+ return lhs.name < rhs.name;
+ }
+ return lhs.type < rhs.type;
+ }
+ };
+
+ struct ProtoKey {
+ std::string shorty;
+ std::string return_type;
+ std::vector<std::string> args;
+ };
+ struct ProtoKeyComparator {
+ bool operator()(const ProtoKey& lhs, const ProtoKey& rhs) const {
+ if (lhs.return_type != rhs.return_type) {
+ return lhs.return_type < rhs.return_type;
+ }
+ size_t min_args = std::min(lhs.args.size(), rhs.args.size());
+ for (size_t i = 0; i != min_args; ++i) {
+ if (lhs.args[i] != rhs.args[i]) {
+ return lhs.args[i] < rhs.args[i];
+ }
+ }
+ return lhs.args.size() < rhs.args.size();
+ }
+ };
+
+ struct MethodKey {
+ std::string class_descriptor;
+ std::string name;
+ const ProtoKey* proto;
+ };
+ struct MethodKeyComparator {
+ bool operator()(const MethodKey& lhs, const MethodKey& rhs) const {
+ if (lhs.class_descriptor != rhs.class_descriptor) {
+ return lhs.class_descriptor < rhs.class_descriptor;
+ }
+ if (lhs.name != rhs.name) {
+ return lhs.name < rhs.name;
+ }
+ return ProtoKeyComparator()(*lhs.proto, *rhs.proto);
+ }
+ };
+
+ ProtoKey CreateProtoKey(const std::string& signature) {
+ CHECK_EQ(signature[0], '(');
+ const char* args = signature.c_str() + 1;
+ const char* args_end = std::strchr(args, ')');
+ CHECK(args_end != nullptr);
+ const char* return_type = args_end + 1;
+
+ ProtoKey key = {
+ std::string() + ((*return_type == '[') ? 'L' : *return_type),
+ return_type,
+ std::vector<std::string>()
+ };
+ while (args != args_end) {
+ key.shorty += (*args == '[') ? 'L' : *args;
+ const char* arg_start = args;
+ while (*args == '[') {
+ ++args;
+ }
+ if (*args == 'L') {
+ do {
+ ++args;
+ CHECK_NE(args, args_end);
+ } while (*args != ';');
+ }
+ ++args;
+ key.args.emplace_back(arg_start, args);
+ }
+ return key;
+ }
+
+ void Write32(size_t offset, uint32_t value) {
+ CHECK_LE(offset + 4u, dex_file_data_.size());
+ CHECK_EQ(dex_file_data_[offset + 0], 0u);
+ CHECK_EQ(dex_file_data_[offset + 1], 0u);
+ CHECK_EQ(dex_file_data_[offset + 2], 0u);
+ CHECK_EQ(dex_file_data_[offset + 3], 0u);
+ dex_file_data_[offset + 0] = static_cast<uint8_t>(value >> 0);
+ dex_file_data_[offset + 1] = static_cast<uint8_t>(value >> 8);
+ dex_file_data_[offset + 2] = static_cast<uint8_t>(value >> 16);
+ dex_file_data_[offset + 3] = static_cast<uint8_t>(value >> 24);
+ }
+
+ void Write16(size_t offset, uint32_t value) {
+ CHECK_LE(value, 0xffffu);
+ CHECK_LE(offset + 2u, dex_file_data_.size());
+ CHECK_EQ(dex_file_data_[offset + 0], 0u);
+ CHECK_EQ(dex_file_data_[offset + 1], 0u);
+ dex_file_data_[offset + 0] = static_cast<uint8_t>(value >> 0);
+ dex_file_data_[offset + 1] = static_cast<uint8_t>(value >> 8);
+ }
+
+ std::map<std::string, IdxAndDataOffset> strings_;
+ std::map<std::string, uint32_t> types_;
+ std::map<FieldKey, uint32_t, FieldKeyComparator> fields_;
+ std::map<ProtoKey, IdxAndDataOffset, ProtoKeyComparator> protos_;
+ std::map<MethodKey, uint32_t, MethodKeyComparator> methods_;
+
+ std::vector<uint8_t> dex_file_data_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
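A note on the 128-character limit checked in AddString: a dex string_data_item begins with a uleb128 length, and Build() reserves exactly one byte for it, which is only valid for values below 0x80. For contrast, a sketch of the general encoding the builder deliberately avoids:

#include <cstdint>
#include <vector>

// General uleb128 encoding: seven payload bits per byte, high bit set on all
// but the last byte. Values below 0x80 take exactly one byte, which is the
// fast path TestDexFileBuilder hard-codes.
static void EncodeUleb128(std::vector<uint8_t>* out, uint32_t value) {
  while (value >= 0x80) {
    out->push_back(static_cast<uint8_t>(value | 0x80));
    value >>= 7;
  }
  out->push_back(static_cast<uint8_t>(value));
}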
diff --git a/compiler/utils/test_dex_file_builder_test.cc b/compiler/utils/test_dex_file_builder_test.cc
new file mode 100644
index 0000000000..ee6e35dcce
--- /dev/null
+++ b/compiler/utils/test_dex_file_builder_test.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "test_dex_file_builder.h"
+
+#include "dex_file-inl.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(TestDexFileBuilderTest, SimpleTest) {
+ TestDexFileBuilder builder;
+ builder.AddString("Arbitrary string");
+ builder.AddType("Ljava/lang/Class;");
+ builder.AddField("LTestClass;", "[I", "intField");
+ builder.AddMethod("LTestClass;", "()I", "foo");
+ builder.AddMethod("LTestClass;", "(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;", "bar");
+ const char* dex_location = "TestDexFileBuilder/SimpleTest";
+ std::unique_ptr<const DexFile> dex_file(builder.Build(dex_location));
+ ASSERT_TRUE(dex_file != nullptr);
+ EXPECT_STREQ(dex_location, dex_file->GetLocation().c_str());
+
+ static const char* const expected_strings[] = {
+ "Arbitrary string",
+ "I",
+ "LLL", // shorty
+ "LTestClass;",
+ "Ljava/lang/Class;",
+ "Ljava/lang/Object;",
+ "[I",
+ "[Ljava/lang/Object;",
+ "bar",
+ "foo",
+ "intField",
+ };
+ ASSERT_EQ(arraysize(expected_strings), dex_file->NumStringIds());
+ for (size_t i = 0; i != arraysize(expected_strings); ++i) {
+ EXPECT_STREQ(expected_strings[i], dex_file->GetStringData(dex_file->GetStringId(i))) << i;
+ }
+
+ static const char* const expected_types[] = {
+ "I",
+ "LTestClass;",
+ "Ljava/lang/Class;",
+ "Ljava/lang/Object;",
+ "[I",
+ "[Ljava/lang/Object;",
+ };
+ ASSERT_EQ(arraysize(expected_types), dex_file->NumTypeIds());
+ for (size_t i = 0; i != arraysize(expected_types); ++i) {
+ EXPECT_STREQ(expected_types[i], dex_file->GetTypeDescriptor(dex_file->GetTypeId(i))) << i;
+ }
+
+ ASSERT_EQ(1u, dex_file->NumFieldIds());
+ EXPECT_STREQ("[I TestClass.intField", PrettyField(0u, *dex_file).c_str());
+
+ ASSERT_EQ(2u, dex_file->NumProtoIds());
+ ASSERT_EQ(2u, dex_file->NumMethodIds());
+ EXPECT_STREQ("TestClass TestClass.bar(java.lang.Object, java.lang.Object[])",
+ PrettyMethod(0u, *dex_file).c_str());
+ EXPECT_STREQ("int TestClass.foo()",
+ PrettyMethod(1u, *dex_file).c_str());
+
+ EXPECT_EQ(0u, builder.GetStringIdx("Arbitrary string"));
+ EXPECT_EQ(2u, builder.GetTypeIdx("Ljava/lang/Class;"));
+ EXPECT_EQ(0u, builder.GetFieldIdx("LTestClass;", "[I", "intField"));
+ EXPECT_EQ(1u, builder.GetMethodIdx("LTestClass;", "()I", "foo"));
+ EXPECT_EQ(0u, builder.GetMethodIdx("LTestClass;", "(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;", "bar"));
+}
+
+} // namespace art
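The "LLL" shorty among expected_strings is what CreateProtoKey derives from bar's signature: the return type first, then one character per argument, with arrays and classes both collapsing to 'L'. A compact sketch of that derivation (simplified; assumes a well-formed signature):

#include <string>

static char ShortyChar(char c) { return (c == '[') ? 'L' : c; }

// "(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;" -> "LLL"
static std::string ShortyOf(const std::string& sig) {
  size_t close = sig.find(')');
  std::string shorty(1, ShortyChar(sig[close + 1]));  // Return type first.
  for (size_t i = 1; i < close;) {
    shorty += ShortyChar(sig[i]);
    while (sig[i] == '[') ++i;                // Skip array dimensions.
    if (sig[i] == 'L') i = sig.find(';', i);  // Skip the class name.
    ++i;
  }
  return shorty;
}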
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index a933474a39..7fc8ef0815 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -576,17 +576,17 @@ class X86Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 32204a9970..c0ca7ef437 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -128,6 +128,16 @@ void X86_64Assembler::movl(CpuRegister dst, const Immediate& imm) {
}
+void X86_64Assembler::movq(const Address& dst, const Immediate& imm) {
+ CHECK(imm.is_int32());
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst);
+ EmitUint8(0xC7);
+ EmitOperand(0, dst);
+ EmitImmediate(imm);
+}
+
+
void X86_64Assembler::movq(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
// 0x89 is movq r/m64 <- r64, with op1 in r/m and op2 in reg: so reverse EmitRex64
@@ -388,7 +398,7 @@ void X86_64Assembler::movsxd(CpuRegister dst, CpuRegister src) {
void X86_64Assembler::movsxd(CpuRegister dst, const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(dst);
+ EmitRex64(dst, src);
EmitUint8(0x63);
EmitOperand(dst.LowBits(), src);
}
@@ -652,6 +662,21 @@ void X86_64Assembler::cvtsi2ss(XmmRegister dst, CpuRegister src, bool is64bit) {
}
+void X86_64Assembler::cvtsi2ss(XmmRegister dst, const Address& src, bool is64bit) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ if (is64bit) {
+ // Emit a REX.W prefix if the operand size is 64 bits.
+ EmitRex64(dst, src);
+ } else {
+ EmitOptionalRex32(dst, src);
+ }
+ EmitUint8(0x0F);
+ EmitUint8(0x2A);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::cvtsi2sd(XmmRegister dst, CpuRegister src) {
cvtsi2sd(dst, src, false);
}
@@ -672,6 +697,21 @@ void X86_64Assembler::cvtsi2sd(XmmRegister dst, CpuRegister src, bool is64bit) {
}
+void X86_64Assembler::cvtsi2sd(XmmRegister dst, const Address& src, bool is64bit) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ if (is64bit) {
+ // Emit a REX.W prefix if the operand size is 64 bits.
+ EmitRex64(dst, src);
+ } else {
+ EmitOptionalRex32(dst, src);
+ }
+ EmitUint8(0x0F);
+ EmitUint8(0x2A);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::cvtss2si(CpuRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
@@ -692,6 +732,16 @@ void X86_64Assembler::cvtss2sd(XmmRegister dst, XmmRegister src) {
}
+void X86_64Assembler::cvtss2sd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5A);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::cvtsd2si(CpuRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF2);
@@ -752,6 +802,16 @@ void X86_64Assembler::cvtsd2ss(XmmRegister dst, XmmRegister src) {
}
+void X86_64Assembler::cvtsd2ss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5A);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
@@ -771,6 +831,15 @@ void X86_64Assembler::comiss(XmmRegister a, XmmRegister b) {
}
+void X86_64Assembler::comiss(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitOperand(a.LowBits(), b);
+}
+
+
void X86_64Assembler::comisd(XmmRegister a, XmmRegister b) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -780,6 +849,17 @@ void X86_64Assembler::comisd(XmmRegister a, XmmRegister b) {
EmitXmmRegisterOperand(a.LowBits(), b);
}
+
+void X86_64Assembler::comisd(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitOperand(a.LowBits(), b);
+}
+
+
void X86_64Assembler::ucomiss(XmmRegister a, XmmRegister b) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(a, b);
@@ -789,6 +869,15 @@ void X86_64Assembler::ucomiss(XmmRegister a, XmmRegister b) {
}
+void X86_64Assembler::ucomiss(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitOperand(a.LowBits(), b);
+}
+
+
void X86_64Assembler::ucomisd(XmmRegister a, XmmRegister b) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -799,6 +888,16 @@ void X86_64Assembler::ucomisd(XmmRegister a, XmmRegister b) {
}
+void X86_64Assembler::ucomisd(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitOperand(a.LowBits(), b);
+}
+
+
void X86_64Assembler::roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -1161,7 +1260,7 @@ void X86_64Assembler::cmpq(CpuRegister reg, const Immediate& imm) {
void X86_64Assembler::cmpq(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(reg);
+ EmitRex64(reg, address);
EmitUint8(0x3B);
EmitOperand(reg.LowBits(), address);
}
@@ -1243,7 +1342,7 @@ void X86_64Assembler::testq(CpuRegister reg1, CpuRegister reg2) {
void X86_64Assembler::testq(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(reg);
+ EmitRex64(reg, address);
EmitUint8(0x85);
EmitOperand(reg.LowBits(), address);
}
@@ -1288,6 +1387,14 @@ void X86_64Assembler::andq(CpuRegister dst, CpuRegister src) {
}
+void X86_64Assembler::andq(CpuRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x23);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::orl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1327,6 +1434,14 @@ void X86_64Assembler::orq(CpuRegister dst, CpuRegister src) {
}
+void X86_64Assembler::orq(CpuRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x0B);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::xorl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1365,6 +1480,14 @@ void X86_64Assembler::xorq(CpuRegister dst, const Immediate& imm) {
EmitComplex(6, Operand(dst), imm);
}
+void X86_64Assembler::xorq(CpuRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x33);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
#if 0
void X86_64Assembler::rex(bool force, bool w, Register* r, Register* x, Register* b) {
// REX.WRXB
@@ -1435,7 +1558,7 @@ void X86_64Assembler::addq(CpuRegister reg, const Immediate& imm) {
void X86_64Assembler::addq(CpuRegister dst, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(dst);
+ EmitRex64(dst, address);
EmitUint8(0x03);
EmitOperand(dst.LowBits(), address);
}
@@ -1498,7 +1621,7 @@ void X86_64Assembler::subq(CpuRegister dst, CpuRegister src) {
void X86_64Assembler::subq(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(reg);
+ EmitRex64(reg, address);
EmitUint8(0x2B);
EmitOperand(reg.LowBits() & 7, address);
}
@@ -2182,9 +2305,15 @@ void X86_64Assembler::EmitRex64(CpuRegister dst, const Operand& operand) {
if (dst.NeedsRex()) {
rex |= 0x44; // REX.0R00
}
- if (rex != 0) {
- EmitUint8(rex);
+ EmitUint8(rex);
+}
+
+void X86_64Assembler::EmitRex64(XmmRegister dst, const Operand& operand) {
+ uint8_t rex = 0x48 | operand.rex(); // REX.W000
+ if (dst.NeedsRex()) {
+ rex |= 0x44; // REX.0R00
}
+ EmitUint8(rex);
}
void X86_64Assembler::EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src) {
@@ -2622,7 +2751,7 @@ void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
- // Use out_reg as indicator of NULL
+ // Use out_reg as indicator of null.
in_reg = out_reg;
// TODO: movzwl
movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
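All of the EmitRex64 overloads in this file, including the new XmmRegister one, compose the same REX byte. For reference, the bit layout behind the 0x48 and 0x44 constants (a sketch, not ART code):

#include <cstdint>

// REX prefix layout: 0100 W R X B.
//   W (0x08): 64-bit operand size; 0x40 | 0x08 = 0x48, the EmitRex64 base.
//   R (0x04): extends ModRM.reg; the `rex |= 0x44` above is 0x40 | R.
//   X (0x02): extends SIB.index (folded in via operand.rex()).
//   B (0x01): extends ModRM.rm or SIB.base (also via operand.rex()).
static uint8_t MakeRex(bool w, bool r, bool x, bool b) {
  return static_cast<uint8_t>(0x40 | (w ? 0x08 : 0) | (r ? 0x04 : 0) |
                              (x ? 0x02 : 0) | (b ? 0x01 : 0));
}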
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 16ef70b85e..f5327a8d02 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -328,6 +328,7 @@ class X86_64Assembler FINAL : public Assembler {
void movq(CpuRegister dst, const Address& src);
void movl(CpuRegister dst, const Address& src);
void movq(const Address& dst, CpuRegister src);
+ void movq(const Address& dst, const Immediate& src);
void movl(const Address& dst, CpuRegister src);
void movl(const Address& dst, const Immediate& imm);
@@ -391,14 +392,18 @@ class X86_64Assembler FINAL : public Assembler {
void cvtsi2ss(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
void cvtsi2ss(XmmRegister dst, CpuRegister src, bool is64bit);
+ void cvtsi2ss(XmmRegister dst, const Address& src, bool is64bit);
void cvtsi2sd(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
void cvtsi2sd(XmmRegister dst, CpuRegister src, bool is64bit);
+ void cvtsi2sd(XmmRegister dst, const Address& src, bool is64bit);
void cvtss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtss2sd(XmmRegister dst, XmmRegister src);
+ void cvtss2sd(XmmRegister dst, const Address& src);
void cvtsd2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtsd2ss(XmmRegister dst, XmmRegister src);
+ void cvtsd2ss(XmmRegister dst, const Address& src);
void cvttss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvttss2si(CpuRegister dst, XmmRegister src, bool is64bit);
@@ -408,9 +413,13 @@ class X86_64Assembler FINAL : public Assembler {
void cvtdq2pd(XmmRegister dst, XmmRegister src);
void comiss(XmmRegister a, XmmRegister b);
+ void comiss(XmmRegister a, const Address& b);
void comisd(XmmRegister a, XmmRegister b);
+ void comisd(XmmRegister a, const Address& b);
void ucomiss(XmmRegister a, XmmRegister b);
+ void ucomiss(XmmRegister a, const Address& b);
void ucomisd(XmmRegister a, XmmRegister b);
+ void ucomisd(XmmRegister a, const Address& b);
void roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm);
void roundss(XmmRegister dst, XmmRegister src, const Immediate& imm);
@@ -487,18 +496,21 @@ class X86_64Assembler FINAL : public Assembler {
void andl(CpuRegister reg, const Address& address);
void andq(CpuRegister dst, const Immediate& imm);
void andq(CpuRegister dst, CpuRegister src);
+ void andq(CpuRegister reg, const Address& address);
void orl(CpuRegister dst, const Immediate& imm);
void orl(CpuRegister dst, CpuRegister src);
void orl(CpuRegister reg, const Address& address);
void orq(CpuRegister dst, CpuRegister src);
void orq(CpuRegister dst, const Immediate& imm);
+ void orq(CpuRegister reg, const Address& address);
void xorl(CpuRegister dst, CpuRegister src);
void xorl(CpuRegister dst, const Immediate& imm);
void xorl(CpuRegister reg, const Address& address);
void xorq(CpuRegister dst, const Immediate& imm);
void xorq(CpuRegister dst, CpuRegister src);
+ void xorq(CpuRegister reg, const Address& address);
void addl(CpuRegister dst, CpuRegister src);
void addl(CpuRegister reg, const Immediate& imm);
@@ -699,17 +711,17 @@ class X86_64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
@@ -789,6 +801,7 @@ class X86_64Assembler FINAL : public Assembler {
void EmitRex64(const Operand& operand);
void EmitRex64(CpuRegister dst, CpuRegister src);
void EmitRex64(CpuRegister dst, const Operand& operand);
+ void EmitRex64(XmmRegister dst, const Operand& operand);
void EmitRex64(XmmRegister dst, CpuRegister src);
void EmitRex64(CpuRegister dst, XmmRegister src);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 5ca0373a6a..9e4144ac26 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -667,6 +667,135 @@ TEST_F(AssemblerX86_64Test, Movw) {
DriverStr(expected, "movw");
}
+TEST_F(AssemblerX86_64Test, MovqAddrImm) {
+ GetAssembler()->movq(x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
+ x86_64::Immediate(-5));
+ const char* expected = "movq $-5, 0(%RAX)\n";
+ DriverStr(expected, "movq");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsi2ssAddr) {
+ GetAssembler()->cvtsi2ss(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
+ false);
+ GetAssembler()->cvtsi2ss(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
+ true);
+ const char* expected = "cvtsi2ss 0(%RAX), %xmm0\n"
+ "cvtsi2ssq 0(%RAX), %xmm0\n";
+ DriverStr(expected, "cvtsi2ss");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsi2sdAddr) {
+ GetAssembler()->cvtsi2sd(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
+ false);
+ GetAssembler()->cvtsi2sd(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
+ true);
+ const char* expected = "cvtsi2sd 0(%RAX), %xmm0\n"
+ "cvtsi2sdq 0(%RAX), %xmm0\n";
+ DriverStr(expected, "cvtsi2sd");
+}
+
+TEST_F(AssemblerX86_64Test, CmpqAddr) {
+ GetAssembler()->cmpq(x86_64::CpuRegister(x86_64::R12),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "cmpq 0(%R9), %R12\n";
+ DriverStr(expected, "cmpq");
+}
+
+TEST_F(AssemblerX86_64Test, MovsxdAddr) {
+ GetAssembler()->movsxd(x86_64::CpuRegister(x86_64::R12),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "movslq 0(%R9), %R12\n";
+ DriverStr(expected, "movsxd");
+}
+
+TEST_F(AssemblerX86_64Test, TestqAddr) {
+ GetAssembler()->testq(x86_64::CpuRegister(x86_64::R12),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "testq 0(%R9), %R12\n";
+ DriverStr(expected, "testq");
+}
+
+TEST_F(AssemblerX86_64Test, AddqAddr) {
+ GetAssembler()->addq(x86_64::CpuRegister(x86_64::R12),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "addq 0(%R9), %R12\n";
+ DriverStr(expected, "addq");
+}
+
+TEST_F(AssemblerX86_64Test, SubqAddr) {
+ GetAssembler()->subq(x86_64::CpuRegister(x86_64::R12),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "subq 0(%R9), %R12\n";
+ DriverStr(expected, "subq");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtss2sdAddr) {
+ GetAssembler()->cvtss2sd(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "cvtss2sd 0(%RAX), %xmm0\n";
+ DriverStr(expected, "cvtss2sd");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsd2ssAddr) {
+ GetAssembler()->cvtsd2ss(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "cvtsd2ss 0(%RAX), %xmm0\n";
+ DriverStr(expected, "cvtsd2ss");
+}
+
+TEST_F(AssemblerX86_64Test, ComissAddr) {
+ GetAssembler()->comiss(x86_64::XmmRegister(x86_64::XMM14),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "comiss 0(%RAX), %xmm14\n";
+ DriverStr(expected, "comiss");
+}
+
+TEST_F(AssemblerX86_64Test, ComisdAddr) {
+ GetAssembler()->comisd(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "comisd 0(%R9), %xmm0\n";
+ DriverStr(expected, "comisd");
+}
+
+TEST_F(AssemblerX86_64Test, UComissAddr) {
+ GetAssembler()->ucomiss(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "ucomiss 0(%RAX), %xmm0\n";
+ DriverStr(expected, "ucomiss");
+}
+
+TEST_F(AssemblerX86_64Test, UComisdAddr) {
+ GetAssembler()->ucomisd(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "ucomisd 0(%RAX), %xmm0\n";
+ DriverStr(expected, "ucomisd");
+}
+
+TEST_F(AssemblerX86_64Test, Andq) {
+ GetAssembler()->andq(x86_64::CpuRegister(x86_64::R9),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "andq 0(%RAX), %r9\n";
+ DriverStr(expected, "andq");
+}
+
+TEST_F(AssemblerX86_64Test, Orq) {
+ GetAssembler()->orq(x86_64::CpuRegister(x86_64::R9),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "orq 0(%RAX), %r9\n";
+ DriverStr(expected, "orq");
+}
+
+TEST_F(AssemblerX86_64Test, Xorq) {
+ GetAssembler()->xorq(x86_64::CpuRegister(x86_64::R9),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "xorq 0(%RAX), %r9\n";
+ DriverStr(expected, "xorq");
+}
+
TEST_F(AssemblerX86_64Test, Movsxd) {
DriverStr(RepeatRr(&x86_64::X86_64Assembler::movsxd, "movsxd %{reg2}, %{reg1}"), "movsxd");
}
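As a cross-check on the Addr tests above, the expected byte sequences follow directly from the emit order in assembler_x86_64.cc: mandatory prefix, then REX (if any), then opcode, then ModRM. Worked out for the two cvtsi2ss variants (illustrative constants, derived from the emit functions, not dumped from the assembler):

#include <cstdint>

// cvtsi2ss xmm0, [rax]: ModRM 0x00 = mod 00, reg 000 (xmm0), rm 000 ([rax]).
static const uint8_t kCvtsi2ss32[] = {0xF3, 0x0F, 0x2A, 0x00};        // r/m32 source
static const uint8_t kCvtsi2ssQ[]  = {0xF3, 0x48, 0x0F, 0x2A, 0x00};  // REX.W, r/m64 source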
diff --git a/dalvikvm/dalvikvm.cc b/dalvikvm/dalvikvm.cc
index 7839aa8d4c..85debe4d38 100644
--- a/dalvikvm/dalvikvm.cc
+++ b/dalvikvm/dalvikvm.cc
@@ -31,25 +31,26 @@ namespace art {
// Determine whether or not the specified method is public.
static bool IsMethodPublic(JNIEnv* env, jclass c, jmethodID method_id) {
ScopedLocalRef<jobject> reflected(env, env->ToReflectedMethod(c, method_id, JNI_FALSE));
- if (reflected.get() == NULL) {
+ if (reflected.get() == nullptr) {
fprintf(stderr, "Failed to get reflected method\n");
return false;
}
// We now have a Method instance. We need to call its
// getModifiers() method.
jclass method_class = env->FindClass("java/lang/reflect/Method");
- if (method_class == NULL) {
+ if (method_class == nullptr) {
fprintf(stderr, "Failed to find class java.lang.reflect.Method\n");
return false;
}
jmethodID mid = env->GetMethodID(method_class, "getModifiers", "()I");
- if (mid == NULL) {
+ if (mid == nullptr) {
fprintf(stderr, "Failed to find java.lang.reflect.Method.getModifiers\n");
return false;
}
int modifiers = env->CallIntMethod(reflected.get(), mid);
static const int PUBLIC = 0x0001; // java.lang.reflect.Modifiers.PUBLIC
if ((modifiers & PUBLIC) == 0) {
+ fprintf(stderr, "Modifiers mismatch\n");
return false;
}
return true;
@@ -60,7 +61,7 @@ static int InvokeMain(JNIEnv* env, char** argv) {
// it. Create an array and populate it. Note argv[0] is not
// included.
ScopedLocalRef<jobjectArray> args(env, toStringArray(env, argv + 1));
- if (args.get() == NULL) {
+ if (args.get() == nullptr) {
env->ExceptionDescribe();
return EXIT_FAILURE;
}
@@ -72,14 +73,14 @@ static int InvokeMain(JNIEnv* env, char** argv) {
std::replace(class_name.begin(), class_name.end(), '.', '/');
ScopedLocalRef<jclass> klass(env, env->FindClass(class_name.c_str()));
- if (klass.get() == NULL) {
+ if (klass.get() == nullptr) {
fprintf(stderr, "Unable to locate class '%s'\n", class_name.c_str());
env->ExceptionDescribe();
return EXIT_FAILURE;
}
jmethodID method = env->GetStaticMethodID(klass.get(), "main", "([Ljava/lang/String;)V");
- if (method == NULL) {
+ if (method == nullptr) {
fprintf(stderr, "Unable to find static main(String[]) in '%s'\n", class_name.c_str());
env->ExceptionDescribe();
return EXIT_FAILURE;
@@ -105,7 +106,7 @@ static int InvokeMain(JNIEnv* env, char** argv) {
// Parse arguments. Most of it just gets passed through to the runtime.
// The JNI spec defines a handful of standard arguments.
static int dalvikvm(int argc, char** argv) {
- setvbuf(stdout, NULL, _IONBF, 0);
+ setvbuf(stdout, nullptr, _IONBF, 0);
// Skip over argv[0].
argv++;
@@ -124,8 +125,8 @@ static int dalvikvm(int argc, char** argv) {
//
// [Do we need to catch & handle "-jar" here?]
bool need_extra = false;
- const char* lib = NULL;
- const char* what = NULL;
+ const char* lib = nullptr;
+ const char* what = nullptr;
int curr_opt, arg_idx;
for (curr_opt = arg_idx = 0; arg_idx < argc; arg_idx++) {
if (argv[arg_idx][0] != '-' && !need_extra) {
@@ -171,8 +172,8 @@ static int dalvikvm(int argc, char** argv) {
init_args.ignoreUnrecognized = JNI_FALSE;
// Start the runtime. The current thread becomes the main thread.
- JavaVM* vm = NULL;
- JNIEnv* env = NULL;
+ JavaVM* vm = nullptr;
+ JNIEnv* env = nullptr;
if (JNI_CreateJavaVM(&vm, &env, &init_args) != JNI_OK) {
fprintf(stderr, "Failed to initialize runtime (check log for details)\n");
return EXIT_FAILURE;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 7e32b43e66..2a3a346a4c 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -23,6 +23,7 @@
#include <iostream>
#include <sstream>
#include <string>
+#include <unordered_set>
#include <vector>
#if defined(__linux__) && defined(__arm__)
@@ -444,6 +445,8 @@ class Dex2Oat FINAL {
image_classes_filename_(nullptr),
compiled_classes_zip_filename_(nullptr),
compiled_classes_filename_(nullptr),
+ compiled_methods_zip_filename_(nullptr),
+ compiled_methods_filename_(nullptr),
image_(false),
is_host_(false),
dump_stats_(false),
@@ -563,6 +566,10 @@ class Dex2Oat FINAL {
compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data();
} else if (option.starts_with("--compiled-classes-zip=")) {
compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data();
+ } else if (option.starts_with("--compiled-methods=")) {
+ compiled_methods_filename_ = option.substr(strlen("--compiled-methods=")).data();
+ } else if (option.starts_with("--compiled-methods-zip=")) {
+ compiled_methods_zip_filename_ = option.substr(strlen("--compiled-methods-zip=")).data();
} else if (option.starts_with("--base=")) {
const char* image_base_str = option.substr(strlen("--base=")).data();
char* end;
@@ -1091,8 +1098,8 @@ class Dex2Oat FINAL {
std::string error_msg;
if (image_classes_zip_filename_ != nullptr) {
image_classes_.reset(ReadImageClassesFromZip(image_classes_zip_filename_,
- image_classes_filename_,
- &error_msg));
+ image_classes_filename_,
+ &error_msg));
} else {
image_classes_.reset(ReadImageClassesFromFile(image_classes_filename_));
}
@@ -1102,7 +1109,7 @@ class Dex2Oat FINAL {
return false;
}
} else if (image_) {
- image_classes_.reset(new std::set<std::string>);
+ image_classes_.reset(new std::unordered_set<std::string>);
}
// If --compiled-classes was specified, calculate the full list of classes to compile in the
// image.
@@ -1120,9 +1127,29 @@ class Dex2Oat FINAL {
<< compiled_classes_filename_ << "': " << error_msg;
return false;
}
- } else if (image_) {
+ } else {
compiled_classes_.reset(nullptr); // By default compile everything.
}
+ // If --compiled-methods was specified, read the methods to compile from the given file(s).
+ if (compiled_methods_filename_ != nullptr) {
+ std::string error_msg;
+ if (compiled_methods_zip_filename_ != nullptr) {
+ compiled_methods_.reset(ReadCommentedInputFromZip(compiled_methods_zip_filename_,
+ compiled_methods_filename_,
+ nullptr, // No post-processing.
+ &error_msg));
+ } else {
+ compiled_methods_.reset(ReadCommentedInputFromFile(compiled_methods_filename_,
+ nullptr)); // No post-processing.
+ }
+ if (compiled_methods_.get() == nullptr) {
+ LOG(ERROR) << "Failed to create list of compiled methods from '"
+ << compiled_methods_filename_ << "': " << error_msg;
+ return false;
+ }
+ } else {
+ compiled_methods_.reset(nullptr); // By default compile everything.
+ }
if (boot_image_option_.empty()) {
dex_files_ = Runtime::Current()->GetClassLinker()->GetBootClassPath();
@@ -1257,6 +1284,7 @@ class Dex2Oat FINAL {
image_,
image_classes_.release(),
compiled_classes_.release(),
+ nullptr,
thread_count_,
dump_stats_,
dump_passes_,
@@ -1490,7 +1518,7 @@ class Dex2Oat FINAL {
static size_t OpenDexFiles(const std::vector<const char*>& dex_filenames,
const std::vector<const char*>& dex_locations,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "OpenDexFiles out-param is NULL";
+ DCHECK(dex_files != nullptr) << "OpenDexFiles out-param is nullptr";
size_t failure_count = 0;
for (size_t i = 0; i < dex_filenames.size(); i++) {
const char* dex_filename = dex_filenames[i];
@@ -1531,7 +1559,7 @@ class Dex2Oat FINAL {
static void OpenClassPathFiles(const std::string& class_path,
std::vector<const DexFile*> dex_files,
std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
- DCHECK(opened_dex_files != nullptr) << "OpenClassPathFiles out-param is NULL";
+ DCHECK(opened_dex_files != nullptr) << "OpenClassPathFiles out-param is nullptr";
std::vector<std::string> parsed;
Split(class_path, ':', &parsed);
// Take Locks::mutator_lock_ so that lock ordering on the ClassLinker::dex_lock_ is maintained.
@@ -1615,58 +1643,88 @@ class Dex2Oat FINAL {
}
// Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
- static std::set<std::string>* ReadImageClassesFromFile(const char* image_classes_filename) {
- std::unique_ptr<std::ifstream> image_classes_file(new std::ifstream(image_classes_filename,
- std::ifstream::in));
- if (image_classes_file.get() == nullptr) {
- LOG(ERROR) << "Failed to open image classes file " << image_classes_filename;
- return nullptr;
- }
- std::unique_ptr<std::set<std::string>> result(ReadImageClasses(*image_classes_file));
- image_classes_file->close();
- return result.release();
+ static std::unordered_set<std::string>* ReadImageClassesFromFile(
+ const char* image_classes_filename) {
+ std::function<std::string(const char*)> process = DotToDescriptor;
+ return ReadCommentedInputFromFile(image_classes_filename, &process);
}
- static std::set<std::string>* ReadImageClasses(std::istream& image_classes_stream) {
- std::unique_ptr<std::set<std::string>> image_classes(new std::set<std::string>);
- while (image_classes_stream.good()) {
- std::string dot;
- std::getline(image_classes_stream, dot);
- if (StartsWith(dot, "#") || dot.empty()) {
- continue;
- }
- std::string descriptor(DotToDescriptor(dot.c_str()));
- image_classes->insert(descriptor);
+ // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
+ static std::unordered_set<std::string>* ReadImageClassesFromZip(
+ const char* zip_filename,
+ const char* image_classes_filename,
+ std::string* error_msg) {
+ std::function<std::string(const char*)> process = DotToDescriptor;
+ return ReadCommentedInputFromZip(zip_filename, image_classes_filename, &process, error_msg);
+ }
+
+ // Read lines from the given file, dropping comments and empty lines. Post-process each line with
+ // the given function.
+ static std::unordered_set<std::string>* ReadCommentedInputFromFile(
+ const char* input_filename, std::function<std::string(const char*)>* process) {
+ std::unique_ptr<std::ifstream> input_file(new std::ifstream(input_filename, std::ifstream::in));
+ if (input_file.get() == nullptr) {
+ LOG(ERROR) << "Failed to open input file " << input_filename;
+ return nullptr;
}
- return image_classes.release();
+ std::unique_ptr<std::unordered_set<std::string>> result(
+ ReadCommentedInputStream(*input_file, process));
+ input_file->close();
+ return result.release();
}
- // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
- static std::set<std::string>* ReadImageClassesFromZip(const char* zip_filename,
- const char* image_classes_filename,
- std::string* error_msg) {
+ // Read lines from the given file from the given zip file, dropping comments and empty lines.
+ // Post-process each line with the given function.
+ static std::unordered_set<std::string>* ReadCommentedInputFromZip(
+ const char* zip_filename,
+ const char* input_filename,
+ std::function<std::string(const char*)>* process,
+ std::string* error_msg) {
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(zip_filename, error_msg));
if (zip_archive.get() == nullptr) {
return nullptr;
}
- std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(image_classes_filename, error_msg));
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(input_filename, error_msg));
if (zip_entry.get() == nullptr) {
- *error_msg = StringPrintf("Failed to find '%s' within '%s': %s", image_classes_filename,
+ *error_msg = StringPrintf("Failed to find '%s' within '%s': %s", input_filename,
zip_filename, error_msg->c_str());
return nullptr;
}
- std::unique_ptr<MemMap> image_classes_file(zip_entry->ExtractToMemMap(zip_filename,
- image_classes_filename,
- error_msg));
- if (image_classes_file.get() == nullptr) {
- *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", image_classes_filename,
+ std::unique_ptr<MemMap> input_file(zip_entry->ExtractToMemMap(zip_filename,
+ input_filename,
+ error_msg));
+ if (input_file.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", input_filename,
zip_filename, error_msg->c_str());
return nullptr;
}
- const std::string image_classes_string(reinterpret_cast<char*>(image_classes_file->Begin()),
- image_classes_file->Size());
- std::istringstream image_classes_stream(image_classes_string);
- return ReadImageClasses(image_classes_stream);
+ const std::string input_string(reinterpret_cast<char*>(input_file->Begin()),
+ input_file->Size());
+ std::istringstream input_stream(input_string);
+ return ReadCommentedInputStream(input_stream, process);
+ }
+
+ // Read lines from the given stream, dropping comments and empty lines. Post-process each line
+ // with the given function.
+ static std::unordered_set<std::string>* ReadCommentedInputStream(
+ std::istream& in_stream,
+ std::function<std::string(const char*)>* process) {
+ std::unique_ptr<std::unordered_set<std::string>> image_classes(
+ new std::unordered_set<std::string>);
+ while (in_stream.good()) {
+ std::string dot;
+ std::getline(in_stream, dot);
+ if (StartsWith(dot, "#") || dot.empty()) {
+ continue;
+ }
+ if (process != nullptr) {
+ std::string descriptor((*process)(dot.c_str()));
+ image_classes->insert(descriptor);
+ } else {
+ image_classes->insert(dot);
+ }
+ }
+ return image_classes.release();
}
void LogCompletionTime() {
@@ -1720,8 +1778,11 @@ class Dex2Oat FINAL {
const char* image_classes_filename_;
const char* compiled_classes_zip_filename_;
const char* compiled_classes_filename_;
- std::unique_ptr<std::set<std::string>> image_classes_;
- std::unique_ptr<std::set<std::string>> compiled_classes_;
+ const char* compiled_methods_zip_filename_;
+ const char* compiled_methods_filename_;
+ std::unique_ptr<std::unordered_set<std::string>> image_classes_;
+ std::unique_ptr<std::unordered_set<std::string>> compiled_classes_;
+ std::unique_ptr<std::unordered_set<std::string>> compiled_methods_;
bool image_;
std::unique_ptr<ImageWriter> image_writer_;
bool is_host_;
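
The dex2oat hunk above generalizes the old ReadImageClasses() parser into a family of ReadCommentedInput{FromFile,FromZip,Stream} helpers that share one line-parsing loop. For readers who want the parsing behavior in isolation, here is a minimal standalone sketch; DotToDescriptorSketch is an illustrative stand-in for ART's real DotToDescriptor() (which also handles arrays and primitive names), not code from this change.

    #include <algorithm>
    #include <functional>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <unordered_set>

    // Stand-in for ART's DotToDescriptor(): "java.lang.Object" -> "Ljava/lang/Object;".
    static std::string DotToDescriptorSketch(const char* class_name) {
      std::string descriptor(class_name);
      std::replace(descriptor.begin(), descriptor.end(), '.', '/');
      return "L" + descriptor + ";";
    }

    // Mirrors ReadCommentedInputStream(): drop '#' comments and blank lines,
    // then post-process each surviving line if a processor was supplied.
    static std::unordered_set<std::string> ReadCommentedInput(
        std::istream& in, std::function<std::string(const char*)>* process) {
      std::unordered_set<std::string> result;
      std::string line;
      while (std::getline(in, line)) {
        if (line.empty() || line[0] == '#') {
          continue;
        }
        result.insert(process != nullptr ? (*process)(line.c_str()) : line);
      }
      return result;
    }

    int main() {
      std::istringstream input("# image classes\njava.lang.Object\n\njava.lang.String\n");
      std::function<std::string(const char*)> process = DotToDescriptorSketch;
      for (const std::string& descriptor : ReadCommentedInput(input, &process)) {
        std::cout << descriptor << "\n";  // Ljava/lang/Object; and Ljava/lang/String;
      }
      return 0;
    }
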
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index c05c3ed8ec..6334717fe5 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -42,7 +42,7 @@ Disassembler* Disassembler::Create(InstructionSet instruction_set, DisassemblerO
return new x86::DisassemblerX86(options, true);
} else {
UNIMPLEMENTED(FATAL) << "no disassembler for " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 34a4c14f07..1056fe15e0 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -97,7 +97,8 @@ class ImgDiagDumper {
{
struct stat sts;
- std::string proc_pid_str = StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ std::string proc_pid_str =
+ StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
if (stat(proc_pid_str.c_str(), &sts) == -1) {
os << "Process does not exist";
return false;
@@ -144,7 +145,8 @@ class ImgDiagDumper {
const size_t pointer_size = InstructionSetPointerSize(
Runtime::Current()->GetInstructionSet());
- std::string file_name = StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ std::string file_name =
+ StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
size_t boot_map_size = boot_map.end - boot_map.start;
@@ -197,8 +199,8 @@ class ImgDiagDumper {
return false;
}
- std::string page_map_file_name = StringPrintf("/proc/%ld/pagemap",
- static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ std::string page_map_file_name = StringPrintf(
+ "/proc/%ld/pagemap", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
auto page_map_file = std::unique_ptr<File>(OS::OpenFileForReading(page_map_file_name.c_str()));
if (page_map_file == nullptr) {
os << "Failed to open " << page_map_file_name << " for reading: " << strerror(errno);
@@ -226,8 +228,10 @@ class ImgDiagDumper {
return false;
}
- std::set<size_t> dirty_page_set_remote; // Set of the remote virtual page indices that are dirty
- std::set<size_t> dirty_page_set_local; // Set of the local virtual page indices that are dirty
+ // Set of the remote virtual page indices that are dirty
+ std::set<size_t> dirty_page_set_remote;
+ // Set of the local virtual page indices that are dirty
+ std::set<size_t> dirty_page_set_local;
size_t different_int32s = 0;
size_t different_bytes = 0;
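
The dirty-page sets above are populated by walking /proc/<pid>/pagemap, which exposes one little-endian 64-bit entry per virtual page (bit 63 flags the page as present; bits 0-54 carry the page frame number when it is). Below is a minimal sketch of the indexing arithmetic only, with error handling trimmed; imgdiag layers /proc/kpageflags and /proc/kpagecount lookups on top of this to classify pages.

    #include <cstdint>
    #include <unistd.h>

    // One pagemap entry per virtual page, indexed by vaddr / page_size.
    static bool ReadPagemapEntry(int pagemap_fd, uintptr_t vaddr, uint64_t* entry) {
      const long page_size = sysconf(_SC_PAGESIZE);
      const off_t offset = static_cast<off_t>(vaddr / page_size) * sizeof(uint64_t);
      return pread(pagemap_fd, entry, sizeof(*entry), offset) ==
             static_cast<ssize_t>(sizeof(*entry));
    }
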
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index a36e5b1ba0..f2e35af097 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -92,8 +92,7 @@ class OatSymbolizer FINAL : public CodeOutput {
elf_output_ = OS::CreateEmptyFile(output_name_.c_str());
- builder_.reset(new ElfBuilder<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
- Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr>(
+ builder_.reset(new ElfBuilder<ElfTypes32>(
this,
elf_output_,
oat_file_->GetOatHeader().GetInstructionSet(),
@@ -145,7 +144,7 @@ class OatSymbolizer FINAL : public CodeOutput {
std::vector<const OatFile::OatDexFile*> oat_dex_files = oat_file_->GetOatDexFiles();
for (size_t i = 0; i < oat_dex_files.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files[i];
- CHECK(oat_dex_file != NULL);
+ CHECK(oat_dex_file != nullptr);
WalkOatDexFile(oat_dex_file, callback);
}
}
@@ -270,8 +269,7 @@ class OatSymbolizer FINAL : public CodeOutput {
pretty_name = "[Dedup]" + pretty_name;
}
- ElfSymtabBuilder<Elf32_Word, Elf32_Sword, Elf32_Addr,
- Elf32_Sym, Elf32_Shdr>* symtab = builder_->GetSymtabBuilder();
+ ElfSymtabBuilder<ElfTypes32>* symtab = builder_->GetSymtabBuilder();
symtab->AddSymbol(pretty_name, &builder_->GetTextBuilder(),
oat_method.GetCodeOffset() - oat_file_->GetOatHeader().GetExecutableOffset(),
@@ -300,8 +298,7 @@ class OatSymbolizer FINAL : public CodeOutput {
}
const OatFile* oat_file_;
- std::unique_ptr<ElfBuilder<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
- Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr> > builder_;
+ std::unique_ptr<ElfBuilder<ElfTypes32> > builder_;
File* elf_output_;
std::unordered_map<uint32_t, uint32_t> state_;
const std::string output_name_;
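
The oatdump edits above ride on an ElfBuilder refactor that folds its eight ELF type parameters into a single traits parameter. The sketch below shows the presumable shape of such a bundle; the member names are assumptions for illustration, not quoted from the change.

    #include <elf.h>

    // Assumed shape of the ElfTypes32 traits bundle (illustrative only).
    struct ElfTypes32Sketch {
      typedef Elf32_Word  Word;
      typedef Elf32_Sword Sword;
      typedef Elf32_Addr  Addr;
      typedef Elf32_Dyn   Dyn;
      typedef Elf32_Sym   Sym;
      typedef Elf32_Ehdr  Ehdr;
      typedef Elf32_Phdr  Phdr;
      typedef Elf32_Shdr  Shdr;
    };

    // Templates then take one parameter instead of eight:
    template <typename ElfTypes>
    class ElfBuilderSketch {
      typedef typename ElfTypes::Word Elf_Word;  // ...and so on for each type.
    };
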
diff --git a/runtime/Android.mk b/runtime/Android.mk
index d3488fc71f..86201ba308 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -100,11 +100,13 @@ LIBART_COMMON_SRC_FILES := \
linear_alloc.cc \
mem_map.cc \
memory_region.cc \
+ mirror/abstract_method.cc \
mirror/art_method.cc \
mirror/array.cc \
mirror/class.cc \
mirror/dex_cache.cc \
mirror/field.cc \
+ mirror/method.cc \
mirror/object.cc \
mirror/reference.cc \
mirror/stack_trace_element.cc \
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index 5bdeda7f81..a58aecbc6b 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -80,7 +80,7 @@ class ArmContext : public Context {
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to register locations, initialized to NULL or the specific registers below.
+ // Pointers to register locations, initialized to null or the specific registers below.
uintptr_t* gprs_[kNumberOfCoreRegisters];
uint32_t* fprs_[kNumberOfSRegisters];
// Hold values for sp and pc if they are not located within a stack frame.
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 055b5ab1c6..f14dfc27ae 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -143,11 +143,16 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 3e8b36719e..d84cb5369b 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -56,7 +56,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is nullptr.
+ CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
sc->arm_r0 = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
sc->arm_r1 = 1;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 9bd8ba7921..8f6162ffa0 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -320,7 +320,7 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr
* The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
* of the target Method* in r0 and method->code_ in r1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will bea pending exception in the
+ * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -359,7 +359,7 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
* Quick invocation stub internal.
* On entry:
* r0 = method pointer
- * r1 = argument array or NULL for no argument methods
+ * r1 = argument array or null for no argument methods
* r2 = size of argument array in bytes
* r3 = (managed) thread pointer
* [sp] = JValue* result
@@ -409,7 +409,7 @@ ENTRY art_quick_invoke_stub_internal
add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
bl memcpy @ memcpy (dest, src, bytes)
mov ip, #0 @ set ip to 0
- str ip, [sp] @ store NULL for method* at bottom of frame
+ str ip, [sp] @ store null for method* at bottom of frame
ldr ip, [r11, #48] @ load fp register argument array pointer
vldm ip, {s0-s15} @ copy s0 - s15
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index f4867797f0..0383ad628a 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -80,7 +80,7 @@ class Arm64Context : public Context {
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to register locations, initialized to NULL or the specific registers below.
+ // Pointers to register locations, initialized to null or the specific registers below.
uintptr_t* gprs_[kNumberOfXRegisters];
uint64_t * fprs_[kNumberOfDRegisters];
// Hold values for sp and pc if they are not located within a stack frame.
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 6c787e312f..4b12f00d0d 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -105,7 +105,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pUnlockObject = art_quick_unlock_object;
// Math
- // TODO nullptr entrypoints not needed for ARM64 - generate inline.
+ // TODO null entrypoints not needed for ARM64 - generate inline.
qpoints->pCmpgDouble = nullptr;
qpoints->pCmpgFloat = nullptr;
qpoints->pCmplDouble = nullptr;
@@ -135,11 +135,16 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index c914d85db2..0448c760ee 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -45,7 +45,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is nullptr.
+ CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
sc->regs[0] = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
sc->regs[1] = 1;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 4079436c47..cbd4b7c337 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -466,7 +466,7 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr
* The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
* of the target Method* in x0 and method->code_ in x1.
*
- * If unsuccessful, the helper will return NULL/????. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/????. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -565,7 +565,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
// W2 - args length
// X9 - destination address.
// W10 - temporary
- add x9, sp, #4 // Destination address is bottom of stack + NULL.
+ add x9, sp, #4 // Destination address is bottom of stack + null.
// Use \@ to differentiate between macro invocations.
.LcopyParams\@:
@@ -579,7 +579,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
.LendCopyParams\@:
- // Store NULL into StackReference<Method>* at bottom of frame.
+ // Store null into StackReference<Method>* at bottom of frame.
str wzr, [sp]
#if (STACK_REFERENCE_SIZE != 4)
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index cbad3f963a..d01b95e5f6 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -79,7 +79,7 @@ class MipsContext : public Context {
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to registers in the stack, initialized to NULL except for the special cases below.
+ // Pointers to registers in the stack, initialized to null except for the special cases below.
uintptr_t* gprs_[kNumberOfCoreRegisters];
uint32_t* fprs_[kNumberOfFRegisters];
// Hold values for sp and ra (return address) if they are not located within a stack frame.
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index e3ec27c100..a980a86135 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -199,7 +199,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
static_assert(IsDirectEntrypoint(kQuickD2iz), "Direct C stub not marked direct.");
qpoints->pF2iz = art_f2i;
static_assert(IsDirectEntrypoint(kQuickF2iz), "Direct C stub not marked direct.");
- qpoints->pIdivmod = NULL;
+ qpoints->pIdivmod = nullptr;
qpoints->pD2l = art_d2l;
static_assert(IsDirectEntrypoint(kQuickD2l), "Direct C stub not marked direct.");
qpoints->pF2l = art_f2l;
@@ -228,19 +228,24 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeDirectTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeInterfaceTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeStaticTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeSuperTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeVirtualTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 0c2250eab8..622c48f55f 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -446,7 +446,7 @@ END art_quick_throw_no_such_method
* The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting
* of the target Method* in $v0 and method->code_ in $v1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -484,7 +484,7 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
* Invocation stub for quick code.
* On entry:
* a0 = method pointer
- * a1 = argument array or NULL for no argument methods
+ * a1 = argument array or null for no argument methods
* a2 = size of argument array in bytes
* a3 = (managed) thread pointer
* [sp + 16] = JValue* result
@@ -520,7 +520,7 @@ ENTRY art_quick_invoke_stub
lw $a3, 12($sp) # copy arg value for a3
lw $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
- sw $zero, 0($sp) # store NULL for method* at bottom of frame
+ sw $zero, 0($sp) # store null for method* at bottom of frame
move $sp, $fp # restore the stack
lw $s0, 0($sp)
.cfi_restore 16
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index 2cc2b8df3e..ebc036cf98 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -79,7 +79,7 @@ class Mips64Context : public Context {
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to registers in the stack, initialized to NULL except for the special cases below.
+ // Pointers to registers in the stack, initialized to null except for the special cases below.
uintptr_t* gprs_[kNumberOfGpuRegisters];
uint64_t* fprs_[kNumberOfFpuRegisters];
// Hold values for sp and ra (return address) if they are not located within a stack frame.
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 4a3bf02f2f..b328708409 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -135,15 +135,15 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pL2f = art_l2f;
qpoints->pD2iz = art_d2i;
qpoints->pF2iz = art_f2i;
- qpoints->pIdivmod = NULL;
+ qpoints->pIdivmod = nullptr;
qpoints->pD2l = art_d2l;
qpoints->pF2l = art_f2l;
qpoints->pLdiv = artLdiv;
qpoints->pLmod = artLmod;
qpoints->pLmul = artLmul;
- qpoints->pShlLong = NULL;
- qpoints->pShrLong = NULL;
- qpoints->pUshrLong = NULL;
+ qpoints->pShlLong = nullptr;
+ qpoints->pShrLong = nullptr;
+ qpoints->pUshrLong = nullptr;
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
@@ -154,11 +154,16 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 3d502e6b12..bf18dd5f82 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -503,7 +503,7 @@ END art_quick_throw_no_such_method
* The helper will attempt to locate the target and return a 128-bit result in $v0/$v1 consisting
* of the target Method* in $v0 and method->code_ in $v1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the ra
@@ -656,7 +656,7 @@ call_fn:
# call method (a0 and a1 have been untouched)
lwu $a1, 0($a1) # make a1 = this ptr
sw $a1, 4($sp) # copy this ptr (skip 4 bytes for method*)
- sw $zero, 0($sp) # store NULL for method* at bottom of frame
+ sw $zero, 0($sp) # store null for method* at bottom of frame
ld $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
jalr $t9 # call the method
nop
@@ -758,7 +758,7 @@ ENTRY art_quick_invoke_static_stub
call_sfn:
# call method (a0 has been untouched)
- sw $zero, 0($sp) # store NULL for method* at bottom of frame
+ sw $zero, 0($sp) # store null for method* at bottom of frame
ld $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
jalr $t9 # call the method
nop
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 9cccf7c761..0d9a88830b 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -117,7 +117,7 @@ class StubTest : public CommonRuntimeTest {
"add sp, sp, #20\n\t"
"blx r3\n\t" // Call the stub
- "add sp, sp, #12\n\t" // Pop nullptr and padding
+ "add sp, sp, #12\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -12\n\t"
"pop {r1-r12, lr}\n\t" // Restore state
".cfi_adjust_cfa_offset -52\n\t"
@@ -269,7 +269,7 @@ class StubTest : public CommonRuntimeTest {
"pushq (%%rsp)\n\t" // & 16B alignment padding
".cfi_adjust_cfa_offset 16\n\t"
"call *%%rax\n\t" // Call the stub
- "addq $16, %%rsp\n\t" // Pop nullptr and padding
+ "addq $16, %%rsp\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -16\n\t"
: "=a" (result)
// Use the result from rax
@@ -344,7 +344,7 @@ class StubTest : public CommonRuntimeTest {
"add sp, sp, #24\n\t"
"blx r3\n\t" // Call the stub
- "add sp, sp, #12\n\t" // Pop nullptr and padding
+ "add sp, sp, #12\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -12\n\t"
"pop {r1-r12, lr}\n\t" // Restore state
".cfi_adjust_cfa_offset -52\n\t"
@@ -495,7 +495,7 @@ class StubTest : public CommonRuntimeTest {
"pushq (%%rsp)\n\t" // & 16B alignment padding
".cfi_adjust_cfa_offset 16\n\t"
"call *%%rbx\n\t" // Call the stub
- "addq $16, %%rsp\n\t" // Pop nullptr and padding
+ "addq $16, %%rsp\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -16\n\t"
: "=a" (result)
// Use the result from rax
@@ -1032,7 +1032,7 @@ TEST_F(StubTest, AllocObject) {
}
{
- // We can use nullptr in the second argument as we do not need a method here (not used in
+ // We can use null in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
@@ -1046,7 +1046,7 @@ TEST_F(StubTest, AllocObject) {
}
{
- // We can use nullptr in the second argument as we do not need a method here (not used in
+ // We can use null in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
@@ -1166,7 +1166,7 @@ TEST_F(StubTest, AllocObjectArray) {
}
{
- // We can use nullptr in the second argument as we do not need a method here (not used in
+ // We can use null in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
reinterpret_cast<size_t>(nullptr),
@@ -1788,9 +1788,9 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
JNIEnv* env = Thread::Current()->GetJniEnv();
jclass jc = env->FindClass("AllFields");
- CHECK(jc != NULL);
+ CHECK(jc != nullptr);
jobject o = env->AllocObject(jc);
- CHECK(o != NULL);
+ CHECK(o != nullptr);
ScopedObjectAccess soa(self);
StackHandleScope<4> hs(self);
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index ace4670136..a783d48ed2 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -92,7 +92,7 @@ class X86Context : public Context {
XMM7_0, XMM7_1,
kNumberOfFloatRegisters};
- // Pointers to register locations. Values are initialized to NULL or the special registers below.
+ // Pointers to register locations. Values are initialized to null or the special registers below.
uintptr_t* gprs_[kNumberOfCpuRegisters];
uint32_t* fprs_[kNumberOfFloatRegisters];
// Hold values for esp and eip if they are not located within a stack frame. EIP is somewhat
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index c0121736eb..a371632367 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -96,17 +96,6 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pUnlockObject = art_quick_unlock_object;
// Math
- // points->pCmpgDouble = NULL; // Not needed on x86.
- // points->pCmpgFloat = NULL; // Not needed on x86.
- // points->pCmplDouble = NULL; // Not needed on x86.
- // points->pCmplFloat = NULL; // Not needed on x86.
- // qpoints->pFmod = NULL; // Not needed on x86.
- // qpoints->pL2d = NULL; // Not needed on x86.
- // qpoints->pFmodf = NULL; // Not needed on x86.
- // qpoints->pL2f = NULL; // Not needed on x86.
- // points->pD2iz = NULL; // Not needed on x86.
- // points->pF2iz = NULL; // Not needed on x86.
- // qpoints->pIdivmod = NULL; // Not needed on x86.
qpoints->pD2l = art_quick_d2l;
qpoints->pF2l = art_quick_f2l;
qpoints->pLdiv = art_quick_ldiv;
@@ -125,11 +114,16 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 27a4adf032..2de69aa679 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -191,6 +191,7 @@ static uint32_t GetInstructionSize(const uint8_t* pc) {
break;
case 0x81: // group 1, word immediate.
+ case 0xc7: // mov
modrm = *pc++;
has_modrm = true;
immediate_size = operand_size_prefix ? 2 : 4;
@@ -239,7 +240,7 @@ void FaultManager::HandleNestedSignal(int, siginfo_t*, void* context) {
// this code the same for both 32 and 64 bit.
Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is nullptr.
+ CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
uc->CTX_JMP_BUF = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
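
The first fault_handler_x86.cc hunk above teaches GetInstructionSize() about opcode 0xc7 (mov r/m, imm), so the handler can compute the length of a faulting mov and step past it. As a rough worked example of that one case, assuming 32-bit addressing with no SIB byte (the real decoder covers those as well):

    #include <cstdint>

    // Length of "c7 /0: mov r/m, imm" = opcode + ModRM + displacement + immediate.
    static uint32_t MovImmLengthSketch(const uint8_t* pc, bool operand_size_prefix) {
      uint32_t length = 2;                  // Opcode byte plus ModRM byte.
      const uint8_t modrm = pc[1];
      const uint8_t mod = modrm >> 6;
      const uint8_t rm = modrm & 7;
      if (mod == 1) {
        length += 1;                        // disp8 follows the ModRM byte.
      } else if (mod == 2 || (mod == 0 && rm == 5)) {
        length += 4;                        // disp32 (or absolute address).
      }
      length += operand_size_prefix ? 2 : 4;  // imm16 with a 0x66 prefix, else imm32.
      return length;  // e.g. "c7 45 fc 00 00 00 00" (movl $0, -4(%ebp)) -> 7 bytes.
    }
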
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index c5a020a2da..c5d8b8fc9b 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -285,7 +285,7 @@ TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromC
* The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
* of the target Method* in r0 and method->code_ in r1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will bea pending exception in the
+ * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -408,7 +408,7 @@ END_MACRO
* On entry:
* [sp] = return address
* [sp + 4] = method pointer
- * [sp + 8] = argument array or NULL for no argument methods
+ * [sp + 8] = argument array or null for no argument methods
* [sp + 12] = size of argument array in bytes
* [sp + 16] = (managed) thread pointer
* [sp + 20] = JValue* result
@@ -442,7 +442,7 @@ DEFINE_FUNCTION art_quick_invoke_stub
subl LITERAL(20), %ebx // remove space for return address, ebx, ebp, esi and edi
subl %ebx, %esp // reserve stack space for argument array
- movl LITERAL(0), (%esp) // store NULL for method*
+ movl LITERAL(0), (%esp) // store null for method*
// Copy arg array into stack.
movl 28(%ebp), %ecx // ECX = size of args
@@ -506,7 +506,7 @@ END_FUNCTION art_quick_invoke_stub
* On entry:
* [sp] = return address
* [sp + 4] = method pointer
- * [sp + 8] = argument array or NULL for no argument methods
+ * [sp + 8] = argument array or null for no argument methods
* [sp + 12] = size of argument array in bytes
* [sp + 16] = (managed) thread pointer
* [sp + 20] = JValue* result
@@ -539,7 +539,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
subl LITERAL(20), %ebx // remove space for return address, ebx, ebp, esi and edi
subl %ebx, %esp // reserve stack space for argument array
- movl LITERAL(0), (%esp) // store NULL for method*
+ movl LITERAL(0), (%esp) // store null for method*
// Copy arg array into stack.
movl 28(%ebp), %ecx // ECX = size of args
@@ -1352,7 +1352,7 @@ DEFINE_FUNCTION art_quick_resolution_trampoline
call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
movl %eax, %edi // remember code pointer in EDI
addl LITERAL(16), %esp // pop arguments
- test %eax, %eax // if code pointer is NULL goto deliver pending exception
+ test %eax, %eax // if code pointer is null goto deliver pending exception
jz 1f
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME_AND_JUMP
1:
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index d03aa45237..c9b0ff6b72 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -79,7 +79,7 @@ class X86_64Context : public Context {
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to register locations. Values are initialized to NULL or the special registers below.
+ // Pointers to register locations. Values are initialized to null or the special registers below.
uintptr_t* gprs_[kNumberOfCpuRegisters];
uint64_t* fprs_[kNumberOfFloatRegisters];
// Hold values for rsp and rip if they are not located within a stack frame. RIP is somewhat
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 3bc0dc41bd..0cddec4102 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -101,17 +101,6 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pUnlockObject = art_quick_unlock_object;
// Math
- // points->pCmpgDouble = NULL; // Not needed on x86.
- // points->pCmpgFloat = NULL; // Not needed on x86.
- // points->pCmplDouble = NULL; // Not needed on x86.
- // points->pCmplFloat = NULL; // Not needed on x86.
- // qpoints->pFmod = NULL; // Not needed on x86.
- // qpoints->pL2d = NULL; // Not needed on x86.
- // qpoints->pFmodf = NULL; // Not needed on x86.
- // qpoints->pL2f = NULL; // Not needed on x86.
- // points->pD2iz = NULL; // Not needed on x86.
- // points->pF2iz = NULL; // Not needed on x86.
- // qpoints->pIdivmod = NULL; // Not needed on x86.
qpoints->pD2l = art_d2l;
qpoints->pF2l = art_f2l;
qpoints->pLdiv = art_quick_ldiv;
@@ -122,7 +111,6 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pUshrLong = art_quick_lushr;
// Intrinsics
- // qpoints->pIndexOf = NULL; // Not needed on x86.
qpoints->pStringCompareTo = art_quick_string_compareto;
qpoints->pMemcpy = art_quick_memcpy;
@@ -130,11 +118,16 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index ce21f01cfa..8185deb9e2 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -348,7 +348,7 @@ TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromC
* The helper will attempt to locate the target and return a 128-bit result in rax/rdx consisting
* of the target Method* in rax and method->code_ in rdx.
*
- * If unsuccessful, the helper will return NULL/????. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/????. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the return
@@ -506,7 +506,7 @@ DEFINE_FUNCTION art_quick_invoke_stub
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
- movl LITERAL(0), (%rsp) // Store NULL for method*
+ movl LITERAL(0), (%rsp) // Store null for method*
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // rax := method to be called
@@ -554,7 +554,7 @@ END_FUNCTION art_quick_invoke_stub
* On entry:
* [sp] = return address
* rdi = method pointer
- * rsi = argument array or NULL if no arguments.
+ * rsi = argument array or null if no arguments.
* rdx = size of argument array in bytes
* rcx = (managed) thread pointer
* r8 = JValue* result
@@ -600,7 +600,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
- movl LITERAL(0), (%rsp) // Store NULL for method*
+ movl LITERAL(0), (%rsp) // Store null for method*
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // rax := method to be called
@@ -1302,7 +1302,7 @@ DEFINE_FUNCTION art_quick_resolution_trampoline
movq %rax, %r10 // Remember returned code pointer in R10.
movq (%rsp), %rdi // Load called method into RDI.
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
- testq %r10, %r10 // If code pointer is NULL goto deliver pending exception.
+ testq %r10, %r10 // If code pointer is null goto deliver pending exception.
jz 1f
jmp *%r10 // Tail call into method.
1:
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index a2625e2d42..4991ad7513 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -80,7 +80,7 @@ inline void ArtField::Set32(mirror::Object* object, uint32_t new_value) {
}
inline uint64_t ArtField::Get64(mirror::Object* object) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
return object->GetField64Volatile(GetOffset());
@@ -90,7 +90,7 @@ inline uint64_t ArtField::Get64(mirror::Object* object) {
template<bool kTransactionActive>
inline void ArtField::Set64(mirror::Object* object, uint64_t new_value) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
object->SetField64Volatile<kTransactionActive>(GetOffset(), new_value);
@@ -100,7 +100,7 @@ inline void ArtField::Set64(mirror::Object* object, uint64_t new_value) {
}
inline mirror::Object* ArtField::GetObj(mirror::Object* object) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
return object->GetFieldObjectVolatile<mirror::Object>(GetOffset());
@@ -110,7 +110,7 @@ inline mirror::Object* ArtField::GetObj(mirror::Object* object) {
template<bool kTransactionActive>
inline void ArtField::SetObj(mirror::Object* object, mirror::Object* new_value) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
object->SetFieldObjectVolatile<kTransactionActive>(GetOffset(), new_value);
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 16c46f0b9f..c0620bf3c3 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -158,7 +158,7 @@ class ArtField {
return (GetAccessFlags() & kAccVolatile) != 0;
}
- // Returns an instance field with this offset in the given class or nullptr if not found.
+ // Returns an instance field with this offset in the given class or null if not found.
static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index c3e24a7912..65cb02839a 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -79,6 +79,32 @@ bool BitVector::SameBitsSet(const BitVector *src) const {
return (memcmp(storage_, src->GetRawStorage(), our_highest_index * kWordBytes) == 0);
}
+bool BitVector::IsSubsetOf(const BitVector *other) const {
+ int this_highest = GetHighestBitSet();
+ int other_highest = other->GetHighestBitSet();
+
+ // If the highest bit set is -1, this vector is empty and trivially a subset.
+ if (this_highest < 0) {
+ return true;
+ }
+
+ // If this vector's highest set bit is above the other's, it cannot be a subset.
+ if (this_highest > other_highest) {
+ return false;
+ }
+
+ // Compare each 32-bit word.
+ size_t this_highest_index = BitsToWords(this_highest + 1);
+ for (size_t i = 0; i < this_highest_index; ++i) {
+ uint32_t this_storage = storage_[i];
+ uint32_t other_storage = other->storage_[i];
+ if ((this_storage | other_storage) != other_storage) {
+ return false;
+ }
+ }
+ return true;
+}
+
void BitVector::Intersect(const BitVector* src) {
uint32_t src_storage_size = src->storage_size_;
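
IsSubsetOf() above relies on the identity A ⊆ B ⇔ A ∪ B = B, applied one 32-bit storage word at a time; words beyond this vector's highest set bit are all zero, so the loop can stop early. A tiny sanity check of the per-word test:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t a = 0x6;   // bits {1, 2}
      const uint32_t b = 0xe;   // bits {1, 2, 3}
      assert((a | b) == b);     // {1,2} is a subset of {1,2,3}.
      const uint32_t c = 0x1;   // bit {0}
      assert((c | b) != b);     // {0} is not: bit 0 is missing from b.
      return 0;
    }
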
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index 557a2ec110..be4d363bf5 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -173,6 +173,8 @@ class BitVector {
*/
bool SameBitsSet(const BitVector *src) const;
+ bool IsSubsetOf(const BitVector *other) const;
+
// Count the number of bits that are set.
uint32_t NumSetBits() const;
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
index fe3313d122..c51b9b0570 100644
--- a/runtime/base/bit_vector_test.cc
+++ b/runtime/base/bit_vector_test.cc
@@ -167,4 +167,48 @@ TEST(BitVector, UnionIfNotIn) {
}
}
+TEST(BitVector, Subset) {
+ {
+ BitVector first(2, true, Allocator::GetMallocAllocator());
+ BitVector second(5, true, Allocator::GetMallocAllocator());
+
+ EXPECT_TRUE(first.IsSubsetOf(&second));
+ second.SetBit(4);
+ EXPECT_TRUE(first.IsSubsetOf(&second));
+ }
+
+ {
+ BitVector first(5, true, Allocator::GetMallocAllocator());
+ BitVector second(5, true, Allocator::GetMallocAllocator());
+
+ first.SetBit(5);
+ EXPECT_FALSE(first.IsSubsetOf(&second));
+ second.SetBit(4);
+ EXPECT_FALSE(first.IsSubsetOf(&second));
+ }
+
+ {
+ BitVector first(5, true, Allocator::GetMallocAllocator());
+ BitVector second(5, true, Allocator::GetMallocAllocator());
+
+ first.SetBit(16);
+ first.SetBit(32);
+ first.SetBit(48);
+ second.SetBit(16);
+ second.SetBit(32);
+ second.SetBit(48);
+
+ EXPECT_TRUE(first.IsSubsetOf(&second));
+ second.SetBit(8);
+ EXPECT_TRUE(first.IsSubsetOf(&second));
+ second.SetBit(40);
+ EXPECT_TRUE(first.IsSubsetOf(&second));
+ second.SetBit(52);
+ EXPECT_TRUE(first.IsSubsetOf(&second));
+
+ first.SetBit(9);
+ EXPECT_FALSE(first.IsSubsetOf(&second));
+ }
+}
+
} // namespace art
diff --git a/runtime/base/hex_dump.cc b/runtime/base/hex_dump.cc
index 5423ff0b5b..bce6b53f4d 100644
--- a/runtime/base/hex_dump.cc
+++ b/runtime/base/hex_dump.cc
@@ -27,7 +27,7 @@ void HexDump::Dump(std::ostream& os) const {
return;
}
- if (address_ == NULL) {
+ if (address_ == nullptr) {
os << "00000000:";
return;
}
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 0764b877a1..0ae7863382 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -91,7 +91,7 @@ void InitLogging(char* argv[]) {
gProgramInvocationShortName.reset(new std::string((last_slash != nullptr) ? last_slash + 1
: argv[0]));
} else {
- // TODO: fall back to /proc/self/cmdline when argv is NULL on Linux.
+ // TODO: fall back to /proc/self/cmdline when argv is null on Linux.
gCmdLine.reset(new std::string("<unset>"));
}
const char* tags = getenv("ANDROID_LOG_TAGS");
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 014f4ab5bf..8b34374560 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -72,7 +72,7 @@ extern unsigned int gAborting;
// This can be used to reveal or conceal logs with specific tags.
extern void InitLogging(char* argv[]);
-// Returns the command line used to invoke the current tool or nullptr if InitLogging hasn't been
+// Returns the command line used to invoke the current tool or null if InitLogging hasn't been
// performed.
extern const char* GetCmdLine();
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 6c33232b5f..c00ae78be8 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -46,6 +46,11 @@
#define ART_FRIEND_TEST(test_set_name, individual_test)\
friend class test_set_name##_##individual_test##_Test
+// Declare a friend relationship in a class with a typed test.
+#define ART_FRIEND_TYPED_TEST(test_set_name, individual_test)\
+template<typename T> ART_FRIEND_TEST(test_set_name, individual_test)
+
+
// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions. It goes in the private:
// declarations in a class.
#if !defined(DISALLOW_COPY_AND_ASSIGN)
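
The new ART_FRIEND_TYPED_TEST exists because gtest's TYPED_TEST(Suite, Name) expands to a class template Suite_Name_Test<T>, so a plain ART_FRIEND_TEST would only befriend a non-template class of that name. A hypothetical usage sketch (class and test names invented for illustration):

    #include <cstddef>

    class IntrusiveList {
     private:
      size_t size_ = 0u;
      // Expands to: template<typename T> friend class ListTest_SizeTracking_Test;
      ART_FRIEND_TYPED_TEST(ListTest, SizeTracking);
    };
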
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index cb698175df..a727992687 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -39,13 +39,14 @@
namespace art {
#if ART_USE_FUTEXES
-static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout, volatile int *uaddr2, int val3) {
+static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
+ volatile int *uaddr2, int val3) {
return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif // ART_USE_FUTEXES
static inline uint64_t SafeGetTid(const Thread* self) {
- if (self != NULL) {
+ if (self != nullptr) {
return static_cast<uint64_t>(self->GetTid());
} else {
return static_cast<uint64_t>(GetTid());
@@ -77,7 +78,7 @@ static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALY
}
inline void BaseMutex::RegisterAsLocked(Thread* self) {
- if (UNLIKELY(self == NULL)) {
+ if (UNLIKELY(self == nullptr)) {
CheckUnattachedThread(level_);
return;
}
@@ -86,7 +87,7 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
bool bad_mutexes_held = false;
for (int i = level_; i >= 0; --i) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
- if (UNLIKELY(held_mutex != NULL)) {
+ if (UNLIKELY(held_mutex != nullptr)) {
LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << " - " << i
<< ") while locking \"" << name_ << "\" "
@@ -109,7 +110,7 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
}
inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
- if (UNLIKELY(self == NULL)) {
+ if (UNLIKELY(self == nullptr)) {
CheckUnattachedThread(level_);
return;
}
@@ -117,12 +118,12 @@ inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
if (kDebugLocking && gAborting == 0) { // Avoid recursive aborts.
CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
}
- self->SetHeldMutex(level_, NULL);
+ self->SetHeldMutex(level_, nullptr);
}
}
inline void ReaderWriterMutex::SharedLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
do {
@@ -143,7 +144,7 @@ inline void ReaderWriterMutex::SharedLock(Thread* self) {
}
inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
AssertSharedHeld(self);
RegisterAsUnlocked(self);
@@ -161,7 +162,7 @@ inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
if (num_pending_writers_.LoadRelaxed() > 0 ||
num_pending_readers_.LoadRelaxed() > 0) {
// Wake any exclusive waiters as there are now no readers.
- futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
}
}
} else {
@@ -174,11 +175,11 @@ inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
}
inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
if (kDebugLocking) {
// Sanity debug check that if we think it is locked we have it in our held mutexes.
- if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
+ if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
CHECK_EQ(self->GetHeldMutex(level_), this);
}
}
@@ -190,11 +191,11 @@ inline uint64_t Mutex::GetExclusiveOwnerTid() const {
}
inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
if (kDebugLocking) {
// Sanity that if the pthread thinks we own the lock the Thread agrees.
- if (self != NULL && result) {
+ if (self != nullptr && result) {
CHECK_EQ(self->GetHeldMutex(level_), this);
}
}
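
The futex() wrapper reformatted at the top of this file is a thin shim over the raw SYS_futex syscall, and the lock code pairs it in the classic way: FUTEX_WAIT blocks only while the futex word still holds the expected value, and FUTEX_WAKE rouses up to n waiters. A minimal Linux-only sketch of that pairing, with the lock state machine and error handling trimmed:

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <cerrno>
    #include <ctime>

    static int futex(volatile int* uaddr, int op, int val,
                     const struct timespec* timeout, volatile int* uaddr2, int val3) {
      return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
    }

    // Sleep while *state reads 1; EAGAIN and EINTR are spurious wakeups, so
    // re-check and retry, much as Mutex::ExclusiveLock does below.
    static void WaitWhileLocked(volatile int* state) {
      while (*state == 1) {
        if (futex(state, FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0 &&
            errno != EAGAIN && errno != EINTR) {
          break;  // Unexpected errno; a real lock would abort here.
        }
      }
    }

    static void WakeOneContender(volatile int* state) {
      futex(state, FUTEX_WAKE, 1, nullptr, nullptr, 0);  // Wake a single waiter.
    }
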
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 13dcb8c634..99c7246d61 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -67,7 +67,7 @@ struct AllMutexData {
Atomic<const BaseMutex*> all_mutexes_guard;
// All created mutexes guarded by all_mutexes_guard_.
std::set<BaseMutex*>* all_mutexes;
- AllMutexData() : all_mutexes(NULL) {}
+ AllMutexData() : all_mutexes(nullptr) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];
@@ -114,7 +114,7 @@ class ScopedAllMutexesLock FINAL {
class ScopedContentionRecorder FINAL : public ValueObject {
public:
ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
- : mutex_(kLogLockContentions ? mutex : NULL),
+ : mutex_(kLogLockContentions ? mutex : nullptr),
blocked_tid_(kLogLockContentions ? blocked_tid : 0),
owner_tid_(kLogLockContentions ? owner_tid : 0),
start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
@@ -144,7 +144,7 @@ BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(n
if (kLogLockContentions) {
ScopedAllMutexesLock mu(this);
std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
- if (*all_mutexes_ptr == NULL) {
+ if (*all_mutexes_ptr == nullptr) {
// We leak the global set of all mutexes to avoid ordering issues in global variable
// construction/destruction.
*all_mutexes_ptr = new std::set<BaseMutex*>();
@@ -165,7 +165,7 @@ void BaseMutex::DumpAll(std::ostream& os) {
os << "Mutex logging:\n";
ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
- if (all_mutexes == NULL) {
+ if (all_mutexes == nullptr) {
// No mutexes have been created yet at startup.
return;
}
@@ -190,7 +190,7 @@ void BaseMutex::DumpAll(std::ostream& os) {
}
void BaseMutex::CheckSafeToWait(Thread* self) {
- if (self == NULL) {
+ if (self == nullptr) {
CheckUnattachedThread(level_);
return;
}
@@ -202,7 +202,7 @@ void BaseMutex::CheckSafeToWait(Thread* self) {
if (i != level_) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
// We expect waits to happen while holding the thread list suspend thread lock.
- if (held_mutex != NULL) {
+ if (held_mutex != nullptr) {
LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << ") while performing wait on "
<< "\"" << name_ << "\" (level " << level_ << ")";
@@ -354,7 +354,7 @@ Mutex::~Mutex() {
}
void Mutex::ExclusiveLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
if (kDebugLocking && !recursive_) {
AssertNotHeld(self);
}
@@ -370,7 +370,7 @@ void Mutex::ExclusiveLock(Thread* self) {
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
num_contenders_++;
- if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -397,7 +397,7 @@ void Mutex::ExclusiveLock(Thread* self) {
}
bool Mutex::ExclusiveTryLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
if (kDebugLocking && !recursive_) {
AssertNotHeld(self);
}
@@ -474,7 +474,7 @@ void Mutex::ExclusiveUnlock(Thread* self) {
if (LIKELY(done)) { // Spurious fail?
// Wake a contender.
if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
- futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
}
}
} else {
@@ -537,14 +537,14 @@ ReaderWriterMutex::~ReaderWriterMutex() {
// TODO: should we just not log at all if shutting down? this could be the logging mutex!
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
+ bool shutting_down = runtime == nullptr || runtime->IsShuttingDownLocked();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
}
#endif
}
void ReaderWriterMutex::ExclusiveLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
bool done = false;
@@ -557,7 +557,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -578,7 +578,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
}
void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
AssertExclusiveHeld(self);
RegisterAsUnlocked(self);
DCHECK_NE(exclusive_owner_, 0U);
@@ -598,7 +598,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
// Wake any waiters.
if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
num_pending_writers_.LoadRelaxed() > 0)) {
- futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
}
}
} else {
@@ -613,7 +613,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
timespec end_abs_ts;
@@ -633,7 +633,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
}
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
--num_pending_writers_;
return false; // Timed out.
@@ -671,7 +671,7 @@ void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_sta
// Owner holds it exclusively, hang up.
ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
++num_pending_readers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
if (errno != EAGAIN) {
PLOG(FATAL) << "futex wait failed for " << name_;
}
@@ -681,7 +681,7 @@ void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_sta
#endif
bool ReaderWriterMutex::SharedTryLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
do {
@@ -710,9 +710,9 @@ bool ReaderWriterMutex::SharedTryLock(Thread* self) {
}
bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result;
- if (UNLIKELY(self == NULL)) { // Handle unattached threads.
+ if (UNLIKELY(self == nullptr)) { // Handle unattached threads.
result = IsExclusiveHeld(self); // TODO: a better best effort here.
} else {
result = (self->GetHeldMutex(level_) == this);
@@ -770,14 +770,14 @@ ConditionVariable::~ConditionVariable() {
errno = rc;
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
+ bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDownLocked();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
}
#endif
}
void ConditionVariable::Broadcast(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
// TODO: enable below; there's a race in thread creation that causes false failures currently.
// guard_.AssertExclusiveHeld(self);
DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
@@ -805,14 +805,14 @@ void ConditionVariable::Broadcast(Thread* self) {
}
void ConditionVariable::Signal(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
if (num_waiters_ > 0) {
sequence_++; // Indicate a signal occurred.
// Futex wake one waiter, who will then come in and contend on the mutex. It'd be nice to
// requeue them to avoid this; however, requeueing can only move all waiters.
- int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
+ int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
// Check that something was woken, or else we changed sequence_ before they had a chance to wait.
CHECK((num_woken == 0) || (num_woken == 1));
}
@@ -827,7 +827,7 @@ void ConditionVariable::Wait(Thread* self) {
}
void ConditionVariable::WaitHoldingLocks(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
guard_.AssertExclusiveHeld(self);
unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
@@ -837,7 +837,7 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) {
guard_.recursion_count_ = 1;
int32_t cur_sequence = sequence_.LoadRelaxed();
guard_.ExclusiveUnlock(self);
- if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
+ if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) {
// Futex failed, check it is an expected error.
// EAGAIN == EWOULDBLOCK, so we let the caller try again.
// EINTR implies a signal was sent to this thread.
@@ -862,7 +862,7 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) {
}
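
The WaitHoldingLocks body above (and TimedWait just below) follows one careful sequence: snapshot sequence_ while still holding the guard, unlock, FUTEX_WAIT on the snapshot, then re-lock. A hedged sketch of just that core, reusing the futex() helper from the mutex sketch earlier; GuardT stands in for ART's Mutex and the names are illustrative:

template <typename GuardT>
void SequenceWaitSketch(std::atomic<int32_t>& sequence, GuardT& guard) {
  int32_t cur = sequence.load(std::memory_order_relaxed);  // Snapshot while locked.
  guard.unlock();
  // Signal()/Broadcast() increment sequence, so a wakeup that races with the
  // unlock makes FUTEX_WAIT see a changed value and return EAGAIN immediately;
  // the classic lost-wakeup window between unlock and sleep is closed.
  futex(&sequence, FUTEX_WAIT, cur);
  guard.lock();  // Re-acquire; the caller re-checks its predicate afterwards.
}
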
bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool timed_out = false;
guard_.AssertExclusiveHeld(self);
guard_.CheckSafeToWait(self);
@@ -876,7 +876,7 @@ bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
guard_.recursion_count_ = 1;
int32_t cur_sequence = sequence_.LoadRelaxed();
guard_.ExclusiveUnlock(self);
- if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
+ if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
// Timed out, so we're done.
timed_out = true;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 6e4b96c2e6..f2be85e277 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -344,8 +344,8 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex {
// Assert the current thread has shared access to the ReaderWriterMutex.
void AssertSharedHeld(const Thread* self) {
if (kDebugLocking && (gAborting == 0)) {
- // TODO: we can only assert this well when self != NULL.
- CHECK(IsSharedHeld(self) || self == NULL) << *this;
+ // TODO: we can only assert this well when self != null.
+ CHECK(IsSharedHeld(self) || self == nullptr) << *this;
}
}
void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 289d3ef8c1..3750c815e9 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -106,7 +106,7 @@ struct RecursiveLockWait {
state->mu.Lock(Thread::Current());
state->cv.Signal(Thread::Current());
state->mu.Unlock(Thread::Current());
- return NULL;
+ return nullptr;
}
Mutex mu;
@@ -120,14 +120,15 @@ static void RecursiveLockWaitTest() NO_THREAD_SAFETY_ANALYSIS {
state.mu.Lock(Thread::Current());
pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread, NULL, RecursiveLockWait::Callback, &state);
+ int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWait::Callback,
+ &state);
ASSERT_EQ(0, pthread_create_result);
state.cv.Wait(Thread::Current());
state.mu.Unlock(Thread::Current());
state.mu.Unlock(Thread::Current());
- EXPECT_EQ(pthread_join(pthread, NULL), 0);
+ EXPECT_EQ(pthread_join(pthread, nullptr), 0);
}
// This ensures we don't hang when waiting on a recursively locked mutex,
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index 0e93eee627..71e0590272 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -31,7 +31,7 @@ bool ScopedFlock::Init(const char* filename, std::string* error_msg) {
UNUSED(file_->FlushCloseOrErase()); // Ignore result.
}
file_.reset(OS::OpenFileWithFlags(filename, O_CREAT | O_RDWR));
- if (file_.get() == NULL) {
+ if (file_.get() == nullptr) {
*error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
return false;
}
@@ -71,14 +71,15 @@ bool ScopedFlock::Init(File* file, std::string* error_msg) {
}
if (0 != TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_EX))) {
file_.reset();
- *error_msg = StringPrintf("Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
+ *error_msg = StringPrintf(
+ "Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
return false;
}
return true;
}
File* ScopedFlock::GetFile() {
- CHECK(file_.get() != NULL);
+ CHECK(file_.get() != nullptr);
return file_.get();
}
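
ScopedFlock is a small RAII wrapper over flock(2). A hedged sketch of the same shape, with ART's File abstraction replaced by a raw descriptor and the class name invented for illustration; the production Init() does more, e.g. it re-checks that the locked file is still the one at the requested path:

#include <cerrno>
#include <sys/file.h>
#include <unistd.h>

class FlockGuard {
 public:
  explicit FlockGuard(int fd) : fd_(fd) {
    // Equivalent of TEMP_FAILURE_RETRY(flock(fd, LOCK_EX)).
    while (flock(fd_, LOCK_EX) != 0 && errno == EINTR) {
    }
  }
  ~FlockGuard() {
    flock(fd_, LOCK_UN);  // Best effort here; ART CHECKs the result.
    close(fd_);
  }
 private:
  int fd_;
};
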
@@ -89,7 +90,7 @@ bool ScopedFlock::HasFile() {
ScopedFlock::ScopedFlock() { }
ScopedFlock::~ScopedFlock() {
- if (file_.get() != NULL) {
+ if (file_.get() != nullptr) {
int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
CHECK_EQ(0, flock_result);
if (file_->FlushCloseOrErase() != 0) {
diff --git a/runtime/base/stl_util.h b/runtime/base/stl_util.h
index 3c5565cdc0..901f25fa0a 100644
--- a/runtime/base/stl_util.h
+++ b/runtime/base/stl_util.h
@@ -54,28 +54,30 @@ void STLDeleteContainerPointers(ForwardIterator begin,
// hash_set, or any other STL container which defines sensible begin(), end(),
// and clear() methods.
//
-// If container is NULL, this function is a no-op.
+// If container is null, this function is a no-op.
//
// As an alternative to calling STLDeleteElements() directly, consider
// using a container of std::unique_ptr, which ensures that your container's
// elements are deleted when the container goes out of scope.
template <class T>
void STLDeleteElements(T *container) {
- if (!container) return;
- STLDeleteContainerPointers(container->begin(), container->end());
- container->clear();
+ if (container != nullptr) {
+ STLDeleteContainerPointers(container->begin(), container->end());
+ container->clear();
+ }
}
// Given an STL container consisting of (key, value) pairs, STLDeleteValues
// deletes all the "value" components and clears the container. Does nothing
-// in the case it's given a NULL pointer.
+// if it is given a null pointer.
template <class T>
void STLDeleteValues(T *v) {
- if (!v) return;
- for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
- delete i->second;
+ if (v != nullptr) {
+ for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
+ delete i->second;
+ }
+ v->clear();
}
- v->clear();
}
template <class T>
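
As the comments above say, these helpers exist to tear down containers of owning raw pointers. A short usage sketch, plus the std::unique_ptr alternative the comment recommends (Foo is a placeholder type):

#include <memory>
#include <vector>

struct Foo { int x; };

void RawPointerStyle() {
  std::vector<Foo*> foos;
  foos.push_back(new Foo{1});
  foos.push_back(new Foo{2});
  STLDeleteElements(&foos);  // Deletes every element, then clears the vector.
}

void UniquePtrStyle() {
  std::vector<std::unique_ptr<Foo>> foos;
  foos.emplace_back(new Foo{1});  // Owned by the vector's elements.
}  // Elements are deleted automatically when foos goes out of scope.
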
diff --git a/runtime/base/variant_map.h b/runtime/base/variant_map.h
index 8655a9e2e8..1d7596aee3 100644
--- a/runtime/base/variant_map.h
+++ b/runtime/base/variant_map.h
@@ -31,7 +31,7 @@ namespace art {
//
// struct VariantMap {
// template <typename TValue>
-// TValue* Get(Key<T> key); // nullptr if the value was never set, otherwise the value.
+// TValue* Get(Key<T> key); // null if the value was never set, otherwise the value.
//
// template <typename TValue>
// void Set(Key<T> key, TValue value);
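
The commented interface above is the whole contract: a key's template parameter carries its value type, so Get and Set stay type-safe even though the storage is heterogeneous. A hedged toy sketch of that idea; VariantMap's real machinery differs, and using key addresses as identity assumes keys are long-lived (e.g. static) objects:

#include <map>
#include <memory>
#include <utility>

template <typename TValue>
struct Key {};  // Distinct instances act as identity tokens.

struct TinyVariantMap {
  template <typename TValue>
  TValue* Get(const Key<TValue>& key) {
    auto it = storage_.find(&key);
    return it == storage_.end() ? nullptr
                                : static_cast<TValue*>(it->second.get());
  }
  template <typename TValue>
  void Set(const Key<TValue>& key, TValue value) {
    // shared_ptr<void> remembers the correct deleter for TValue.
    storage_[&key] = std::shared_ptr<void>(new TValue(std::move(value)));
  }
  std::map<const void*, std::shared_ptr<void>> storage_;
};

// Usage: static const Key<int> kCount;  map.Set(kCount, 3);  int* v = map.Get(kCount);
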
diff --git a/runtime/base/variant_map_test.cc b/runtime/base/variant_map_test.cc
index f306a48e53..ccb22eb64d 100644
--- a/runtime/base/variant_map_test.cc
+++ b/runtime/base/variant_map_test.cc
@@ -18,7 +18,7 @@
#include "gtest/gtest.h"
#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
- reinterpret_cast<void*>(NULL));
+ static_cast<void*>(nullptr));
namespace art {
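
The EXPECT_NULL change above is not just style: reinterpret_cast has no conversion from std::nullptr_t, so the old macro would stop compiling once NULL is replaced by nullptr. A two-line illustration:

void* ok = static_cast<void*>(nullptr);          // Fine: standard conversion.
// void* bad = reinterpret_cast<void*>(nullptr); // Ill-formed: rejected by GCC/Clang.
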
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index c6940d3b5b..30084d2b51 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -183,7 +183,7 @@ class ScopedCheck {
}
/*
- * Verify that the pointer value is non-NULL.
+ * Verify that the pointer value is non-null.
*/
bool CheckNonNull(const void* ptr) {
if (UNLIKELY(ptr == nullptr)) {
@@ -612,7 +612,7 @@ class ScopedCheck {
};
/*
- * Verify that "jobj" is a valid non-NULL object reference, and points to
+ * Verify that "jobj" is a valid non-null object reference, and points to
* an instance of expectedClass.
*
* Because we're looking at an object on the GC heap, we have to switch
@@ -941,7 +941,7 @@ class ScopedCheck {
}
}
/*
- * Verify that "array" is non-NULL and points to an Array object.
+ * Verify that "array" is non-null and points to an Array object.
*
* Since we're dealing with objects, switch to "running" mode.
*/
@@ -1277,7 +1277,7 @@ class GuardedCopy {
* Verify the guard area and, if "modOkay" is false, that the data itself
* has not been altered.
*
- * The caller has already checked that "dataBuf" is non-NULL.
+ * The caller has already checked that "dataBuf" is non-null.
*/
static bool Check(const char* function_name, const void* embedded_buf, bool mod_okay) {
const GuardedCopy* copy = FromEmbedded(embedded_buf);
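
GuardedCopy brackets a copy of the user's buffer with guard (canary) words so CheckJNI can detect out-of-bounds writes, and per the comment above it can also verify the data itself when modification is disallowed. A hedged sketch of just the canary half; the constant and helper names are illustrative, not ART's:

#include <cstdint>
#include <cstdlib>
#include <cstring>

constexpr uint32_t kCanary = 0xdeadd00d;

// Allocate len usable bytes with a canary word on each side.
void* GuardedAlloc(size_t len) {
  auto* p = static_cast<uint32_t*>(malloc(len + 2 * sizeof(uint32_t)));
  p[0] = kCanary;
  memcpy(reinterpret_cast<char*>(p + 1) + len, &kCanary, sizeof(kCanary));
  return p + 1;  // Hand out the embedded buffer, as FromEmbedded() undoes.
}

bool GuardedCheck(void* embedded, size_t len) {
  uint32_t head = *(static_cast<uint32_t*>(embedded) - 1);
  uint32_t tail;
  memcpy(&tail, static_cast<char*>(embedded) + len, sizeof(tail));
  return head == kCanary && tail == kCanary;  // A damaged guard means an OOB write.
}
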
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 87d1c4cd66..1428749aca 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -60,7 +60,7 @@ inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx,
mirror::ArtMethod* referrer) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
mirror::String* resolved_string = declaring_class->GetDexCacheStrings()->Get(string_idx);
- if (UNLIKELY(resolved_string == NULL)) {
+ if (UNLIKELY(resolved_string == nullptr)) {
StackHandleScope<1> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
const DexFile& dex_file = *dex_cache->GetDexFile();
@@ -92,7 +92,7 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, ArtField* refe
mirror::Class* declaring_class = referrer->GetDeclaringClass();
mirror::DexCache* dex_cache_ptr = declaring_class->GetDexCache();
mirror::Class* resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
- if (UNLIKELY(resolved_type == NULL)) {
+ if (UNLIKELY(resolved_type == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_cache_ptr));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
@@ -146,7 +146,7 @@ inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, mirror::ArtMethod
bool is_static) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
ArtField* resolved_field = GetResolvedField(field_idx, declaring_class);
- if (UNLIKELY(resolved_field == NULL)) {
+ if (UNLIKELY(resolved_field == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
@@ -196,7 +196,7 @@ inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
DCHECK(!class_roots_.IsNull());
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
mirror::Class* klass = class_roots->Get(class_root);
- DCHECK(klass != NULL);
+ DCHECK(klass != nullptr);
return klass;
}
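
The four Resolve* hunks above share one shape: probe the dex cache, and only on a miss fall through to the slow resolver, which also populates the cache. A hedged sketch of that memoization pattern with illustrative types (ART's caches are managed arrays, not std::vector):

#include <cstdint>
#include <vector>

template <typename T, typename Resolver>
T* ResolveCached(std::vector<T*>& cache, uint32_t idx, Resolver slow_resolve) {
  T* result = cache[idx];
  if (result == nullptr) {       // UNLIKELY in ART: most lookups hit the cache.
    result = slow_resolve(idx);  // Expected to store its result into cache[idx].
    // result may still be null here if resolution failed with an exception.
  }
  return result;
}
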
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4e59217af6..dc8bf2ac5e 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -59,6 +59,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/field.h"
#include "mirror/iftable-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/proxy.h"
@@ -91,8 +92,28 @@ static void ThrowNoClassDefFoundError(const char* fmt, ...) {
va_end(args);
}
-static void ThrowEarlierClassFailure(mirror::Class* c)
+static bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(method != nullptr ?
+ method->GetDeclaringClass()->GetClassLoader()
+ : nullptr));
+ mirror::Class* exception_class = class_linker->FindClass(self, descriptor, class_loader);
+
+ if (exception_class == nullptr) {
+ // No exc class ~ no <init>-with-string.
+ CHECK(self->IsExceptionPending());
+ self->ClearException();
+ return false;
+ }
+
+ mirror::ArtMethod* exception_init_method =
+ exception_class->FindDeclaredDirectMethod("<init>", "(Ljava/lang/String;)V");
+ return exception_init_method != nullptr;
+}
+
+void ClassLinker::ThrowEarlierClassFailure(mirror::Class* c) {
// The class failed to initialize on a previous attempt, so we want to throw
// a NoClassDefFoundError (v2 2.17.5). The exception to this rule is if we
// failed in verification, in which case v2 5.4.1 says we need to re-throw
@@ -109,11 +130,17 @@ static void ThrowEarlierClassFailure(mirror::Class* c)
mirror::Throwable* pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
self->SetException(pre_allocated);
} else {
- if (c->GetVerifyErrorClass() != NULL) {
+ if (c->GetVerifyErrorClass() != nullptr) {
// TODO: change the verifier to store an _instance_, with a useful detail message?
+ // It's possible the exception doesn't have a <init>(String).
std::string temp;
- self->ThrowNewException(c->GetVerifyErrorClass()->GetDescriptor(&temp),
- PrettyDescriptor(c).c_str());
+ const char* descriptor = c->GetVerifyErrorClass()->GetDescriptor(&temp);
+
+ if (HasInitWithString(self, this, descriptor)) {
+ self->ThrowNewException(descriptor, PrettyDescriptor(c).c_str());
+ } else {
+ self->ThrowNewException(descriptor, nullptr);
+ }
} else {
self->ThrowNewException("Ljava/lang/NoClassDefFoundError;",
PrettyDescriptor(c).c_str());
@@ -258,8 +285,8 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
CHECK(!init_done_);
// java_lang_Class comes first, it's needed for AllocClass
- Thread* self = Thread::Current();
- gc::Heap* heap = Runtime::Current()->GetHeap();
+ Thread* const self = Thread::Current();
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
// The GC can't handle an object with a null class since we can't get the size of this object.
heap->IncrementDisableMovingGC(self);
StackHandleScope<64> hs(self); // 64 is picked arbitrarily.
@@ -436,20 +463,19 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
// Object, String and DexCache need to be rerun through FindSystemClass to finish init
mirror::Class::SetStatus(java_lang_Object, mirror::Class::kStatusNotReady, self);
- mirror::Class* Object_class = FindSystemClass(self, "Ljava/lang/Object;");
- CHECK_EQ(java_lang_Object.Get(), Object_class);
+ CHECK_EQ(java_lang_Object.Get(), FindSystemClass(self, "Ljava/lang/Object;"));
CHECK_EQ(java_lang_Object->GetObjectSize(), mirror::Object::InstanceSize());
mirror::Class::SetStatus(java_lang_String, mirror::Class::kStatusNotReady, self);
mirror::Class* String_class = FindSystemClass(self, "Ljava/lang/String;");
- std::ostringstream os1, os2;
- java_lang_String->DumpClass(os1, mirror::Class::kDumpClassFullDetail);
- String_class->DumpClass(os2, mirror::Class::kDumpClassFullDetail);
- CHECK_EQ(java_lang_String.Get(), String_class) << os1.str() << "\n\n" << os2.str();
+ if (java_lang_String.Get() != String_class) {
+ std::ostringstream os1, os2;
+ java_lang_String->DumpClass(os1, mirror::Class::kDumpClassFullDetail);
+ String_class->DumpClass(os2, mirror::Class::kDumpClassFullDetail);
+ LOG(FATAL) << os1.str() << "\n\n" << os2.str();
+ }
CHECK_EQ(java_lang_String->GetObjectSize(), mirror::String::InstanceSize());
mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusNotReady, self);
- mirror::Class* DexCache_class = FindSystemClass(self, "Ljava/lang/DexCache;");
- CHECK_EQ(java_lang_String.Get(), String_class);
- CHECK_EQ(java_lang_DexCache.Get(), DexCache_class);
+ CHECK_EQ(java_lang_DexCache.Get(), FindSystemClass(self, "Ljava/lang/DexCache;"));
CHECK_EQ(java_lang_DexCache->GetObjectSize(), mirror::DexCache::InstanceSize());
// Setup the primitive array type classes - can't be done until Object has a vtable.
@@ -459,17 +485,14 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
SetClassRoot(kByteArrayClass, FindSystemClass(self, "[B"));
mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
- mirror::Class* found_char_array_class = FindSystemClass(self, "[C");
- CHECK_EQ(char_array_class.Get(), found_char_array_class);
+ CHECK_EQ(char_array_class.Get(), FindSystemClass(self, "[C"));
SetClassRoot(kShortArrayClass, FindSystemClass(self, "[S"));
mirror::ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass));
- mirror::Class* found_int_array_class = FindSystemClass(self, "[I");
- CHECK_EQ(int_array_class.Get(), found_int_array_class);
+ CHECK_EQ(int_array_class.Get(), FindSystemClass(self, "[I"));
- mirror::Class* found_long_array_class = FindSystemClass(self, "[J");
- CHECK_EQ(long_array_class.Get(), found_long_array_class);
+ CHECK_EQ(long_array_class.Get(), FindSystemClass(self, "[J"));
SetClassRoot(kFloatArrayClass, FindSystemClass(self, "[F"));
mirror::FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass));
@@ -477,97 +500,101 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
SetClassRoot(kDoubleArrayClass, FindSystemClass(self, "[D"));
mirror::DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass));
- mirror::Class* found_class_array_class = FindSystemClass(self, "[Ljava/lang/Class;");
- CHECK_EQ(class_array_class.Get(), found_class_array_class);
+ CHECK_EQ(class_array_class.Get(), FindSystemClass(self, "[Ljava/lang/Class;"));
- mirror::Class* found_object_array_class = FindSystemClass(self, "[Ljava/lang/Object;");
- CHECK_EQ(object_array_class.Get(), found_object_array_class);
+ CHECK_EQ(object_array_class.Get(), FindSystemClass(self, "[Ljava/lang/Object;"));
// Setup the single, global copy of "iftable".
- mirror::Class* java_lang_Cloneable = FindSystemClass(self, "Ljava/lang/Cloneable;");
- CHECK(java_lang_Cloneable != nullptr);
- mirror::Class* java_io_Serializable = FindSystemClass(self, "Ljava/io/Serializable;");
- CHECK(java_io_Serializable != nullptr);
+ auto java_lang_Cloneable = hs.NewHandle(FindSystemClass(self, "Ljava/lang/Cloneable;"));
+ CHECK(java_lang_Cloneable.Get() != nullptr);
+ auto java_io_Serializable = hs.NewHandle(FindSystemClass(self, "Ljava/io/Serializable;"));
+ CHECK(java_io_Serializable.Get() != nullptr);
// We assume that Cloneable/Serializable don't have superinterfaces -- normally we'd have to
// crawl up and explicitly list all of the supers as well.
- {
- mirror::IfTable* array_iftable = array_iftable_.Read();
- array_iftable->SetInterface(0, java_lang_Cloneable);
- array_iftable->SetInterface(1, java_io_Serializable);
- }
-
- // Sanity check Class[] and Object[]'s interfaces.
- CHECK_EQ(java_lang_Cloneable, mirror::Class::GetDirectInterface(self, class_array_class, 0));
- CHECK_EQ(java_io_Serializable, mirror::Class::GetDirectInterface(self, class_array_class, 1));
- CHECK_EQ(java_lang_Cloneable, mirror::Class::GetDirectInterface(self, object_array_class, 0));
- CHECK_EQ(java_io_Serializable, mirror::Class::GetDirectInterface(self, object_array_class, 1));
+ array_iftable_.Read()->SetInterface(0, java_lang_Cloneable.Get());
+ array_iftable_.Read()->SetInterface(1, java_io_Serializable.Get());
+
+ // Sanity check Class[] and Object[]'s interfaces. GetDirectInterface may cause thread
+ // suspension.
+ CHECK_EQ(java_lang_Cloneable.Get(),
+ mirror::Class::GetDirectInterface(self, class_array_class, 0));
+ CHECK_EQ(java_io_Serializable.Get(),
+ mirror::Class::GetDirectInterface(self, class_array_class, 1));
+ CHECK_EQ(java_lang_Cloneable.Get(),
+ mirror::Class::GetDirectInterface(self, object_array_class, 0));
+ CHECK_EQ(java_io_Serializable.Get(),
+ mirror::Class::GetDirectInterface(self, object_array_class, 1));
// Run Class, ArtField, and ArtMethod through FindSystemClass. This initializes their
// dex_cache_ fields and register them in class_table_.
- mirror::Class* Class_class = FindSystemClass(self, "Ljava/lang/Class;");
- CHECK_EQ(java_lang_Class.Get(), Class_class);
+ CHECK_EQ(java_lang_Class.Get(), FindSystemClass(self, "Ljava/lang/Class;"));
mirror::Class::SetStatus(java_lang_reflect_ArtMethod, mirror::Class::kStatusNotReady, self);
- mirror::Class* Art_method_class = FindSystemClass(self, "Ljava/lang/reflect/ArtMethod;");
- CHECK_EQ(java_lang_reflect_ArtMethod.Get(), Art_method_class);
-
- mirror::Class* String_array_class =
- FindSystemClass(self, GetClassRootDescriptor(kJavaLangStringArrayClass));
- CHECK_EQ(object_array_string.Get(), String_array_class);
-
- mirror::Class* Art_method_array_class =
- FindSystemClass(self, GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass));
- CHECK_EQ(object_array_art_method.Get(), Art_method_array_class);
+ CHECK_EQ(java_lang_reflect_ArtMethod.Get(),
+ FindSystemClass(self, "Ljava/lang/reflect/ArtMethod;"));
+ CHECK_EQ(object_array_string.Get(),
+ FindSystemClass(self, GetClassRootDescriptor(kJavaLangStringArrayClass)));
+ CHECK_EQ(object_array_art_method.Get(),
+ FindSystemClass(self, GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass)));
// End of special init trickery, subsequent classes may be loaded via FindSystemClass.
// Create java.lang.reflect.Proxy root.
- mirror::Class* java_lang_reflect_Proxy = FindSystemClass(self, "Ljava/lang/reflect/Proxy;");
- SetClassRoot(kJavaLangReflectProxy, java_lang_reflect_Proxy);
+ SetClassRoot(kJavaLangReflectProxy, FindSystemClass(self, "Ljava/lang/reflect/Proxy;"));
// Create java.lang.reflect.Field.class root.
- mirror::Class* java_lang_reflect_Field = FindSystemClass(self, "Ljava/lang/reflect/Field;");
- CHECK(java_lang_reflect_Field != nullptr);
- SetClassRoot(kJavaLangReflectField, java_lang_reflect_Field);
- mirror::Field::SetClass(java_lang_reflect_Field);
+ auto* class_root = FindSystemClass(self, "Ljava/lang/reflect/Field;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectField, class_root);
+ mirror::Field::SetClass(class_root);
// Create java.lang.reflect.Field array root.
- mirror::Class* java_lang_reflect_Field_array =
- FindSystemClass(self, "[Ljava/lang/reflect/Field;");
- CHECK(java_lang_reflect_Field_array != nullptr);
- SetClassRoot(kJavaLangReflectFieldArrayClass, java_lang_reflect_Field_array);
- mirror::Field::SetArrayClass(java_lang_reflect_Field_array);
+ class_root = FindSystemClass(self, "[Ljava/lang/reflect/Field;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectFieldArrayClass, class_root);
+ mirror::Field::SetArrayClass(class_root);
+
+ // Create java.lang.reflect.Constructor.class root and array root.
+ class_root = FindSystemClass(self, "Ljava/lang/reflect/Constructor;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectConstructor, class_root);
+ mirror::Constructor::SetClass(class_root);
+ class_root = FindSystemClass(self, "[Ljava/lang/reflect/Constructor;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectConstructorArrayClass, class_root);
+ mirror::Constructor::SetArrayClass(class_root);
+
+ // Create java.lang.reflect.Method.class root and array root.
+ class_root = FindSystemClass(self, "Ljava/lang/reflect/Method;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectMethod, class_root);
+ mirror::Method::SetClass(class_root);
+ class_root = FindSystemClass(self, "[Ljava/lang/reflect/Method;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectMethodArrayClass, class_root);
+ mirror::Method::SetArrayClass(class_root);
// java.lang.ref classes need to be specially flagged, but otherwise are normal classes
// finish initializing Reference class
mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusNotReady, self);
- mirror::Class* Reference_class = FindSystemClass(self, "Ljava/lang/ref/Reference;");
- CHECK_EQ(java_lang_ref_Reference.Get(), Reference_class);
+ CHECK_EQ(java_lang_ref_Reference.Get(), FindSystemClass(self, "Ljava/lang/ref/Reference;"));
CHECK_EQ(java_lang_ref_Reference->GetObjectSize(), mirror::Reference::InstanceSize());
CHECK_EQ(java_lang_ref_Reference->GetClassSize(), mirror::Reference::ClassSize());
- mirror::Class* java_lang_ref_FinalizerReference =
- FindSystemClass(self, "Ljava/lang/ref/FinalizerReference;");
- java_lang_ref_FinalizerReference->SetAccessFlags(
- java_lang_ref_FinalizerReference->GetAccessFlags() |
- kAccClassIsReference | kAccClassIsFinalizerReference);
- mirror::Class* java_lang_ref_PhantomReference =
- FindSystemClass(self, "Ljava/lang/ref/PhantomReference;");
- java_lang_ref_PhantomReference->SetAccessFlags(
- java_lang_ref_PhantomReference->GetAccessFlags() |
- kAccClassIsReference | kAccClassIsPhantomReference);
- mirror::Class* java_lang_ref_SoftReference =
- FindSystemClass(self, "Ljava/lang/ref/SoftReference;");
- java_lang_ref_SoftReference->SetAccessFlags(
- java_lang_ref_SoftReference->GetAccessFlags() | kAccClassIsReference);
- mirror::Class* java_lang_ref_WeakReference =
- FindSystemClass(self, "Ljava/lang/ref/WeakReference;");
- java_lang_ref_WeakReference->SetAccessFlags(
- java_lang_ref_WeakReference->GetAccessFlags() |
- kAccClassIsReference | kAccClassIsWeakReference);
+ class_root = FindSystemClass(self, "Ljava/lang/ref/FinalizerReference;");
+ class_root->SetAccessFlags(class_root->GetAccessFlags() |
+ kAccClassIsReference | kAccClassIsFinalizerReference);
+ class_root = FindSystemClass(self, "Ljava/lang/ref/PhantomReference;");
+ class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference |
+ kAccClassIsPhantomReference);
+ class_root = FindSystemClass(self, "Ljava/lang/ref/SoftReference;");
+ class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference);
+ class_root = FindSystemClass(self, "Ljava/lang/ref/WeakReference;");
+ class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference |
+ kAccClassIsWeakReference);
// Setup the ClassLoader, verifying the object_size_.
- mirror::Class* java_lang_ClassLoader = FindSystemClass(self, "Ljava/lang/ClassLoader;");
- CHECK_EQ(java_lang_ClassLoader->GetObjectSize(), mirror::ClassLoader::InstanceSize());
- SetClassRoot(kJavaLangClassLoader, java_lang_ClassLoader);
+ class_root = FindSystemClass(self, "Ljava/lang/ClassLoader;");
+ CHECK_EQ(class_root->GetObjectSize(), mirror::ClassLoader::InstanceSize());
+ SetClassRoot(kJavaLangClassLoader, class_root);
// Set up java.lang.Throwable, java.lang.ClassNotFoundException, and
// java.lang.StackTraceElement as a convenience.
@@ -911,6 +938,10 @@ void ClassLinker::InitFromImage() {
// String class root was set above
mirror::Field::SetClass(GetClassRoot(kJavaLangReflectField));
mirror::Field::SetArrayClass(GetClassRoot(kJavaLangReflectFieldArrayClass));
+ mirror::Constructor::SetClass(GetClassRoot(kJavaLangReflectConstructor));
+ mirror::Constructor::SetArrayClass(GetClassRoot(kJavaLangReflectConstructorArrayClass));
+ mirror::Method::SetClass(GetClassRoot(kJavaLangReflectMethod));
+ mirror::Method::SetArrayClass(GetClassRoot(kJavaLangReflectMethodArrayClass));
mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
@@ -1096,22 +1127,26 @@ void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* ar
}
ClassLinker::~ClassLinker() {
- mirror::Class::ResetClass();
- mirror::String::ResetClass();
- mirror::Reference::ResetClass();
mirror::ArtMethod::ResetClass();
+ mirror::Class::ResetClass();
+ mirror::Constructor::ResetClass();
mirror::Field::ResetClass();
- mirror::Field::ResetArrayClass();
+ mirror::Method::ResetClass();
+ mirror::Reference::ResetClass();
+ mirror::StackTraceElement::ResetClass();
+ mirror::String::ResetClass();
+ mirror::Throwable::ResetClass();
mirror::BooleanArray::ResetArrayClass();
mirror::ByteArray::ResetArrayClass();
mirror::CharArray::ResetArrayClass();
+ mirror::Constructor::ResetArrayClass();
mirror::DoubleArray::ResetArrayClass();
+ mirror::Field::ResetArrayClass();
mirror::FloatArray::ResetArrayClass();
+ mirror::Method::ResetArrayClass();
mirror::IntArray::ResetArrayClass();
mirror::LongArray::ResetArrayClass();
mirror::ShortArray::ResetArrayClass();
- mirror::Throwable::ResetClass();
- mirror::StackTraceElement::ResetClass();
STLDeleteElements(&oat_files_);
}
@@ -2262,7 +2297,7 @@ mirror::Class* ClassLinker::InitializePrimitiveClass(mirror::Class* primitive_cl
// the right context. It does NOT become the class loader for the
// array class; that always comes from the base element class.
//
-// Returns nullptr with an exception raised on failure.
+// Returns null with an exception raised on failure.
mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor, size_t hash,
Handle<mirror::ClassLoader> class_loader) {
// Identify the underlying component type
@@ -2947,7 +2982,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
jobjectArray interfaces, jobject loader,
jobjectArray methods, jobjectArray throws) {
Thread* self = soa.Self();
- StackHandleScope<8> hs(self);
+ StackHandleScope<9> hs(self);
MutableHandle<mirror::Class> klass(hs.NewHandle(
AllocClass(self, GetClassRoot(kJavaLangClass), sizeof(mirror::Class))));
if (klass.Get() == nullptr) {
@@ -3001,8 +3036,10 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
}
// Create virtual method using specified prototypes.
- size_t num_virtual_methods =
- soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods)->GetLength();
+ auto h_methods = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Method>*>(methods));
+ DCHECK_EQ(h_methods->GetClass(), mirror::Method::ArrayClass())
+ << PrettyClass(h_methods->GetClass());
+ const size_t num_virtual_methods = h_methods->GetLength();
{
mirror::ObjectArray<mirror::ArtMethod>* virtuals = AllocArtMethodArray(self,
num_virtual_methods);
@@ -3014,9 +3051,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
}
for (size_t i = 0; i < num_virtual_methods; ++i) {
StackHandleScope<1> hs2(self);
- mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
- soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- Handle<mirror::ArtMethod> prototype(hs2.NewHandle(decoded_methods->Get(i)));
+ Handle<mirror::ArtMethod> prototype(hs2.NewHandle(h_methods->Get(i)->GetArtMethod()));
mirror::ArtMethod* clone = CreateProxyMethod(self, klass, prototype);
if (UNLIKELY(clone == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
@@ -3066,9 +3101,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
CheckProxyConstructor(klass->GetDirectMethod(0));
for (size_t i = 0; i < num_virtual_methods; ++i) {
StackHandleScope<2> hs2(self);
- mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
- soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- Handle<mirror::ArtMethod> prototype(hs2.NewHandle(decoded_methods->Get(i)));
+ Handle<mirror::ArtMethod> prototype(hs2.NewHandle(h_methods->Get(i)->GetArtMethod()));
Handle<mirror::ArtMethod> virtual_method(hs2.NewHandle(klass->GetVirtualMethod(i)));
CheckProxyMethod(virtual_method, prototype);
}
@@ -3104,23 +3137,22 @@ mirror::ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class,
mirror::ArtMethod* proxy_method) {
DCHECK(proxy_class->IsProxyClass());
DCHECK(proxy_method->IsProxyMethod());
- // Locate the dex cache of the original interface/Object
- mirror::DexCache* dex_cache = nullptr;
{
ReaderMutexLock mu(Thread::Current(), dex_lock_);
- for (size_t i = 0; i != dex_caches_.size(); ++i) {
- mirror::DexCache* a_dex_cache = GetDexCache(i);
- if (proxy_method->HasSameDexCacheResolvedTypes(a_dex_cache->GetResolvedTypes())) {
- dex_cache = a_dex_cache;
- break;
+ // Locate the dex cache of the original interface/Object
+ for (const GcRoot<mirror::DexCache>& root : dex_caches_) {
+ auto* dex_cache = root.Read();
+ if (proxy_method->HasSameDexCacheResolvedTypes(dex_cache->GetResolvedTypes())) {
+ mirror::ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
+ proxy_method->GetDexMethodIndex());
+ CHECK(resolved_method != nullptr);
+ return resolved_method;
}
}
}
- CHECK(dex_cache != nullptr);
- uint32_t method_idx = proxy_method->GetDexMethodIndex();
- mirror::ArtMethod* resolved_method = dex_cache->GetResolvedMethod(method_idx);
- CHECK(resolved_method != nullptr);
- return resolved_method;
+ LOG(FATAL) << "Didn't find dex cache for " << PrettyClass(proxy_class) << " "
+ << PrettyMethod(proxy_method);
+ UNREACHABLE();
}
@@ -3163,8 +3195,11 @@ mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self,
Handle<mirror::ArtMethod> prototype) {
// Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
// prototype method
- prototype->GetDeclaringClass()->GetDexCache()->SetResolvedMethod(prototype->GetDexMethodIndex(),
- prototype.Get());
+ auto* dex_cache = prototype->GetDeclaringClass()->GetDexCache();
+ // Avoid dirtying the dex cache unless we need to.
+ if (dex_cache->GetResolvedMethod(prototype->GetDexMethodIndex()) != prototype.Get()) {
+ dex_cache->SetResolvedMethod(prototype->GetDexMethodIndex(), prototype.Get());
+ }
// We steal everything from the prototype (such as DexCache, invoke stub, etc.) and then
// specialize as necessary.
mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(prototype->Clone(self));
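
The "avoid dirtying" guard above is a recurring ART trick: a reference store into a managed object fires the write barrier and can dirty a page that might otherwise stay clean and shared (e.g. a copy-on-write image page), so a redundant store is worth a read to skip. A minimal sketch of the pattern:

template <typename T>
void StoreIfChanged(T* slot, const T& value) {
  if (*slot != value) {  // Read first; only write when the value really changes.
    *slot = value;
  }
}
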
@@ -3198,6 +3233,7 @@ static void CheckProxyMethod(Handle<mirror::ArtMethod> method,
// interface prototype. The exception to this are Constructors and the Class of the Proxy itself.
CHECK(prototype->HasSameDexCacheResolvedMethods(method.Get()));
CHECK(prototype->HasSameDexCacheResolvedTypes(method.Get()));
+ CHECK_EQ(prototype->GetDeclaringClass()->GetDexCache(), method->GetDexCache());
CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex());
CHECK_STREQ(method->GetName(), prototype->GetName());
@@ -5210,11 +5246,15 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) {
"Ljava/lang/DexCache;",
"Ljava/lang/ref/Reference;",
"Ljava/lang/reflect/ArtMethod;",
+ "Ljava/lang/reflect/Constructor;",
"Ljava/lang/reflect/Field;",
+ "Ljava/lang/reflect/Method;",
"Ljava/lang/reflect/Proxy;",
"[Ljava/lang/String;",
"[Ljava/lang/reflect/ArtMethod;",
+ "[Ljava/lang/reflect/Constructor;",
"[Ljava/lang/reflect/Field;",
+ "[Ljava/lang/reflect/Method;",
"Ljava/lang/ClassLoader;",
"Ljava/lang/Throwable;",
"Ljava/lang/ClassNotFoundException;",
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 68624b08b6..1bd9f0a7e9 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -71,11 +71,15 @@ class ClassLinker {
kJavaLangDexCache,
kJavaLangRefReference,
kJavaLangReflectArtMethod,
+ kJavaLangReflectConstructor,
kJavaLangReflectField,
+ kJavaLangReflectMethod,
kJavaLangReflectProxy,
kJavaLangStringArrayClass,
kJavaLangReflectArtMethodArrayClass,
+ kJavaLangReflectConstructorArrayClass,
kJavaLangReflectFieldArrayClass,
+ kJavaLangReflectMethodArrayClass,
kJavaLangClassLoader,
kJavaLangThrowable,
kJavaLangClassNotFoundException,
@@ -148,7 +152,7 @@ class ClassLinker {
const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Finds a class by its descriptor, returning NULL if it isn't wasn't loaded
+ // Finds a class by its descriptor, returning null if it wasn't loaded
// by the given 'class_loader'.
mirror::Class* LookupClass(Thread* self, const char* descriptor, size_t hash,
mirror::ClassLoader* class_loader)
@@ -428,7 +432,7 @@ class ClassLinker {
void SetEntryPointsToInterpreter(mirror::ArtMethod* method) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Attempts to insert a class into a class table. Returns NULL if
+ // Attempts to insert a class into a class table. Returns null if
// the class was inserted, otherwise returns an existing class with
// the same descriptor and ClassLoader.
mirror::Class* InsertClass(const char* descriptor, mirror::Class* klass, size_t hash)
@@ -440,7 +444,7 @@ class ClassLinker {
mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
- DCHECK(class_roots != NULL);
+ DCHECK(class_roots != nullptr);
return class_roots;
}
@@ -653,6 +657,12 @@ class ClassLinker {
// Return the quick generic JNI stub for testing.
const void* GetRuntimeQuickGenericJniStub() const;
+ // Throw the class initialization failure recorded when first trying to initialize the given
+ // class.
+ // Note: Currently we only store the descriptor, so we cannot throw the exact throwable, only
+ // a recreation with a custom string.
+ void ThrowEarlierClassFailure(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
std::vector<const DexFile*> boot_class_path_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index a31a7852c7..7bee98f8f1 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -25,6 +25,7 @@
#include "dex_file.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/heap.h"
+#include "mirror/abstract_method.h"
#include "mirror/accessible_object.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -463,6 +464,10 @@ struct CheckOffsets {
return !error;
};
+ void addOffset(size_t offset, const char* name) {
+ offsets.push_back(CheckOffset(offset, name));
+ }
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CheckOffsets);
};
@@ -472,142 +477,162 @@ struct CheckOffsets {
struct ObjectOffsets : public CheckOffsets<mirror::Object> {
ObjectOffsets() : CheckOffsets<mirror::Object>(false, "Ljava/lang/Object;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, klass_), "shadow$_klass_"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, monitor_), "shadow$_monitor_"));
+ addOffset(OFFSETOF_MEMBER(mirror::Object, klass_), "shadow$_klass_");
+ addOffset(OFFSETOF_MEMBER(mirror::Object, monitor_), "shadow$_monitor_");
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, x_rb_ptr_), "shadow$_x_rb_ptr_"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, x_xpadding_), "shadow$_x_xpadding_"));
+ addOffset(OFFSETOF_MEMBER(mirror::Object, x_rb_ptr_), "shadow$_x_rb_ptr_");
+ addOffset(OFFSETOF_MEMBER(mirror::Object, x_xpadding_), "shadow$_x_xpadding_");
#endif
};
};
struct ArtMethodOffsets : public CheckOffsets<mirror::ArtMethod> {
ArtMethodOffsets() : CheckOffsets<mirror::ArtMethod>(false, "Ljava/lang/reflect/ArtMethod;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, declaring_class_), "declaringClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_methods_), "dexCacheResolvedMethods"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_types_), "dexCacheResolvedTypes"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex"));
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, declaring_class_), "declaringClass");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_methods_),
+ "dexCacheResolvedMethods");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_types_),
+ "dexCacheResolvedTypes");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex");
};
};
struct ClassOffsets : public CheckOffsets<mirror::Class> {
ClassOffsets() : CheckOffsets<mirror::Class>(false, "Ljava/lang/Class;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, class_loader_), "classLoader"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, class_size_), "classSize"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_), "clinitThreadId"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, component_type_), "componentType"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_), "dexCache"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_strings_), "dexCacheStrings"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_), "dexClassDefIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_), "dexTypeIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, direct_methods_), "directMethods"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_instance_fields_), "numInstanceFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_instance_fields_), "numReferenceInstanceFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_), "numReferenceStaticFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_static_fields_), "numStaticFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_), "referenceInstanceOffsets"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, sfields_), "sFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, status_), "status"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, verify_error_class_), "verifyErrorClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_), "virtualMethods"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, vtable_), "vtable"));
+ addOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, class_loader_), "classLoader");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, class_size_), "classSize");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_), "clinitThreadId");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, component_type_), "componentType");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_), "dexCache");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_strings_), "dexCacheStrings");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_), "dexClassDefIndex");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_), "dexTypeIndex");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, direct_methods_), "directMethods");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, num_instance_fields_), "numInstanceFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_instance_fields_),
+ "numReferenceInstanceFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_),
+ "numReferenceStaticFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, num_static_fields_), "numStaticFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_),
+ "referenceInstanceOffsets");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, sfields_), "sFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, status_), "status");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, verify_error_class_), "verifyErrorClass");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_), "virtualMethods");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, vtable_), "vtable");
};
};
struct StringOffsets : public CheckOffsets<mirror::String> {
StringOffsets() : CheckOffsets<mirror::String>(false, "Ljava/lang/String;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, count_), "count"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, hash_code_), "hashCode"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, offset_), "offset"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, array_), "value"));
+ addOffset(OFFSETOF_MEMBER(mirror::String, count_), "count");
+ addOffset(OFFSETOF_MEMBER(mirror::String, hash_code_), "hashCode");
+ addOffset(OFFSETOF_MEMBER(mirror::String, offset_), "offset");
+ addOffset(OFFSETOF_MEMBER(mirror::String, array_), "value");
};
};
struct ThrowableOffsets : public CheckOffsets<mirror::Throwable> {
ThrowableOffsets() : CheckOffsets<mirror::Throwable>(false, "Ljava/lang/Throwable;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, cause_), "cause"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, detail_message_), "detailMessage"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_state_), "stackState"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_trace_), "stackTrace"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, suppressed_exceptions_), "suppressedExceptions"));
+ addOffset(OFFSETOF_MEMBER(mirror::Throwable, cause_), "cause");
+ addOffset(OFFSETOF_MEMBER(mirror::Throwable, detail_message_), "detailMessage");
+ addOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_state_), "stackState");
+ addOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_trace_), "stackTrace");
+ addOffset(OFFSETOF_MEMBER(mirror::Throwable, suppressed_exceptions_), "suppressedExceptions");
};
};
struct StackTraceElementOffsets : public CheckOffsets<mirror::StackTraceElement> {
- StackTraceElementOffsets() : CheckOffsets<mirror::StackTraceElement>(false, "Ljava/lang/StackTraceElement;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, declaring_class_), "declaringClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, file_name_), "fileName"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, line_number_), "lineNumber"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, method_name_), "methodName"));
+ StackTraceElementOffsets() : CheckOffsets<mirror::StackTraceElement>(
+ false, "Ljava/lang/StackTraceElement;") {
+ addOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, declaring_class_), "declaringClass");
+ addOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, file_name_), "fileName");
+ addOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, line_number_), "lineNumber");
+ addOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, method_name_), "methodName");
};
};
struct ClassLoaderOffsets : public CheckOffsets<mirror::ClassLoader> {
ClassLoaderOffsets() : CheckOffsets<mirror::ClassLoader>(false, "Ljava/lang/ClassLoader;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, packages_), "packages"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, parent_), "parent"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, proxyCache_), "proxyCache"));
+ addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, packages_), "packages");
+ addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, parent_), "parent");
+ addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, proxyCache_), "proxyCache");
};
};
struct ProxyOffsets : public CheckOffsets<mirror::Proxy> {
ProxyOffsets() : CheckOffsets<mirror::Proxy>(false, "Ljava/lang/reflect/Proxy;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Proxy, h_), "h"));
+ addOffset(OFFSETOF_MEMBER(mirror::Proxy, h_), "h");
};
};
struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> {
DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_), "dex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_fields_), "resolvedFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_methods_), "resolvedMethods"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_types_), "resolvedTypes"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, strings_), "strings"));
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_), "dex");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_fields_), "resolvedFields");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_methods_), "resolvedMethods");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_types_), "resolvedTypes");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, strings_), "strings");
};
};
struct ReferenceOffsets : public CheckOffsets<mirror::Reference> {
ReferenceOffsets() : CheckOffsets<mirror::Reference>(false, "Ljava/lang/ref/Reference;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, pending_next_), "pendingNext"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_), "queue"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_next_), "queueNext"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, referent_), "referent"));
+ addOffset(OFFSETOF_MEMBER(mirror::Reference, pending_next_), "pendingNext");
+ addOffset(OFFSETOF_MEMBER(mirror::Reference, queue_), "queue");
+ addOffset(OFFSETOF_MEMBER(mirror::Reference, queue_next_), "queueNext");
+ addOffset(OFFSETOF_MEMBER(mirror::Reference, referent_), "referent");
};
};
struct FinalizerReferenceOffsets : public CheckOffsets<mirror::FinalizerReference> {
- FinalizerReferenceOffsets() : CheckOffsets<mirror::FinalizerReference>(false, "Ljava/lang/ref/FinalizerReference;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, next_), "next"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, prev_), "prev"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, zombie_), "zombie"));
+ FinalizerReferenceOffsets() : CheckOffsets<mirror::FinalizerReference>(
+ false, "Ljava/lang/ref/FinalizerReference;") {
+ addOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, next_), "next");
+ addOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, prev_), "prev");
+ addOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, zombie_), "zombie");
};
};
struct AccessibleObjectOffsets : public CheckOffsets<mirror::AccessibleObject> {
- AccessibleObjectOffsets() : CheckOffsets<mirror::AccessibleObject>(false, "Ljava/lang/reflect/AccessibleObject;") {
- offsets.push_back(CheckOffset(mirror::AccessibleObject::FlagOffset().Uint32Value(), "flag"));
+ AccessibleObjectOffsets() : CheckOffsets<mirror::AccessibleObject>(
+ false, "Ljava/lang/reflect/AccessibleObject;") {
+ addOffset(mirror::AccessibleObject::FlagOffset().Uint32Value(), "flag");
};
};
struct FieldOffsets : public CheckOffsets<mirror::Field> {
FieldOffsets() : CheckOffsets<mirror::Field>(false, "Ljava/lang/reflect/Field;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, access_flags_), "accessFlags"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, declaring_class_), "declaringClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, dex_field_index_), "dexFieldIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, offset_), "offset"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, type_), "type"));
+ addOffset(OFFSETOF_MEMBER(mirror::Field, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::Field, declaring_class_), "declaringClass");
+ addOffset(OFFSETOF_MEMBER(mirror::Field, dex_field_index_), "dexFieldIndex");
+ addOffset(OFFSETOF_MEMBER(mirror::Field, offset_), "offset");
+ addOffset(OFFSETOF_MEMBER(mirror::Field, type_), "type");
+ };
+};
+
+struct AbstractMethodOffsets : public CheckOffsets<mirror::AbstractMethod> {
+ AbstractMethodOffsets() : CheckOffsets<mirror::AbstractMethod>(
+ false, "Ljava/lang/reflect/AbstractMethod;") {
+ addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, art_method_), "artMethod");
+ addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, declaring_class_), "declaringClass");
+ addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, declaring_class_of_overridden_method_),
+ "declaringClassOfOverriddenMethod");
+ addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, dex_method_index_), "dexMethodIndex");
};
};
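
The hunks above replace the old offsets.push_back(CheckOffset(...)) calls with a terser addOffset() helper. The helper's definition is not shown in this section; the following is only a minimal sketch of the idiom it presumably wraps, with the types reduced to bare essentials (CheckOffset and CheckOffsets here are stand-ins, not the patch's actual classes):

#include <cstddef>
#include <vector>

// Hypothetical stand-ins for the test's real types.
struct CheckOffset {
  size_t cpp_offset;      // offset of the C++ member
  const char* java_name;  // name of the matching Java field
  CheckOffset(size_t offset, const char* name)
      : cpp_offset(offset), java_name(name) {}
};

template <typename T>
struct CheckOffsets {
  std::vector<CheckOffset> offsets;

  // addOffset() hides the push_back(CheckOffset(...)) boilerplate that the
  // removed lines spelled out at every call site.
  void addOffset(size_t offset, const char* name) {
    offsets.push_back(CheckOffset(offset, name));
  }
};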
@@ -629,6 +654,7 @@ TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) {
EXPECT_TRUE(FinalizerReferenceOffsets().Check());
EXPECT_TRUE(AccessibleObjectOffsets().Check());
EXPECT_TRUE(FieldOffsets().Check());
+ EXPECT_TRUE(AbstractMethodOffsets().Check());
}
TEST_F(ClassLinkerTest, FindClassNonexistent) {
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 60b7fa218f..7a711cc713 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -16,6 +16,7 @@
#include "common_runtime_test.h"
+#include <cstdio>
#include <dirent.h>
#include <dlfcn.h>
#include <fcntl.h>
@@ -80,7 +81,7 @@ ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix) {
}
ScratchFile::ScratchFile(File* file) {
- CHECK(file != NULL);
+ CHECK(file != nullptr);
filename_ = file->GetPath();
file_.reset(file);
}
@@ -188,6 +189,82 @@ void CommonRuntimeTest::TearDownAndroidData(const std::string& android_data, boo
}
}
+// Helper - find a directory with the following format:
+// ${ANDROID_BUILD_TOP}/${subdir1}/${subdir2}-${version}/${subdir3}/bin/
+static std::string GetAndroidToolsDir(const std::string& subdir1,
+ const std::string& subdir2,
+ const std::string& subdir3) {
+ std::string root;
+ const char* android_build_top = getenv("ANDROID_BUILD_TOP");
+ if (android_build_top != nullptr) {
+ root = android_build_top;
+ } else {
+ // Not set by the build server, so default to the current directory.
+ char* cwd = getcwd(nullptr, 0);
+ setenv("ANDROID_BUILD_TOP", cwd, 1);
+ root = cwd;
+ free(cwd);
+ }
+
+ std::string toolsdir = root + "/" + subdir1;
+ std::string founddir;
+ DIR* dir;
+ if ((dir = opendir(toolsdir.c_str())) != nullptr) {
+ float maxversion = 0;
+ struct dirent* entry;
+ while ((entry = readdir(dir)) != nullptr) {
+ std::string format = subdir2 + "-%f";
+ float version;
+ if (std::sscanf(entry->d_name, format.c_str(), &version) == 1) {
+ if (version > maxversion) {
+ maxversion = version;
+ founddir = toolsdir + "/" + entry->d_name + "/" + subdir3 + "/bin/";
+ }
+ }
+ }
+ closedir(dir);
+ }
+
+ if (founddir.empty()) {
+ ADD_FAILURE() << "Cannot find Android tools directory.";
+ }
+ return founddir;
+}
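
GetAndroidToolsDir() builds its sscanf format string at runtime, e.g. "x86_64-linux-glibc2.15-%f", and keeps the entry with the numerically largest version suffix. A standalone illustration of that parsing step (note the float comparison would rank a hypothetical "-4.9" above "-4.10", which is tolerable for the directory names this scans):

#include <cstdio>
#include <string>

int main() {
  const std::string subdir2 = "aarch64-linux-android";
  const std::string format = subdir2 + "-%f";  // "aarch64-linux-android-%f"
  float version = 0.0f;
  // Matches names like "aarch64-linux-android-4.9" and extracts 4.9f.
  if (std::sscanf("aarch64-linux-android-4.9", format.c_str(), &version) == 1) {
    std::printf("parsed version: %.1f\n", version);  // prints 4.9
  }
  return 0;
}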
+
+std::string CommonRuntimeTest::GetAndroidHostToolsDir() {
+ return GetAndroidToolsDir("prebuilts/gcc/linux-x86/host",
+ "x86_64-linux-glibc2.15",
+ "x86_64-linux");
+}
+
+std::string CommonRuntimeTest::GetAndroidTargetToolsDir(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ return GetAndroidToolsDir("prebuilts/gcc/linux-x86/arm",
+ "arm-linux-androideabi",
+ "arm-linux-androideabi");
+ case kArm64:
+ return GetAndroidToolsDir("prebuilts/gcc/linux-x86/aarch64",
+ "aarch64-linux-android",
+ "aarch64-linux-android");
+ case kX86:
+ case kX86_64:
+ return GetAndroidToolsDir("prebuilts/gcc/linux-x86/x86",
+ "x86_64-linux-android",
+ "x86_64-linux-android");
+ case kMips:
+ case kMips64:
+ return GetAndroidToolsDir("prebuilts/gcc/linux-x86/mips",
+ "mips64el-linux-android",
+ "mips64el-linux-android");
+ case kNone:
+ break;
+ }
+ ADD_FAILURE() << "Invalid isa " << isa;
+ return "";
+}
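
A test that needs a prebuilt binary would combine the returned bin/ path with a tool name. A hedged usage sketch; the assembler name below is illustrative and not taken from this patch:

// Assumes the declarations added to runtime/common_runtime_test.h below.
static std::string FindTargetAssembler() {
  std::string bin_dir = CommonRuntimeTest::GetAndroidTargetToolsDir(kArm64);
  if (bin_dir.empty()) {
    return "";  // GetAndroidToolsDir() already recorded a test failure.
  }
  return bin_dir + "aarch64-linux-android-as";  // illustrative tool name
}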
+
std::string CommonRuntimeTest::GetCoreArtLocation() {
return GetCoreFileLocation("art");
}
@@ -482,7 +559,7 @@ std::string CommonRuntimeTest::GetCoreFileLocation(const char* suffix) {
std::string location;
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
- CHECK(host_dir != NULL);
+ CHECK(host_dir != nullptr);
location = StringPrintf("%s/framework/core.%s", host_dir, suffix);
} else {
location = StringPrintf("/data/art-test/core.%s", suffix);
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 5fbc2ee680..991737893a 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -22,6 +22,7 @@
#include <string>
+#include "arch/instruction_set.h"
#include "base/mutex.h"
#include "globals.h"
#include "os.h"
@@ -79,6 +80,12 @@ class CommonRuntimeTest : public testing::Test {
// Gets the path of the libcore dex file.
static std::string GetLibCoreDexFileName();
+ // Returns the bin directory that contains the host's prebuilt tools.
+ static std::string GetAndroidHostToolsDir();
+
+ // Returns the bin directory that contains the target's prebuilt tools.
+ static std::string GetAndroidTargetToolsDir(InstructionSet isa);
+
protected:
static bool IsHost() {
return !kIsTargetBuild;
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 407746fe95..0808999e24 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -35,7 +35,7 @@ namespace art {
static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (referrer != NULL) {
+ if (referrer != nullptr) {
std::string location(referrer->GetLocation());
if (!location.empty()) {
os << " (declaration of '" << PrettyDescriptor(referrer)
@@ -45,10 +45,10 @@ static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
}
static void ThrowException(const char* exception_descriptor,
- mirror::Class* referrer, const char* fmt, va_list* args = NULL)
+ mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
- if (args != NULL) {
+ if (args != nullptr) {
std::string vmsg;
StringAppendV(&vmsg, fmt, *args);
msg << vmsg;
@@ -61,10 +61,10 @@ static void ThrowException(const char* exception_descriptor,
}
static void ThrowWrappedException(const char* exception_descriptor,
- mirror::Class* referrer, const char* fmt, va_list* args = NULL)
+ mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
- if (args != NULL) {
+ if (args != nullptr) {
std::string vmsg;
StringAppendV(&vmsg, fmt, *args);
msg << vmsg;
@@ -79,7 +79,7 @@ static void ThrowWrappedException(const char* exception_descriptor,
// AbstractMethodError
void ThrowAbstractMethodError(mirror::ArtMethod* method) {
- ThrowException("Ljava/lang/AbstractMethodError;", NULL,
+ ThrowException("Ljava/lang/AbstractMethodError;", nullptr,
StringPrintf("abstract method \"%s\"",
PrettyMethod(method).c_str()).c_str());
}
@@ -87,20 +87,20 @@ void ThrowAbstractMethodError(mirror::ArtMethod* method) {
// ArithmeticException
void ThrowArithmeticExceptionDivideByZero() {
- ThrowException("Ljava/lang/ArithmeticException;", NULL, "divide by zero");
+ ThrowException("Ljava/lang/ArithmeticException;", nullptr, "divide by zero");
}
// ArrayIndexOutOfBoundsException
void ThrowArrayIndexOutOfBoundsException(int index, int length) {
- ThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", NULL,
+ ThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", nullptr,
StringPrintf("length=%d; index=%d", length, index).c_str());
}
// ArrayStoreException
void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class) {
- ThrowException("Ljava/lang/ArrayStoreException;", NULL,
+ ThrowException("Ljava/lang/ArrayStoreException;", nullptr,
StringPrintf("%s cannot be stored in an array of type %s",
PrettyDescriptor(element_class).c_str(),
PrettyDescriptor(array_class).c_str()).c_str());
@@ -109,14 +109,14 @@ void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array
// ClassCastException
void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type) {
- ThrowException("Ljava/lang/ClassCastException;", NULL,
+ ThrowException("Ljava/lang/ClassCastException;", nullptr,
StringPrintf("%s cannot be cast to %s",
PrettyDescriptor(src_type).c_str(),
PrettyDescriptor(dest_type).c_str()).c_str());
}
void ThrowClassCastException(const char* msg) {
- ThrowException("Ljava/lang/ClassCastException;", NULL, msg);
+ ThrowException("Ljava/lang/ClassCastException;", nullptr, msg);
}
// ClassCircularityError
@@ -174,7 +174,7 @@ void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer,
msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '"
<< PrettyMethod(referrer) << "'";
ThrowException("Ljava/lang/IllegalAccessError;",
- referrer != NULL ? referrer->GetClass() : NULL,
+ referrer != nullptr ? referrer->GetClass() : nullptr,
msg.str().c_str());
}
@@ -188,13 +188,13 @@ void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) {
// IllegalAccessException
void ThrowIllegalAccessException(const char* msg) {
- ThrowException("Ljava/lang/IllegalAccessException;", NULL, msg);
+ ThrowException("Ljava/lang/IllegalAccessException;", nullptr, msg);
}
// IllegalArgumentException
void ThrowIllegalArgumentException(const char* msg) {
- ThrowException("Ljava/lang/IllegalArgumentException;", NULL, msg);
+ ThrowException("Ljava/lang/IllegalArgumentException;", nullptr, msg);
}
@@ -207,7 +207,7 @@ void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType foun
msg << "The method '" << PrettyMethod(method) << "' was expected to be of type "
<< expected_type << " but instead was found to be of type " << found_type;
ThrowException("Ljava/lang/IncompatibleClassChangeError;",
- referrer != NULL ? referrer->GetClass() : NULL,
+ referrer != nullptr ? referrer->GetClass() : nullptr,
msg.str().c_str());
}
@@ -216,14 +216,14 @@ void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(mirror::ArtMetho
mirror::ArtMethod* referrer) {
// Referrer is calling interface_method on this_object; however, the interface_method isn't
// implemented by this_object.
- CHECK(this_object != NULL);
+ CHECK(this_object != nullptr);
std::ostringstream msg;
msg << "Class '" << PrettyDescriptor(this_object->GetClass())
<< "' does not implement interface '"
<< PrettyDescriptor(interface_method->GetDeclaringClass())
<< "' in call to '" << PrettyMethod(interface_method) << "'";
ThrowException("Ljava/lang/IncompatibleClassChangeError;",
- referrer != NULL ? referrer->GetClass() : NULL,
+ referrer != nullptr ? referrer->GetClass() : nullptr,
msg.str().c_str());
}
@@ -249,14 +249,14 @@ void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt,
void ThrowIOException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException("Ljava/io/IOException;", NULL, fmt, &args);
+ ThrowException("Ljava/io/IOException;", nullptr, fmt, &args);
va_end(args);
}
void ThrowWrappedIOException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowWrappedException("Ljava/io/IOException;", NULL, fmt, &args);
+ ThrowWrappedException("Ljava/io/IOException;", nullptr, fmt, &args);
va_end(args);
}
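
ThrowIOException and ThrowWrappedIOException show the pattern used throughout this file: a public variadic function captures its arguments with va_start and forwards a va_list* to a shared worker whose args parameter defaults to nullptr, so the worker can also be called without format arguments. A self-contained sketch of the same idiom:

#include <cstdarg>
#include <cstdio>

// Worker: formats only when an argument list was actually supplied.
static void Report(const char* fmt, va_list* args = nullptr) {
  if (args != nullptr) {
    std::vfprintf(stderr, fmt, *args);
  } else {
    std::fputs(fmt, stderr);
  }
}

// Variadic wrapper, mirroring ThrowIOException's shape.
static void ReportError(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  Report(fmt, &args);
  va_end(args);
}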
@@ -272,12 +272,12 @@ void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...) {
// NegativeArraySizeException
void ThrowNegativeArraySizeException(int size) {
- ThrowException("Ljava/lang/NegativeArraySizeException;", NULL,
+ ThrowException("Ljava/lang/NegativeArraySizeException;", nullptr,
StringPrintf("%d", size).c_str());
}
void ThrowNegativeArraySizeException(const char* msg) {
- ThrowException("Ljava/lang/NegativeArraySizeException;", NULL, msg);
+ ThrowException("Ljava/lang/NegativeArraySizeException;", nullptr, msg);
}
// NoSuchFieldError
@@ -319,7 +319,7 @@ void ThrowNullPointerExceptionForFieldAccess(ArtField* field, bool is_read) {
std::ostringstream msg;
msg << "Attempt to " << (is_read ? "read from" : "write to")
<< " field '" << PrettyField(field, true) << "' on a null object reference";
- ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+ ThrowException("Ljava/lang/NullPointerException;", nullptr, msg.str().c_str());
}
static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
@@ -329,7 +329,7 @@ static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
std::ostringstream msg;
msg << "Attempt to invoke " << type << " method '"
<< PrettyMethod(method_idx, dex_file, true) << "' on a null object reference";
- ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+ ThrowException("Ljava/lang/NullPointerException;", nullptr, msg.str().c_str());
}
void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
@@ -379,7 +379,7 @@ void ThrowNullPointerExceptionFromDexPC() {
// method is invoked at this location.
mirror::ArtMethod* invoked_method =
verifier::MethodVerifier::FindInvokedMethodAtDexPc(method, throw_dex_pc);
- if (invoked_method != NULL) {
+ if (invoked_method != nullptr) {
// NPE with precise message.
ThrowNullPointerExceptionForMethodAccess(invoked_method, kVirtual);
} else {
@@ -411,7 +411,7 @@ void ThrowNullPointerExceptionFromDexPC() {
// field is accessed at this location.
ArtField* field =
verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
- if (field != NULL) {
+ if (field != nullptr) {
// NPE with precise message.
ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
} else {
@@ -443,7 +443,7 @@ void ThrowNullPointerExceptionFromDexPC() {
// field is accessed at this location.
ArtField* field =
verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
- if (field != NULL) {
+ if (field != nullptr) {
// NPE with precise message.
ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
} else {
@@ -459,7 +459,7 @@ void ThrowNullPointerExceptionFromDexPC() {
case Instruction::AGET_BYTE:
case Instruction::AGET_CHAR:
case Instruction::AGET_SHORT:
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
"Attempt to read from null array");
break;
case Instruction::APUT:
@@ -469,11 +469,11 @@ void ThrowNullPointerExceptionFromDexPC() {
case Instruction::APUT_BYTE:
case Instruction::APUT_CHAR:
case Instruction::APUT_SHORT:
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
"Attempt to write to null array");
break;
case Instruction::ARRAY_LENGTH:
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
"Attempt to get length of null array");
break;
default: {
@@ -481,7 +481,7 @@ void ThrowNullPointerExceptionFromDexPC() {
// message/logging is so we can improve any cases we've missed in the future.
const DexFile* dex_file =
method->GetDeclaringClass()->GetDexCache()->GetDexFile();
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
StringPrintf("Null pointer exception during instruction '%s'",
instr->DumpString(dex_file).c_str()).c_str());
break;
@@ -490,7 +490,7 @@ void ThrowNullPointerExceptionFromDexPC() {
}
void ThrowNullPointerException(const char* msg) {
- ThrowException("Ljava/lang/NullPointerException;", NULL, msg);
+ ThrowException("Ljava/lang/NullPointerException;", nullptr, msg);
}
// RuntimeException
@@ -498,7 +498,7 @@ void ThrowNullPointerException(const char* msg) {
void ThrowRuntimeException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException("Ljava/lang/RuntimeException;", NULL, fmt, &args);
+ ThrowException("Ljava/lang/RuntimeException;", nullptr, fmt, &args);
va_end(args);
}
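
The bulk of this file's hunks are the mechanical NULL to nullptr migration. The payoff is type safety: NULL is an integer constant that can silently pick an integer overload, while nullptr has its own dedicated pointer-only type. A minimal demonstration:

#include <iostream>

static void f(int) { std::cout << "f(int)\n"; }
static void f(char*) { std::cout << "f(char*)\n"; }

int main() {
  f(nullptr);   // unambiguous: calls f(char*)
  // f(NULL);   // calls f(int) or is ambiguous, depending on NULL's definition
  return 0;
}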
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c074b5410a..f3ce552230 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -174,7 +174,8 @@ class AllocRecord {
jobject type_; // This is a weak global.
size_t byte_count_;
uint16_t thin_lock_id_;
- AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth]; // Unused entries have nullptr method.
+ // Unused entries have a null method.
+ AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];
};
class Breakpoint {
@@ -714,7 +715,7 @@ std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
if (o == nullptr) {
if (error == JDWP::ERR_NONE) {
- return "NULL";
+ return "null";
} else {
return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
}
@@ -727,7 +728,7 @@ std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
std::string Dbg::GetClassName(mirror::Class* klass) {
if (klass == nullptr) {
- return "NULL";
+ return "null";
}
std::string temp;
return DescriptorToName(klass->GetDescriptor(&temp));
@@ -1409,7 +1410,7 @@ void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, ui
std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
mirror::ArtMethod* m = FromMethodId(method_id);
if (m == nullptr) {
- return "NULL";
+ return "null";
}
return m->GetName();
}
@@ -1417,7 +1418,7 @@ std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
ArtField* f = FromFieldId(field_id);
if (f == nullptr) {
- return "NULL";
+ return "null";
}
return f->GetName();
}
@@ -1721,7 +1722,7 @@ static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::Obje
if (receiver_class == nullptr && o != nullptr) {
receiver_class = o->GetClass();
}
- // TODO: should we give up now if receiver_class is nullptr?
+ // TODO: should we give up now if receiver_class is null?
if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
return JDWP::ERR_INVALID_FIELDID;
@@ -2176,7 +2177,7 @@ void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>*
}
mirror::Object* peer = t->GetPeer();
if (peer == nullptr) {
- // peer might be NULL if the thread is still starting up. We can't tell the debugger about
+ // peer might be null if the thread is still starting up. We can't tell the debugger about
// this thread yet.
// TODO: if we identified threads to the debugger by their Thread*
// rather than their peer's mirror::Object*, we could fix this.
@@ -3390,7 +3391,7 @@ bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::Art
}
bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) {
- // The upcall can be nullptr and in that case we don't need to do anything.
+ // The upcall can be null and in that case we don't need to do anything.
if (m == nullptr) {
return false;
}
@@ -3427,7 +3428,7 @@ bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror:
}
bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m) {
- // The upcall can be nullptr and in that case we don't need to do anything.
+ // The upcall can be null and in that case we don't need to do anything.
if (m == nullptr) {
return false;
}
diff --git a/runtime/debugger.h b/runtime/debugger.h
index c287121f84..fe90eb613e 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -68,7 +68,7 @@ struct DebugInvokeReq {
GcRoot<mirror::Class> klass;
GcRoot<mirror::ArtMethod> method;
const uint32_t arg_count;
- uint64_t* const arg_values; // will be NULL if arg_count_ == 0
+ uint64_t* const arg_values; // will be null if arg_count_ == 0
const uint32_t options;
/* result */
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index c68fdca03e..760006a480 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -32,7 +32,7 @@ inline int32_t DexFile::GetStringLength(const StringId& string_id) const {
inline const char* DexFile::GetStringDataAndUtf16Length(const StringId& string_id,
uint32_t* utf16_length) const {
- DCHECK(utf16_length != NULL) << GetLocation();
+ DCHECK(utf16_length != nullptr) << GetLocation();
const uint8_t* ptr = begin_ + string_id.string_data_off_;
*utf16_length = DecodeUnsignedLeb128(&ptr);
return reinterpret_cast<const char*>(ptr);
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 03a47a3f04..0589cdd3a2 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -57,7 +57,7 @@ const uint8_t DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
const uint8_t DexFile::kDexMagicVersion[] = { '0', '3', '5', '\0' };
static int OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
- CHECK(magic != NULL);
+ CHECK(magic != nullptr);
ScopedFd fd(open(filename, O_RDONLY, 0));
if (fd.get() == -1) {
*error_msg = StringPrintf("Unable to open '%s' : %s", filename, strerror(errno));
@@ -77,7 +77,7 @@ static int OpenAndReadMagic(const char* filename, uint32_t* magic, std::string*
}
bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg) {
- CHECK(checksum != NULL);
+ CHECK(checksum != nullptr);
uint32_t magic;
// Strip ":...", which is the location
@@ -98,14 +98,15 @@ bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string*
return false;
}
if (IsZipMagic(magic)) {
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd.release(), filename, error_msg));
- if (zip_archive.get() == NULL) {
+ std::unique_ptr<ZipArchive> zip_archive(
+ ZipArchive::OpenFromFd(fd.release(), filename, error_msg));
+ if (zip_archive.get() == nullptr) {
*error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", file_part,
error_msg->c_str());
return false;
}
std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name, error_msg));
- if (zip_entry.get() == NULL) {
+ if (zip_entry.get() == nullptr) {
*error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", file_part,
zip_entry_name, error_msg->c_str());
return false;
@@ -114,8 +115,9 @@ bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string*
return true;
}
if (IsDexMagic(magic)) {
- std::unique_ptr<const DexFile> dex_file(DexFile::OpenFile(fd.release(), filename, false, error_msg));
- if (dex_file.get() == NULL) {
+ std::unique_ptr<const DexFile> dex_file(
+ DexFile::OpenFile(fd.release(), filename, false, error_msg));
+ if (dex_file.get() == nullptr) {
return false;
}
*checksum = dex_file->GetHeader().checksum_;
@@ -127,7 +129,7 @@ bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string*
bool DexFile::Open(const char* filename, const char* location, std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
uint32_t magic;
ScopedFd fd(OpenAndReadMagic(filename, &magic, error_msg));
if (fd.get() == -1) {
@@ -152,7 +154,7 @@ bool DexFile::Open(const char* filename, const char* location, std::string* erro
}
int DexFile::GetPermissions() const {
- if (mem_map_.get() == NULL) {
+ if (mem_map_.get() == nullptr) {
return 0;
} else {
return mem_map_->GetProtect();
@@ -165,7 +167,7 @@ bool DexFile::IsReadOnly() const {
bool DexFile::EnableWrite() const {
CHECK(IsReadOnly());
- if (mem_map_.get() == NULL) {
+ if (mem_map_.get() == nullptr) {
return false;
} else {
return mem_map_->Protect(PROT_READ | PROT_WRITE);
@@ -174,7 +176,7 @@ bool DexFile::EnableWrite() const {
bool DexFile::DisableWrite() const {
CHECK(!IsReadOnly());
- if (mem_map_.get() == NULL) {
+ if (mem_map_.get() == nullptr) {
return false;
} else {
return mem_map_->Protect(PROT_READ);
@@ -233,7 +235,7 @@ const char* DexFile::kClassesDex = "classes.dex";
bool DexFile::OpenZip(int fd, const std::string& location, std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
if (zip_archive.get() == nullptr) {
DCHECK(!error_msg->empty());
@@ -260,12 +262,12 @@ std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive, cons
ZipOpenErrorCode* error_code) {
CHECK(!location.empty());
std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
- if (zip_entry.get() == NULL) {
+ if (zip_entry.get() == nullptr) {
*error_code = ZipOpenErrorCode::kEntryNotFound;
return nullptr;
}
std::unique_ptr<MemMap> map(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
- if (map.get() == NULL) {
+ if (map.get() == nullptr) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
error_msg->c_str());
*error_code = ZipOpenErrorCode::kExtractToMemoryError;
@@ -297,7 +299,7 @@ std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive, cons
bool DexFile::OpenFromZip(const ZipArchive& zip_archive, const std::string& location,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
ZipOpenErrorCode error_code;
std::unique_ptr<const DexFile> dex_file(Open(zip_archive, kClassesDex, location, error_msg,
&error_code));
@@ -371,7 +373,7 @@ DexFile::DexFile(const uint8_t* base, size_t size,
find_class_def_misses_(0),
class_def_index_(nullptr),
oat_dex_file_(oat_dex_file) {
- CHECK(begin_ != NULL) << GetLocation();
+ CHECK(begin_ != nullptr) << GetLocation();
CHECK_GT(size_, 0U) << GetLocation();
}
@@ -487,7 +489,7 @@ const DexFile::ClassDef* DexFile::FindClassDef(uint16_t type_idx) const {
return &class_def;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_klass,
@@ -522,7 +524,7 @@ const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_kl
}
}
}
- return NULL;
+ return nullptr;
}
const DexFile::MethodId* DexFile::FindMethodId(const DexFile::TypeId& declaring_klass,
@@ -557,7 +559,7 @@ const DexFile::MethodId* DexFile::FindMethodId(const DexFile::TypeId& declaring_
}
}
}
- return NULL;
+ return nullptr;
}
const DexFile::StringId* DexFile::FindStringId(const char* string) const {
@@ -576,7 +578,7 @@ const DexFile::StringId* DexFile::FindStringId(const char* string) const {
return &str_id;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::StringId* DexFile::FindStringId(const uint16_t* string, size_t length) const {
@@ -595,7 +597,7 @@ const DexFile::StringId* DexFile::FindStringId(const uint16_t* string, size_t le
return &str_id;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::TypeId* DexFile::FindTypeId(uint32_t string_idx) const {
@@ -612,7 +614,7 @@ const DexFile::TypeId* DexFile::FindTypeId(uint32_t string_idx) const {
return &type_id;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::ProtoId* DexFile::FindProtoId(uint16_t return_type_idx,
@@ -648,7 +650,7 @@ const DexFile::ProtoId* DexFile::FindProtoId(uint16_t return_type_idx,
return &proto;
}
}
- return NULL;
+ return nullptr;
}
// Given a signature place the type ids into the given vector
@@ -687,11 +689,11 @@ bool DexFile::CreateTypeList(const StringPiece& signature, uint16_t* return_type
// TODO: avoid creating a std::string just to get a 0-terminated char array
std::string descriptor(signature.data() + start_offset, offset - start_offset);
const DexFile::StringId* string_id = FindStringId(descriptor.c_str());
- if (string_id == NULL) {
+ if (string_id == nullptr) {
return false;
}
const DexFile::TypeId* type_id = FindTypeId(GetIndexForStringId(*string_id));
- if (type_id == NULL) {
+ if (type_id == nullptr) {
return false;
}
uint16_t type_idx = GetIndexForTypeId(*type_id);
@@ -713,7 +715,7 @@ const Signature DexFile::CreateSignature(const StringPiece& signature) const {
return Signature::NoSignature();
}
const ProtoId* proto_id = FindProtoId(return_type_idx, param_type_indices);
- if (proto_id == NULL) {
+ if (proto_id == nullptr) {
return Signature::NoSignature();
}
return Signature(this, *proto_id);
@@ -727,12 +729,12 @@ int32_t DexFile::GetLineNumFromPC(mirror::ArtMethod* method, uint32_t rel_pc) co
}
const CodeItem* code_item = GetCodeItem(method->GetCodeItemOffset());
- DCHECK(code_item != NULL) << PrettyMethod(method) << " " << GetLocation();
+ DCHECK(code_item != nullptr) << PrettyMethod(method) << " " << GetLocation();
// A method with no line number info should return -1
LineNumFromPcContext context(rel_pc, -1);
DecodeDebugInfo(code_item, method->IsStatic(), method->GetDexMethodIndex(), LineNumForPcCb,
- NULL, &context);
+ nullptr, &context);
return context.line_num_;
}
@@ -771,19 +773,20 @@ int32_t DexFile::FindCatchHandlerOffset(const CodeItem &code_item, uint32_t addr
void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32_t method_idx,
DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
- void* context, const uint8_t* stream, LocalInfo* local_in_reg) const {
+ void* context, const uint8_t* stream, LocalInfo* local_in_reg)
+ const {
uint32_t line = DecodeUnsignedLeb128(&stream);
uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
uint16_t arg_reg = code_item->registers_size_ - code_item->ins_size_;
uint32_t address = 0;
- bool need_locals = (local_cb != NULL);
+ bool need_locals = (local_cb != nullptr);
if (!is_static) {
if (need_locals) {
const char* descriptor = GetMethodDeclaringClassDescriptor(GetMethodId(method_idx));
local_in_reg[arg_reg].name_ = "this";
local_in_reg[arg_reg].descriptor_ = descriptor;
- local_in_reg[arg_reg].signature_ = NULL;
+ local_in_reg[arg_reg].signature_ = nullptr;
local_in_reg[arg_reg].start_address_ = 0;
local_in_reg[arg_reg].is_live_ = true;
}
@@ -803,7 +806,7 @@ void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32
const char* name = StringDataByIdx(id);
local_in_reg[arg_reg].name_ = name;
local_in_reg[arg_reg].descriptor_ = descriptor;
- local_in_reg[arg_reg].signature_ = NULL;
+ local_in_reg[arg_reg].signature_ = nullptr;
local_in_reg[arg_reg].start_address_ = address;
local_in_reg[arg_reg].is_live_ = true;
}
@@ -895,7 +898,7 @@ void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32
}
if (need_locals) {
- if (local_in_reg[reg].name_ == NULL || local_in_reg[reg].descriptor_ == NULL) {
+ if (local_in_reg[reg].name_ == nullptr || local_in_reg[reg].descriptor_ == nullptr) {
LOG(ERROR) << "invalid stream - no name or descriptor in " << GetLocation();
return;
}
@@ -920,7 +923,7 @@ void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32
address += adjopcode / DBG_LINE_RANGE;
line += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
- if (position_cb != NULL) {
+ if (position_cb != nullptr) {
if (position_cb(context, address, line)) {
// early exit
return;
@@ -937,14 +940,16 @@ void DexFile::DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_
void* context) const {
DCHECK(code_item != nullptr);
const uint8_t* stream = GetDebugInfoStream(code_item);
- std::unique_ptr<LocalInfo[]> local_in_reg(local_cb != NULL ?
+ std::unique_ptr<LocalInfo[]> local_in_reg(local_cb != nullptr ?
new LocalInfo[code_item->registers_size_] :
- NULL);
- if (stream != NULL) {
- DecodeDebugInfo0(code_item, is_static, method_idx, position_cb, local_cb, context, stream, &local_in_reg[0]);
+ nullptr);
+ if (stream != nullptr) {
+ DecodeDebugInfo0(code_item, is_static, method_idx, position_cb, local_cb, context, stream,
+ &local_in_reg[0]);
}
for (int reg = 0; reg < code_item->registers_size_; reg++) {
- InvokeLocalCbIfLive(context, reg, code_item->insns_size_in_code_units_, &local_in_reg[0], local_cb);
+ InvokeLocalCbIfLive(context, reg, code_item->insns_size_in_code_units_, &local_in_reg[0],
+ local_cb);
}
}
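
DecodeDebugInfo0() above walks the debug_info byte stream with DecodeUnsignedLeb128, the variable-length integer encoding dex files use for fields such as line and parameters_size. ART has its own helper for this elsewhere in the runtime; the following is only a sketch of the ULEB128 scheme itself (the real decoder also bounds the byte count):

#include <cstdint>

// Decodes one unsigned LEB128 value and advances *data past it.
static uint32_t DecodeUleb128Sketch(const uint8_t** data) {
  const uint8_t* ptr = *data;
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *ptr++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);  // high bit set: more bytes follow
  *data = ptr;
  return result;
}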
@@ -1051,7 +1056,7 @@ std::ostream& operator<<(std::ostream& os, const Signature& sig) {
// Decodes the header section from the class data bytes.
void ClassDataItemIterator::ReadClassDataHeader() {
- CHECK(ptr_pos_ != NULL);
+ CHECK(ptr_pos_ != nullptr);
header_.static_fields_size_ = DecodeUnsignedLeb128(&ptr_pos_);
header_.instance_fields_size_ = DecodeUnsignedLeb128(&ptr_pos_);
header_.direct_methods_size_ = DecodeUnsignedLeb128(&ptr_pos_);
@@ -1129,17 +1134,16 @@ static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_ri
return val;
}
-EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(const DexFile& dex_file,
- Handle<mirror::DexCache>* dex_cache,
- Handle<mirror::ClassLoader>* class_loader,
- ClassLinker* linker,
- const DexFile::ClassDef& class_def)
+EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(
+ const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader, ClassLinker* linker,
+ const DexFile::ClassDef& class_def)
: dex_file_(dex_file), dex_cache_(dex_cache), class_loader_(class_loader), linker_(linker),
array_size_(), pos_(-1), type_(kByte) {
DCHECK(dex_cache != nullptr);
DCHECK(class_loader != nullptr);
ptr_ = dex_file.GetEncodedStaticFieldValuesArray(class_def);
- if (ptr_ == NULL) {
+ if (ptr_ == nullptr) {
array_size_ = 0;
} else {
array_size_ = DecodeUnsignedLeb128(&ptr_);
@@ -1199,7 +1203,7 @@ void EncodedStaticFieldValueIterator::Next() {
UNIMPLEMENTED(FATAL) << ": type " << type_;
UNREACHABLE();
case kNull:
- jval_.l = NULL;
+ jval_.l = nullptr;
width = 0;
break;
default:
@@ -1212,7 +1216,8 @@ void EncodedStaticFieldValueIterator::Next() {
template<bool kTransactionActive>
void EncodedStaticFieldValueIterator::ReadValueToField(ArtField* field) const {
switch (type_) {
- case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z); break;
+ case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z);
+ break;
case kByte: field->SetByte<kTransactionActive>(field->GetDeclaringClass(), jval_.b); break;
case kShort: field->SetShort<kTransactionActive>(field->GetDeclaringClass(), jval_.s); break;
case kChar: field->SetChar<kTransactionActive>(field->GetDeclaringClass(), jval_.c); break;
@@ -1220,7 +1225,7 @@ void EncodedStaticFieldValueIterator::ReadValueToField(ArtField* field) const {
case kLong: field->SetLong<kTransactionActive>(field->GetDeclaringClass(), jval_.j); break;
case kFloat: field->SetFloat<kTransactionActive>(field->GetDeclaringClass(), jval_.f); break;
case kDouble: field->SetDouble<kTransactionActive>(field->GetDeclaringClass(), jval_.d); break;
- case kNull: field->SetObject<kTransactionActive>(field->GetDeclaringClass(), NULL); break;
+ case kNull: field->SetObject<kTransactionActive>(field->GetDeclaringClass(), nullptr); break;
case kString: {
mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, *dex_cache_);
field->SetObject<kTransactionActive>(field->GetDeclaringClass(), resolved);
@@ -1275,7 +1280,7 @@ void CatchHandlerIterator::Init(const DexFile::CodeItem& code_item,
Init(DexFile::GetCatchHandlerData(code_item, offset));
} else {
// Not found, initialize as empty
- current_data_ = NULL;
+ current_data_ = nullptr;
remaining_count_ = -1;
catch_all_ = false;
DCHECK(!HasNext());
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 5bdd9b6c94..0d07358283 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -394,7 +394,7 @@ class DexFile {
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
std::string* error_msg) {
- return OpenMemory(base, size, location, location_checksum, NULL, oat_dex_file, error_msg);
+ return OpenMemory(base, size, location, location_checksum, nullptr, oat_dex_file, error_msg);
}
// Open all classesXXX.dex files from a zip archive.
@@ -448,7 +448,7 @@ class DexFile {
}
const Header& GetHeader() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return *header_;
}
@@ -463,7 +463,7 @@ class DexFile {
// Returns the number of string identifiers in the .dex file.
size_t NumStringIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->string_ids_size_;
}
@@ -495,7 +495,7 @@ class DexFile {
const char* StringDataAndUtf16LengthByIdx(uint32_t idx, uint32_t* utf16_length) const {
if (idx == kDexNoIndex) {
*utf16_length = 0;
- return NULL;
+ return nullptr;
}
const StringId& string_id = GetStringId(idx);
return GetStringDataAndUtf16Length(string_id, utf16_length);
@@ -514,7 +514,7 @@ class DexFile {
// Returns the number of type identifiers in the .dex file.
uint32_t NumTypeIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->type_ids_size_;
}
@@ -553,7 +553,7 @@ class DexFile {
// Returns the number of field identifiers in the .dex file.
size_t NumFieldIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->field_ids_size_;
}
@@ -593,7 +593,7 @@ class DexFile {
// Returns the number of method identifiers in the .dex file.
size_t NumMethodIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->method_ids_size_;
}
@@ -643,7 +643,7 @@ class DexFile {
}
// Returns the number of class definitions in the .dex file.
uint32_t NumClassDefs() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->class_defs_size_;
}
@@ -673,7 +673,7 @@ class DexFile {
const TypeList* GetInterfacesList(const ClassDef& class_def) const {
if (class_def.interfaces_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
const uint8_t* addr = begin_ + class_def.interfaces_off_;
return reinterpret_cast<const TypeList*>(addr);
@@ -683,7 +683,7 @@ class DexFile {
// Returns a pointer to the raw memory mapped class_data_item
const uint8_t* GetClassData(const ClassDef& class_def) const {
if (class_def.class_data_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
return begin_ + class_def.class_data_off_;
}
@@ -692,7 +692,7 @@ class DexFile {
//
const CodeItem* GetCodeItem(const uint32_t code_off) const {
if (code_off == 0) {
- return NULL; // native or abstract method
+ return nullptr; // native or abstract method
} else {
const uint8_t* addr = begin_ + code_off;
return reinterpret_cast<const CodeItem*>(addr);
@@ -705,7 +705,7 @@ class DexFile {
// Returns the number of prototype identifiers in the .dex file.
size_t NumProtoIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->proto_ids_size_;
}
@@ -745,7 +745,7 @@ class DexFile {
const TypeList* GetProtoParameters(const ProtoId& proto_id) const {
if (proto_id.parameters_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
const uint8_t* addr = begin_ + proto_id.parameters_off_;
return reinterpret_cast<const TypeList*>(addr);
@@ -778,7 +778,7 @@ class DexFile {
// Get the pointer to the start of the debugging data
const uint8_t* GetDebugInfoStream(const CodeItem* code_item) const {
if (code_item->debug_info_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
return begin_ + code_item->debug_info_off_;
}
@@ -818,7 +818,8 @@ class DexFile {
struct LocalInfo {
LocalInfo()
- : name_(NULL), descriptor_(NULL), signature_(NULL), start_address_(0), is_live_(false) {}
+ : name_(nullptr), descriptor_(nullptr), signature_(nullptr), start_address_(0),
+ is_live_(false) {}
const char* name_; // E.g., list
const char* descriptor_; // E.g., Ljava/util/LinkedList;
@@ -841,10 +842,10 @@ class DexFile {
void InvokeLocalCbIfLive(void* context, int reg, uint32_t end_address,
LocalInfo* local_in_reg, DexDebugNewLocalCb local_cb) const {
- if (local_cb != NULL && local_in_reg[reg].is_live_) {
+ if (local_cb != nullptr && local_in_reg[reg].is_live_) {
local_cb(context, reg, local_in_reg[reg].start_address_, end_address,
local_in_reg[reg].name_, local_in_reg[reg].descriptor_,
- local_in_reg[reg].signature_ != NULL ? local_in_reg[reg].signature_ : "");
+ local_in_reg[reg].signature_ != nullptr ? local_in_reg[reg].signature_ : "");
}
}
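
The call inside InvokeLocalCbIfLive() fixes the argument order a DexDebugNewLocalCb must accept: context, register, start and end address, then name, descriptor, and signature. A callback written against that call; the exact parameter types of the real typedef may differ slightly:

#include <cstdint>
#include <cstdio>

static void PrintLocalCb(void* context, int reg,
                         uint32_t start_address, uint32_t end_address,
                         const char* name, const char* descriptor,
                         const char* signature) {
  (void)context;
  (void)signature;  // passed as "" when the local has no signature
  std::printf("v%d: %s %s live [0x%x, 0x%x)\n", reg, descriptor, name,
              static_cast<unsigned>(start_address),
              static_cast<unsigned>(end_address));
}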
@@ -865,7 +866,7 @@ class DexFile {
const char* GetSourceFile(const ClassDef& class_def) const {
if (class_def.source_file_idx_ == 0xffffffff) {
- return NULL;
+ return nullptr;
} else {
return StringDataByIdx(class_def.source_file_idx_);
}
@@ -926,7 +927,7 @@ class DexFile {
kVerifyError
};
- // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-nullptr
+ // Opens a .dex file from the entry_name in a zip archive. error_code is undefined when a non-null
// value is returned.
static std::unique_ptr<const DexFile> Open(const ZipArchive& zip_archive, const char* entry_name,
const std::string& location, std::string* error_msg,
@@ -1055,7 +1056,7 @@ class DexFileParameterIterator {
DexFileParameterIterator(const DexFile& dex_file, const DexFile::ProtoId& proto_id)
: dex_file_(dex_file), size_(0), pos_(0) {
type_list_ = dex_file_.GetProtoParameters(proto_id);
- if (type_list_ != NULL) {
+ if (type_list_ != nullptr) {
size_ = type_list_->Size();
}
}
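
The constructor above tolerates a prototype with no parameter list: GetProtoParameters() returns null for a no-arg method, size_ stays 0, and iteration is simply empty. A hedged usage sketch, assuming the iterator's usual HasNext()/Next()/GetDescriptor() surface (not shown in this hunk):

#include <cstdio>

// Prints each parameter descriptor of a method prototype.
static void DumpParameters(const DexFile& dex_file,
                           const DexFile::ProtoId& proto_id) {
  for (DexFileParameterIterator it(dex_file, proto_id); it.HasNext(); it.Next()) {
    std::printf("param: %s\n", it.GetDescriptor());
  }
}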
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 09ef3eef77..4d099e1ca5 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -33,7 +33,7 @@ class DexFileTest : public CommonRuntimeTest {};
TEST_F(DexFileTest, Open) {
ScopedObjectAccess soa(Thread::Current());
std::unique_ptr<const DexFile> dex(OpenTestDexFile("Nested"));
- ASSERT_TRUE(dex.get() != NULL);
+ ASSERT_TRUE(dex.get() != nullptr);
}
static const uint8_t kBase64Map[256] = {
@@ -136,14 +136,14 @@ static const char kRawDex[] =
static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
const char* location) {
// decode base64
- CHECK(base64 != NULL);
+ CHECK(base64 != nullptr);
size_t length;
std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
- CHECK(dex_bytes.get() != NULL);
+ CHECK(dex_bytes.get() != nullptr);
// write to provided file
std::unique_ptr<File> file(OS::CreateEmptyFile(location));
- CHECK(file.get() != NULL);
+ CHECK(file.get() != nullptr);
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
@@ -168,7 +168,7 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
TEST_F(DexFileTest, Header) {
ScratchFile tmp;
std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kRawDex, tmp.GetFilename().c_str()));
- ASSERT_TRUE(raw.get() != NULL);
+ ASSERT_TRUE(raw.get() != nullptr);
const DexFile::Header& header = raw->GetHeader();
// TODO: header.magic_
@@ -234,7 +234,7 @@ TEST_F(DexFileTest, GetMethodSignature) {
ASSERT_STREQ("LGetMethodSignature;", raw->GetClassDescriptor(class_def));
const uint8_t* class_data = raw->GetClassData(class_def);
- ASSERT_TRUE(class_data != NULL);
+ ASSERT_TRUE(class_data != nullptr);
ClassDataItemIterator it(*raw, class_data);
EXPECT_EQ(1u, it.NumDirectMethods());
@@ -281,8 +281,8 @@ TEST_F(DexFileTest, FindStringId) {
EXPECT_EQ(1U, raw->NumClassDefs());
const char* strings[] = { "LGetMethodSignature;", "Ljava/lang/Float;", "Ljava/lang/Object;",
- "D", "I", "J", NULL };
- for (size_t i = 0; strings[i] != NULL; i++) {
+ "D", "I", "J", nullptr };
+ for (size_t i = 0; strings[i] != nullptr; i++) {
const char* str = strings[i];
const DexFile::StringId* str_id = raw->FindStringId(str);
const char* dex_str = raw->GetStringData(*str_id);
@@ -294,10 +294,10 @@ TEST_F(DexFileTest, FindTypeId) {
for (size_t i = 0; i < java_lang_dex_file_->NumTypeIds(); i++) {
const char* type_str = java_lang_dex_file_->StringByTypeIdx(i);
const DexFile::StringId* type_str_id = java_lang_dex_file_->FindStringId(type_str);
- ASSERT_TRUE(type_str_id != NULL);
+ ASSERT_TRUE(type_str_id != nullptr);
uint32_t type_str_idx = java_lang_dex_file_->GetIndexForStringId(*type_str_id);
const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
- ASSERT_TRUE(type_id != NULL);
+ ASSERT_TRUE(type_id != nullptr);
EXPECT_EQ(java_lang_dex_file_->GetIndexForTypeId(*type_id), i);
}
}
@@ -307,14 +307,14 @@ TEST_F(DexFileTest, FindProtoId) {
const DexFile::ProtoId& to_find = java_lang_dex_file_->GetProtoId(i);
const DexFile::TypeList* to_find_tl = java_lang_dex_file_->GetProtoParameters(to_find);
std::vector<uint16_t> to_find_types;
- if (to_find_tl != NULL) {
+ if (to_find_tl != nullptr) {
for (size_t j = 0; j < to_find_tl->Size(); j++) {
to_find_types.push_back(to_find_tl->GetTypeItem(j).type_idx_);
}
}
const DexFile::ProtoId* found =
java_lang_dex_file_->FindProtoId(to_find.return_type_idx_, to_find_types);
- ASSERT_TRUE(found != NULL);
+ ASSERT_TRUE(found != nullptr);
EXPECT_EQ(java_lang_dex_file_->GetIndexForProtoId(*found), i);
}
}
@@ -326,7 +326,7 @@ TEST_F(DexFileTest, FindMethodId) {
const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
const DexFile::ProtoId& signature = java_lang_dex_file_->GetProtoId(to_find.proto_idx_);
const DexFile::MethodId* found = java_lang_dex_file_->FindMethodId(klass, name, signature);
- ASSERT_TRUE(found != NULL) << "Didn't find method " << i << ": "
+ ASSERT_TRUE(found != nullptr) << "Didn't find method " << i << ": "
<< java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
<< java_lang_dex_file_->GetStringData(name)
<< java_lang_dex_file_->GetMethodSignature(to_find);
@@ -341,7 +341,7 @@ TEST_F(DexFileTest, FindFieldId) {
const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
const DexFile::TypeId& type = java_lang_dex_file_->GetTypeId(to_find.type_idx_);
const DexFile::FieldId* found = java_lang_dex_file_->FindFieldId(klass, name, type);
- ASSERT_TRUE(found != NULL) << "Didn't find field " << i << ": "
+ ASSERT_TRUE(found != nullptr) << "Didn't find field " << i << ": "
<< java_lang_dex_file_->StringByTypeIdx(to_find.type_idx_) << " "
<< java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
<< java_lang_dex_file_->GetStringData(name);
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index a3f3de8514..2603975910 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -1473,7 +1473,7 @@ bool DexFileVerifier::CheckInterStringIdItem() {
}
// Check ordering between items.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::StringId* prev_item = reinterpret_cast<const DexFile::StringId*>(previous_item_);
const char* prev_str = dex_file_->GetStringData(*prev_item);
const char* str = dex_file_->GetStringData(*item);
@@ -1499,7 +1499,7 @@ bool DexFileVerifier::CheckInterTypeIdItem() {
}
// Check ordering between items.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::TypeId* prev_item = reinterpret_cast<const DexFile::TypeId*>(previous_item_);
if (UNLIKELY(prev_item->descriptor_idx_ >= item->descriptor_idx_)) {
ErrorStringPrintf("Out-of-order type_ids: %x then %x",
@@ -1548,7 +1548,7 @@ bool DexFileVerifier::CheckInterProtoIdItem() {
}
// Check ordering between items. This relies on type_ids being in order.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::ProtoId* prev = reinterpret_cast<const DexFile::ProtoId*>(previous_item_);
if (UNLIKELY(prev->return_type_idx_ > item->return_type_idx_)) {
ErrorStringPrintf("Out-of-order proto_id return types");
@@ -1610,7 +1610,7 @@ bool DexFileVerifier::CheckInterFieldIdItem() {
}
// Check ordering between items. This relies on the other sections being in order.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::FieldId* prev_item = reinterpret_cast<const DexFile::FieldId*>(previous_item_);
if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
ErrorStringPrintf("Out-of-order field_ids");
@@ -1657,7 +1657,7 @@ bool DexFileVerifier::CheckInterMethodIdItem() {
}
// Check ordering between items. This relies on the other sections being in order.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::MethodId* prev_item = reinterpret_cast<const DexFile::MethodId*>(previous_item_);
if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
ErrorStringPrintf("Out-of-order method_ids");
@@ -1728,7 +1728,7 @@ bool DexFileVerifier::CheckInterClassDefItem() {
}
const DexFile::TypeList* interfaces = dex_file_->GetInterfacesList(*item);
- if (interfaces != NULL) {
+ if (interfaces != nullptr) {
uint32_t size = interfaces->Size();
// Ensure that all interfaces refer to classes (not arrays or primitives).
@@ -1952,7 +1952,7 @@ bool DexFileVerifier::CheckInterSectionIterate(size_t offset, uint32_t count, ui
}
// Iterate through the items in the section.
- previous_item_ = NULL;
+ previous_item_ = nullptr;
for (uint32_t i = 0; i < count; i++) {
uint32_t new_offset = (offset + alignment_mask) & ~alignment_mask;
ptr_ = begin_ + new_offset;
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 18bf2e7a88..877dfc2efd 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -36,7 +36,7 @@ class DexFileVerifier {
private:
DexFileVerifier(const DexFile* dex_file, const uint8_t* begin, size_t size, const char* location)
: dex_file_(dex_file), begin_(begin), size_(size), location_(location),
- header_(&dex_file->GetHeader()), ptr_(NULL), previous_item_(NULL) {
+ header_(&dex_file->GetHeader()), ptr_(nullptr), previous_item_(nullptr) {
}
bool Verify();
@@ -99,12 +99,12 @@ class DexFileVerifier {
bool CheckInterSection();
// Load a string by (type) index. Checks whether the index is in bounds, printing the error if
- // not. If there is an error, nullptr is returned.
+ // not. If there is an error, null is returned.
const char* CheckLoadStringByIdx(uint32_t idx, const char* error_fmt);
const char* CheckLoadStringByTypeIdx(uint32_t type_idx, const char* error_fmt);
// Load a field/method Id by index. Checks whether the index is in bounds, printing the error if
- // not. If there is an error, nullptr is returned.
+ // not. If there is an error, null is returned.
const DexFile::FieldId* CheckLoadFieldId(uint32_t idx, const char* error_fmt);
const DexFile::MethodId* CheckLoadMethodId(uint32_t idx, const char* error_fmt);
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 00ca8a9169..95a47cc6e9 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -105,14 +105,14 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
const char* location,
std::string* error_msg) {
// decode base64
- CHECK(base64 != NULL);
+ CHECK(base64 != nullptr);
size_t length;
std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
- CHECK(dex_bytes.get() != NULL);
+ CHECK(dex_bytes.get() != nullptr);
// write to provided file
std::unique_ptr<File> file(OS::CreateEmptyFile(location));
- CHECK(file.get() != NULL);
+ CHECK(file.get() != nullptr);
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
@@ -178,7 +178,7 @@ static std::unique_ptr<const DexFile> FixChecksumAndOpen(uint8_t* bytes, size_t
// write to provided file
std::unique_ptr<File> file(OS::CreateEmptyFile(location));
- CHECK(file.get() != NULL);
+ CHECK(file.get() != nullptr);
if (!file->WriteFully(bytes, length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
@@ -205,7 +205,7 @@ static bool ModifyAndLoad(const char* location, size_t offset, uint8_t new_val,
// Decode base64.
size_t length;
std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(kGoodTestDex, &length));
- CHECK(dex_bytes.get() != NULL);
+ CHECK(dex_bytes.get() != nullptr);
// Make modifications.
dex_bytes.get()[offset] = new_val;
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index d3b9eb47df..c64c21e45d 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -183,7 +183,7 @@ class Instruction {
// Reads an instruction out of the stream at the specified address.
static const Instruction* At(const uint16_t* code) {
- DCHECK(code != NULL);
+ DCHECK(code != nullptr);
return reinterpret_cast<const Instruction*>(code);
}
diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h
index 14e316f2a5..7fae277c14 100644
--- a/runtime/dex_method_iterator.h
+++ b/runtime/dex_method_iterator.h
@@ -30,8 +30,8 @@ class DexMethodIterator {
found_next_(false),
dex_file_index_(0),
class_def_index_(0),
- class_def_(NULL),
- class_data_(NULL),
+ class_def_(nullptr),
+ class_data_(nullptr),
direct_method_(false) {
CHECK_NE(0U, dex_files_.size());
}
@@ -51,20 +51,20 @@ class DexMethodIterator {
dex_file_index_++;
continue;
}
- if (class_def_ == NULL) {
+ if (class_def_ == nullptr) {
class_def_ = &GetDexFileInternal().GetClassDef(class_def_index_);
}
- if (class_data_ == NULL) {
+ if (class_data_ == nullptr) {
class_data_ = GetDexFileInternal().GetClassData(*class_def_);
- if (class_data_ == NULL) {
+ if (class_data_ == nullptr) {
// empty class, such as a marker interface
// End of this class, advance and retry.
- class_def_ = NULL;
+ class_def_ = nullptr;
class_def_index_++;
continue;
}
}
- if (it_.get() == NULL) {
+ if (it_.get() == nullptr) {
it_.reset(new ClassDataItemIterator(GetDexFileInternal(), class_data_));
// Skip fields
while (GetIterator().HasNextStaticField()) {
@@ -88,16 +88,16 @@ class DexMethodIterator {
}
// End of this class, advance and retry.
DCHECK(!GetIterator().HasNext());
- it_.reset(NULL);
- class_data_ = NULL;
- class_def_ = NULL;
+ it_.reset(nullptr);
+ class_data_ = nullptr;
+ class_def_ = nullptr;
class_def_index_++;
}
}
void Next() {
found_next_ = false;
- if (it_.get() != NULL) {
+ if (it_.get() != nullptr) {
// Advance to next method if we currently are looking at a class.
GetIterator().Next();
}
@@ -115,20 +115,20 @@ class DexMethodIterator {
InvokeType GetInvokeType() {
CHECK(HasNext());
- CHECK(class_def_ != NULL);
+ CHECK(class_def_ != nullptr);
return GetIterator().GetMethodInvokeType(*class_def_);
}
private:
ClassDataItemIterator& GetIterator() const {
- CHECK(it_.get() != NULL);
+ CHECK(it_.get() != nullptr);
return *it_.get();
}
const DexFile& GetDexFileInternal() const {
CHECK_LT(dex_file_index_, dex_files_.size());
const DexFile* dex_file = dex_files_[dex_file_index_];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
return *dex_file;
}
diff --git a/runtime/elf.h b/runtime/elf.h
index 60b5248ed0..4514bb2688 100644
--- a/runtime/elf.h
+++ b/runtime/elf.h
@@ -1852,6 +1852,38 @@ enum {
VER_NEED_CURRENT = 1
};
+struct ElfTypes32 {
+ typedef Elf32_Addr Addr;
+ typedef Elf32_Off Off;
+ typedef Elf32_Half Half;
+ typedef Elf32_Word Word;
+ typedef Elf32_Sword Sword;
+ typedef Elf32_Ehdr Ehdr;
+ typedef Elf32_Shdr Shdr;
+ typedef Elf32_Sym Sym;
+ typedef Elf32_Rel Rel;
+ typedef Elf32_Rela Rela;
+ typedef Elf32_Phdr Phdr;
+ typedef Elf32_Dyn Dyn;
+};
+
+struct ElfTypes64 {
+ typedef Elf64_Addr Addr;
+ typedef Elf64_Off Off;
+ typedef Elf64_Half Half;
+ typedef Elf64_Word Word;
+ typedef Elf64_Sword Sword;
+ typedef Elf64_Xword Xword;
+ typedef Elf64_Sxword Sxword;
+ typedef Elf64_Ehdr Ehdr;
+ typedef Elf64_Shdr Shdr;
+ typedef Elf64_Sym Sym;
+ typedef Elf64_Rel Rel;
+ typedef Elf64_Rela Rela;
+ typedef Elf64_Phdr Phdr;
+ typedef Elf64_Dyn Dyn;
+};
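
ElfTypes32 and ElfTypes64 bundle each ELF class's typedefs into a single traits struct so that templates like ElfFileImpl, rewritten below, can take one ElfTypes parameter instead of eleven separate typename parameters. The pattern in isolation, using a hypothetical class for illustration:

// Traits-struct pattern: one template parameter carries a family of types.
template <typename ElfTypes>
class ElfFileSketch {
 public:
  typedef typename ElfTypes::Ehdr Ehdr;

  explicit ElfFileSketch(const Ehdr* header) : header_(header) {}

  // Member types come from the traits, e.g. Elf32_Half or Elf64_Half.
  typename ElfTypes::Half SectionCount() const { return header_->e_shnum; }

 private:
  const Ehdr* header_;
};

// Instantiated per ELF class:
//   ElfFileSketch<ElfTypes32> file32(ehdr32);
//   ElfFileSketch<ElfTypes64> file64(ehdr64);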
+
// BEGIN android-changed
#endif // ART_RUNTIME_ELF_H_
// END android-changed
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index f2b013f864..e909e64e7a 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -110,12 +110,10 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
delete entry;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::ElfFileImpl(File* file, bool writable, bool program_header_only, uint8_t* requested_base)
+template <typename ElfTypes>
+ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable,
+ bool program_header_only,
+ uint8_t* requested_base)
: file_(file),
writable_(writable),
program_header_only_(program_header_only),
@@ -138,20 +136,12 @@ ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
CHECK(file != nullptr);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>*
- ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Open(File* file, bool writable, bool program_header_only,
- std::string* error_msg, uint8_t* requested_base) {
- std::unique_ptr<ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>>
- elf_file(new ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- (file, writable, program_header_only, requested_base));
+template <typename ElfTypes>
+ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(
+ File* file, bool writable, bool program_header_only,
+ std::string* error_msg, uint8_t* requested_base) {
+ std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
+ (file, writable, program_header_only, requested_base));
int prot;
int flags;
if (writable) {
@@ -167,32 +157,20 @@ ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return elf_file.release();
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>*
- ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Open(File* file, int prot, int flags, std::string* error_msg) {
- std::unique_ptr<ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>>
- elf_file(new ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- (file, (prot & PROT_WRITE) == PROT_WRITE, /*program_header_only*/false,
- /*requested_base*/nullptr));
+template <typename ElfTypes>
+ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(
+ File* file, int prot, int flags, std::string* error_msg) {
+ std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
+ (file, (prot & PROT_WRITE) == PROT_WRITE, /*program_header_only*/false,
+ /*requested_base*/nullptr));
if (!elf_file->Setup(prot, flags, error_msg)) {
return nullptr;
}
return elf_file.release();
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Setup(int prot, int flags, std::string* error_msg) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, std::string* error_msg) {
int64_t temp_file_length = file_->GetLength();
if (temp_file_length < 0) {
errno = -temp_file_length;
@@ -349,12 +327,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::~ElfFileImpl() {
+template <typename ElfTypes>
+ElfFileImpl<ElfTypes>::~ElfFileImpl() {
STLDeleteElements(&segments_);
delete symtab_symbol_table_;
delete dynsym_symbol_table_;
@@ -364,13 +338,9 @@ ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::CheckAndSet(Elf32_Off offset, const char* label,
- uint8_t** target, std::string* error_msg) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::CheckAndSet(Elf32_Off offset, const char* label,
+ uint8_t** target, std::string* error_msg) {
if (Begin() + offset >= End()) {
*error_msg = StringPrintf("Offset %d is out of range for %s in ELF file: '%s'", offset, label,
file_->GetPath().c_str());
@@ -380,12 +350,9 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::CheckSectionsLinked(const uint8_t* source, const uint8_t* target) const {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::CheckSectionsLinked(const uint8_t* source,
+ const uint8_t* target) const {
// Only works in whole-program mode, as we need to iterate over the sections.
// Note that we normally can't search by type, as duplicates are allowed for most section types.
if (program_header_only_) {
@@ -416,12 +383,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return target_found && source_section != nullptr && source_section->sh_link == target_index;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::CheckSectionsExist(std::string* error_msg) const {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::CheckSectionsExist(std::string* error_msg) const {
if (!program_header_only_) {
// If in full mode, need section headers.
if (section_headers_start_ == nullptr) {
@@ -504,12 +467,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::SetMap(MemMap* map, std::string* error_msg) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::SetMap(MemMap* map, std::string* error_msg) {
if (map == nullptr) {
// MemMap::Open should have already set an error.
DCHECK(!error_msg->empty());
@@ -643,64 +602,41 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Ehdr& ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHeader() const {
+template <typename ElfTypes>
+typename ElfTypes::Ehdr& ElfFileImpl<ElfTypes>::GetHeader() const {
CHECK(header_ != nullptr); // Header has been checked in SetMap. This is a sanity check.
return *header_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetProgramHeadersStart() const {
+template <typename ElfTypes>
+uint8_t* ElfFileImpl<ElfTypes>::GetProgramHeadersStart() const {
CHECK(program_headers_start_ != nullptr); // Header has been set in Setup. This is a sanity
// check.
return program_headers_start_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSectionHeadersStart() const {
+template <typename ElfTypes>
+uint8_t* ElfFileImpl<ElfTypes>::GetSectionHeadersStart() const {
CHECK(!program_header_only_); // Only used in "full" mode.
CHECK(section_headers_start_ != nullptr); // Is checked in CheckSectionsExist. Sanity check.
return section_headers_start_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Phdr& ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetDynamicProgramHeader() const {
+template <typename ElfTypes>
+typename ElfTypes::Phdr& ElfFileImpl<ElfTypes>::GetDynamicProgramHeader() const {
CHECK(dynamic_program_header_ != nullptr); // Is checked in CheckSectionsExist. Sanity check.
return *dynamic_program_header_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Dyn* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetDynamicSectionStart() const {
+template <typename ElfTypes>
+typename ElfTypes::Dyn* ElfFileImpl<ElfTypes>::GetDynamicSectionStart() const {
CHECK(dynamic_section_start_ != nullptr); // Is checked in CheckSectionsExist. Sanity check.
return dynamic_section_start_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSymbolSectionStart(Elf_Word section_type) const {
+template <typename ElfTypes>
+typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::GetSymbolSectionStart(
+ Elf_Word section_type) const {
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
@@ -718,12 +654,9 @@ Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const char* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetStringSectionStart(Elf_Word section_type) const {
+template <typename ElfTypes>
+const char* ElfFileImpl<ElfTypes>::GetStringSectionStart(
+ Elf_Word section_type) const {
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
@@ -739,12 +672,9 @@ const char* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const char* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetString(Elf_Word section_type, Elf_Word i) const {
+template <typename ElfTypes>
+const char* ElfFileImpl<ElfTypes>::GetString(Elf_Word section_type,
+ Elf_Word i) const {
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
if (i == 0) {
return nullptr;
@@ -759,39 +689,23 @@ const char* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
// WARNING: The following methods do not check for an error condition (non-existent hash section).
// It is the caller's job to do this.
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHashSectionStart() const {
+template <typename ElfTypes>
+typename ElfTypes::Word* ElfFileImpl<ElfTypes>::GetHashSectionStart() const {
return hash_section_start_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHashBucketNum() const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetHashBucketNum() const {
return GetHashSectionStart()[0];
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHashChainNum() const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetHashChainNum() const {
return GetHashSectionStart()[1];
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHashBucket(size_t i, bool* ok) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetHashBucket(size_t i, bool* ok) const {
if (i >= GetHashBucketNum()) {
*ok = false;
return 0;
@@ -801,12 +715,8 @@ Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return GetHashSectionStart()[2 + i];
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHashChain(size_t i, bool* ok) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetHashChain(size_t i, bool* ok) const {
if (i >= GetHashChainNum()) {
*ok = false;
return 0;
@@ -816,21 +726,13 @@ Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return GetHashSectionStart()[2 + GetHashBucketNum() + i];
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetProgramHeaderNum() const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetProgramHeaderNum() const {
return GetHeader().e_phnum;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Phdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetProgramHeader(Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Phdr* ElfFileImpl<ElfTypes>::GetProgramHeader(Elf_Word i) const {
CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath(); // Sanity check for caller.
uint8_t* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
if (program_header >= End()) {
@@ -839,12 +741,8 @@ Elf_Phdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return reinterpret_cast<Elf_Phdr*>(program_header);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Phdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindProgamHeaderByType(Elf_Word type) const {
+template <typename ElfTypes>
+typename ElfTypes::Phdr* ElfFileImpl<ElfTypes>::FindProgamHeaderByType(Elf_Word type) const {
for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
Elf_Phdr* program_header = GetProgramHeader(i);
if (program_header->p_type == type) {
@@ -854,21 +752,13 @@ Elf_Phdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return nullptr;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSectionHeaderNum() const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetSectionHeaderNum() const {
return GetHeader().e_shnum;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSectionHeader(Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::GetSectionHeader(Elf_Word i) const {
// Can only access arbitrary sections when we have the whole file, not just program header.
// Even if we Load(), it doesn't bring in all the sections.
CHECK(!program_header_only_) << file_->GetPath();
@@ -882,12 +772,8 @@ Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return reinterpret_cast<Elf_Shdr*>(section_header);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindSectionByType(Elf_Word type) const {
+template <typename ElfTypes>
+typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::FindSectionByType(Elf_Word type) const {
// Can only access arbitrary sections when we have the whole file, not just program header.
// We could change this to switch on known types if they were detected during loading.
CHECK(!program_header_only_) << file_->GetPath();
@@ -914,21 +800,14 @@ static unsigned elfhash(const char *_name) {
return h;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSectionNameStringSection() const {
+template <typename ElfTypes>
+typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::GetSectionNameStringSection() const {
return GetSectionHeader(GetHeader().e_shstrndx);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindDynamicSymbolAddress(const std::string& symbol_name) const {
+template <typename ElfTypes>
+const uint8_t* ElfFileImpl<ElfTypes>::FindDynamicSymbolAddress(
+ const std::string& symbol_name) const {
// Check that we have a hash section.
if (GetHashSectionStart() == nullptr) {
return nullptr; // Failure condition.
@@ -944,12 +823,9 @@ const uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
// WARNING: Only called from FindDynamicSymbolAddress. Elides check for hash section.
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindDynamicSymbol(const std::string& symbol_name) const {
+template <typename ElfTypes>
+const typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::FindDynamicSymbol(
+ const std::string& symbol_name) const {
if (GetHashBucketNum() == 0) {
// No dynamic symbols at all.
return nullptr;
@@ -978,34 +854,21 @@ const Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return nullptr;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::IsSymbolSectionType(Elf_Word section_type) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::IsSymbolSectionType(Elf_Word section_type) {
return ((section_type == SHT_SYMTAB) || (section_type == SHT_DYNSYM));
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSymbolNum(Elf_Shdr& section_header) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetSymbolNum(Elf_Shdr& section_header) const {
CHECK(IsSymbolSectionType(section_header.sh_type))
<< file_->GetPath() << " " << section_header.sh_type;
CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath();
return section_header.sh_size / section_header.sh_entsize;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSymbol(Elf_Word section_type,
- Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::GetSymbol(Elf_Word section_type, Elf_Word i) const {
Elf_Sym* sym_start = GetSymbolSectionStart(section_type);
if (sym_start == nullptr) {
return nullptr;
@@ -1013,14 +876,9 @@ Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return sym_start + i;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-typename ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::SymbolTable** ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSymbolTable(Elf_Word section_type) {
+template <typename ElfTypes>
+typename ElfFileImpl<ElfTypes>::SymbolTable**
+ElfFileImpl<ElfTypes>::GetSymbolTable(Elf_Word section_type) {
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
@@ -1036,14 +894,9 @@ typename ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindSymbolByName(Elf_Word section_type,
- const std::string& symbol_name,
- bool build_map) {
+template <typename ElfTypes>
+typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::FindSymbolByName(
+ Elf_Word section_type, const std::string& symbol_name, bool build_map) {
CHECK(!program_header_only_) << file_->GetPath();
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
@@ -1122,14 +975,9 @@ Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return nullptr;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Addr ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindSymbolAddress(Elf_Word section_type,
- const std::string& symbol_name,
- bool build_map) {
+template <typename ElfTypes>
+typename ElfTypes::Addr ElfFileImpl<ElfTypes>::FindSymbolAddress(
+ Elf_Word section_type, const std::string& symbol_name, bool build_map) {
Elf_Sym* symbol = FindSymbolByName(section_type, symbol_name, build_map);
if (symbol == nullptr) {
return 0;
@@ -1137,12 +985,9 @@ Elf_Addr ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return symbol->st_value;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const char* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetString(Elf_Shdr& string_section, Elf_Word i) const {
+template <typename ElfTypes>
+const char* ElfFileImpl<ElfTypes>::GetString(Elf_Shdr& string_section,
+ Elf_Word i) const {
CHECK(!program_header_only_) << file_->GetPath();
// TODO: remove this static_cast from enum when using -std=gnu++0x
if (static_cast<Elf_Word>(SHT_STRTAB) != string_section.sh_type) {
@@ -1162,124 +1007,80 @@ const char* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return reinterpret_cast<const char*>(string);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetDynamicNum() const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetDynamicNum() const {
return GetDynamicProgramHeader().p_filesz / sizeof(Elf_Dyn);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Dyn& ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetDynamic(Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Dyn& ElfFileImpl<ElfTypes>::GetDynamic(Elf_Word i) const {
CHECK_LT(i, GetDynamicNum()) << file_->GetPath();
return *(GetDynamicSectionStart() + i);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Dyn* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindDynamicByType(Elf_Sword type) const {
+template <typename ElfTypes>
+typename ElfTypes::Dyn* ElfFileImpl<ElfTypes>::FindDynamicByType(Elf_Sword type) const {
for (Elf_Word i = 0; i < GetDynamicNum(); i++) {
Elf_Dyn* dyn = &GetDynamic(i);
if (dyn->d_tag == type) {
return dyn;
}
}
- return NULL;
+ return nullptr;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindDynamicValueByType(Elf_Sword type) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::FindDynamicValueByType(Elf_Sword type) const {
Elf_Dyn* dyn = FindDynamicByType(type);
- if (dyn == NULL) {
+ if (dyn == nullptr) {
return 0;
} else {
return dyn->d_un.d_val;
}
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Rel* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRelSectionStart(Elf_Shdr& section_header) const {
+template <typename ElfTypes>
+typename ElfTypes::Rel* ElfFileImpl<ElfTypes>::GetRelSectionStart(Elf_Shdr& section_header) const {
CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
return reinterpret_cast<Elf_Rel*>(Begin() + section_header.sh_offset);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRelNum(Elf_Shdr& section_header) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetRelNum(Elf_Shdr& section_header) const {
CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath();
return section_header.sh_size / section_header.sh_entsize;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Rel& ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRel(Elf_Shdr& section_header, Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Rel& ElfFileImpl<ElfTypes>::GetRel(Elf_Shdr& section_header, Elf_Word i) const {
CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
CHECK_LT(i, GetRelNum(section_header)) << file_->GetPath();
return *(GetRelSectionStart(section_header) + i);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Rela* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRelaSectionStart(Elf_Shdr& section_header) const {
+template <typename ElfTypes>
+typename ElfTypes::Rela* ElfFileImpl<ElfTypes>::GetRelaSectionStart(Elf_Shdr& section_header) const {
CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
return reinterpret_cast<Elf_Rela*>(Begin() + section_header.sh_offset);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRelaNum(Elf_Shdr& section_header) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetRelaNum(Elf_Shdr& section_header) const {
CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
return section_header.sh_size / section_header.sh_entsize;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Rela& ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRela(Elf_Shdr& section_header, Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Rela& ElfFileImpl<ElfTypes>::GetRela(Elf_Shdr& section_header, Elf_Word i) const {
CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
CHECK_LT(i, GetRelaNum(section_header)) << file_->GetPath();
return *(GetRelaSectionStart(section_header) + i);
}
// Based on bionic phdr_table_get_load_size
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-size_t ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetLoadedSize() const {
+template <typename ElfTypes>
+size_t ElfFileImpl<ElfTypes>::GetLoadedSize() const {
Elf_Addr min_vaddr = 0xFFFFFFFFu;
Elf_Addr max_vaddr = 0x00000000u;
for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
@@ -1303,12 +1104,8 @@ size_t ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return loaded_size;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Load(bool executable, std::string* error_msg) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::Load(bool executable, std::string* error_msg) {
CHECK(program_header_only_) << file_->GetPath();
if (executable) {
@@ -1543,12 +1340,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::ValidPointer(const uint8_t* start) const {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::ValidPointer(const uint8_t* start) const {
for (size_t i = 0; i < segments_.size(); ++i) {
const MemMap* segment = segments_[i];
if (segment->Begin() <= start && start < segment->End()) {
@@ -1559,12 +1352,9 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindSectionByName(const std::string& name) const {
+template <typename ElfTypes>
+typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::FindSectionByName(
+ const std::string& name) const {
CHECK(!program_header_only_);
Elf_Shdr* shstrtab_sec = GetSectionNameStringSection();
if (shstrtab_sec == nullptr) {
@@ -1586,12 +1376,8 @@ Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return nullptr;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupDebugSections(typename std::make_signed<Elf_Off>::type base_address_delta) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupDebugSections(typename std::make_signed<Elf_Off>::type base_address_delta) {
const Elf_Shdr* debug_info = FindSectionByName(".debug_info");
const Elf_Shdr* debug_abbrev = FindSectionByName(".debug_abbrev");
const Elf_Shdr* debug_str = FindSectionByName(".debug_str");
@@ -1606,9 +1392,6 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
if (base_address_delta == 0) {
return true;
}
- if (!ApplyOatPatchesTo(".eh_frame", base_address_delta)) {
- return false;
- }
if (!ApplyOatPatchesTo(".debug_info", base_address_delta)) {
return false;
}
@@ -1618,13 +1401,10 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::ApplyOatPatchesTo(const char* target_section_name,
- typename std::make_signed<Elf_Off>::type delta) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::ApplyOatPatchesTo(
+ const char* target_section_name,
+ typename std::make_signed<Elf_Off>::type delta) {
auto patches_section = FindSectionByName(".oat_patches");
if (patches_section == nullptr) {
LOG(ERROR) << ".oat_patches section not found.";
@@ -1651,15 +1431,12 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
// Apply .oat_patches to given section.
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::ApplyOatPatches(const uint8_t* patches, const uint8_t* patches_end,
- const char* target_section_name,
- typename std::make_signed<Elf_Off>::type delta,
- uint8_t* to_patch, const uint8_t* to_patch_end) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::ApplyOatPatches(
+ const uint8_t* patches, const uint8_t* patches_end,
+ const char* target_section_name,
+ typename std::make_signed<Elf_Off>::type delta,
+ uint8_t* to_patch, const uint8_t* to_patch_end) {
// Read null-terminated section name.
const char* section_name;
while ((section_name = reinterpret_cast<const char*>(patches))[0] != '\0') {
@@ -1684,12 +1461,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return false;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-void ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GdbJITSupport() {
+template <typename ElfTypes>
+void ElfFileImpl<ElfTypes>::GdbJITSupport() {
// We only get here if we only are mapping the program header.
DCHECK(program_header_only_);
@@ -1697,15 +1470,12 @@ void ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
std::string error_msg;
// Make it MAP_PRIVATE so we can just give it to gdb if all the necessary
// sections are there.
- std::unique_ptr<ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>>
- all_ptr(Open(const_cast<File*>(file_), PROT_READ | PROT_WRITE,
- MAP_PRIVATE, &error_msg));
+ std::unique_ptr<ElfFileImpl<ElfTypes>> all_ptr(
+ Open(const_cast<File*>(file_), PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
if (all_ptr.get() == nullptr) {
return;
}
- ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>& all = *all_ptr;
+ ElfFileImpl<ElfTypes>& all = *all_ptr;
// We need the eh_frame for gdb but debug info might be present without it.
const Elf_Shdr* eh_frame = all.FindSectionByName(".eh_frame");
@@ -1735,12 +1505,8 @@ void ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
gdb_file_mapping_.reset(all_ptr.release());
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Strip(std::string* error_msg) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::Strip(std::string* error_msg) {
// ELF files produced by MCLinker look roughly like this
//
// +------------+
@@ -1801,7 +1567,7 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
CHECK_NE(0U, section_headers.size());
CHECK_EQ(section_headers.size(), section_headers_original_indexes.size());
- // section 0 is the NULL section, sections start at offset of first section
+ // section 0 is the null section, sections start at offset of first section
CHECK(GetSectionHeader(1) != nullptr);
Elf_Off offset = GetSectionHeader(1)->sh_offset;
for (size_t i = 1; i < section_headers.size(); i++) {
@@ -1843,12 +1609,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
static const bool DEBUG_FIXUP = false;
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Fixup(Elf_Addr base_address) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::Fixup(Elf_Addr base_address) {
if (!FixupDynamic(base_address)) {
LOG(WARNING) << "Failed to fixup .dynamic in " << file_->GetPath();
return false;
@@ -1881,12 +1643,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupDynamic(Elf_Addr base_address) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupDynamic(Elf_Addr base_address) {
for (Elf_Word i = 0; i < GetDynamicNum(); i++) {
Elf_Dyn& elf_dyn = GetDynamic(i);
Elf_Word d_tag = elf_dyn.d_tag;
@@ -1905,12 +1663,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupSectionHeaders(Elf_Addr base_address) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupSectionHeaders(Elf_Addr base_address) {
for (Elf_Word i = 0; i < GetSectionHeaderNum(); i++) {
Elf_Shdr* sh = GetSectionHeader(i);
CHECK(sh != nullptr);
@@ -1929,12 +1683,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupProgramHeaders(Elf_Addr base_address) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupProgramHeaders(Elf_Addr base_address) {
// TODO: ELFObjectFile doesn't give access to Elf_Phdr, so we do that ourselves for now.
for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
Elf_Phdr* ph = GetProgramHeader(i);
@@ -1956,12 +1706,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupSymbols(Elf_Addr base_address, bool dynamic) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupSymbols(Elf_Addr base_address, bool dynamic) {
Elf_Word section_type = dynamic ? SHT_DYNSYM : SHT_SYMTAB;
// TODO: Unfortunately, ELFObjectFile has protected symbol access, so use ElfFile
Elf_Shdr* symbol_section = FindSectionByType(section_type);
@@ -1986,12 +1732,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupRelocations(Elf_Addr base_address) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupRelocations(Elf_Addr base_address) {
for (Elf_Word i = 0; i < GetSectionHeaderNum(); i++) {
Elf_Shdr* sh = GetSectionHeader(i);
CHECK(sh != nullptr);
@@ -2023,10 +1765,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
// Explicit instantiations
-template class ElfFileImpl<Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr, Elf32_Word,
- Elf32_Sword, Elf32_Addr, Elf32_Sym, Elf32_Rel, Elf32_Rela, Elf32_Dyn, Elf32_Off>;
-template class ElfFileImpl<Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr, Elf64_Word,
- Elf64_Sword, Elf64_Addr, Elf64_Sym, Elf64_Rel, Elf64_Rela, Elf64_Dyn, Elf64_Off>;
+template class ElfFileImpl<ElfTypes32>;
+template class ElfFileImpl<ElfTypes64>;
ElfFile::ElfFile(ElfFileImpl32* elf32) : elf32_(elf32), elf64_(nullptr) {
}
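
The two explicit instantiations at the end replace the old eleven-argument spellings and are what allow all of the member definitions above to stay in elf_file.cc rather than a header. A standalone sketch of explicit instantiation (hypothetical Widget type, not ART code):

#include <cstdio>

template <typename T>
struct Widget {
  T value;
  T Get() const { return value; }
};

// Explicit instantiations: the code for these specializations is emitted
// in this translation unit, so other files need only the declarations.
template struct Widget<int>;
template struct Widget<long>;

int main() {
  Widget<int> w = {42};
  std::printf("%d\n", w.Get());
  return 0;
}
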
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index 286c2a638c..fe6896dfe7 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -26,16 +26,12 @@
#include "os.h"
namespace art {
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
+template <typename ElfTypes>
class ElfFileImpl;
// Explicitly instantiated in elf_file.cc
-typedef ElfFileImpl<Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr, Elf32_Word, Elf32_Sword,
- Elf32_Addr, Elf32_Sym, Elf32_Rel, Elf32_Rela, Elf32_Dyn, Elf32_Off> ElfFileImpl32;
-typedef ElfFileImpl<Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr, Elf64_Word, Elf64_Sword,
- Elf64_Addr, Elf64_Sym, Elf64_Rel, Elf64_Rela, Elf64_Dyn, Elf64_Off> ElfFileImpl64;
+typedef ElfFileImpl<ElfTypes32> ElfFileImpl32;
+typedef ElfFileImpl<ElfTypes64> ElfFileImpl64;
// Used for compile time and runtime for ElfFile access. Because of
// the need for use at runtime, cannot directly use LLVM classes such as
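
elf_file.h can use the short typedefs because ElfFileImpl is only forward-declared here; the actual instantiations live in elf_file.cc. A standalone sketch of naming instantiations of an incomplete template (hypothetical Impl/T32/T64 names, not ART code):

// Forward declaration only; no definition is needed to form the typedefs
// or to declare pointers and references to them.
template <typename Types> class Impl;

struct T32 {};
struct T64 {};

typedef Impl<T32> Impl32;
typedef Impl<T64> Impl64;

Impl32* MakeImpl32();  // declaring this is fine with the incomplete type

int main() {
  Impl32* p = nullptr;  // pointers to incomplete types are usable
  return p == nullptr ? 0 : 1;
}
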
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 383dc41e72..80950c6197 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -32,11 +32,22 @@ extern "C" {
struct JITCodeEntry;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
+template <typename ElfTypes>
class ElfFileImpl {
public:
+ using Elf_Addr = typename ElfTypes::Addr;
+ using Elf_Off = typename ElfTypes::Off;
+ using Elf_Half = typename ElfTypes::Half;
+ using Elf_Word = typename ElfTypes::Word;
+ using Elf_Sword = typename ElfTypes::Sword;
+ using Elf_Ehdr = typename ElfTypes::Ehdr;
+ using Elf_Shdr = typename ElfTypes::Shdr;
+ using Elf_Sym = typename ElfTypes::Sym;
+ using Elf_Rel = typename ElfTypes::Rel;
+ using Elf_Rela = typename ElfTypes::Rela;
+ using Elf_Phdr = typename ElfTypes::Phdr;
+ using Elf_Dyn = typename ElfTypes::Dyn;
+
static ElfFileImpl* Open(File* file, bool writable, bool program_header_only,
std::string* error_msg, uint8_t* requested_base = nullptr);
static ElfFileImpl* Open(File* file, int mmap_prot, int mmap_flags, std::string* error_msg);
@@ -83,8 +94,7 @@ class ElfFileImpl {
const std::string& symbol_name,
bool build_map);
- // Lookup a string given string section and offset. Returns nullptr for
- // special 0 offset.
+ // Lookup a string given string section and offset. Returns null for special 0 offset.
const char* GetString(Elf_Shdr&, Elf_Word) const;
Elf_Word GetDynamicNum() const;
@@ -156,7 +166,7 @@ class ElfFileImpl {
// Check whether the offset is in range, and set to target to Begin() + offset if OK.
bool CheckAndSet(Elf32_Off offset, const char* label, uint8_t** target, std::string* error_msg);
- // Find symbol in specified table, returning nullptr if it is not found.
+ // Find symbol in specified table, returning null if it is not found.
//
// If build_map is true, builds a map to speed repeated access. The
// map does not include untyped symbol values (aka STT_NOTYPE)
@@ -173,7 +183,7 @@ class ElfFileImpl {
Elf_Dyn* FindDynamicByType(Elf_Sword type) const;
Elf_Word FindDynamicValueByType(Elf_Sword type) const;
- // Lookup a string by section type. Returns nullptr for special 0 offset.
+ // Lookup a string by section type. Returns null for special 0 offset.
const char* GetString(Elf_Word section_type, Elf_Word) const;
const File* const file_;
@@ -209,9 +219,7 @@ class ElfFileImpl {
// Support for GDB JIT
uint8_t* jit_elf_image_;
JITCodeEntry* jit_gdb_entry_;
- std::unique_ptr<ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel,
- Elf_Rela, Elf_Dyn, Elf_Off>> gdb_file_mapping_;
+ std::unique_ptr<ElfFileImpl<ElfTypes>> gdb_file_mapping_;
void GdbJITSupport();
// Override the 'base' p_vaddr in the first LOAD segment with this value (if non-null).
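
The block of using-declarations added to the class re-exports the traits members under the old names, which is why the method bodies in elf_file.cc can keep writing Elf_Word, Elf_Phdr and friends unchanged. A standalone sketch of class-scope aliases over a traits parameter (hypothetical Types32/File names, not ART code):

#include <cstdint>

struct Types32 { using Word = uint32_t; using Addr = uint32_t; };

template <typename Types>
class File {
 public:
  // Class-scope aliases: member bodies keep the short spellings.
  using Word = typename Types::Word;
  using Addr = typename Types::Addr;

  Addr AlignUp(Addr addr, Word alignment) const {
    return (addr + alignment - 1) & ~static_cast<Addr>(alignment - 1);
  }
};

int main() {
  File<Types32> f;
  return f.AlignUp(0x1001u, 0x1000u) == 0x2000u ? 0 : 1;
}
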
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index cbfba12968..64b7ecdabe 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -41,10 +41,10 @@ inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
mirror::ArtMethod* method,
Thread* self, bool* slow_path) {
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx);
- if (UNLIKELY(klass == NULL)) {
+ if (UNLIKELY(klass == nullptr)) {
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
*slow_path = true;
- if (klass == NULL) {
+ if (klass == nullptr) {
DCHECK(self->IsExceptionPending());
return nullptr; // Failure
} else {
@@ -526,19 +526,19 @@ inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
mirror::Object* this_object,
mirror::ArtMethod* referrer,
bool access_check, InvokeType type) {
- if (UNLIKELY(this_object == NULL && type != kStatic)) {
- return NULL;
+ if (UNLIKELY(this_object == nullptr && type != kStatic)) {
+ return nullptr;
}
mirror::ArtMethod* resolved_method =
referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx);
- if (UNLIKELY(resolved_method == NULL)) {
- return NULL;
+ if (UNLIKELY(resolved_method == nullptr)) {
+ return nullptr;
}
if (access_check) {
// Check for incompatible class change errors and access.
bool icce = resolved_method->CheckIncompatibleClassChange(type);
if (UNLIKELY(icce)) {
- return NULL;
+ return nullptr;
}
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
mirror::Class* referring_class = referrer->GetDeclaringClass();
@@ -546,7 +546,7 @@ inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
!referring_class->CanAccessMember(methods_class,
resolved_method->GetAccessFlags()))) {
// Potential illegal access, may need to refine the method's class.
- return NULL;
+ return nullptr;
}
}
if (type == kInterface) { // Most common form of slow path dispatch.
@@ -606,7 +606,7 @@ inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
// Save any pending exception over monitor exit call.
- mirror::Throwable* saved_exception = NULL;
+ mirror::Throwable* saved_exception = nullptr;
if (UNLIKELY(self->IsExceptionPending())) {
saved_exception = self->GetException();
self->ClearException();
@@ -620,7 +620,7 @@ inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
<< self->GetException()->Dump();
}
// Restore pending exception.
- if (saved_exception != NULL) {
+ if (saved_exception != nullptr) {
self->SetException(saved_exception);
}
}
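
The remaining hunks are the same mechanical NULL-to-nullptr migration. The practical difference: NULL is an integral constant in C++, while nullptr has its own type (std::nullptr_t), so it can never be mistaken for an int. A standalone sketch of why that matters for overload resolution (hypothetical F overloads, not ART code):

#include <cstdio>

static void F(int)   { std::puts("int overload"); }
static void F(char*) { std::puts("pointer overload"); }

int main() {
  F(nullptr);  // unambiguously the pointer overload
  F(0);        // the int overload; NULL can behave like this literal 0
  return 0;
}
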
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 1d8df68994..ce5673923f 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -23,6 +23,7 @@
#include "gc/accounting/card_table-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "reflection.h"
@@ -43,9 +44,9 @@ static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx,
return nullptr; // Failure
}
mirror::Class* klass = referrer->GetDexCacheResolvedType<false>(type_idx);
- if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve
+ if (UNLIKELY(klass == nullptr)) { // Not in dex cache so try to resolve
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer);
- if (klass == NULL) { // Error
+ if (klass == nullptr) { // Error
DCHECK(self->IsExceptionPending());
return nullptr; // Failure
}
@@ -230,13 +231,13 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
// Build argument array possibly triggering GC.
soa.Self()->AssertThreadSuspensionIsAllowable();
- jobjectArray args_jobj = NULL;
+ jobjectArray args_jobj = nullptr;
const JValue zero;
int32_t target_sdk_version = Runtime::Current()->GetTargetSdkVersion();
// Do not create empty arrays unless needed to maintain Dalvik bug compatibility.
if (args.size() > 0 || (target_sdk_version > 0 && target_sdk_version <= 21)) {
- args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, NULL);
- if (args_jobj == NULL) {
+ args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, nullptr);
+ if (args_jobj == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
return zero;
}
@@ -248,7 +249,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
JValue jv;
jv.SetJ(args.at(i).j);
mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv);
- if (val == NULL) {
+ if (val == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
return zero;
}
@@ -257,7 +258,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
}
}
- // Call Proxy.invoke(Proxy proxy, ArtMethod method, Object[] args).
+ // Call Proxy.invoke(Proxy proxy, Method method, Object[] args).
jvalue invocation_args[3];
invocation_args[0].l = rcvr_jobj;
invocation_args[1].l = interface_method_jobj;
@@ -269,15 +270,14 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
// Unbox result and handle error conditions.
if (LIKELY(!soa.Self()->IsExceptionPending())) {
- if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) {
+ if (shorty[0] == 'V' || (shorty[0] == 'L' && result == nullptr)) {
// Do nothing.
return zero;
} else {
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ArtMethod> h_interface_method(
- hs.NewHandle(soa.Decode<mirror::ArtMethod*>(interface_method_jobj)));
+ auto h_interface_method(hs.NewHandle(soa.Decode<mirror::Method*>(interface_method_jobj)));
// This can cause thread suspension.
- mirror::Class* result_type = h_interface_method->GetReturnType();
+ mirror::Class* result_type = h_interface_method->GetArtMethod()->GetReturnType();
mirror::Object* result_ref = soa.Decode<mirror::Object*>(result);
JValue result_unboxed;
if (!UnboxPrimitiveForResult(result_ref, result_type, &result_unboxed)) {
@@ -293,10 +293,9 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
if (exception->IsCheckedException()) {
mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj);
mirror::Class* proxy_class = rcvr->GetClass();
- mirror::ArtMethod* interface_method =
- soa.Decode<mirror::ArtMethod*>(interface_method_jobj);
+ mirror::Method* interface_method = soa.Decode<mirror::Method*>(interface_method_jobj);
mirror::ArtMethod* proxy_method =
- rcvr->GetClass()->FindVirtualMethodForInterface(interface_method);
+ rcvr->GetClass()->FindVirtualMethodForInterface(interface_method->GetArtMethod());
int throws_index = -1;
size_t num_virt_methods = proxy_class->NumVirtualMethods();
for (size_t i = 0; i < num_virt_methods; i++) {
@@ -316,7 +315,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
}
if (!declares_exception) {
soa.Self()->ThrowNewWrappedException("Ljava/lang/reflect/UndeclaredThrowableException;",
- NULL);
+ nullptr);
}
}
return zero;
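The changes above route the proxy's interface method through a managed java.lang.reflect.Method object (mirror::Method) instead of a raw mirror::ArtMethod, so the value handed to Proxy.invoke is a real reflection object; the native side unwraps it again via GetArtMethod(). A rough sketch of this boxing pattern, with illustrative names standing in for the actual ART types:

struct ArtMethodLike {
  const char* shorty;  // Stand-in for the runtime's method metadata.
};

// Managed wrapper handed across the reflective boundary.
class MethodLike {
 public:
  static MethodLike CreateFromArtMethod(ArtMethodLike* m) { return MethodLike(m); }
  ArtMethodLike* GetArtMethod() const { return art_method_; }
 private:
  explicit MethodLike(ArtMethodLike* m) : art_method_(m) {}
  ArtMethodLike* art_method_;  // The unwrapped runtime method.
};

// Native code boxes before the upcall and unboxes afterwards:
//   MethodLike boxed = MethodLike::CreateFromArtMethod(interface_method);
//   ... Proxy.invoke(proxy, boxed, args) ...
//   ArtMethodLike* raw = boxed.GetArtMethod();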
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index 28e19d414d..d4844c2a95 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -47,7 +47,7 @@ extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::
method = shadow_frame->GetMethod();
}
}
- uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
+ uint16_t arg_offset = (code_item == nullptr) ? 0 : code_item->registers_size_ - code_item->ins_size_;
method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
(shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
result, method->GetShorty());
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index 2752407750..a68eeebff8 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -34,15 +34,15 @@ extern "C" void* artFindNativeMethod(Thread* self) {
Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native.
ScopedObjectAccess soa(self);
- mirror::ArtMethod* method = self->GetCurrentMethod(NULL);
- DCHECK(method != NULL);
+ mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
+ DCHECK(method != nullptr);
- // Lookup symbol address for method, on failure we'll return NULL with an exception set,
+ // Lookup symbol address for method; on failure we'll return null with an exception set,
// otherwise we return the address of the method we found.
void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
- if (native_code == NULL) {
+ if (native_code == nullptr) {
DCHECK(self->IsExceptionPending());
- return NULL;
+ return nullptr;
} else {
// Register so that future calls don't come here
method->RegisterNative(native_code, false);
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index a6ab69b75d..37de380151 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -22,8 +22,8 @@ namespace art {
// Assignable test for code; won't throw. Null and equality tests already performed.
extern "C" uint32_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(klass != NULL);
- DCHECK(ref_class != NULL);
+ DCHECK(klass != nullptr);
+ DCHECK(ref_class != nullptr);
return klass->IsAssignableFrom(ref_class) ? 1 : 0;
}
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 54dbd8c770..eb1b1056a4 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -41,7 +41,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod*
bool interpreter_entry = (result == GetQuickToInterpreterBridge());
instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? nullptr : this_object,
method, lr, interpreter_entry);
- CHECK(result != NULL) << PrettyMethod(method);
+ CHECK(result != nullptr) << PrettyMethod(method);
return result;
}
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 9644b98be6..f22edc1b9e 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -34,10 +34,10 @@ extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
/*
- * exception may be NULL, in which case this routine should
+ * exception may be null, in which case this routine should
* throw NPE. NOTE: this is a convenience for generated code,
* which previously did the null check inline and constructed
- * and threw a NPE if NULL. This routine responsible for setting
+ * and threw an NPE if null. This routine is responsible for setting
* exception_ in thread and delivering the exception.
*/
ScopedQuickEntrypointChecks sqec(self);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 2e813c808a..2e7e2dfd74 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -26,6 +26,7 @@
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
@@ -760,11 +761,12 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
- jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
+ self->EndAssertNoThreadSuspension(old_cause);
+ jobject interface_method_jobj = soa.AddLocalReference<jobject>(
+ mirror::Method::CreateFromArtMethod(soa.Self(), interface_method));
// All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
// that performs allocations.
- self->EndAssertNoThreadSuspension(old_cause);
JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
// Restore references which might have moved.
local_ref_visitor.FixupReferences();
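Note the reorder above: CreateFromArtMethod allocates a managed object, which is a GC suspend point, so the no-thread-suspension assertion must be ended before the wrapper is created rather than after. A toy sketch of that invariant, with illustrative names in place of ART's Thread API:

#include <cassert>

struct ThreadSketch {
  const char* no_suspend_cause = nullptr;

  const char* StartAssertNoSuspension(const char* cause) {
    const char* old_cause = no_suspend_cause;
    no_suspend_cause = cause;
    return old_cause;
  }
  void EndAssertNoSuspension(const char* old_cause) { no_suspend_cause = old_cause; }

  void* AllocateManagedObject() {
    // Any allocation may suspend for GC; it must not run under the assertion.
    assert(no_suspend_cause == nullptr);
    return nullptr;  // Actual allocation elided in this sketch.
  }
};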
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 1770658c0e..6808000e5e 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -200,7 +200,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
fake_stack.push_back(0);
fake_stack.push_back(0xEBAD6070); // return pc
- // Push Method* of NULL to terminate the trace
+ // Push Method* of null to terminate the trace
fake_stack.push_back(0);
// Push null values which will become null incoming arguments.
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 5224d64efc..399832a377 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -238,9 +238,9 @@ class AtomicStack {
std::string error_msg;
mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
PROT_READ | PROT_WRITE, false, false, &error_msg));
- CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
+ CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_->Begin();
- CHECK(addr != NULL);
+ CHECK(addr != nullptr);
debug_is_sorted_ = true;
begin_ = reinterpret_cast<StackReference<T>*>(addr);
Reset();
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 7879632877..1a7b1a374e 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -36,7 +36,7 @@ constexpr uint8_t CardTable::kCardDirty;
/*
* Maintain a card table from the write barrier. All writes of
- * non-NULL values to heap addresses should go through an entry in
+ * non-null values to heap addresses should go through an entry in
* WriteBarrier, and from there to here.
*
* The heap is divided into "cards" of GC_CARD_SIZE bytes, as
@@ -44,7 +44,7 @@ constexpr uint8_t CardTable::kCardDirty;
* data per card, to be used by the GC. The value of the byte will be
* one of GC_CARD_CLEAN or GC_CARD_DIRTY.
*
- * After any store of a non-NULL object pointer into a heap object,
+ * After any store of a non-null object pointer into a heap object,
* code is obliged to mark the card dirty. The setters in
* object.h [such as SetFieldObject] do this for you. The
* compiler also contains code to mark cards as dirty.
@@ -64,13 +64,13 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
std::unique_ptr<MemMap> mem_map(
MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
false, false, &error_msg));
- CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
+ CHECK(mem_map.get() != nullptr) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, so we
// don't clear the card table; this avoids unnecessary pages being allocated.
static_assert(kCardClean == 0, "kCardClean must be 0");
uint8_t* cardtable_begin = mem_map->Begin();
- CHECK(cardtable_begin != NULL);
+ CHECK(cardtable_begin != nullptr);
// We allocated up to a byte's worth of extra space to allow biased_begin's byte value to equal
// kCardDirty; compute an offset value to make this the case.
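For context on the comments reworded in this hunk: the write barrier dirties one byte per fixed-size card covering the address written to, so a later GC rescans only dirty cards instead of the whole heap. A compact sketch of the marking step, with illustrative constants (ART's real card size and dirty value live in card_table.h):

#include <cstdint>

constexpr unsigned kCardShiftSketch = 7;      // Illustrative: 128-byte cards.
constexpr uint8_t kCardDirtySketch = 0x70;

// biased_begin is pre-offset so the shifted address indexes directly into the
// table; storing a non-null reference into *dst must be followed by this mark.
inline void MarkCard(uint8_t* biased_begin, const void* dst) {
  biased_begin[reinterpret_cast<uintptr_t>(dst) >> kCardShiftSketch] = kCardDirtySketch;
}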
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 896cce5edc..75ef58a21f 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -43,7 +43,7 @@ namespace accounting {
template<size_t kAlignment> class SpaceBitmap;
// Maintain a card table from the write barrier. All writes of
-// non-NULL values to heap addresses should go through an entry in
+// non-null values to heap addresses should go through an entry in
// WriteBarrier, and from there to here.
class CardTable {
public:
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 11347a5bfe..ae912006bb 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -55,7 +55,7 @@ template<size_t kAlignment>
inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
DCHECK(HasAddress(obj)) << obj;
- DCHECK(bitmap_begin_ != NULL);
+ DCHECK(bitmap_begin_ != nullptr);
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 2da83250d3..84dadea8ea 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -104,8 +104,8 @@ void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
- CHECK(bitmap_begin_ != NULL);
- CHECK(callback != NULL);
+ CHECK(bitmap_begin_ != nullptr);
+ CHECK(callback != nullptr);
uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
uintptr_t* bitmap_begin = bitmap_begin_;
@@ -132,7 +132,7 @@ void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitm
CHECK(mark_bitmap.bitmap_begin_ != nullptr);
CHECK_EQ(live_bitmap.heap_begin_, mark_bitmap.heap_begin_);
CHECK_EQ(live_bitmap.bitmap_size_, mark_bitmap.bitmap_size_);
- CHECK(callback != NULL);
+ CHECK(callback != nullptr);
CHECK_LE(sweep_begin, sweep_end);
CHECK_GE(sweep_begin, live_bitmap.heap_begin_);
@@ -186,7 +186,7 @@ void SpaceBitmap<kAlignment>::WalkInstanceFields(SpaceBitmap<kAlignment>* visite
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Visit fields of parent classes first.
mirror::Class* super = klass->GetSuperClass();
- if (super != NULL) {
+ if (super != nullptr) {
WalkInstanceFields(visited, callback, obj, super, arg);
}
// Walk instance fields
@@ -233,7 +233,7 @@ void SpaceBitmap<kAlignment>::WalkFieldsInOrder(SpaceBitmap<kAlignment>* visited
int32_t length = obj_array->GetLength();
for (int32_t i = 0; i < length; i++) {
mirror::Object* value = obj_array->Get(i);
- if (value != NULL) {
+ if (value != nullptr) {
WalkFieldsInOrder(visited, callback, value, arg);
}
}
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 850325a33d..edb08ef3d9 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -34,7 +34,7 @@ TEST_F(SpaceBitmapTest, Init) {
size_t heap_capacity = 16 * MB;
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
- EXPECT_TRUE(space_bitmap.get() != NULL);
+ EXPECT_TRUE(space_bitmap.get() != nullptr);
}
class BitmapVerify {
@@ -62,7 +62,7 @@ TEST_F(SpaceBitmapTest, ScanRange) {
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
- EXPECT_TRUE(space_bitmap.get() != NULL);
+ EXPECT_TRUE(space_bitmap.get() != nullptr);
// Set all the odd bits in the first BitsPerIntPtrT * 3 to one.
for (size_t j = 0; j < kBitsPerIntPtrT * 3; ++j) {
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 515f124fcc..85234dc27a 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -115,7 +115,7 @@ RosAlloc::~RosAlloc() {
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
lock_.AssertHeld(self);
DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
- FreePageRun* res = NULL;
+ FreePageRun* res = nullptr;
const size_t req_byte_size = num_pages * kPageSize;
// Find the lowest address free page run that's large enough.
for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
@@ -157,8 +157,8 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
}
// Failed to allocate pages. Grow the footprint, if possible.
- if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
- FreePageRun* last_free_page_run = NULL;
+ if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
+ FreePageRun* last_free_page_run = nullptr;
size_t last_free_page_run_size;
auto it = free_page_runs_.rbegin();
if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
@@ -218,7 +218,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
DCHECK(it != free_page_runs_.rend());
FreePageRun* fpr = *it;
if (kIsDebugBuild && last_free_page_run_size > 0) {
- DCHECK(last_free_page_run != NULL);
+ DCHECK(last_free_page_run != nullptr);
DCHECK_EQ(last_free_page_run, fpr);
}
size_t fpr_byte_size = fpr->ByteSize(this);
@@ -249,7 +249,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
res = fpr;
}
}
- if (LIKELY(res != NULL)) {
+ if (LIKELY(res != nullptr)) {
// Update the page map.
size_t page_map_idx = ToPageMapIndex(res);
for (size_t i = 0; i < num_pages; i++) {
@@ -286,7 +286,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
// Fail.
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocPages() : NULL";
+ LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
}
return nullptr;
}
@@ -468,7 +468,7 @@ void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_alloca
}
if (UNLIKELY(r == nullptr)) {
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
+ LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
}
return nullptr;
}
@@ -824,7 +824,7 @@ size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
// already in the non-full run set (i.e., it was full) insert it
// into the non-full run set.
if (run != current_runs_[idx]) {
- auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+ auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
auto pos = non_full_runs->find(run);
if (pos == non_full_runs->end()) {
DCHECK(run_was_full);
@@ -1275,7 +1275,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
// Check if the run should be moved to non_full_runs_ or
// free_page_runs_.
auto* non_full_runs = &non_full_runs_[idx];
- auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+ auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
if (run->IsAllFree()) {
// It has just become completely free. Free the pages of the
// run.
@@ -1358,7 +1358,7 @@ std::string RosAlloc::DumpPageMap() {
stream << "RosAlloc PageMap: " << std::endl;
lock_.AssertHeld(Thread::Current());
size_t end = page_map_size_;
- FreePageRun* curr_fpr = NULL;
+ FreePageRun* curr_fpr = nullptr;
size_t curr_fpr_size = 0;
size_t remaining_curr_fpr_size = 0;
size_t num_running_empty_pages = 0;
@@ -1373,7 +1373,7 @@ std::string RosAlloc::DumpPageMap() {
// Encountered a fresh free page run.
DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
DCHECK(fpr->IsFree());
- DCHECK(curr_fpr == NULL);
+ DCHECK(curr_fpr == nullptr);
DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
curr_fpr = fpr;
curr_fpr_size = fpr->ByteSize(this);
@@ -1384,7 +1384,7 @@ std::string RosAlloc::DumpPageMap() {
<< " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
if (remaining_curr_fpr_size == 0) {
// Reset at the end of the current free page run.
- curr_fpr = NULL;
+ curr_fpr = nullptr;
curr_fpr_size = 0;
}
stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
@@ -1392,7 +1392,7 @@ std::string RosAlloc::DumpPageMap() {
} else {
// Still part of the current free page run.
DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
- DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
+ DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
remaining_curr_fpr_size -= kPageSize;
@@ -1400,7 +1400,7 @@ std::string RosAlloc::DumpPageMap() {
<< " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
if (remaining_curr_fpr_size == 0) {
// Reset at the end of the current free page run.
- curr_fpr = NULL;
+ curr_fpr = nullptr;
curr_fpr_size = 0;
}
}
@@ -1546,7 +1546,7 @@ bool RosAlloc::Trim() {
void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg) {
// Note: no need to use this to release pages as we already do so in FreePages().
- if (handler == NULL) {
+ if (handler == nullptr) {
return;
}
MutexLock mu(Thread::Current(), lock_);
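AllocPages, touched throughout this hunk, scans free_page_runs_ (an address-ordered set of free runs) for the lowest-addressed run large enough, splits off the tail, and only grows the footprint when nothing fits. A self-contained sketch of that first-fit-with-split policy over page counts, using an ordinary map in place of RosAlloc's types:

#include <cstddef>
#include <map>

// Address-ordered free runs: start page -> run length in pages.
bool AllocPagesFirstFit(std::map<std::size_t, std::size_t>* free_runs,
                        std::size_t num_pages, std::size_t* out_start) {
  for (auto it = free_runs->begin(); it != free_runs->end(); ++it) {
    if (it->second >= num_pages) {
      *out_start = it->first;                  // Lowest-addressed fit wins.
      const std::size_t leftover = it->second - num_pages;
      const std::size_t tail_start = it->first + num_pages;
      free_runs->erase(it);
      if (leftover > 0) {
        (*free_runs)[tail_start] = leftover;   // Reinsert the split-off tail.
      }
      return true;
    }
  }
  return false;  // No fit: the caller may grow the footprint, as AllocPages does.
}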
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 14eb80bdb6..f0e8d14d25 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -578,7 +578,7 @@ class MarkStackTask : public Task {
mark_stack_pos_(mark_stack_size) {
// We may have to copy part of an existing mark stack when another mark stack overflows.
if (mark_stack_size != 0) {
- DCHECK(mark_stack != NULL);
+ DCHECK(mark_stack != nullptr);
// TODO: Check performance?
std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
}
@@ -850,7 +850,7 @@ class RecursiveMarkTask : public MarkStackTask<false> {
public:
RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
- : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), bitmap_(bitmap), begin_(begin),
+ : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
end_(end) {
}
@@ -1260,11 +1260,11 @@ void MarkSweep::ProcessMarkStack(bool paused) {
static const size_t kFifoSize = 4;
BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
for (;;) {
- Object* obj = NULL;
+ Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
Object* mark_stack_obj = mark_stack_->PopBack();
- DCHECK(mark_stack_obj != NULL);
+ DCHECK(mark_stack_obj != nullptr);
__builtin_prefetch(mark_stack_obj);
prefetch_fifo.push_back(mark_stack_obj);
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index dbf01d8354..82d02e7fb2 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -750,7 +750,7 @@ inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
// All immune objects are assumed marked.
if (from_space_->HasAddress(obj)) {
- // Returns either the forwarding address or nullptr.
+ // Returns either the forwarding address or null.
return GetForwardingAddressInFromSpace(obj);
} else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) ||
to_space_->HasAddress(obj)) {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index b770096671..3e56205444 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -293,7 +293,7 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator
return nullptr;
}
// Try allocating a new thread local buffer; if the allocation fails the space must be
- // full so return nullptr.
+ // full so return null.
if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
return nullptr;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index beaf067236..b80c4b681c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -412,7 +412,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
// Allocate the card table.
card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
- CHECK(card_table_.get() != NULL) << "Failed to create card table";
+ CHECK(card_table_.get() != nullptr) << "Failed to create card table";
if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
rb_table_.reset(new accounting::ReadBarrierTable());
@@ -1052,7 +1052,7 @@ space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object
if (!fail_ok) {
LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
}
- return NULL;
+ return nullptr;
}
space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
@@ -1065,12 +1065,12 @@ space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::
if (!fail_ok) {
LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
}
- return NULL;
+ return nullptr;
}
space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
space::Space* result = FindContinuousSpaceFromObject(obj, true);
- if (result != NULL) {
+ if (result != nullptr) {
return result;
}
return FindDiscontinuousSpaceFromObject(obj, fail_ok);
@@ -1082,7 +1082,7 @@ space::ImageSpace* Heap::GetImageSpace() const {
return space->AsImageSpace();
}
}
- return NULL;
+ return nullptr;
}
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
@@ -2204,7 +2204,7 @@ void Heap::PreZygoteFork() {
// Turn the current alloc space into a zygote space and obtain the new alloc space composed of
// the remaining available space.
// Remove the old space before creating the zygote space since creating the zygote space sets
- // the old alloc space's bitmaps to nullptr.
+ // the old alloc space's bitmaps to null.
RemoveSpace(old_alloc_space);
if (collector::SemiSpace::kUseRememberedSet) {
// Sanity bound check.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 066b4c5b77..565687c5b5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -396,7 +396,7 @@ class Heap {
void RecordFreeRevoke();
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
- // The call is not needed if NULL is stored in the field.
+ // The call is not needed if null is stored in the field.
ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
const mirror::Object* /*new_value*/) {
card_table_->MarkCard(dst);
@@ -991,7 +991,7 @@ class Heap {
// programs it is "cleared" making it the same as capacity.
size_t growth_limit_;
- // When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
+ // When the number of bytes allocated exceeds the footprint, TryAllocate returns null, indicating
// a GC should be triggered.
size_t max_allowed_footprint_;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 7be0704d01..4c93a4c5a8 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -30,7 +30,7 @@ ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
- DCHECK(ref != NULL);
+ DCHECK(ref != nullptr);
MutexLock mu(self, *lock_);
if (!ref->IsEnqueued()) {
EnqueuePendingReference(ref);
@@ -43,7 +43,7 @@ void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
}
void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
- DCHECK(ref != NULL);
+ DCHECK(ref != nullptr);
if (IsEmpty()) {
// 1 element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
list_ = ref;
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index c496a422e0..df43606485 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -45,7 +45,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
- // Allocate num_bytes, returns nullptr if the space is full.
+ // Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
index 9eace897e6..4fc4adac91 100644
--- a/runtime/gc/space/dlmalloc_space-inl.h
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -35,7 +35,7 @@ inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_b
obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
- if (LIKELY(obj != NULL)) {
+ if (LIKELY(obj != nullptr)) {
// Zero freshly allocated memory, done while not holding the space's lock.
memset(obj, 0, num_bytes);
}
@@ -57,13 +57,13 @@ inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_, num_bytes));
- if (LIKELY(result != NULL)) {
+ if (LIKELY(result != nullptr)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
size_t allocation_size = AllocationSizeNonvirtual(result, usable_size);
- DCHECK(bytes_allocated != NULL);
+ DCHECK(bytes_allocated != nullptr);
*bytes_allocated = allocation_size;
*bytes_tl_bulk_allocated = allocation_size;
}
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 225861db60..7b1a421f4a 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -39,7 +39,7 @@ DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::st
: MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
starting_size, initial_size),
mspace_(mspace) {
- CHECK(mspace != NULL);
+ CHECK(mspace != nullptr);
}
DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
@@ -176,7 +176,7 @@ size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
}
size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
- DCHECK(ptrs != NULL);
+ DCHECK(ptrs != nullptr);
// Don't need the lock to calculate the size of the freed pointers.
size_t bytes_freed = 0;
@@ -232,7 +232,7 @@ void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_byte
void* arg) {
MutexLock mu(Thread::Current(), lock_);
mspace_inspect_all(mspace_, callback, arg);
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
size_t DlMallocSpace::GetFootprint() {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e28e8d7771..f35003864e 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -666,7 +666,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
}
std::unique_ptr<File> file(OS::OpenFileForReading(image_filename));
- if (file.get() == NULL) {
+ if (file.get() == nullptr) {
*error_msg = StringPrintf("Failed to open '%s'", image_filename);
return nullptr;
}
@@ -695,7 +695,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(
image_header.GetImageBegin(), image_header.GetImageSize() + image_header.GetArtFieldsSize(),
PROT_READ | PROT_WRITE, MAP_PRIVATE, file->Fd(), 0, false, image_filename, error_msg));
- if (map.get() == NULL) {
+ if (map.get() == nullptr) {
DCHECK(!error_msg->empty());
return nullptr;
}
@@ -786,7 +786,7 @@ OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg)
image_header.GetOatFileBegin(),
!Runtime::Current()->IsAotCompiler(),
nullptr, error_msg);
- if (oat_file == NULL) {
+ if (oat_file == nullptr) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
oat_filename.c_str(), GetName(), error_msg->c_str());
return nullptr;
@@ -811,7 +811,7 @@ OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg)
}
bool ImageSpace::ValidateOatFile(std::string* error_msg) const {
- CHECK(oat_file_.get() != NULL);
+ CHECK(oat_file_.get() != nullptr);
for (const OatFile::OatDexFile* oat_dex_file : oat_file_->GetOatDexFiles()) {
const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();
uint32_t dex_file_location_checksum;
@@ -837,7 +837,7 @@ const OatFile* ImageSpace::GetOatFile() const {
}
OatFile* ImageSpace::ReleaseOatFile() {
- CHECK(oat_file_.get() != NULL);
+ CHECK(oat_file_.get() != nullptr);
return oat_file_.release();
}
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 9ae2af4711..54dc7a61dd 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -52,7 +52,7 @@ class ImageSpace : public MemMapSpace {
InstructionSet image_isa);
// Reads the image header from the specified image location for the
- // instruction set image_isa. Returns nullptr on failure, with
+ // instruction set image_isa. Returns null on failure, with
// reason in error_msg.
static ImageHeader* ReadImageHeader(const char* image_location,
InstructionSet image_isa,
@@ -122,7 +122,7 @@ class ImageSpace : public MemMapSpace {
private:
// Tries to initialize an ImageSpace from the given image path,
- // returning NULL on error.
+ // returning null on error.
//
// If validate_oat_file is false (for /system), do not verify that
// image's OatFile is up-to-date relative to its DexFile
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a4a9d80fa0..4dfdaa5907 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -18,6 +18,7 @@
#include <memory>
+#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
@@ -123,12 +124,22 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
PROT_READ | PROT_WRITE, true, false, &error_msg);
- if (UNLIKELY(mem_map == NULL)) {
+ if (UNLIKELY(mem_map == nullptr)) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
- return NULL;
+ return nullptr;
+ }
+ mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
+ if (kIsDebugBuild) {
+ ReaderMutexLock mu2(Thread::Current(), *Locks::heap_bitmap_lock_);
+ auto* heap = Runtime::Current()->GetHeap();
+ auto* live_bitmap = heap->GetLiveBitmap();
+ auto* space_bitmap = live_bitmap->GetContinuousSpaceBitmap(obj);
+ CHECK(space_bitmap == nullptr) << obj << " overlaps with bitmap " << *space_bitmap;
+ auto* obj_end = reinterpret_cast<mirror::Object*>(mem_map->End());
+ space_bitmap = live_bitmap->GetContinuousSpaceBitmap(obj_end - 1);
+ CHECK(space_bitmap == nullptr) << obj_end << " overlaps with bitmap " << *space_bitmap;
}
MutexLock mu(self, lock_);
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
large_objects_.push_back(obj);
mem_maps_.Put(obj, mem_map);
const size_t allocation_size = mem_map->BaseSize();
@@ -195,7 +206,7 @@ void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg)
for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
MemMap* mem_map = it->second;
callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
- callback(NULL, NULL, 0, arg);
+ callback(nullptr, nullptr, 0, arg);
}
}
@@ -305,7 +316,7 @@ FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
PROT_READ | PROT_WRITE, true, false, &error_msg);
- CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
+ CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}
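The kIsDebugBuild block added above cross-checks a fresh large-object mapping against the heap's continuous-space bitmaps at both the first and the last object address, since an overlap could occur at either end of the new range. A simplified sketch of the same two-endpoint probe over plain address intervals (types illustrative):

#include <cstdint>
#include <vector>

struct Interval { uintptr_t begin; uintptr_t end; };  // Covers [begin, end).

bool OverlapsAnySpace(const std::vector<Interval>& spaces,
                      uintptr_t begin, uintptr_t end) {
  const uintptr_t last = end - 1;
  for (const Interval& s : spaces) {
    // Probe both endpoints, mirroring the CHECKs on obj and obj_end - 1.
    if ((begin >= s.begin && begin < s.end) || (last >= s.begin && last < s.end)) {
      return true;
    }
  }
  return false;
}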
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 9195b06017..b014217fe2 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -75,13 +75,13 @@ MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size,
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
<< PrettySize(*initial_size) << ") is larger than its capacity ("
<< PrettySize(*growth_limit) << ")";
- return NULL;
+ return nullptr;
}
if (*growth_limit > *capacity) {
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
<< PrettySize(*growth_limit) << ") is larger than the capacity ("
<< PrettySize(*capacity) << ")";
- return NULL;
+ return nullptr;
}
// Page align growth limit and capacity which will be used to manage mmapped storage
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index bbf1bbbdbd..5f3a1db3f7 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -60,7 +60,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// Allocate num_bytes without allowing the underlying space to grow.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
- // Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
+ // Return the storage space required by obj. If usable_size isn't null then it is set to the
// amount of the storage space that may be used by obj.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
virtual size_t Free(Thread* self, mirror::Object* ptr)
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index b88ce24114..19109f0d59 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -40,7 +40,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
// space to confirm the request was granted.
static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
- // Allocate num_bytes, returns nullptr if the space is full.
+ // Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index 9d582a3f86..25d44452e2 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -73,18 +73,18 @@ inline mirror::Object* RosAllocSpace::AllocCommon(Thread* self, size_t num_bytes
rosalloc_->Alloc<kThreadSafe>(self, num_bytes, &rosalloc_bytes_allocated,
&rosalloc_usable_size,
&rosalloc_bytes_tl_bulk_allocated));
- if (LIKELY(result != NULL)) {
+ if (LIKELY(result != nullptr)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
- DCHECK(bytes_allocated != NULL);
+ DCHECK(bytes_allocated != nullptr);
*bytes_allocated = rosalloc_bytes_allocated;
DCHECK_EQ(rosalloc_usable_size, rosalloc_->UsableSize(result));
if (usable_size != nullptr) {
*usable_size = rosalloc_usable_size;
}
- DCHECK(bytes_tl_bulk_allocated != NULL);
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
*bytes_tl_bulk_allocated = rosalloc_bytes_tl_bulk_allocated;
}
return result;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index eb1d5f456c..2c7d93ecd1 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -64,9 +64,9 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
capacity, low_memory_mode, running_on_valgrind);
- if (rosalloc == NULL) {
+ if (rosalloc == nullptr) {
LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
- return NULL;
+ return nullptr;
}
// Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary
@@ -113,10 +113,10 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_siz
size_t starting_size = Heap::kDefaultStartingSize;
MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
requested_begin);
- if (mem_map == NULL) {
+ if (mem_map == nullptr) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
- return NULL;
+ return nullptr;
}
RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
@@ -145,7 +145,7 @@ allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_
art::gc::allocator::RosAlloc::kPageReleaseModeAll :
art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
running_on_valgrind);
- if (rosalloc != NULL) {
+ if (rosalloc != nullptr) {
rosalloc->SetFootprintLimit(initial_size);
} else {
PLOG(ERROR) << "RosAlloc::Create failed";
@@ -170,7 +170,7 @@ mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
rosalloc_->SetFootprintLimit(footprint);
}
// Note RosAlloc zeroes memory internally.
- // Return the new allocation or NULL.
+ // Return the new allocation or null.
CHECK(!kDebugSpaces || result == nullptr || Contains(result));
return result;
}
@@ -192,7 +192,7 @@ MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& n
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
if (kDebugSpaces) {
- CHECK(ptr != NULL);
+ CHECK(ptr != nullptr);
CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
}
if (kRecentFreeCount > 0) {
@@ -309,7 +309,7 @@ void RosAllocSpace::InspectAllRosAllocWithSuspendAll(
MutexLock mu2(self, *Locks::thread_list_lock_);
rosalloc_->InspectAll(callback, arg);
if (do_null_callback_at_end) {
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
}
tl->ResumeAll();
@@ -324,7 +324,7 @@ void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end,
// from SignalCatcher::HandleSigQuit().
rosalloc_->InspectAll(callback, arg);
if (do_null_callback_at_end) {
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
} else if (Locks::mutator_lock_->IsSharedHeld(self)) {
// The mutators are not suspended yet and we have a shared access
diff --git a/runtime/gc_map.h b/runtime/gc_map.h
index ffe54c4706..b4ccdd6d54 100644
--- a/runtime/gc_map.h
+++ b/runtime/gc_map.h
@@ -28,7 +28,7 @@ namespace art {
class NativePcOffsetToReferenceMap {
public:
explicit NativePcOffsetToReferenceMap(const uint8_t* data) : data_(data) {
- CHECK(data_ != NULL);
+ CHECK(data_ != nullptr);
}
// The number of entries in the table.
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 23af25d468..fb7ff549e9 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -538,7 +538,7 @@ class Hprof : public SingleRootVisitor {
// STRING format:
// ID: ID for this string
- // U1*: UTF8 characters for string (NOT NULL terminated)
+ // U1*: UTF8 characters for string (NOT null terminated)
// (the record format encodes the length)
__ AddU4(id);
__ AddUtf8String(string.c_str());
@@ -931,7 +931,7 @@ void Hprof::DumpHeapObject(mirror::Object* obj) {
mirror::Class* c = obj->GetClass();
if (c == nullptr) {
- // This object will bother HprofReader, because it has a NULL
+ // This object will bother HprofReader, because it has a null
// class, so just don't dump it. It could be
// gDvm.unlinkedJavaLangClass or it could be an object just
// allocated which hasn't been initialized yet.
@@ -1057,7 +1057,7 @@ void Hprof::DumpHeapArray(mirror::Array* obj, mirror::Class* klass) {
__ AddU4(length);
__ AddClassId(LookupClassId(klass));
- // Dump the elements, which are always objects or NULL.
+ // Dump the elements, which are always objects or null.
__ AddIdList(obj->AsObjectArray<mirror::Object>());
} else {
size_t size;
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index e571a0ebc1..639be515f6 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -31,7 +31,7 @@ class Object;
// Returns "false" if something looks bad.
inline bool IndirectReferenceTable::GetChecked(IndirectRef iref) const {
if (UNLIKELY(iref == nullptr)) {
- LOG(WARNING) << "Attempt to look up NULL " << kind_;
+ LOG(WARNING) << "Attempt to look up nullptr " << kind_;
return false;
}
if (UNLIKELY(GetIndirectRefKind(iref) == kHandleScopeOrInvalid)) {
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index d6f9682053..e2b95599a6 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -103,9 +103,9 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, mirror::Object* obj) {
prevState.all = cookie;
size_t topIndex = segment_state_.parts.topIndex;
- CHECK(obj != NULL);
+ CHECK(obj != nullptr);
VerifyObject(obj);
- DCHECK(table_ != NULL);
+ DCHECK(table_ != nullptr);
DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
if (topIndex == max_entries_) {
@@ -144,7 +144,7 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, mirror::Object* obj) {
<< " holes=" << segment_state_.parts.numHoles;
}
- DCHECK(result != NULL);
+ DCHECK(result != nullptr);
return result;
}
@@ -172,7 +172,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
int topIndex = segment_state_.parts.topIndex;
int bottomIndex = prevState.parts.topIndex;
- DCHECK(table_ != NULL);
+ DCHECK(table_ != nullptr);
DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid &&
@@ -227,9 +227,8 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
}
}
} else {
- // Not the top-most entry. This creates a hole. We NULL out the
- // entry to prevent somebody from deleting it twice and screwing up
- // the hole count.
+ // Not the top-most entry. This creates a hole. We null out the entry to prevent somebody
+ // from deleting it twice and screwing up the hole count.
if (table_[idx].GetReference()->IsNull()) {
LOG(INFO) << "--- WEIRD: removing null entry " << idx;
return false;
@@ -270,9 +269,7 @@ void IndirectReferenceTable::Dump(std::ostream& os) const {
ReferenceTable::Table entries;
for (size_t i = 0; i < Capacity(); ++i) {
mirror::Object* obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
- if (UNLIKELY(obj == nullptr)) {
- // Remove NULLs.
- } else {
+ if (obj != nullptr) {
obj = table_[i].GetReference()->Read();
entries.push_back(GcRoot<mirror::Object>(obj));
}
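The Remove path simplified above keeps the table compact only lazily: deleting the top entry pops it (and any holes directly beneath), while deleting an interior entry just nulls the slot and bumps a hole counter so a double delete can be detected. A small sketch of that bookkeeping, with illustrative types in place of the IRT's segment state:

#include <cstddef>
#include <vector>

struct IrtSketch {
  std::vector<void*> slots;  // nullptr marks a hole.
  std::size_t top_index = 0; // One past the highest live entry.
  std::size_t holes = 0;

  bool Remove(std::size_t idx) {
    if (idx >= top_index || slots[idx] == nullptr) {
      return false;  // Stale or repeated delete.
    }
    if (idx == top_index - 1) {
      slots[idx] = nullptr;
      --top_index;
      // Reclaim any holes now sitting at the top of the segment.
      while (top_index > 0 && holes > 0 && slots[top_index - 1] == nullptr) {
        --top_index;
        --holes;
      }
    } else {
      slots[idx] = nullptr;  // Null out so a second delete is caught above.
      ++holes;
    }
    return true;
  }
};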
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 0072184f62..a0e53af181 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -268,9 +268,9 @@ class IndirectReferenceTable {
bool IsValid() const;
/*
- * Add a new entry. "obj" must be a valid non-NULL object reference.
+ * Add a new entry. "obj" must be a valid non-nullptr object reference.
*
- * Returns NULL if the table is full (max entries reached, or alloc
+ * Returns nullptr if the table is full (max entries reached, or alloc
* failed during expansion).
*/
IndirectRef Add(uint32_t cookie, mirror::Object* obj)
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index fe1b8f00dc..c20002bdf9 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -216,7 +216,7 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
ASSERT_EQ(0U, irt.Capacity()) << "temporal del not empty";
CheckDump(&irt, 0, 0);
- // nullptr isn't a valid iref.
+ // null isn't a valid iref.
ASSERT_TRUE(irt.Get(nullptr) == nullptr);
// Stale lookup.
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 51600f7ee8..e6c333d5cd 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -177,14 +177,14 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
- if (m == NULL) {
+ if (m == nullptr) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Skipping upcall. Frame " << GetFrameId();
}
last_return_pc_ = 0;
return true; // Ignore upcalls.
}
- if (GetCurrentQuickFrame() == NULL) {
+ if (GetCurrentQuickFrame() == nullptr) {
bool interpreter_frame = true;
InstrumentationStackFrame instrumentation_frame(GetThisObject(), m, 0, GetFrameId(),
interpreter_frame);
@@ -309,7 +309,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
struct RestoreStackVisitor : public StackVisitor {
RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
- : StackVisitor(thread_in, NULL), thread_(thread_in),
+ : StackVisitor(thread_in, nullptr), thread_(thread_in),
instrumentation_exit_pc_(instrumentation_exit_pc),
instrumentation_(instrumentation),
instrumentation_stack_(thread_in->GetInstrumentationStack()),
@@ -320,14 +320,14 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
return false; // Stop.
}
mirror::ArtMethod* m = GetMethod();
- if (GetCurrentQuickFrame() == NULL) {
+ if (GetCurrentQuickFrame() == nullptr) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Ignoring a shadow frame. Frame " << GetFrameId()
<< " Method=" << PrettyMethod(m);
}
return true; // Ignore shadow frames.
}
- if (m == NULL) {
+ if (m == nullptr) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Skipping upcall. Frame " << GetFrameId();
}
@@ -645,7 +645,7 @@ void Instrumentation::ResetQuickAllocEntryPoints() {
Runtime* runtime = Runtime::Current();
if (runtime->IsStarted()) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- runtime->GetThreadList()->ForEach(ResetQuickAllocEntryPointsForThread, NULL);
+ runtime->GetThreadList()->ForEach(ResetQuickAllocEntryPointsForThread, nullptr);
}
}
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 1f1f9e8b96..a85d10fafb 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -194,7 +194,7 @@ mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
// GetResolvedString() contains a RB.
mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
- if (image_string != NULL) {
+ if (image_string != nullptr) {
return image_string;
}
}
@@ -236,11 +236,6 @@ mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
if (strong != nullptr) {
return strong;
}
- // Check the image for a match.
- mirror::String* image = LookupStringFromImage(s);
- if (image != nullptr) {
- return is_strong ? InsertStrong(image) : InsertWeak(image);
- }
// There is no match in the strong table, check the weak table.
mirror::String* weak = LookupWeak(s);
if (weak != nullptr) {
@@ -251,6 +246,11 @@ mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
}
return weak;
}
+ // Check the image for a match.
+ mirror::String* image = LookupStringFromImage(s);
+ if (image != nullptr) {
+ return is_strong ? InsertStrong(image) : InsertWeak(image);
+ }
// No match in the strong table or the weak table. Insert into the strong / weak table.
return is_strong ? InsertStrong(s) : InsertWeak(s);
}
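The reordered Insert above settles lookup precedence: strong table, then weak table, then the boot image, and only then a fresh insert, so a string already weakly interned keeps its identity instead of being shadowed by an equal image string. A sketch of the resulting order (plain sets, with the weak-to-strong promotion detail omitted):

#include <string>
#include <unordered_set>

const std::string* InternSketch(std::unordered_set<std::string>& strong,
                                std::unordered_set<std::string>& weak,
                                const std::unordered_set<std::string>& image,
                                const std::string& s) {
  auto it = strong.find(s);
  if (it != strong.end()) return &*it;             // 1. Already strongly interned.
  it = weak.find(s);
  if (it != weak.end()) return &*it;               // 2. Weak entry wins over the image.
  auto image_it = image.find(s);
  if (image_it != image.end()) return &*image_it;  // 3. Boot-image string.
  return &*strong.insert(s).first;                 // 4. Otherwise a new strong entry.
}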
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 200a764520..1e5d3c22c9 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -150,7 +150,7 @@ class InternTable {
UnorderedSet post_zygote_table_;
};
- // Insert if non null, otherwise return nullptr.
+ // Insert if non-null, otherwise return null.
mirror::String* Insert(mirror::String* s, bool is_strong)
LOCKS_EXCLUDED(Locks::intern_table_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index d462e1444f..194d0af6fe 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -38,8 +38,8 @@ TEST_F(InternTableTest, Intern) {
EXPECT_TRUE(foo_1->Equals("foo"));
EXPECT_TRUE(foo_2->Equals("foo"));
EXPECT_TRUE(foo_3->Equals("foo"));
- EXPECT_TRUE(foo_1.Get() != NULL);
- EXPECT_TRUE(foo_2.Get() != NULL);
+ EXPECT_TRUE(foo_1.Get() != nullptr);
+ EXPECT_TRUE(foo_2.Get() != nullptr);
EXPECT_EQ(foo_1.Get(), foo_2.Get());
EXPECT_NE(foo_1.Get(), bar.Get());
EXPECT_NE(foo_2.Get(), bar.Get());
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 4801124fc6..423b9520c9 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -78,7 +78,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
jobject jresult;
{
ScopedThreadStateChange tsc(self, kNative);
@@ -99,12 +100,14 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
} else if (shorty == "SIZ") {
typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
+ fntype* const fn =
+ reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
@@ -122,9 +125,11 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[1])));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
} else if (shorty == "ZILL") {
@@ -133,9 +138,11 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[1])));
ScopedLocalRef<jobject> arg2(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[2])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[2])));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
} else if (shorty == "VILII") {
@@ -144,7 +151,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[1])));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
} else if (shorty == "VLILII") {
@@ -153,9 +161,11 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
ScopedLocalRef<jobject> arg2(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[2])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[2])));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), arg0.get(), args[1], arg2.get(), args[3], args[4]);
} else {
@@ -187,7 +197,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
jobject jresult;
{
ScopedThreadStateChange tsc(self, kNative);
@@ -302,7 +313,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
const DexFile::CodeItem* code_item = method->GetCodeItem();
uint16_t num_regs;
uint16_t num_ins;
- if (code_item != NULL) {
+ if (code_item != nullptr) {
num_regs = code_item->registers_size_;
num_ins = code_item->ins_size_;
} else if (method->IsAbstract()) {
@@ -325,7 +336,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
size_t cur_reg = num_regs - num_ins;
if (!method->IsStatic()) {
- CHECK(receiver != NULL);
+ CHECK(receiver != nullptr);
shadow_frame->SetVRegReference(cur_reg, receiver);
++cur_reg;
}
@@ -365,7 +376,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
}
if (LIKELY(!method->IsNative())) {
JValue r = Execute(self, code_item, *shadow_frame, JValue());
- if (result != NULL) {
+ if (result != nullptr) {
*result = r;
}
} else {
@@ -386,8 +397,9 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
JValue value;
- value.SetJ(ret_val->GetJ()); // Set value to last known result in case the shadow frame chain is empty.
- while (shadow_frame != NULL) {
+ // Set value to last known result in case the shadow frame chain is empty.
+ value.SetJ(ret_val->GetJ());
+ while (shadow_frame != nullptr) {
self->SetTopOfShadowStack(shadow_frame);
const DexFile::CodeItem* code_item = shadow_frame->GetMethod()->GetCodeItem();
const uint32_t dex_pc = shadow_frame->GetDexPC();
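A note on the NULL -> nullptr conversions above: NULL is an integer constant, so in overload resolution and template deduction it behaves as an integer, while nullptr has its own type that converts only to pointers. A minimal standalone sketch of the difference (hypothetical Take(), not part of this patch; any C++11 compiler):

#include <cstddef>

template <typename T>
void Take(T /*value*/) {}

int main() {
  Take(NULL);     // T deduces to an integer type (int or long), never a pointer
  Take(nullptr);  // T deduces to std::nullptr_t, which converts to any pointer type
  return 0;
}

The same property is what makes a comparison like receiver != nullptr unambiguous regardless of how the platform happens to define NULL.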
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 3ae611bcbf..4765ebcf14 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -489,7 +489,7 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const DexFile::CodeItem* code_item = called_method->GetCodeItem();
const uint16_t num_ins = (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
uint16_t num_regs;
- if (LIKELY(code_item != NULL)) {
+ if (LIKELY(code_item != nullptr)) {
num_regs = code_item->registers_size_;
DCHECK_EQ(num_ins, code_item->ins_size_);
} else {
@@ -543,11 +543,11 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
switch (shorty[shorty_pos + 1]) {
case 'L': {
Object* o = shadow_frame.GetVRegReference(src_reg);
- if (do_assignability_check && o != NULL) {
+ if (do_assignability_check && o != nullptr) {
Class* arg_type =
new_shadow_frame->GetMethod()->GetClassFromTypeIndex(
params->GetTypeItem(shorty_pos).type_idx_, true);
- if (arg_type == NULL) {
+ if (arg_type == nullptr) {
CHECK(self->IsExceptionPending());
return false;
}
@@ -651,7 +651,7 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
Class* arrayClass = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(arrayClass == NULL)) {
+ if (UNLIKELY(arrayClass == nullptr)) {
DCHECK(self->IsExceptionPending());
return false;
}
@@ -671,7 +671,7 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
Object* newArray = Array::Alloc<true>(self, arrayClass, length,
arrayClass->GetComponentSizeShift(),
Runtime::Current()->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(newArray == NULL)) {
+ if (UNLIKELY(newArray == nullptr)) {
DCHECK(self->IsExceptionPending());
return false;
}
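The 'L' case in DoCall above accepts a null reference outright and otherwise requires the object to be assignable to the declared parameter type before it is copied into the callee's shadow frame; a failure leaves a pending exception and aborts the call. A self-contained sketch of that shape, using stand-in Type/Ref structs rather than ART's mirror classes:

struct Type {
  const Type* super = nullptr;
  bool IsAssignableFrom(const Type* t) const {
    for (; t != nullptr; t = t->super) {
      if (t == this) return true;
    }
    return false;
  }
};

struct Ref { const Type* type; };

// Mirrors the interpreter's rule: null always passes, non-null must match.
bool CheckArg(const Ref* arg, const Type* declared) {
  return arg == nullptr || declared->IsAssignableFrom(arg->type);
}

int main() {
  Type object_type;   // stand-in for java.lang.Object
  Type string_type;   // stand-in for a subclass
  string_type.super = &object_type;
  Ref s = {&string_type};
  return CheckArg(&s, &object_type) ? 0 : 1;  // assignable, exits 0
}

The nullptr comparison sits in front of the class lookup for the same reason it does in the patch: a null argument needs no type resolution at all.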
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 0e0d56a3ad..dbedc164f3 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -367,9 +367,9 @@ static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruc
uint32_t raw_value = shadow_frame.GetVReg(i);
Object* ref_value = shadow_frame.GetVRegReference(i);
oss << StringPrintf(" vreg%u=0x%08X", i, raw_value);
- if (ref_value != NULL) {
+ if (ref_value != nullptr) {
if (ref_value->GetClass()->IsStringClass() &&
- ref_value->AsString()->GetCharArray() != NULL) {
+ ref_value->AsString()->GetCharArray() != nullptr) {
oss << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
} else {
oss << "/" << PrettyTypeOf(ref_value);
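The guards above keep TraceExecution from dereferencing a null reference (or a string whose backing char array is gone) while dumping registers. A reduced sketch of the same guarded-preview pattern, with a hypothetical Obj standing in for mirror::Object:

#include <sstream>
#include <string>

struct Obj {
  bool is_string;
  std::string value;  // stands in for the decoded java.lang.String payload
};

std::string Preview(const Obj* ref, unsigned idx, unsigned raw) {
  std::ostringstream oss;
  oss << " vreg" << idx << "=0x" << std::hex << raw;
  if (ref != nullptr) {  // the null check the diff rewrites to nullptr
    if (ref->is_string) {
      oss << "/java.lang.String \"" << ref->value << "\"";
    } else {
      oss << "/<object>";
    }
  }
  return oss.str();
}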
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index cead26c2ea..dc0b6870a0 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -55,7 +55,8 @@ namespace interpreter {
} while (false)
#define UPDATE_HANDLER_TABLE() \
- currentHandlersTable = handlersTable[Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
+ currentHandlersTable = handlersTable[ \
+ Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
#define BACKWARD_BRANCH_INSTRUMENTATION(offset) \
do { \
@@ -328,10 +329,10 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
self->AllowThreadSuspension();
const uint8_t vreg_index = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(vreg_index);
- if (do_assignability_check && obj_result != NULL) {
+ if (do_assignability_check && obj_result != nullptr) {
Class* return_type = shadow_frame.GetMethod()->GetReturnType();
obj_result = shadow_frame.GetVRegReference(vreg_index);
- if (return_type == NULL) {
+ if (return_type == nullptr) {
// Return the pending exception.
HANDLE_PENDING_EXCEPTION();
}
@@ -364,7 +365,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
int32_t val = inst->VRegB_11n(inst_data);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(1);
}
@@ -375,7 +376,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
int32_t val = inst->VRegB_21s();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(2);
}
@@ -386,7 +387,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
int32_t val = inst->VRegB_31i();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(3);
}
@@ -397,7 +398,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(2);
}
@@ -426,7 +427,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(CONST_STRING) {
String* s = ResolveString(self, shadow_frame, inst->VRegB_21c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
@@ -437,7 +438,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(CONST_STRING_JUMBO) {
String* s = ResolveString(self, shadow_frame, inst->VRegB_31c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
@@ -449,7 +450,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(CONST_CLASS) {
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
@@ -460,7 +461,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(MONITOR_ENTER) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -472,7 +473,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(MONITOR_EXIT) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -485,11 +486,11 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(CHECK_CAST) {
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
- if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+ if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
ThrowClassCastException(c, obj->GetClass());
HANDLE_PENDING_EXCEPTION();
} else {
@@ -502,11 +503,11 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(INSTANCE_OF) {
Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
- shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+ shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
ADVANCE(2);
}
}
@@ -514,7 +515,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(ARRAY_LENGTH) {
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
- if (UNLIKELY(array == NULL)) {
+ if (UNLIKELY(array == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -529,7 +530,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
Object* obj = AllocObjectFromCode<do_access_check, true>(
inst->VRegB_21c(), shadow_frame.GetMethod(), self,
runtime->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
obj->GetClass()->AssertInitializedOrInitializingInThread(self);
@@ -551,7 +552,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
Object* obj = AllocArrayFromCode<do_access_check, true>(
inst->VRegC_22c(), length, shadow_frame.GetMethod(), self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
@@ -591,7 +592,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(THROW) {
Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(exception == NULL)) {
+ if (UNLIKELY(exception == nullptr)) {
ThrowNullPointerException("throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
@@ -778,7 +779,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_NE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -795,7 +797,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_LT) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -812,7 +815,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_GE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -829,7 +833,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_GT) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -846,7 +851,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_LE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -966,7 +972,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -984,7 +990,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1002,7 +1008,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1020,7 +1026,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1038,7 +1044,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1056,7 +1062,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1074,7 +1080,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1092,7 +1098,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1111,7 +1117,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1130,7 +1136,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1149,7 +1155,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1168,7 +1174,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1187,7 +1193,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1206,7 +1212,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1224,43 +1230,50 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_BOOLEAN) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_BYTE) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_CHAR) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_SHORT) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_WIDE) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_OBJECT) {
- bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
@@ -1308,314 +1321,366 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_BOOLEAN) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_BYTE) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_CHAR) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_SHORT) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_WIDE) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_OBJECT) {
- bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BOOLEAN) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BYTE) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_CHAR) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_SHORT) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_WIDE) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_OBJECT) {
- bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BOOLEAN_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BYTE_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_CHAR_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_SHORT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_WIDE_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_OBJECT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_BOOLEAN) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_BYTE) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_CHAR) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_SHORT) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_WIDE) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_OBJECT) {
- bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL) {
- bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE) {
- bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_SUPER) {
- bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_SUPER_RANGE) {
- bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_DIRECT) {
- bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_DIRECT_RANGE) {
- bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_INTERFACE) {
- bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_INTERFACE_RANGE) {
- bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_STATIC) {
- bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_STATIC_RANGE) {
- bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_QUICK) {
- bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<false>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE_QUICK) {
- bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<true>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_INT)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NOT_INT)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NOT_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INT_TO_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INT_TO_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INT_TO_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(LONG_TO_INT)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(LONG_TO_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(LONG_TO_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
@@ -1636,7 +1701,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(FLOAT_TO_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
@@ -1657,7 +1723,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(DOUBLE_TO_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
@@ -2213,15 +2280,17 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(DIV_INT_LIT16) {
- bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ bool success = DoIntDivide(
+ shadow_frame, inst->VRegA_22s(inst_data), shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(REM_INT_LIT16) {
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ bool success = DoIntRemainder(
+ shadow_frame, inst->VRegA_22s(inst_data), shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
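The two implementations adjacent here share their opcode handlers but differ in dispatch: interpreter_goto_table_impl.cc above threads execution through a table of label addresses (the UPDATE_HANDLER_TABLE macro swaps in a different table when instrumentation is active), while interpreter_switch_impl.cc below loops over a switch. A minimal computed-goto loop showing the mechanism, assuming a GCC/Clang toolchain since labels-as-values is a GNU extension:

#include <cstdio>

int Run(const unsigned char* code) {
  // One label address per opcode, analogous to handlersTable.
  static void* const handlers[] = {&&op_push1, &&op_add, &&op_halt};
  int acc = 0;
  goto *handlers[*code];
op_push1:
  acc += 1;
  goto *handlers[*++code];  // dispatch straight to the next handler
op_add:
  acc += acc;
  goto *handlers[*++code];
op_halt:
  return acc;
}

int main() {
  const unsigned char program[] = {0, 0, 1, 2};  // push1, push1, add, halt
  std::printf("%d\n", Run(program));             // prints 4
  return 0;
}

Each handler jumps directly to the next one instead of returning to a central loop, which is why swapping the table, as UPDATE_HANDLER_TABLE does, is enough to reroute every subsequent instruction through instrumentation-aware handlers.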
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index fe7ad770af..82f0009b85 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -68,7 +68,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
uint32_t dex_pc = shadow_frame.GetDexPC();
bool notified_method_entry_event = false;
- const instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
+ const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing.
if (kIsDebugBuild) {
self->AssertNoPendingException();
@@ -231,11 +231,11 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
self->AllowThreadSuspension();
const size_t ref_idx = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
- if (do_assignability_check && obj_result != NULL) {
+ if (do_assignability_check && obj_result != nullptr) {
Class* return_type = shadow_frame.GetMethod()->GetReturnType();
// Re-load since it might have moved.
obj_result = shadow_frame.GetVRegReference(ref_idx);
- if (return_type == NULL) {
+ if (return_type == nullptr) {
// Return the pending exception.
HANDLE_PENDING_EXCEPTION();
}
@@ -266,7 +266,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
int4_t val = inst->VRegB_11n(inst_data);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_1xx();
break;
@@ -277,7 +277,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
int16_t val = inst->VRegB_21s();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_2xx();
break;
@@ -288,7 +288,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
int32_t val = inst->VRegB_31i();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_3xx();
break;
@@ -299,7 +299,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_2xx();
break;
@@ -328,7 +328,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::CONST_STRING: {
PREAMBLE();
String* s = ResolveString(self, shadow_frame, inst->VRegB_21c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
@@ -339,7 +339,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::CONST_STRING_JUMBO: {
PREAMBLE();
String* s = ResolveString(self, shadow_frame, inst->VRegB_31c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
@@ -351,7 +351,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
@@ -362,7 +362,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::MONITOR_ENTER: {
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -374,7 +374,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::MONITOR_EXIT: {
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -387,11 +387,11 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
- if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+ if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
ThrowClassCastException(c, obj->GetClass());
HANDLE_PENDING_EXCEPTION();
} else {
@@ -404,11 +404,12 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
- shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+ shadow_frame.SetVReg(inst->VRegA_22c(inst_data),
+ (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
inst = inst->Next_2xx();
}
break;
@@ -416,7 +417,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::ARRAY_LENGTH: {
PREAMBLE();
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
- if (UNLIKELY(array == NULL)) {
+ if (UNLIKELY(array == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -431,7 +432,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
Object* obj = AllocObjectFromCode<do_access_check, true>(
inst->VRegB_21c(), shadow_frame.GetMethod(), self,
runtime->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
obj->GetClass()->AssertInitializedOrInitializingInThread(self);
@@ -454,7 +455,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
Object* obj = AllocArrayFromCode<do_access_check, true>(
inst->VRegC_22c(), length, shadow_frame.GetMethod(), self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
@@ -498,7 +499,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::THROW: {
PREAMBLE();
Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(exception == NULL)) {
+ if (UNLIKELY(exception == nullptr)) {
ThrowNullPointerException("throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
@@ -651,7 +652,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_EQ: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) ==
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -664,7 +666,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_NE: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -677,7 +680,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_LT: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -690,7 +694,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_GE: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -703,7 +708,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_GT: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -716,7 +722,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_LE: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -808,7 +815,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_BOOLEAN: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -826,7 +833,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_BYTE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -844,7 +851,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_CHAR: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -862,7 +869,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_SHORT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -880,7 +887,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -898,7 +905,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_WIDE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -916,7 +923,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_OBJECT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -934,7 +941,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_BOOLEAN: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -953,7 +960,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_BYTE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -972,7 +979,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_CHAR: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -991,7 +998,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_SHORT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1010,7 +1017,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1029,7 +1036,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_WIDE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1048,7 +1055,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_OBJECT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1066,43 +1073,50 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IGET_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_BYTE: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_CHAR: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_SHORT: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_WIDE: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_OBJECT: {
PREAMBLE();
- bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
@@ -1150,272 +1164,318 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::SGET_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_BYTE: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_CHAR: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_SHORT: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_WIDE: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_OBJECT: {
PREAMBLE();
- bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BYTE: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_CHAR: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_SHORT: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_WIDE: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_OBJECT: {
PREAMBLE();
- bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BOOLEAN_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BYTE_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_CHAR_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_SHORT_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_WIDE_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_OBJECT_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_BYTE: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_CHAR: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_SHORT: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_WIDE: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_OBJECT: {
PREAMBLE();
- bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::INVOKE_VIRTUAL: {
PREAMBLE();
- bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_VIRTUAL_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_SUPER: {
PREAMBLE();
- bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_SUPER_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_DIRECT: {
PREAMBLE();
- bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_DIRECT_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_INTERFACE: {
PREAMBLE();
- bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_INTERFACE_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_STATIC: {
PREAMBLE();
- bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_STATIC_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_VIRTUAL_QUICK: {
PREAMBLE();
- bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<false>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
PREAMBLE();
- bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<true>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::NEG_INT:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NOT_INT:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NEG_LONG:
PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NOT_LONG:
PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NEG_FLOAT:
PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NEG_DOUBLE:
PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::INT_TO_LONG:
@@ -1500,20 +1560,20 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
break;
case Instruction::INT_TO_BYTE:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<int8_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int8_t>(
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
case Instruction::INT_TO_CHAR:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<uint16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<uint16_t>(
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
case Instruction::INT_TO_SHORT:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int16_t>(
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
case Instruction::ADD_INT: {
@@ -2050,14 +2110,16 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::DIV_INT_LIT16: {
PREAMBLE();
bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::REM_INT_LIT16: {
PREAMBLE();
bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
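
The hunks above are pure reflow plus NULL-to-nullptr cleanup; the underlying pattern is that every field and invoke opcode compiles down to one instantiation of a helper template (DoFieldGet, DoFieldPut, DoInvoke), with the access kind, primitive type, and access-check flag fixed as template arguments, so the hot switch body does no runtime type dispatch. A minimal sketch of that idiom, with hypothetical names rather than the real ART helpers:

    // Compile-time dispatch on a primitive-type template parameter, in the
    // spirit of DoFieldGet<kind, type, do_access_check>. Illustrative only.
    #include <cstdint>

    enum class PrimType { kBoolean, kInt, kLong };

    struct Frame {
      int32_t regs[16];
      int64_t wide_regs[16];
    };

    template <PrimType kType>
    bool DoGetSketch(Frame* f, int vreg) {
      // Each instantiation keeps only its own branch after constant folding,
      // so the per-opcode case in the interpreter stays branch-free.
      if (kType == PrimType::kLong) {
        return f->wide_regs[vreg] != 0;
      }
      return f->regs[vreg] != 0;
    }

    // One instantiation per opcode, mirroring the case labels above:
    //   DoGetSketch<PrimType::kBoolean>(frame, a);  // like IGET_BOOLEAN
    //   DoGetSketch<PrimType::kInt>(frame, a);      // like IGET
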
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 61def350f9..f30c93a5af 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -915,7 +915,7 @@ static void UnstartedJNIArrayCreateObjectArray(Thread* self,
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
mirror::Class* array_class = class_linker->FindArrayClass(self, &element_class);
- if (UNLIKELY(array_class == NULL)) {
+ if (UNLIKELY(array_class == nullptr)) {
CHECK(self->IsExceptionPending());
return;
}
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 8dffee606c..55441c9f39 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -127,7 +127,7 @@ struct JdwpState {
* Among other things, this binds to a port to listen for a connection from
* the debugger.
*
- * Returns a newly-allocated JdwpState struct on success, or NULL on failure.
+ * Returns a newly-allocated JdwpState struct on success, or null on failure.
*/
static JdwpState* Create(const JdwpOptions* options)
LOCKS_EXCLUDED(Locks::mutator_lock_);
diff --git a/runtime/jdwp/jdwp_expand_buf.cc b/runtime/jdwp/jdwp_expand_buf.cc
index cc85cdd72a..e492d7eb26 100644
--- a/runtime/jdwp/jdwp_expand_buf.cc
+++ b/runtime/jdwp/jdwp_expand_buf.cc
@@ -156,7 +156,7 @@ static void SetUtf8String(uint8_t* buf, const char* str, size_t strLen) {
}
/*
- * Add a UTF8 string as a 4-byte length followed by a non-NULL-terminated
+ * Add a UTF8 string as a 4-byte length followed by a non-null-terminated
* string.
*
* Because these strings are coming out of the VM, it's safe to assume that
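
The corrected comment describes the JDWP wire format exactly: a 4-byte length followed by the raw UTF-8 bytes, with no terminating byte. A self-contained sketch of that encoding, assuming big-endian byte order on the wire as JDWP requires (illustrative, not the expand-buf implementation):

    #include <cstdint>
    #include <string>
    #include <vector>

    // Append a 4-byte big-endian length, then the bytes, with no trailing '\0'.
    void AppendJdwpUtf8(std::vector<uint8_t>* buf, const std::string& s) {
      const uint32_t len = static_cast<uint32_t>(s.size());
      buf->push_back((len >> 24) & 0xff);
      buf->push_back((len >> 16) & 0xff);
      buf->push_back((len >> 8) & 0xff);
      buf->push_back(len & 0xff);
      buf->insert(buf->end(), s.begin(), s.end());
    }
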
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 2457f1452c..8e9ab32c88 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -133,7 +133,7 @@ static JdwpError RequestInvoke(JdwpState*, Request* request, ExpandBuf* pReply,
if (is_constructor) {
// If we invoked a constructor (which actually returns void), return the receiver,
- // unless we threw, in which case we return NULL.
+ // unless we threw, in which case we return null.
resultTag = JT_OBJECT;
resultValue = (exceptObjId == 0) ? object_id : 0;
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 8a20e39bc3..da891fed77 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -86,10 +86,10 @@ class JitCodeCache {
// Return true if the code cache contains a code ptr.
bool ContainsCodePtr(const void* ptr) const;
- // Reserve a region of code of size at least "size". Returns nullptr if there is no more room.
+ // Reserve a region of code of size at least "size". Returns null if there is no more room.
uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
- // Add a data array of size (end - begin) with the associated contents, returns nullptr if there
+ // Add a data array of size (end - begin) with the associated contents, returns null if there
// is no more room.
uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
LOCKS_EXCLUDED(lock_);
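
The reworded contract is easy to misuse: both calls report exhaustion by returning null rather than by throwing, so a JIT caller has to bail out explicitly. A hedged caller sketch using the signatures above (the surrounding names and the fallback policy are assumptions):

    uint8_t* code = cache->ReserveCode(self, code_size);
    if (code == nullptr) {
      return false;  // Cache full; stay on the interpreter for this method.
    }
    uint8_t* data = cache->AddDataArray(self, table_begin, table_end);
    if (data == nullptr) {
      return false;  // Data region full; the reserved code goes unused.
    }
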
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 8a5461baee..f5a3a6bcad 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -42,6 +42,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
@@ -349,7 +350,7 @@ class JNI {
ScopedObjectAccess soa(env);
mirror::Object* obj_field = soa.Decode<mirror::Object*>(jlr_field);
if (obj_field->GetClass() != mirror::Field::StaticClass()) {
- // Not even a java.lang.reflect.Field, return nullptr.
+ // Not even a java.lang.reflect.Field, return null. TODO: is this check necessary?
return nullptr;
}
auto* field = static_cast<mirror::Field*>(obj_field);
@@ -361,19 +362,13 @@ class JNI {
ScopedObjectAccess soa(env);
mirror::ArtMethod* m = soa.DecodeMethod(mid);
CHECK(!kMovingMethods);
- ScopedLocalRef<jobject> art_method(env, soa.AddLocalReference<jobject>(m));
- jobject reflect_method;
+ mirror::AbstractMethod* method;
if (m->IsConstructor()) {
- reflect_method = env->AllocObject(WellKnownClasses::java_lang_reflect_Constructor);
+ method = mirror::Constructor::CreateFromArtMethod(soa.Self(), m);
} else {
- reflect_method = env->AllocObject(WellKnownClasses::java_lang_reflect_Method);
+ method = mirror::Method::CreateFromArtMethod(soa.Self(), m);
}
- if (env->ExceptionCheck()) {
- return nullptr;
- }
- SetObjectField(env, reflect_method,
- WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod, art_method.get());
- return reflect_method;
+ return soa.AddLocalReference<jobject>(method);
}
static jobject ToReflectedField(JNIEnv* env, jclass, jfieldID fid, jboolean) {
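
The rewritten ToReflectedMethod replaces a two-step dance (AllocObject on the well-known reflect class, then SetObjectField to patch in the artMethod) with a single factory call, so a partially initialized reflection object is never visible and the ExceptionCheck fallback disappears. A reduced sketch of the factory shape, with a hypothetical allocator standing in for the mirror types' internals:

    // Sketch only: Alloc is a hypothetical stand-in; CreateFromArtMethod is
    // the initializer added in mirror/abstract_method.cc below.
    template <typename MirrorT>
    MirrorT* CreateFromArtMethodSketch(Thread* self, mirror::ArtMethod* m) {
      MirrorT* obj = MirrorT::Alloc(self);
      if (obj == nullptr || !obj->CreateFromArtMethod(m)) {
        return nullptr;  // OOM or initialization failure.
      }
      return obj;  // Fully initialized before anyone can observe it.
    }
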
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 5516eab4f8..77db404ca8 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -882,7 +882,7 @@ TEST_F(JniInternalTest, FromReflectedMethod_ToReflectedMethod) {
}
static void BogusMethod() {
- // You can't pass nullptr function pointers to RegisterNatives.
+ // You can't pass null function pointers to RegisterNatives.
}
TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
@@ -1025,13 +1025,13 @@ TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
env_->set_region_fn(a, size - 1, size, nullptr); \
ExpectException(aioobe_); \
\
- /* It's okay for the buffer to be nullptr as long as the length is 0. */ \
+ /* It's okay for the buffer to be null as long as the length is 0. */ \
env_->get_region_fn(a, 2, 0, nullptr); \
/* Even if the offset is invalid... */ \
env_->get_region_fn(a, 123, 0, nullptr); \
ExpectException(aioobe_); \
\
- /* It's okay for the buffer to be nullptr as long as the length is 0. */ \
+ /* It's okay for the buffer to be null as long as the length is 0. */ \
env_->set_region_fn(a, 2, 0, nullptr); \
/* Even if the offset is invalid... */ \
env_->set_region_fn(a, 123, 0, nullptr); \
@@ -1200,7 +1200,7 @@ TEST_F(JniInternalTest, NewObjectArrayWithInitialValue) {
}
TEST_F(JniInternalTest, GetArrayLength) {
- // Already tested in NewObjectArray/NewPrimitiveArray except for NULL.
+ // Already tested in NewObjectArray/NewPrimitiveArray except for null.
CheckJniAbortCatcher jni_abort_catcher;
bool old_check_jni = vm_->SetCheckJniEnabled(false);
EXPECT_EQ(0, env_->GetArrayLength(nullptr));
@@ -1463,7 +1463,7 @@ TEST_F(JniInternalTest, GetStringRegion_GetStringUTFRegion) {
EXPECT_EQ('l', chars[2]);
EXPECT_EQ('x', chars[3]);
- // It's okay for the buffer to be nullptr as long as the length is 0.
+ // It's okay for the buffer to be null as long as the length is 0.
env_->GetStringRegion(s, 2, 0, nullptr);
// Even if the offset is invalid...
env_->GetStringRegion(s, 123, 0, nullptr);
@@ -1485,7 +1485,7 @@ TEST_F(JniInternalTest, GetStringRegion_GetStringUTFRegion) {
EXPECT_EQ('l', bytes[2]);
EXPECT_EQ('x', bytes[3]);
- // It's okay for the buffer to be nullptr as long as the length is 0.
+ // It's okay for the buffer to be null as long as the length is 0.
env_->GetStringUTFRegion(s, 2, 0, nullptr);
// Even if the offset is invalid...
env_->GetStringUTFRegion(s, 123, 0, nullptr);
@@ -1493,7 +1493,7 @@ TEST_F(JniInternalTest, GetStringRegion_GetStringUTFRegion) {
}
TEST_F(JniInternalTest, GetStringUTFChars_ReleaseStringUTFChars) {
- // Passing in a nullptr jstring is ignored normally, but caught by -Xcheck:jni.
+ // Passing in a null jstring is ignored normally, but caught by -Xcheck:jni.
bool old_check_jni = vm_->SetCheckJniEnabled(false);
{
CheckJniAbortCatcher check_jni_abort_catcher;
@@ -2102,7 +2102,7 @@ TEST_F(JniInternalTest, MonitorEnterExit) {
env_->ExceptionClear();
EXPECT_TRUE(env_->IsInstanceOf(thrown_exception, imse_class));
- // It's an error to call MonitorEnter or MonitorExit on nullptr.
+ // It's an error to call MonitorEnter or MonitorExit on null.
{
CheckJniAbortCatcher check_jni_abort_catcher;
env_->MonitorEnter(nullptr);
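
The contract these tests keep exercising: a null buffer is legal only when the requested region length is zero, because no bytes are copied; any nonzero length with a null buffer is caught by -Xcheck:jni. Illustrative calls, with env and s as in the tests:

    env->GetStringRegion(s, 2, 0, nullptr);  // OK: zero-length copy.
    env->GetStringRegion(s, 0, 1, nullptr);  // Aborts under -Xcheck:jni.
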
diff --git a/runtime/mapping_table.h b/runtime/mapping_table.h
index 79e6e94daa..dcd5f00217 100644
--- a/runtime/mapping_table.h
+++ b/runtime/mapping_table.h
@@ -106,7 +106,7 @@ class MappingTable {
const MappingTable* const table_; // The original table.
uint32_t element_; // A value in the range 0 to end_.
const uint32_t end_; // Equal to table_->DexToPcSize().
- const uint8_t* encoded_table_ptr_; // Either nullptr or points to encoded data after this entry.
+ const uint8_t* encoded_table_ptr_; // Either null or points to encoded data after this entry.
uint32_t native_pc_offset_; // The current value of native pc offset.
uint32_t dex_pc_; // The current value of dex pc.
};
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index edd2888168..959bb75c93 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -190,7 +190,7 @@ static bool CheckNonOverlapping(uintptr_t begin,
// the expected value, calling munmap if validation fails, giving the
// reason in error_msg.
//
-// If the expected_ptr is nullptr, nothing is checked beyond the fact
+// If the expected_ptr is null, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that actual_ptr == expected_ptr,
// and if not, report in error_msg what the conflict mapping was if
@@ -398,8 +398,8 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt
page_aligned_byte_count, prot, false);
}
-MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags, int fd,
- off_t start, bool reuse, const char* filename,
+MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags,
+ int fd, off_t start, bool reuse, const char* filename,
std::string* error_msg) {
CHECK_NE(0, prot);
CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
@@ -429,7 +429,8 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int p
size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
// The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file but
// not necessarily to virtual memory. mmap will page align 'expected' for us.
- uint8_t* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
+ uint8_t* page_aligned_expected =
+ (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
page_aligned_byte_count,
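
The split line above is the heart of MapFileAtAddress: mmap needs a page-aligned file offset, so the requested offset is decomposed into an aligned base plus a small page_offset, the mapping is lengthened to cover the remainder, and the caller's pointer is nudged back by the same amount. A worked example of the arithmetic, assuming 4 KiB pages:

    // start (file offset)   = 0x12345
    // page_offset           = 0x12345 % 4096        = 0x345
    // aligned file offset   = 0x12345 - 0x345       = 0x12000
    // byte_count            = 100
    // mapped length         = RoundUp(100 + 0x345, 4096) = 4096
    // page_aligned_expected = expected_ptr - 0x345 (when expected_ptr != nullptr)
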
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 11b2569c30..dc6d93536d 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -53,24 +53,25 @@ static constexpr bool kMadviseZeroes = false;
class MemMap {
public:
// Request an anonymous region of length 'byte_count' and a requested base address.
- // Use NULL as the requested base address if you don't care.
+ // Use null as the requested base address if you don't care.
// "reuse" allows re-mapping an address range from an existing mapping.
//
// The word "anonymous" in this context means "not backed by a file". The supplied
// 'ashmem_name' will be used -- on systems that support it -- to give the mapping
// a name.
//
- // On success, returns returns a MemMap instance. On failure, returns a NULL;
+ // On success, returns a MemMap instance. On failure, returns null.
static MemMap* MapAnonymous(const char* ashmem_name, uint8_t* addr, size_t byte_count, int prot,
bool low_4gb, bool reuse, std::string* error_msg);
// Map part of a file, taking care of non-page aligned offsets. The
// "start" offset is absolute, not relative.
//
- // On success, returns returns a MemMap instance. On failure, returns a NULL;
+ // On success, returns a MemMap instance. On failure, returns null.
static MemMap* MapFile(size_t byte_count, int prot, int flags, int fd, off_t start,
const char* filename, std::string* error_msg) {
- return MapFileAtAddress(NULL, byte_count, prot, flags, fd, start, false, filename, error_msg);
+ return MapFileAtAddress(
+ nullptr, byte_count, prot, flags, fd, start, false, filename, error_msg);
}
// Map part of a file, taking care of non-page aligned offsets. The
@@ -79,13 +80,12 @@ class MemMap {
// mapping. "reuse" allows us to create a view into an existing
// mapping where we do not take ownership of the memory.
//
- // On success, returns returns a MemMap instance. On failure, returns a
- // nullptr;
+ // On success, returns a MemMap instance. On failure, returns null.
static MemMap* MapFileAtAddress(uint8_t* addr, size_t byte_count, int prot, int flags, int fd,
off_t start, bool reuse, const char* filename,
std::string* error_msg);
- // Releases the memory mapping
+ // Releases the memory mapping.
~MemMap() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
const std::string& GetName() const {
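
A hedged usage sketch for the API documented above: pass null as the requested address when placement does not matter, and on failure expect a null return with the reason written to error_msg (the region name and size here are assumptions):

    std::string error_msg;
    MemMap* map = MemMap::MapAnonymous("example-region",        // ashmem name
                                       nullptr,                 // any address
                                       4 * kPageSize,
                                       PROT_READ | PROT_WRITE,
                                       false,                   // low_4gb
                                       false,                   // reuse
                                       &error_msg);
    if (map == nullptr) {
      LOG(ERROR) << "MapAnonymous failed: " << error_msg;
    }
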
diff --git a/runtime/memory_region.cc b/runtime/memory_region.cc
index 06eba0ff1c..a5c70c3b26 100644
--- a/runtime/memory_region.cc
+++ b/runtime/memory_region.cc
@@ -25,7 +25,7 @@
namespace art {
void MemoryRegion::CopyFrom(size_t offset, const MemoryRegion& from) const {
- CHECK(from.pointer() != NULL);
+ CHECK(from.pointer() != nullptr);
CHECK_GT(from.size(), 0U);
CHECK_GE(this->size(), from.size());
CHECK_LE(offset, this->size() - from.size());
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
new file mode 100644
index 0000000000..81c656b6fa
--- /dev/null
+++ b/runtime/mirror/abstract_method.cc
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "abstract_method.h"
+
+#include "mirror/art_method-inl.h"
+
+namespace art {
+namespace mirror {
+
+bool AbstractMethod::CreateFromArtMethod(mirror::ArtMethod* method) {
+ auto* interface_method = method->GetInterfaceMethodIfProxy();
+ SetFieldObject<false>(ArtMethodOffset(), method);
+ SetFieldObject<false>(DeclaringClassOffset(), method->GetDeclaringClass());
+ SetFieldObject<false>(
+ DeclaringClassOfOverriddenMethodOffset(), interface_method->GetDeclaringClass());
+ SetField32<false>(AccessFlagsOffset(), method->GetAccessFlags());
+ SetField32<false>(DexMethodIndexOffset(), method->GetDexMethodIndex());
+ return true;
+}
+
+mirror::ArtMethod* AbstractMethod::GetArtMethod() {
+ return GetFieldObject<mirror::ArtMethod>(ArtMethodOffset());
+}
+
+mirror::Class* AbstractMethod::GetDeclaringClass() {
+ return GetFieldObject<mirror::Class>(DeclaringClassOffset());
+}
+
+} // namespace mirror
+} // namespace art
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
new file mode 100644
index 0000000000..ef51d7f4ae
--- /dev/null
+++ b/runtime/mirror/abstract_method.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_
+#define ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_
+
+#include "accessible_object.h"
+#include "gc_root.h"
+#include "object.h"
+#include "object_callbacks.h"
+#include "read_barrier_option.h"
+
+namespace art {
+
+struct AbstractMethodOffsets;
+
+namespace mirror {
+
+class ArtMethod;
+
+// C++ mirror of java.lang.reflect.AbstractMethod.
+class MANAGED AbstractMethod : public AccessibleObject {
+ public:
+ // Called from Constructor::CreateFromArtMethod and Method::CreateFromArtMethod.
+ bool CreateFromArtMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::ArtMethod* GetArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ static MemberOffset ArtMethodOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, art_method_));
+ }
+ static MemberOffset DeclaringClassOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, declaring_class_));
+ }
+ static MemberOffset DeclaringClassOfOverriddenMethodOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, declaring_class_of_overridden_method_));
+ }
+ static MemberOffset AccessFlagsOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, access_flags_));
+ }
+ static MemberOffset DexMethodIndexOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, dex_method_index_));
+ }
+
+ HeapReference<mirror::ArtMethod> art_method_;
+ HeapReference<mirror::Class> declaring_class_;
+ HeapReference<mirror::Class> declaring_class_of_overridden_method_;
+ uint32_t access_flags_;
+ uint32_t dex_method_index_;
+
+ friend struct art::AbstractMethodOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethod);
+};
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_
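
The private accessors above follow a standard mirror-class pattern: each managed field's byte offset is exposed as a MemberOffset, so the runtime reads and writes the Java object through offsets via SetFieldObject/GetFieldObject (which apply the GC write barriers uniformly) rather than through C++ member access. The pattern in isolation, reduced to standard C++ (illustrative, not the ART macros):

    #include <cstddef>
    #include <cstdint>

    struct MemberOffsetSketch { size_t value; };

    struct AbstractMethodSketch {
      uint32_t access_flags_;
      uint32_t dex_method_index_;

      // OFFSETOF_MEMBER boils down to offsetof for standard-layout types.
      static MemberOffsetSketch AccessFlagsOffset() {
        return MemberOffsetSketch{offsetof(AbstractMethodSketch, access_flags_)};
      }
    };
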
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index a300d5237e..0f306e8699 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -36,7 +36,7 @@ namespace art {
namespace mirror {
inline uint32_t ArtMethod::ClassSize() {
- uint32_t vtable_entries = Object::kVTableLength + 7;
+ uint32_t vtable_entries = Object::kVTableLength;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
}
@@ -48,7 +48,7 @@ inline Class* ArtMethod::GetJavaLangReflectArtMethod() {
inline Class* ArtMethod::GetDeclaringClass() {
Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_));
- DCHECK(result != NULL) << this;
+ DCHECK(result != nullptr) << this;
DCHECK(result->IsIdxLoaded() || result->IsErroneous()) << this;
return result;
}
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 92aea1f3cc..543cf9bae3 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -16,6 +16,7 @@
#include "art_method.h"
+#include "abstract_method.h"
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
@@ -53,14 +54,11 @@ GcRoot<Class> ArtMethod::java_lang_reflect_ArtMethod_;
ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method) {
- ArtField* f =
- soa.DecodeField(WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod);
- mirror::ArtMethod* method = f->GetObject(soa.Decode<mirror::Object*>(jlr_method))->AsArtMethod();
- DCHECK(method != nullptr);
- return method;
+ auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(jlr_method);
+ DCHECK(abstract_method != nullptr);
+ return abstract_method->GetArtMethod();
}
-
void ArtMethod::VisitRoots(RootVisitor* visitor) {
java_lang_reflect_ArtMethod_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
@@ -364,7 +362,7 @@ const void* ArtMethod::GetQuickOatEntryPoint(size_t pointer_size) {
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, pointer_size);
- // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
+ // On failure, instead of null we get the quick-generic-jni-trampoline for native method
// indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
// for non-native methods.
if (class_linker->IsQuickToInterpreterBridge(code) ||
@@ -505,7 +503,7 @@ QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
ClassLinker* class_linker = runtime->GetClassLinker();
- // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
+ // On failure, instead of null we get the quick-generic-jni-trampoline for native method
// indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
// for non-native methods. And we really shouldn't see a failure for non-native methods here.
DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
@@ -547,5 +545,31 @@ void ArtMethod::UnregisterNative() {
RegisterNative(GetJniDlsymLookupStub(), false);
}
+bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params) {
+ auto* dex_cache = GetDexCache();
+ auto* dex_file = dex_cache->GetDexFile();
+ const auto& method_id = dex_file->GetMethodId(GetDexMethodIndex());
+ const auto& proto_id = dex_file->GetMethodPrototype(method_id);
+ const DexFile::TypeList* proto_params = dex_file->GetProtoParameters(proto_id);
+ auto count = proto_params != nullptr ? proto_params->Size() : 0u;
+ auto param_len = params.Get() != nullptr ? params->GetLength() : 0u;
+ if (param_len != count) {
+ return false;
+ }
+ auto* cl = Runtime::Current()->GetClassLinker();
+ for (size_t i = 0; i < count; ++i) {
+ auto type_idx = proto_params->GetTypeItem(i).type_idx_;
+ auto* type = cl->ResolveType(type_idx, this);
+ if (type == nullptr) {
+ Thread::Current()->AssertPendingException();
+ return false;
+ }
+ if (type != params->GetWithoutChecks(i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
} // namespace mirror
} // namespace art
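
EqualParameters, added above, compares a method's dex prototype against an array of Class objects, resolving each parameter type on demand; a null array is treated as an empty parameter list. Because resolution can leave an exception pending, a caller must distinguish "no match" from "resolution threw", as in this sketch (mirroring the loop in Class::GetDeclaredConstructor below):

    if (m->EqualParameters(args)) {
      return m;                    // Signatures match.
    }
    if (self->IsExceptionPending()) {
      return nullptr;              // A parameter type failed to resolve.
    }
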
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 55b8068d99..0da5925b6c 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -341,10 +341,10 @@ class MANAGED ArtMethod FINAL : public Object {
return reinterpret_cast<const void*>(code);
}
- // Actual entry point pointer to compiled oat code or nullptr.
+ // Actual entry point pointer to compiled oat code or null.
const void* GetQuickOatEntryPoint(size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Actual pointer to compiled oat code or nullptr.
+ // Actual pointer to compiled oat code or null.
const void* GetQuickOatCodePointer(size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
@@ -541,6 +541,10 @@ class MANAGED ArtMethod FINAL : public Object {
ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // May cause thread suspension due to class resolution.
+ bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static size_t SizeWithoutPointerFields(size_t pointer_size) {
size_t total = sizeof(ArtMethod) - sizeof(PtrSizedFields);
#ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index aaa66f9579..712286f4e5 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -66,7 +66,7 @@ inline ObjectArray<ArtMethod>* Class::GetDirectMethods() {
inline void Class::SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(NULL == GetFieldObject<ObjectArray<ArtMethod>>(
+ DCHECK(nullptr == GetFieldObject<ObjectArray<ArtMethod>>(
OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_)));
DCHECK_NE(0, new_direct_methods->GetLength());
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), new_direct_methods);
@@ -85,7 +85,7 @@ inline void Class::SetDirectMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t
// Returns the number of static, private, and constructor methods.
inline uint32_t Class::NumDirectMethods() {
- return (GetDirectMethods() != NULL) ? GetDirectMethods()->GetLength() : 0;
+ return (GetDirectMethods() != nullptr) ? GetDirectMethods()->GetLength() : 0;
}
template<VerifyObjectFlags kVerifyFlags>
@@ -102,7 +102,7 @@ inline void Class::SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods
}
inline uint32_t Class::NumVirtualMethods() {
- return (GetVirtualMethods() != NULL) ? GetVirtualMethods()->GetLength() : 0;
+ return (GetVirtualMethods() != nullptr) ? GetVirtualMethods()->GetLength() : 0;
}
template<VerifyObjectFlags kVerifyFlags>
@@ -186,7 +186,7 @@ inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) {
}
inline bool Class::Implements(Class* klass) {
- DCHECK(klass != NULL);
+ DCHECK(klass != nullptr);
DCHECK(klass->IsInterface()) << PrettyClass(this);
// All interfaces implemented directly and by our superclass, and
// recursively all super-interfaces of those interfaces, are listed
@@ -233,8 +233,8 @@ inline bool Class::IsAssignableFromArray(Class* src) {
// If "this" is not also an array, it must be Object.
// src's super should be java_lang_Object, since it is an array.
Class* java_lang_Object = src->GetSuperClass();
- DCHECK(java_lang_Object != NULL) << PrettyClass(src);
- DCHECK(java_lang_Object->GetSuperClass() == NULL) << PrettyClass(src);
+ DCHECK(java_lang_Object != nullptr) << PrettyClass(src);
+ DCHECK(java_lang_Object->GetSuperClass() == nullptr) << PrettyClass(src);
return this == java_lang_Object;
}
return IsArrayAssignableFromArray(src);
@@ -335,13 +335,13 @@ inline bool Class::IsSubClass(Class* klass) {
return true;
}
current = current->GetSuperClass();
- } while (current != NULL);
+ } while (current != nullptr);
return false;
}
inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method) {
Class* declaring_class = method->GetDeclaringClass();
- DCHECK(declaring_class != NULL) << PrettyClass(this);
+ DCHECK(declaring_class != nullptr) << PrettyClass(this);
DCHECK(declaring_class->IsInterface()) << PrettyMethod(method);
// TODO cache to improve lookup speed
int32_t iftable_count = GetIfTableCount();
@@ -351,7 +351,7 @@ inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method) {
return iftable->GetMethodArray(i)->Get(method->GetMethodIndex());
}
}
- return NULL;
+ return nullptr;
}
inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method) {
@@ -382,7 +382,7 @@ inline IfTable* Class::GetIfTable() {
inline int32_t Class::GetIfTableCount() {
IfTable* iftable = GetIfTable();
- if (iftable == NULL) {
+ if (iftable == nullptr) {
return 0;
}
return iftable->Count();
@@ -484,7 +484,7 @@ inline void Class::SetClinitThreadId(pid_t new_clinit_thread_id) {
}
inline void Class::SetVerifyErrorClass(Class* klass) {
- CHECK(klass != NULL) << PrettyClass(this);
+ CHECK(klass != nullptr) << PrettyClass(this);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass);
} else {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 2afb4af0e3..1739019755 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -25,6 +25,7 @@
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "handle_scope-inl.h"
+#include "method.h"
#include "object_array-inl.h"
#include "object-inl.h"
#include "runtime.h"
@@ -876,5 +877,26 @@ bool Class::ProxyDescriptorEquals(const char* match) {
return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this) == match;
}
+mirror::ArtMethod* Class::GetDeclaredConstructor(
+ Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args) {
+ auto* direct_methods = GetDirectMethods();
+ size_t count = direct_methods != nullptr ? direct_methods->GetLength() : 0u;
+ for (size_t i = 0; i < count; ++i) {
+ auto* m = direct_methods->GetWithoutChecks(i);
+ // Skip <clinit>, which is a static constructor, as well as non-constructors.
+ if (m->IsStatic() || !m->IsConstructor()) {
+ continue;
+ }
+ // May cause thread suspension and exceptions.
+ if (m->EqualParameters(args)) {
+ return m;
+ }
+ if (self->IsExceptionPending()) {
+ return nullptr;
+ }
+ }
+ return nullptr;
+}
+
} // namespace mirror
} // namespace art
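
A hedged caller sketch for the new GetDeclaredConstructor: a null handle for args means a zero-argument constructor, and a null result means either "not found" or "a parameter type failed to resolve", which is why the pending-exception check matters (the names around the call are assumptions):

    StackHandleScope<1> hs(self);
    Handle<mirror::ObjectArray<mirror::Class>> args(
        hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr));
    mirror::ArtMethod* ctor = klass->GetDeclaredConstructor(self, args);
    if (ctor == nullptr && self->IsExceptionPending()) {
      return nullptr;  // Resolution threw inside EqualParameters.
    }
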
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 20f23877e6..18496fdce3 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -47,6 +47,7 @@ namespace mirror {
class ArtMethod;
class ClassLoader;
+class Constructor;
class DexCache;
class IfTable;
@@ -399,7 +400,7 @@ class MANAGED Class FINAL : public Object {
// Depth of class from java.lang.Object
uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t depth = 0;
- for (Class* klass = this; klass->GetSuperClass() != NULL; klass = klass->GetSuperClass()) {
+ for (Class* klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) {
depth++;
}
return depth;
@@ -408,7 +409,7 @@ class MANAGED Class FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetComponentType<kVerifyFlags, kReadBarrierOption>() != NULL;
+ return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -436,8 +437,8 @@ class MANAGED Class FINAL : public Object {
}
void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(GetComponentType() == NULL);
- DCHECK(new_component_type != NULL);
+ DCHECK(GetComponentType() == nullptr);
+ DCHECK(new_component_type != nullptr);
// Component type is invariant: use non-transactional mode without check.
SetFieldObject<false, false>(ComponentTypeOffset(), new_component_type);
}
@@ -453,7 +454,7 @@ class MANAGED Class FINAL : public Object {
}
bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return !IsPrimitive() && GetSuperClass() == NULL;
+ return !IsPrimitive() && GetSuperClass() == nullptr;
}
bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -610,7 +611,7 @@ class MANAGED Class FINAL : public Object {
// that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
// to themselves. Classes for primitive types may not assign to each other.
ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(src != NULL);
+ DCHECK(src != nullptr);
if (this == src) {
// Can always assign to things of the same type.
return true;
@@ -637,7 +638,7 @@ class MANAGED Class FINAL : public Object {
}
bool HasSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetSuperClass() != NULL;
+ return GetSuperClass() != nullptr;
}
static MemberOffset SuperClassOffset() {
@@ -1052,6 +1053,11 @@ class MANAGED Class FINAL : public Object {
return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_);
}
+ // May cause thread suspension due to EqualParameters.
+ mirror::ArtMethod* GetDeclaredConstructor(
+ Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
// fence.
class InitializeClassVisitor {
@@ -1097,14 +1103,14 @@ class MANAGED Class FINAL : public Object {
bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // defining class loader, or NULL for the "bootstrap" system loader
+ // Defining class loader, or null for the "bootstrap" system loader.
HeapReference<ClassLoader> class_loader_;
// For array classes, the component class object for instanceof/checkcast
- // (for String[][][], this will be String[][]). NULL for non-array classes.
+ // (for String[][][], this will be String[][]). null for non-array classes.
HeapReference<Class> component_type_;
- // DexCache of resolved constant pool entries (will be NULL for classes generated by the
+ // DexCache of resolved constant pool entries (will be null for classes generated by the
// runtime such as arrays and primitive classes).
HeapReference<DexCache> dex_cache_;
@@ -1130,7 +1136,7 @@ class MANAGED Class FINAL : public Object {
// Descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName
HeapReference<String> name_;
- // The superclass, or NULL if this is java.lang.Object, an interface or primitive type.
+ // The superclass, or null if this is java.lang.Object, an interface or primitive type.
HeapReference<Class> super_class_;
// If class verify fails, we must return same error on subsequent tries.
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 1d6846b6c4..228fce5314 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -34,10 +34,10 @@ class DexCacheTest : public CommonRuntimeTest {};
TEST_F(DexCacheTest, Open) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
- ASSERT_TRUE(java_lang_dex_file_ != NULL);
+ ASSERT_TRUE(java_lang_dex_file_ != nullptr);
Handle<DexCache> dex_cache(
hs.NewHandle(class_linker_->AllocDexCache(soa.Self(), *java_lang_dex_file_)));
- ASSERT_TRUE(dex_cache.Get() != NULL);
+ ASSERT_TRUE(dex_cache.Get() != nullptr);
EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings());
EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes());
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index 9988f84a72..d927f0c258 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -82,15 +82,12 @@ class MANAGED Field : public AccessibleObject {
}
static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Slow, try to use only for PrettyField and such.
ArtField* GetArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/iftable-inl.h b/runtime/mirror/iftable-inl.h
index d1309d294f..b465d070a6 100644
--- a/runtime/mirror/iftable-inl.h
+++ b/runtime/mirror/iftable-inl.h
@@ -23,7 +23,7 @@ namespace art {
namespace mirror {
inline void IfTable::SetInterface(int32_t i, Class* interface) {
- DCHECK(interface != NULL);
+ DCHECK(interface != nullptr);
DCHECK(interface->IsInterface());
const size_t idx = i * kMax + kInterface;
DCHECK_EQ(Get(idx), static_cast<Object*>(nullptr));
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 4d899d2bf9..1c1c7b30eb 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -27,7 +27,7 @@ class MANAGED IfTable FINAL : public ObjectArray<Object> {
public:
ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
- DCHECK(interface != NULL);
+ DCHECK(interface != nullptr);
return interface;
}
@@ -37,14 +37,14 @@ class MANAGED IfTable FINAL : public ObjectArray<Object> {
ObjectArray<ArtMethod>* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtMethod>* method_array =
down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
- DCHECK(method_array != NULL);
+ DCHECK(method_array != nullptr);
return method_array;
}
size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtMethod>* method_array =
down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
- if (method_array == NULL) {
+ if (method_array == nullptr) {
return 0;
}
return method_array->GetLength();
@@ -52,8 +52,8 @@ class MANAGED IfTable FINAL : public ObjectArray<Object> {
void SetMethodArray(int32_t i, ObjectArray<ArtMethod>* new_ma)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(new_ma != NULL);
- DCHECK(Get((i * kMax) + kMethodArray) == NULL);
+ DCHECK(new_ma != nullptr);
+ DCHECK(Get((i * kMax) + kMethodArray) == nullptr);
Set<false>((i * kMax) + kMethodArray, new_ma);
}
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
new file mode 100644
index 0000000000..81530bb130
--- /dev/null
+++ b/runtime/mirror/method.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "method.h"
+
+#include "mirror/art_method.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+namespace mirror {
+
+GcRoot<Class> Method::static_class_;
+GcRoot<Class> Method::array_class_;
+GcRoot<Class> Constructor::static_class_;
+GcRoot<Class> Constructor::array_class_;
+
+void Method::SetClass(Class* klass) {
+ CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ static_class_ = GcRoot<Class>(klass);
+}
+
+void Method::ResetClass() {
+ CHECK(!static_class_.IsNull());
+ static_class_ = GcRoot<Class>(nullptr);
+}
+
+void Method::SetArrayClass(Class* klass) {
+ CHECK(array_class_.IsNull()) << array_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ array_class_ = GcRoot<Class>(klass);
+}
+
+void Method::ResetArrayClass() {
+ CHECK(!array_class_.IsNull());
+ array_class_ = GcRoot<Class>(nullptr);
+}
+
+Method* Method::CreateFromArtMethod(Thread* self, mirror::ArtMethod* method) {
+ DCHECK(!method->IsConstructor()) << PrettyMethod(method);
+ auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
+ if (LIKELY(ret != nullptr)) {
+ static_cast<AbstractMethod*>(ret)->CreateFromArtMethod(method);
+ }
+ return ret;
+}
+
+void Method::VisitRoots(RootVisitor* visitor) {
+ static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+ array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+void Constructor::SetClass(Class* klass) {
+ CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ static_class_ = GcRoot<Class>(klass);
+}
+
+void Constructor::ResetClass() {
+ CHECK(!static_class_.IsNull());
+ static_class_ = GcRoot<Class>(nullptr);
+}
+
+void Constructor::SetArrayClass(Class* klass) {
+ CHECK(array_class_.IsNull()) << array_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ array_class_ = GcRoot<Class>(klass);
+}
+
+void Constructor::ResetArrayClass() {
+ CHECK(!array_class_.IsNull());
+ array_class_ = GcRoot<Class>(nullptr);
+}
+
+void Constructor::VisitRoots(RootVisitor* visitor) {
+ static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+ array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+Constructor* Constructor::CreateFromArtMethod(Thread* self, mirror::ArtMethod* method) {
+ DCHECK(method->IsConstructor()) << PrettyMethod(method);
+ auto* ret = down_cast<Constructor*>(StaticClass()->AllocObject(self));
+ if (LIKELY(ret != nullptr)) {
+ static_cast<AbstractMethod*>(ret)->CreateFromArtMethod(method);
+ }
+ return ret;
+}
+
+} // namespace mirror
+} // namespace art
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
new file mode 100644
index 0000000000..88100f08e2
--- /dev/null
+++ b/runtime/mirror/method.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_METHOD_H_
+#define ART_RUNTIME_MIRROR_METHOD_H_
+
+#include "abstract_method.h"
+#include "gc_root.h"
+
+namespace art {
+namespace mirror {
+
+class Class;
+
+// C++ mirror of java.lang.reflect.Method.
+class MANAGED Method : public AbstractMethod {
+ public:
+ static Method* CreateFromArtMethod(Thread* self, mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return static_class_.Read();
+ }
+
+ static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return array_class_.Read();
+ }
+
+ static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ static GcRoot<Class> static_class_; // java.lang.reflect.Method.class.
+ static GcRoot<Class> array_class_; // [java.lang.reflect.Method.class.
+
+ DISALLOW_COPY_AND_ASSIGN(Method);
+};
+
+// C++ mirror of java.lang.reflect.Constructor.
+class MANAGED Constructor : public AbstractMethod {
+ public:
+ static Constructor* CreateFromArtMethod(Thread* self, mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return static_class_.Read();
+ }
+
+ static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return array_class_.Read();
+ }
+
+ static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ static GcRoot<Class> static_class_; // java.lang.reflect.Constructor.class.
+ static GcRoot<Class> array_class_; // [java.lang.reflect.Constructor.class.
+
+ DISALLOW_COPY_AND_ASSIGN(Constructor);
+};
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_METHOD_H_
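[Editor's note: the two new mirror classes share one pattern — the resolved java.lang.reflect class and its array class are pinned in static GcRoots so CreateFromArtMethod can allocate without a dex-cache lookup, and VisitRoots reports both to the GC as sticky-class roots. A hedged sketch of the expected call order at runtime start-up; the linker variable and resolution calls are assumptions, not code from this change.]

void WireReflectMethodClassSketch(Thread* self, ClassLinker* linker)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Resolve once, then pin; SetClass CHECKs that the root was still empty.
  mirror::Class* klass =
      linker->FindSystemClass(self, "Ljava/lang/reflect/Method;");
  mirror::Method::SetClass(klass);
  mirror::Class* array_klass =
      linker->FindSystemClass(self, "[Ljava/lang/reflect/Method;");
  mirror::Method::SetArrayClass(array_klass);
  // On shutdown the roots are cleared in the reverse direction:
  //   mirror::Method::ResetArrayClass();
  //   mirror::Method::ResetClass();
}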
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index af0e85603b..2581fad740 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -48,7 +48,7 @@ inline Class* Object::GetClass() {
template<VerifyObjectFlags kVerifyFlags>
inline void Object::SetClass(Class* new_klass) {
- // new_klass may be NULL prior to class linker initialization.
+ // new_klass may be null prior to class linker initialization.
// We don't mark the card as this occurs as part of object allocation. Not all objects have
// backing cards, such as large objects.
// We use non transactional version since we can't undo this write. We also disable checking as
@@ -179,15 +179,15 @@ inline void Object::AssertReadBarrierPointer() const {
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::VerifierInstanceOf(Class* klass) {
- DCHECK(klass != NULL);
- DCHECK(GetClass<kVerifyFlags>() != NULL);
+ DCHECK(klass != nullptr);
+ DCHECK(GetClass<kVerifyFlags>() != nullptr);
return klass->IsInterface() || InstanceOf(klass);
}
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::InstanceOf(Class* klass) {
- DCHECK(klass != NULL);
- DCHECK(GetClass<kVerifyNone>() != NULL);
+ DCHECK(klass != nullptr);
+ DCHECK(GetClass<kVerifyNone>() != nullptr);
return klass->IsAssignableFrom(GetClass<kVerifyFlags>());
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 04d0cd874a..5dac985299 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -203,7 +203,7 @@ void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_val
!runtime->GetHeap()->IsObjectValidationEnabled() || !c->IsResolved()) {
return;
}
- for (Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) {
+ for (Class* cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
ArtField* fields = cur->GetIFields();
for (size_t i = 0, count = cur->NumInstanceFields(); i < count; ++i) {
StackHandleScope<1> hs(Thread::Current());
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 30bc1cd66e..d473816448 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -57,14 +57,14 @@ template<class T>
inline T* ObjectArray<T>::Get(int32_t i) {
if (!CheckIsValidIndex(i)) {
DCHECK(Thread::Current()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
return GetFieldObject<T>(OffsetOfElement(i));
}
template<class T> template<VerifyObjectFlags kVerifyFlags>
inline bool ObjectArray<T>::CheckAssignable(T* object) {
- if (object != NULL) {
+ if (object != nullptr) {
Class* element_class = GetClass<kVerifyFlags>()->GetComponentType();
if (UNLIKELY(!object->InstanceOf(element_class))) {
ThrowArrayStoreException(object);
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 747a00836a..2262af5a9c 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -61,11 +61,12 @@ class ObjectTest : public CommonRuntimeTest {
Handle<String> string(
hs.NewHandle(String::AllocFromModifiedUtf8(self, expected_utf16_length, utf8_in)));
ASSERT_EQ(expected_utf16_length, string->GetLength());
- ASSERT_TRUE(string->GetCharArray() != NULL);
- ASSERT_TRUE(string->GetCharArray()->GetData() != NULL);
+ ASSERT_TRUE(string->GetCharArray() != nullptr);
+ ASSERT_TRUE(string->GetCharArray()->GetData() != nullptr);
// strlen is necessary because the 1-character string "\x00\x00" is interpreted as ""
ASSERT_TRUE(string->Equals(utf8_in) || (expected_utf16_length == 1 && strlen(utf8_in) == 0));
- ASSERT_TRUE(string->Equals(StringPiece(utf8_in)) || (expected_utf16_length == 1 && strlen(utf8_in) == 0));
+ ASSERT_TRUE(string->Equals(StringPiece(utf8_in)) ||
+ (expected_utf16_length == 1 && strlen(utf8_in) == 0));
for (int32_t i = 0; i < expected_utf16_length; i++) {
EXPECT_EQ(utf16_expected[i], string->UncheckedCharAt(i));
}
@@ -110,11 +111,11 @@ TEST_F(ObjectTest, AllocObjectArray) {
Handle<ObjectArray<Object>> oa(
hs.NewHandle(class_linker_->AllocObjectArray<Object>(soa.Self(), 2)));
EXPECT_EQ(2, oa->GetLength());
- EXPECT_TRUE(oa->Get(0) == NULL);
- EXPECT_TRUE(oa->Get(1) == NULL);
+ EXPECT_TRUE(oa->Get(0) == nullptr);
+ EXPECT_TRUE(oa->Get(1) == nullptr);
oa->Set<false>(0, oa.Get());
EXPECT_TRUE(oa->Get(0) == oa.Get());
- EXPECT_TRUE(oa->Get(1) == NULL);
+ EXPECT_TRUE(oa->Get(1) == nullptr);
oa->Set<false>(1, oa.Get());
EXPECT_TRUE(oa->Get(0) == oa.Get());
EXPECT_TRUE(oa->Get(1) == oa.Get());
@@ -122,17 +123,17 @@ TEST_F(ObjectTest, AllocObjectArray) {
Class* aioobe = class_linker_->FindSystemClass(soa.Self(),
"Ljava/lang/ArrayIndexOutOfBoundsException;");
- EXPECT_TRUE(oa->Get(-1) == NULL);
+ EXPECT_TRUE(oa->Get(-1) == nullptr);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
- EXPECT_TRUE(oa->Get(2) == NULL);
+ EXPECT_TRUE(oa->Get(2) == nullptr);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
- ASSERT_TRUE(oa->GetClass() != NULL);
+ ASSERT_TRUE(oa->GetClass() != nullptr);
Handle<mirror::Class> klass(hs.NewHandle(oa->GetClass()));
ASSERT_EQ(2U, klass->NumDirectInterfaces());
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;"),
@@ -308,13 +309,14 @@ TEST_F(ObjectTest, CheckAndAllocArrayFromCode) {
Class* java_util_Arrays = class_linker_->FindSystemClass(soa.Self(), "Ljava/util/Arrays;");
ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V");
const DexFile::StringId* string_id = java_lang_dex_file_->FindStringId("[I");
- ASSERT_TRUE(string_id != NULL);
+ ASSERT_TRUE(string_id != nullptr);
const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(
java_lang_dex_file_->GetIndexForStringId(*string_id));
- ASSERT_TRUE(type_id != NULL);
+ ASSERT_TRUE(type_id != nullptr);
uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id);
- Object* array = CheckAndAllocArrayFromCodeInstrumented(type_idx, 3, sort, Thread::Current(), false,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ Object* array = CheckAndAllocArrayFromCodeInstrumented(
+ type_idx, 3, sort, Thread::Current(), false,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
EXPECT_TRUE(array->IsArrayInstance());
EXPECT_EQ(3, array->AsArray()->GetLength());
EXPECT_TRUE(array->GetClass()->IsArrayClass());
@@ -367,36 +369,36 @@ TEST_F(ObjectTest, StaticFieldFromCode) {
Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader);
ArtMethod* clinit = klass->FindClassInitializer();
const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;");
- ASSERT_TRUE(klass_string_id != NULL);
+ ASSERT_TRUE(klass_string_id != nullptr);
const DexFile::TypeId* klass_type_id = dex_file->FindTypeId(
dex_file->GetIndexForStringId(*klass_string_id));
- ASSERT_TRUE(klass_type_id != NULL);
+ ASSERT_TRUE(klass_type_id != nullptr);
const DexFile::StringId* type_string_id = dex_file->FindStringId("Ljava/lang/Object;");
- ASSERT_TRUE(type_string_id != NULL);
+ ASSERT_TRUE(type_string_id != nullptr);
const DexFile::TypeId* type_type_id = dex_file->FindTypeId(
dex_file->GetIndexForStringId(*type_string_id));
- ASSERT_TRUE(type_type_id != NULL);
+ ASSERT_TRUE(type_type_id != nullptr);
const DexFile::StringId* name_str_id = dex_file->FindStringId("s0");
- ASSERT_TRUE(name_str_id != NULL);
+ ASSERT_TRUE(name_str_id != nullptr);
const DexFile::FieldId* field_id = dex_file->FindFieldId(
*klass_type_id, *name_str_id, *type_type_id);
- ASSERT_TRUE(field_id != NULL);
+ ASSERT_TRUE(field_id != nullptr);
uint32_t field_idx = dex_file->GetIndexForFieldId(*field_id);
ArtField* field = FindFieldFromCode<StaticObjectRead, true>(field_idx, clinit, Thread::Current(),
sizeof(HeapReference<Object>));
Object* s0 = field->GetObj(klass);
- EXPECT_TRUE(s0 != NULL);
+ EXPECT_TRUE(s0 != nullptr);
Handle<CharArray> char_array(hs.NewHandle(CharArray::Alloc(soa.Self(), 0)));
field->SetObj<false>(field->GetDeclaringClass(), char_array.Get());
EXPECT_EQ(char_array.Get(), field->GetObj(klass));
- field->SetObj<false>(field->GetDeclaringClass(), NULL);
- EXPECT_EQ(NULL, field->GetObj(klass));
+ field->SetObj<false>(field->GetDeclaringClass(), nullptr);
+ EXPECT_EQ(nullptr, field->GetObj(klass));
// TODO: more exhaustive tests of all 6 cases of ArtField::*FromCode
}
@@ -416,13 +418,15 @@ TEST_F(ObjectTest, String) {
AssertString(1, "\xc2\x80", "\x00\x80", 0x80);
AssertString(1, "\xd9\xa6", "\x06\x66", 0x0666);
AssertString(1, "\xdf\xbf", "\x07\xff", 0x07ff);
- AssertString(3, "h\xd9\xa6i", "\x00\x68\x06\x66\x00\x69", (31 * ((31 * 0x68) + 0x0666)) + 0x69);
+ AssertString(3, "h\xd9\xa6i", "\x00\x68\x06\x66\x00\x69",
+ (31 * ((31 * 0x68) + 0x0666)) + 0x69);
// Test three-byte characters.
AssertString(1, "\xe0\xa0\x80", "\x08\x00", 0x0800);
AssertString(1, "\xe1\x88\xb4", "\x12\x34", 0x1234);
AssertString(1, "\xef\xbf\xbf", "\xff\xff", 0xffff);
- AssertString(3, "h\xe1\x88\xb4i", "\x00\x68\x12\x34\x00\x69", (31 * ((31 * 0x68) + 0x1234)) + 0x69);
+ AssertString(3, "h\xe1\x88\xb4i", "\x00\x68\x12\x34\x00\x69",
+ (31 * ((31 * 0x68) + 0x1234)) + 0x69);
// Test four-byte characters.
AssertString(2, "\xf0\x9f\x8f\xa0", "\xd8\x3c\xdf\xe0", (31 * 0xd83c) + 0xdfe0);
@@ -507,9 +511,9 @@ TEST_F(ObjectTest, DescriptorCompare) {
Handle<ClassLoader> class_loader_2(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader_2)));
Class* klass1 = linker->FindClass(soa.Self(), "LProtoCompare;", class_loader_1);
- ASSERT_TRUE(klass1 != NULL);
+ ASSERT_TRUE(klass1 != nullptr);
Class* klass2 = linker->FindClass(soa.Self(), "LProtoCompare2;", class_loader_2);
- ASSERT_TRUE(klass2 != NULL);
+ ASSERT_TRUE(klass2 != nullptr);
ArtMethod* m1_1 = klass1->GetVirtualMethod(0);
EXPECT_STREQ(m1_1->GetName(), "m1");
@@ -550,13 +554,13 @@ TEST_F(ObjectTest, InstanceOf) {
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
- ASSERT_TRUE(X != NULL);
- ASSERT_TRUE(Y != NULL);
+ ASSERT_TRUE(X != nullptr);
+ ASSERT_TRUE(Y != nullptr);
Handle<Object> x(hs.NewHandle(X->AllocObject(soa.Self())));
Handle<Object> y(hs.NewHandle(Y->AllocObject(soa.Self())));
- ASSERT_TRUE(x.Get() != NULL);
- ASSERT_TRUE(y.Get() != NULL);
+ ASSERT_TRUE(x.Get() != nullptr);
+ ASSERT_TRUE(y.Get() != nullptr);
EXPECT_TRUE(x->InstanceOf(X));
EXPECT_FALSE(x->InstanceOf(Y));
@@ -571,8 +575,10 @@ TEST_F(ObjectTest, InstanceOf) {
// All array classes implement Cloneable and Serializable.
Object* array = ObjectArray<Object>::Alloc(soa.Self(), Object_array_class, 1);
- Class* java_lang_Cloneable = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;");
- Class* java_io_Serializable = class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
+ Class* java_lang_Cloneable =
+ class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;");
+ Class* java_io_Serializable =
+ class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
EXPECT_TRUE(array->InstanceOf(java_lang_Cloneable));
EXPECT_TRUE(array->InstanceOf(java_io_Serializable));
}
@@ -622,35 +628,35 @@ TEST_F(ObjectTest, IsAssignableFromArray) {
Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader)));
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
- ASSERT_TRUE(X != NULL);
- ASSERT_TRUE(Y != NULL);
+ ASSERT_TRUE(X != nullptr);
+ ASSERT_TRUE(Y != nullptr);
Class* YA = class_linker_->FindClass(soa.Self(), "[LY;", class_loader);
Class* YAA = class_linker_->FindClass(soa.Self(), "[[LY;", class_loader);
- ASSERT_TRUE(YA != NULL);
- ASSERT_TRUE(YAA != NULL);
+ ASSERT_TRUE(YA != nullptr);
+ ASSERT_TRUE(YAA != nullptr);
Class* XAA = class_linker_->FindClass(soa.Self(), "[[LX;", class_loader);
- ASSERT_TRUE(XAA != NULL);
+ ASSERT_TRUE(XAA != nullptr);
Class* O = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
Class* OA = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;");
Class* OAA = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;");
Class* OAAA = class_linker_->FindSystemClass(soa.Self(), "[[[Ljava/lang/Object;");
- ASSERT_TRUE(O != NULL);
- ASSERT_TRUE(OA != NULL);
- ASSERT_TRUE(OAA != NULL);
- ASSERT_TRUE(OAAA != NULL);
+ ASSERT_TRUE(O != nullptr);
+ ASSERT_TRUE(OA != nullptr);
+ ASSERT_TRUE(OAA != nullptr);
+ ASSERT_TRUE(OAAA != nullptr);
Class* S = class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
Class* SA = class_linker_->FindSystemClass(soa.Self(), "[Ljava/io/Serializable;");
Class* SAA = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/io/Serializable;");
- ASSERT_TRUE(S != NULL);
- ASSERT_TRUE(SA != NULL);
- ASSERT_TRUE(SAA != NULL);
+ ASSERT_TRUE(S != nullptr);
+ ASSERT_TRUE(SA != nullptr);
+ ASSERT_TRUE(SAA != nullptr);
Class* IA = class_linker_->FindSystemClass(soa.Self(), "[I");
- ASSERT_TRUE(IA != NULL);
+ ASSERT_TRUE(IA != nullptr);
EXPECT_TRUE(YAA->IsAssignableFrom(YAA)); // identity
EXPECT_TRUE(XAA->IsAssignableFrom(YAA)); // element superclass
@@ -673,60 +679,62 @@ TEST_F(ObjectTest, FindInstanceField) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
- ASSERT_TRUE(s.Get() != NULL);
+ ASSERT_TRUE(s.Get() != nullptr);
Class* c = s->GetClass();
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
// Wrong type.
- EXPECT_TRUE(c->FindDeclaredInstanceField("count", "J") == NULL);
- EXPECT_TRUE(c->FindInstanceField("count", "J") == NULL);
+ EXPECT_TRUE(c->FindDeclaredInstanceField("count", "J") == nullptr);
+ EXPECT_TRUE(c->FindInstanceField("count", "J") == nullptr);
// Wrong name.
- EXPECT_TRUE(c->FindDeclaredInstanceField("Count", "I") == NULL);
- EXPECT_TRUE(c->FindInstanceField("Count", "I") == NULL);
+ EXPECT_TRUE(c->FindDeclaredInstanceField("Count", "I") == nullptr);
+ EXPECT_TRUE(c->FindInstanceField("Count", "I") == nullptr);
// Right name and type.
ArtField* f1 = c->FindDeclaredInstanceField("count", "I");
ArtField* f2 = c->FindInstanceField("count", "I");
- EXPECT_TRUE(f1 != NULL);
- EXPECT_TRUE(f2 != NULL);
+ EXPECT_TRUE(f1 != nullptr);
+ EXPECT_TRUE(f2 != nullptr);
EXPECT_EQ(f1, f2);
// TODO: check that s.count == 3.
// Ensure that we handle superclass fields correctly...
c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/StringBuilder;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
// No StringBuilder.count...
- EXPECT_TRUE(c->FindDeclaredInstanceField("count", "I") == NULL);
+ EXPECT_TRUE(c->FindDeclaredInstanceField("count", "I") == nullptr);
// ...but there is an AbstractStringBuilder.count.
- EXPECT_TRUE(c->FindInstanceField("count", "I") != NULL);
+ EXPECT_TRUE(c->FindInstanceField("count", "I") != nullptr);
}
TEST_F(ObjectTest, FindStaticField) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<4> hs(soa.Self());
Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
- ASSERT_TRUE(s.Get() != NULL);
+ ASSERT_TRUE(s.Get() != nullptr);
Handle<Class> c(hs.NewHandle(s->GetClass()));
- ASSERT_TRUE(c.Get() != NULL);
+ ASSERT_TRUE(c.Get() != nullptr);
// Wrong type.
- EXPECT_TRUE(c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "I") == NULL);
- EXPECT_TRUE(mirror::Class::FindStaticField(soa.Self(), c, "CASE_INSENSITIVE_ORDER", "I") == NULL);
+ EXPECT_TRUE(c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "I") == nullptr);
+ EXPECT_TRUE(mirror::Class::FindStaticField(
+ soa.Self(), c, "CASE_INSENSITIVE_ORDER", "I") == nullptr);
// Wrong name.
- EXPECT_TRUE(c->FindDeclaredStaticField("cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == NULL);
+ EXPECT_TRUE(c->FindDeclaredStaticField(
+ "cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == nullptr);
EXPECT_TRUE(
mirror::Class::FindStaticField(soa.Self(), c, "cASE_INSENSITIVE_ORDER",
- "Ljava/util/Comparator;") == NULL);
+ "Ljava/util/Comparator;") == nullptr);
// Right name and type.
ArtField* f1 = c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
ArtField* f2 = mirror::Class::FindStaticField(soa.Self(), c, "CASE_INSENSITIVE_ORDER",
"Ljava/util/Comparator;");
- EXPECT_TRUE(f1 != NULL);
- EXPECT_TRUE(f2 != NULL);
+ EXPECT_TRUE(f1 != nullptr);
+ EXPECT_TRUE(f2 != nullptr);
EXPECT_EQ(f1, f2);
// TODO: test static fields via superclasses.
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index ec2b495024..96f6a53396 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -30,7 +30,7 @@ GcRoot<Class> StackTraceElement::java_lang_StackTraceElement_;
void StackTraceElement::SetClass(Class* java_lang_StackTraceElement) {
CHECK(java_lang_StackTraceElement_.IsNull());
- CHECK(java_lang_StackTraceElement != NULL);
+ CHECK(java_lang_StackTraceElement != nullptr);
java_lang_StackTraceElement_ = GcRoot<Class>(java_lang_StackTraceElement);
}
@@ -44,7 +44,7 @@ StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declari
int32_t line_number) {
StackTraceElement* trace =
down_cast<StackTraceElement*>(GetStackTraceElement()->AllocObject(self));
- if (LIKELY(trace != NULL)) {
+ if (LIKELY(trace != nullptr)) {
if (Runtime::Current()->IsActiveTransaction()) {
trace->Init<true>(declaring_class, method_name, file_name, line_number);
} else {
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 4a95519bd0..b367cff7c8 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -50,7 +50,7 @@ inline int32_t String::GetLength() {
inline void String::SetArray(CharArray* new_array) {
// Array is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
- DCHECK(new_array != NULL);
+ DCHECK(new_array != nullptr);
SetFieldObject<false, false>(OFFSET_OF_OBJECT_MEMBER(String, array_), new_array);
}
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index bd6a63c727..b7fd240fe0 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -53,7 +53,7 @@ int32_t String::FastIndexOf(int32_t ch, int32_t start) {
void String::SetClass(Class* java_lang_String) {
CHECK(java_lang_String_.IsNull());
- CHECK(java_lang_String != NULL);
+ CHECK(java_lang_String != nullptr);
java_lang_String_ = GcRoot<Class>(java_lang_String);
}
@@ -137,7 +137,7 @@ bool String::Equals(String* that) {
if (this == that) {
// Quick reference equality test
return true;
- } else if (that == NULL) {
+ } else if (that == nullptr) {
// Null isn't an instanceof anything
return false;
} else if (this->GetLength() != that->GetLength()) {
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index b564649946..ca9464424b 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -45,7 +45,7 @@ void Throwable::SetCause(Throwable* cause) {
CHECK(cause != nullptr);
CHECK(cause != this);
Throwable* current_cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
- CHECK(current_cause == NULL || current_cause == this);
+ CHECK(current_cause == nullptr || current_cause == this);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause);
} else {
@@ -80,7 +80,7 @@ std::string Throwable::Dump() {
std::string result(PrettyTypeOf(this));
result += ": ";
String* msg = GetDetailMessage();
- if (msg != NULL) {
+ if (msg != nullptr) {
result += msg->ToModifiedUtf8();
}
result += "\n";
@@ -135,7 +135,7 @@ std::string Throwable::Dump() {
void Throwable::SetClass(Class* java_lang_Throwable) {
CHECK(java_lang_Throwable_.IsNull());
- CHECK(java_lang_Throwable != NULL);
+ CHECK(java_lang_Throwable != nullptr);
java_lang_Throwable_ = GcRoot<Class>(java_lang_Throwable);
}
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 1a80ded13d..4b412253f3 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -68,11 +68,11 @@ static constexpr uint64_t kLongWaitMs = 100;
* at any given time.
*/
-bool (*Monitor::is_sensitive_thread_hook_)() = NULL;
+bool (*Monitor::is_sensitive_thread_hook_)() = nullptr;
uint32_t Monitor::lock_profiling_threshold_ = 0;
bool Monitor::IsSensitiveThread() {
- if (is_sensitive_thread_hook_ != NULL) {
+ if (is_sensitive_thread_hook_ != nullptr) {
return (*is_sensitive_thread_hook_)();
}
return false;
@@ -90,9 +90,9 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
owner_(owner),
lock_count_(0),
obj_(GcRoot<mirror::Object>(obj)),
- wait_set_(NULL),
+ wait_set_(nullptr),
hash_code_(hash_code),
- locking_method_(NULL),
+ locking_method_(nullptr),
locking_dex_pc_(0),
monitor_id_(MonitorPool::ComputeMonitorId(this, self)) {
#ifdef __LP64__
@@ -113,9 +113,9 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
owner_(owner),
lock_count_(0),
obj_(GcRoot<mirror::Object>(obj)),
- wait_set_(NULL),
+ wait_set_(nullptr),
hash_code_(hash_code),
- locking_method_(NULL),
+ locking_method_(nullptr),
locking_dex_pc_(0),
monitor_id_(id) {
#ifdef __LP64__
@@ -183,9 +183,9 @@ Monitor::~Monitor() {
void Monitor::AppendToWaitSet(Thread* thread) {
DCHECK(owner_ == Thread::Current());
- DCHECK(thread != NULL);
+ DCHECK(thread != nullptr);
DCHECK(thread->GetWaitNext() == nullptr) << thread->GetWaitNext();
- if (wait_set_ == NULL) {
+ if (wait_set_ == nullptr) {
wait_set_ = thread;
return;
}
@@ -200,8 +200,8 @@ void Monitor::AppendToWaitSet(Thread* thread) {
void Monitor::RemoveFromWaitSet(Thread *thread) {
DCHECK(owner_ == Thread::Current());
- DCHECK(thread != NULL);
- if (wait_set_ == NULL) {
+ DCHECK(thread != nullptr);
+ if (wait_set_ == nullptr) {
return;
}
if (wait_set_ == thread) {
@@ -211,7 +211,7 @@ void Monitor::RemoveFromWaitSet(Thread *thread) {
}
Thread* t = wait_set_;
- while (t->GetWaitNext() != NULL) {
+ while (t->GetWaitNext() != nullptr) {
if (t->GetWaitNext() == thread) {
t->SetWaitNext(thread->GetWaitNext());
thread->SetWaitNext(nullptr);
@@ -253,7 +253,8 @@ void Monitor::Lock(Thread* self) {
self->SetMonitorEnterObject(GetObject());
{
ScopedThreadStateChange tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
- MutexLock mu2(self, monitor_lock_); // Reacquire monitor_lock_ without mutator_lock_ for Wait.
+ // Reacquire monitor_lock_ without mutator_lock_ for Wait.
+ MutexLock mu2(self, monitor_lock_);
if (owner_ != nullptr) { // Did the owner_ give the lock up?
if (ATRACE_ENABLED()) {
std::string name;
@@ -311,8 +312,8 @@ static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
}
static std::string ThreadToString(Thread* thread) {
- if (thread == NULL) {
- return "NULL";
+ if (thread == nullptr) {
+ return "nullptr";
}
std::ostringstream oss;
// TODO: alternatively, we could just return the thread's name.
@@ -322,7 +323,7 @@ static std::string ThreadToString(Thread* thread) {
void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* found_owner,
Monitor* monitor) {
- Thread* current_owner = NULL;
+ Thread* current_owner = nullptr;
std::string current_owner_string;
std::string expected_owner_string;
std::string found_owner_string;
@@ -331,14 +332,14 @@ void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* fo
// Acquire thread list lock so threads won't disappear from under us.
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
// Re-read owner now that we hold lock.
- current_owner = (monitor != NULL) ? monitor->GetOwner() : NULL;
+ current_owner = (monitor != nullptr) ? monitor->GetOwner() : nullptr;
// Get short descriptions of the threads involved.
current_owner_string = ThreadToString(current_owner);
expected_owner_string = ThreadToString(expected_owner);
found_owner_string = ThreadToString(found_owner);
}
- if (current_owner == NULL) {
- if (found_owner == NULL) {
+ if (current_owner == nullptr) {
+ if (found_owner == nullptr) {
ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
" on thread '%s'",
PrettyTypeOf(o).c_str(),
@@ -352,7 +353,7 @@ void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* fo
expected_owner_string.c_str());
}
} else {
- if (found_owner == NULL) {
+ if (found_owner == nullptr) {
// Race: originally there was no owner, there is now
ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
" (originally believed to be unowned) on thread '%s'",
@@ -380,14 +381,14 @@ void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* fo
}
bool Monitor::Unlock(Thread* self) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
MutexLock mu(self, monitor_lock_);
Thread* owner = owner_;
if (owner == self) {
// We own the monitor, so nobody else can be in here.
if (lock_count_ == 0) {
- owner_ = NULL;
- locking_method_ = NULL;
+ owner_ = nullptr;
+ locking_method_ = nullptr;
locking_dex_pc_ = 0;
// Wake a contender.
monitor_contenders_.Signal(self);
@@ -406,7 +407,7 @@ bool Monitor::Unlock(Thread* self) {
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
DCHECK(why == kTimedWaiting || why == kWaiting || why == kSleeping);
monitor_lock_.Lock(self);
@@ -446,9 +447,9 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
++num_waiters_;
int prev_lock_count = lock_count_;
lock_count_ = 0;
- owner_ = NULL;
+ owner_ = nullptr;
mirror::ArtMethod* saved_method = locking_method_;
- locking_method_ = NULL;
+ locking_method_ = nullptr;
uintptr_t saved_dex_pc = locking_dex_pc_;
locking_dex_pc_ = 0;
@@ -465,7 +466,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
MutexLock mu(self, *self->GetWaitMutex());
// Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
- // non-NULL a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
+ // non-null a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
// up.
DCHECK(self->GetWaitMonitor() == nullptr);
self->SetWaitMonitor(this);
@@ -538,13 +539,13 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
self->SetInterruptedLocked(false);
}
if (interruptShouldThrow) {
- self->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
+ self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
}
}
}
void Monitor::Notify(Thread* self) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
MutexLock mu(self, monitor_lock_);
// Make sure that we hold the lock.
if (owner_ != self) {
@@ -552,7 +553,7 @@ void Monitor::Notify(Thread* self) {
return;
}
// Signal the first waiting thread in the wait set.
- while (wait_set_ != NULL) {
+ while (wait_set_ != nullptr) {
Thread* thread = wait_set_;
wait_set_ = thread->GetWaitNext();
thread->SetWaitNext(nullptr);
@@ -567,7 +568,7 @@ void Monitor::Notify(Thread* self) {
}
void Monitor::NotifyAll(Thread* self) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
MutexLock mu(self, monitor_lock_);
// Make sure that we hold the lock.
if (owner_ != self) {
@@ -575,7 +576,7 @@ void Monitor::NotifyAll(Thread* self) {
return;
}
// Signal all threads in the wait set.
- while (wait_set_ != NULL) {
+ while (wait_set_ != nullptr) {
Thread* thread = wait_set_;
wait_set_ = thread->GetWaitNext();
thread->SetWaitNext(nullptr);
@@ -625,7 +626,7 @@ bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
obj->SetLockWord(new_lw, false);
VLOG(monitor) << "Deflated" << obj << " to empty lock word";
}
- // The monitor is deflated, mark the object as nullptr so that we know to delete it during the
+ // The monitor is deflated, mark the object as null so that we know to delete it during the
// next GC.
monitor->obj_ = GcRoot<mirror::Object>(nullptr);
}
@@ -697,8 +698,8 @@ static mirror::Object* FakeUnlock(mirror::Object* obj)
}
mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
- DCHECK(self != NULL);
- DCHECK(obj != NULL);
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
obj = FakeLock(obj);
uint32_t thread_id = self->GetThreadId();
size_t contention_count = 0;
@@ -772,8 +773,8 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
}
bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
- DCHECK(self != NULL);
- DCHECK(obj != NULL);
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
obj = FakeUnlock(obj);
StackHandleScope<1> hs(self);
Handle<mirror::Object> h_obj(hs.NewHandle(obj));
@@ -979,11 +980,11 @@ mirror::Object* Monitor::GetContendedMonitor(Thread* thread) {
// This is used to implement JDWP's ThreadReference.CurrentContendedMonitor, and has a bizarre
// definition of contended that includes a monitor a thread is trying to enter...
mirror::Object* result = thread->GetMonitorEnterObject();
- if (result == NULL) {
+ if (result == nullptr) {
// ...but also a monitor that the thread is waiting on.
MutexLock mu(Thread::Current(), *thread->GetWaitMutex());
Monitor* monitor = thread->GetWaitMonitor();
- if (monitor != NULL) {
+ if (monitor != nullptr) {
result = monitor->GetObject();
}
}
@@ -993,7 +994,7 @@ mirror::Object* Monitor::GetContendedMonitor(Thread* thread) {
void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
void* callback_context, bool abort_on_failure) {
mirror::ArtMethod* m = stack_visitor->GetMethod();
- CHECK(m != NULL);
+ CHECK(m != nullptr);
// Native methods are an easy special case.
// TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
@@ -1013,7 +1014,7 @@ void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::O
// Is there any reason to believe there's any synchronization in this method?
const DexFile::CodeItem* code_item = m->GetCodeItem();
- CHECK(code_item != NULL) << PrettyMethod(m);
+ CHECK(code_item != nullptr) << PrettyMethod(m);
if (code_item->tries_size_ == 0) {
return; // No "tries" implies no synchronization, so no held locks to report.
}
@@ -1088,13 +1089,13 @@ bool Monitor::IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
void Monitor::TranslateLocation(mirror::ArtMethod* method, uint32_t dex_pc,
const char** source_file, uint32_t* line_number) const {
// If method is null, location is unknown
- if (method == NULL) {
+ if (method == nullptr) {
*source_file = "";
*line_number = 0;
return;
}
*source_file = method->GetDeclaringClassSourceFile();
- if (*source_file == NULL) {
+ if (*source_file == nullptr) {
*source_file = "";
}
*line_number = method->GetLineNumFromDexPC(dex_pc);
@@ -1103,7 +1104,7 @@ void Monitor::TranslateLocation(mirror::ArtMethod* method, uint32_t dex_pc,
uint32_t Monitor::GetOwnerThreadId() {
MutexLock mu(Thread::Current(), monitor_lock_);
Thread* owner = owner_;
- if (owner != NULL) {
+ if (owner != nullptr) {
return owner->GetThreadId();
} else {
return ThreadList::kInvalidThreadId;
@@ -1185,7 +1186,7 @@ static mirror::Object* MonitorDeflateCallback(mirror::Object* object, void* arg)
if (Monitor::Deflate(args->self, object)) {
DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
++args->deflate_count;
- // If we deflated, return nullptr so that the monitor gets removed from the array.
+ // If we deflated, return null so that the monitor gets removed from the array.
return nullptr;
}
return object; // Monitor was not deflated.
@@ -1198,7 +1199,7 @@ size_t MonitorList::DeflateMonitors() {
return args.deflate_count;
}
-MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(NULL), entry_count_(0) {
+MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(nullptr), entry_count_(0) {
DCHECK(obj != nullptr);
LockWord lock_word = obj->GetLockWord(true);
switch (lock_word.GetState()) {
@@ -1217,7 +1218,7 @@ MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(NULL), entry_count_(0) {
Monitor* mon = lock_word.FatLockMonitor();
owner_ = mon->owner_;
entry_count_ = 1 + mon->lock_count_;
- for (Thread* waiter = mon->wait_set_; waiter != NULL; waiter = waiter->GetWaitNext()) {
+ for (Thread* waiter = mon->wait_set_; waiter != nullptr; waiter = waiter->GetWaitNext()) {
waiters_.push_back(waiter);
}
break;
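[Editor's note: the wait-set edits above all manipulate one structure — an intrusive singly-linked list threaded through each Thread's wait-next pointer, with AppendToWaitSet walking to the tail and RemoveFromWaitSet unlinking in place and clearing the removed link. A self-contained sketch of the same list discipline, with Node standing in for Thread and next_ for GetWaitNext()/SetWaitNext().]

struct Node {
  Node* next_ = nullptr;
};

void Append(Node** head, Node* n) {
  if (*head == nullptr) { *head = n; return; }
  Node* t = *head;
  while (t->next_ != nullptr) t = t->next_;  // Walk to the tail.
  t->next_ = n;
}

void Remove(Node** head, Node* n) {
  if (*head == nullptr) return;
  if (*head == n) { *head = n->next_; n->next_ = nullptr; return; }
  for (Node* t = *head; t->next_ != nullptr; t = t->next_) {
    if (t->next_ == n) {
      t->next_ = n->next_;  // Unlink in place...
      n->next_ = nullptr;   // ...and clear the removed thread's link.
      return;
    }
  }
}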
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index d89290b836..48c9cceff4 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -88,7 +88,7 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample
cp = EventLogWriteInt(cp, line_number);
// Emit the lock owner source code file name, <= 37 bytes.
- if (owner_filename == NULL) {
+ if (owner_filename == nullptr) {
owner_filename = "";
} else if (strcmp(filename, owner_filename) == 0) {
// Common case, so save on log space.
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 8ae5a54fe7..4ab4e86ac4 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -138,7 +138,8 @@ class MonitorPool {
for (size_t index = 0; index < num_chunks_; ++index) {
uintptr_t chunk_addr = *(monitor_chunks_.LoadRelaxed() + index);
if (IsInChunk(chunk_addr, mon)) {
- return OffsetToMonitorId(reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
+ return OffsetToMonitorId(
+ reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
}
}
LOG(FATAL) << "Did not find chunk that contains monitor.";
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 2351463391..30cb2d835d 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -116,8 +116,8 @@ class CreateTask : public Task {
ScopedObjectAccess soa(self);
monitor_test_->thread_ = self; // Pass the Thread.
- monitor_test_->object_.Get()->MonitorEnter(self); // Lock the object. This should transition
- LockWord lock_after = monitor_test_->object_.Get()->GetLockWord(false); // it to thinLocked.
+ monitor_test_->object_.Get()->MonitorEnter(self); // Lock the object. This should transition
+ LockWord lock_after = monitor_test_->object_.Get()->GetLockWord(false); // it to thinLocked.
LockWord::LockState new_state = lock_after.GetState();
// Cannot use ASSERT only, as analysis thinks we'll keep holding the mutex.
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 87ae64d1d4..4f97d20d6c 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -109,7 +109,7 @@ static jlongArray ConvertNativeToJavaArray(JNIEnv* env,
//
// NullableScopedUtfChars name(env, javaName);
// if (env->ExceptionCheck()) {
-// return NULL;
+// return nullptr;
// }
// // ... use name.c_str()
//
@@ -117,7 +117,7 @@ static jlongArray ConvertNativeToJavaArray(JNIEnv* env,
class NullableScopedUtfChars {
public:
NullableScopedUtfChars(JNIEnv* env, jstring s) : mEnv(env), mString(s) {
- mUtfChars = (s != NULL) ? env->GetStringUTFChars(s, NULL) : NULL;
+ mUtfChars = (s != nullptr) ? env->GetStringUTFChars(s, nullptr) : nullptr;
}
~NullableScopedUtfChars() {
@@ -149,9 +149,10 @@ class NullableScopedUtfChars {
void operator=(const NullableScopedUtfChars&);
};
-static jobject DexFile_openDexFileNative(JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
+static jobject DexFile_openDexFileNative(
+ JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
ScopedUtfChars sourceName(env, javaSourceName);
- if (sourceName.c_str() == NULL) {
+ if (sourceName.c_str() == nullptr) {
return 0;
}
NullableScopedUtfChars outputName(env, javaOutputName);
@@ -224,9 +225,9 @@ static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, j
}
ScopedUtfChars class_name(env, javaName);
- if (class_name.c_str() == NULL) {
+ if (class_name.c_str() == nullptr) {
VLOG(class_linker) << "Failed to find class_name";
- return NULL;
+ return nullptr;
}
const std::string descriptor(DotToDescriptor(class_name.c_str()));
const size_t hash(ComputeModifiedUtf8Hash(descriptor.c_str()));
@@ -367,7 +368,7 @@ static jint DexFile_getDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename,
instruction_set.c_str(), defer);
}
-// public API, NULL pkgname
+// public API, null pkgname
static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename) {
const char* instruction_set = GetInstructionSetString(kRuntimeISA);
ScopedUtfChars filename(env, javaFilename);
@@ -378,11 +379,14 @@ static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, closeDexFile, "(Ljava/lang/Object;)V"),
- NATIVE_METHOD(DexFile, defineClassNative, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Object;)Ljava/lang/Class;"),
+ NATIVE_METHOD(DexFile, defineClassNative,
+ "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Object;)Ljava/lang/Class;"),
NATIVE_METHOD(DexFile, getClassNameList, "(Ljava/lang/Object;)[Ljava/lang/String;"),
NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
- NATIVE_METHOD(DexFile, getDexOptNeeded, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)I"),
- NATIVE_METHOD(DexFile, openDexFileNative, "(Ljava/lang/String;Ljava/lang/String;I)Ljava/lang/Object;"),
+ NATIVE_METHOD(DexFile, getDexOptNeeded,
+ "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)I"),
+ NATIVE_METHOD(DexFile, openDexFileNative,
+ "(Ljava/lang/String;Ljava/lang/String;I)Ljava/lang/Object;"),
};
void register_dalvik_system_DexFile(JNIEnv* env) {
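[Editor's note: the NATIVE_METHOD entries rewrapped above expand to ordinary JNINativeMethod rows {name, signature, fnPtr}; breaking the long signatures is purely cosmetic. A minimal, self-contained example of the same registration pattern, with hypothetical class and method names.]

static jint Example_add(JNIEnv*, jclass, jint a, jint b) {
  return a + b;
}

static JNINativeMethod gExampleMethods[] = {
  {"add", "(II)I", reinterpret_cast<void*>(Example_add)},
};

// A caller binds the table with:
//   env->RegisterNatives(clazz, gExampleMethods,
//                        sizeof(gExampleMethods) / sizeof(gExampleMethods[0]));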
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 876e29aa77..46881b00fa 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -104,7 +104,7 @@ static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceF
}
ScopedUtfChars traceFilename(env, javaTraceFilename);
- if (traceFilename.c_str() == NULL) {
+ if (traceFilename.c_str() == nullptr) {
return;
}
Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, Trace::TraceOutputMode::kFile,
@@ -116,7 +116,7 @@ static void VMDebug_startMethodTracingFilename(JNIEnv* env, jclass, jstring java
jint bufferSize, jint flags,
jboolean samplingEnabled, jint intervalUs) {
ScopedUtfChars traceFilename(env, javaTraceFilename);
- if (traceFilename.c_str() == NULL) {
+ if (traceFilename.c_str() == nullptr) {
return;
}
Trace::Start(traceFilename.c_str(), -1, bufferSize, flags, Trace::TraceOutputMode::kFile,
@@ -156,7 +156,7 @@ static jlong VMDebug_lastDebuggerActivity(JNIEnv*, jclass) {
static void ThrowUnsupportedOperationException(JNIEnv* env) {
ScopedObjectAccess soa(env);
- soa.Self()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", NULL);
+ soa.Self()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", nullptr);
}
static void VMDebug_startInstructionCounting(JNIEnv* env, jclass) {
@@ -200,15 +200,15 @@ static jlong VMDebug_threadCpuTimeNanos(JNIEnv*, jclass) {
* error occurs during file handling.
*/
static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, jobject javaFd) {
- // Only one of these may be NULL.
- if (javaFilename == NULL && javaFd == NULL) {
+ // Only one of these may be null.
+ if (javaFilename == nullptr && javaFd == nullptr) {
ScopedObjectAccess soa(env);
ThrowNullPointerException("fileName == null && fd == null");
return;
}
std::string filename;
- if (javaFilename != NULL) {
+ if (javaFilename != nullptr) {
ScopedUtfChars chars(env, javaFilename);
if (env->ExceptionCheck()) {
return;
@@ -219,7 +219,7 @@ static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, job
}
int fd = -1;
- if (javaFd != NULL) {
+ if (javaFd != nullptr) {
fd = jniGetFDFromFileDescriptor(env, javaFd);
if (fd < 0) {
ScopedObjectAccess soa(env);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 196a231892..53bb129609 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -114,7 +114,7 @@ static jobject VMRuntime_newUnpaddedArray(JNIEnv* env, jobject, jclass javaEleme
}
static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) {
- if (javaArray == NULL) { // Most likely allocation failed
+ if (javaArray == nullptr) { // Most likely allocation failed
return 0;
}
ScopedFastNativeObjectAccess soa(env);
@@ -263,17 +263,17 @@ class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
};
// Based on ClassLinker::ResolveString.
-static void PreloadDexCachesResolveString(Handle<mirror::DexCache> dex_cache, uint32_t string_idx,
- StringTable& strings)
+static void PreloadDexCachesResolveString(
+ Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* string = dex_cache->GetResolvedString(string_idx);
- if (string != NULL) {
+ if (string != nullptr) {
return;
}
const DexFile* dex_file = dex_cache->GetDexFile();
const char* utf8 = dex_file->StringDataByIdx(string_idx);
string = strings[utf8];
- if (string == NULL) {
+ if (string == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved string=" << utf8;
@@ -281,10 +281,11 @@ static void PreloadDexCachesResolveString(Handle<mirror::DexCache> dex_cache, ui
}
// Based on ClassLinker::ResolveType.
-static void PreloadDexCachesResolveType(Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
+static void PreloadDexCachesResolveType(
+ Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
- if (klass != NULL) {
+ if (klass != nullptr) {
return;
}
const DexFile* dex_file = dex_cache->GetDexFile();
@@ -293,9 +294,9 @@ static void PreloadDexCachesResolveType(Thread* self, mirror::DexCache* dex_cach
if (class_name[1] == '\0') {
klass = linker->FindPrimitiveClass(class_name[0]);
} else {
- klass = linker->LookupClass(self, class_name, ComputeModifiedUtf8Hash(class_name), NULL);
+ klass = linker->LookupClass(self, class_name, ComputeModifiedUtf8Hash(class_name), nullptr);
}
- if (klass == NULL) {
+ if (klass == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved klass=" << class_name;
@@ -321,7 +322,7 @@ static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uin
Thread* const self = Thread::Current();
StackHandleScope<1> hs(self);
Handle<mirror::Class> klass(hs.NewHandle(dex_cache->GetResolvedType(field_id.class_idx_)));
- if (klass.Get() == NULL) {
+ if (klass.Get() == nullptr) {
return;
}
if (is_static) {
@@ -329,7 +330,7 @@ static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uin
} else {
field = klass->FindInstanceField(dex_cache.Get(), field_idx);
}
- if (field == NULL) {
+ if (field == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved field " << PrettyField(field);
@@ -341,13 +342,13 @@ static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, ui
InvokeType invoke_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx);
- if (method != NULL) {
+ if (method != nullptr) {
return;
}
const DexFile* dex_file = dex_cache->GetDexFile();
const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
mirror::Class* klass = dex_cache->GetResolvedType(method_id.class_idx_);
- if (klass == NULL) {
+ if (klass == nullptr) {
return;
}
switch (invoke_type) {
@@ -366,7 +367,7 @@ static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, ui
LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
UNREACHABLE();
}
- if (method == NULL) {
+ if (method == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved method " << PrettyMethod(method);
@@ -404,7 +405,7 @@ static void PreloadDexCachesStatsTotal(DexCacheStats* total) {
const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
for (size_t i = 0; i < boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
total->num_strings += dex_file->NumStringIds();
total->num_fields += dex_file->NumFieldIds();
total->num_methods += dex_file->NumMethodIds();
@@ -421,29 +422,29 @@ static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
for (size_t i = 0; i < boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file);
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
mirror::String* string = dex_cache->GetResolvedString(j);
- if (string != NULL) {
+ if (string != nullptr) {
filled->num_strings++;
}
}
for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) {
mirror::Class* klass = dex_cache->GetResolvedType(j);
- if (klass != NULL) {
+ if (klass != nullptr) {
filled->num_types++;
}
}
for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
ArtField* field = linker->GetResolvedField(j, dex_cache);
- if (field != NULL) {
+ if (field != nullptr) {
filled->num_fields++;
}
}
for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) {
mirror::ArtMethod* method = dex_cache->GetResolvedMethod(j);
- if (method != NULL) {
+ if (method != nullptr) {
filled->num_methods++;
}
}
@@ -482,7 +483,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
for (size_t i = 0; i < boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file)));
@@ -504,7 +505,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
class_def_index++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data == NULL) {
+ if (class_data == nullptr) {
continue;
}
ClassDataItemIterator it(*dex_file, class_data);
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 2cdc68f5b2..17fbc4f85d 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -81,33 +81,26 @@ static jobject VMStack_getCallingClassLoader(JNIEnv* env, jclass) {
return soa.AddLocalReference<jobject>(visitor.caller->GetDeclaringClass()->GetClassLoader());
}
-static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject javaBootstrap,
- jobject javaSystem) {
+static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass) {
struct ClosestUserClassLoaderVisitor : public StackVisitor {
- ClosestUserClassLoaderVisitor(Thread* thread, mirror::Object* bootstrap_in,
- mirror::Object* system_in)
- : StackVisitor(thread, NULL), bootstrap(bootstrap_in), system(system_in),
- class_loader(NULL) {}
+ explicit ClosestUserClassLoaderVisitor(Thread* thread)
+ : StackVisitor(thread, nullptr), class_loader(nullptr) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(class_loader == NULL);
+ DCHECK(class_loader == nullptr);
mirror::Class* c = GetMethod()->GetDeclaringClass();
mirror::Object* cl = c->GetClassLoader();
- if (cl != NULL && cl != bootstrap && cl != system) {
+ if (cl != nullptr) {
class_loader = cl;
return false;
}
return true;
}
- mirror::Object* bootstrap;
- mirror::Object* system;
mirror::Object* class_loader;
};
ScopedFastNativeObjectAccess soa(env);
- mirror::Object* bootstrap = soa.Decode<mirror::Object*>(javaBootstrap);
- mirror::Object* system = soa.Decode<mirror::Object*>(javaSystem);
- ClosestUserClassLoaderVisitor visitor(soa.Self(), bootstrap, system);
+ ClosestUserClassLoaderVisitor visitor(soa.Self());
visitor.WalkStack();
return soa.AddLocalReference<jobject>(visitor.class_loader);
}
@@ -136,7 +129,7 @@ static jobjectArray VMStack_getThreadStackTrace(JNIEnv* env, jclass, jobject jav
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMStack, fillStackTraceElements, "!(Ljava/lang/Thread;[Ljava/lang/StackTraceElement;)I"),
NATIVE_METHOD(VMStack, getCallingClassLoader, "!()Ljava/lang/ClassLoader;"),
- NATIVE_METHOD(VMStack, getClosestUserClassLoader, "!(Ljava/lang/ClassLoader;Ljava/lang/ClassLoader;)Ljava/lang/ClassLoader;"),
+ NATIVE_METHOD(VMStack, getClosestUserClassLoader, "!()Ljava/lang/ClassLoader;"),
NATIVE_METHOD(VMStack, getStackClass2, "!()Ljava/lang/Class;"),
NATIVE_METHOD(VMStack, getThreadStackTrace, "!(Ljava/lang/Thread;)[Ljava/lang/StackTraceElement;"),
};
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 5ad18f87ed..b0d923b10c 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -25,6 +25,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
@@ -41,7 +42,7 @@ ALWAYS_INLINE static inline mirror::Class* DecodeClass(
const ScopedFastNativeObjectAccess& soa, jobject java_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
- DCHECK(c != NULL);
+ DCHECK(c != nullptr);
DCHECK(c->IsClass());
// TODO: we could EnsureInitialized here, rather than on every reflective get/set or invoke.
// For now, we conservatively preserve the old dalvik behavior. A quick "IsInitialized" check
@@ -91,18 +92,6 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean
return soa.AddLocalReference<jclass>(c.Get());
}
-static jobject Class_findOverriddenMethodIfProxy(JNIEnv* env, jclass, jobject art_method) {
- ScopedFastNativeObjectAccess soa(env);
- mirror::ArtMethod* method = soa.Decode<mirror::ArtMethod*>(art_method);
- mirror::Class* declaring_klass = method->GetDeclaringClass();
- if (!declaring_klass->IsProxyClass()) {
- return art_method;
- }
- uint32_t dex_method_index = method->GetDexMethodIndex();
- mirror::ArtMethod* overriden_method = method->GetDexCacheResolvedMethods()->Get(dex_method_index);
- return soa.AddLocalReference<jobject>(overriden_method);
-}
-
static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
@@ -252,7 +241,7 @@ static jobject Class_getDeclaredFieldInternal(JNIEnv* env, jobject javaThis, jst
static jobject Class_getDeclaredField(JNIEnv* env, jobject javaThis, jstring name) {
ScopedFastNativeObjectAccess soa(env);
auto* name_string = soa.Decode<mirror::String*>(name);
- if (name == nullptr) {
+ if (name_string == nullptr) {
ThrowNullPointerException("name == null");
return nullptr;
}
@@ -269,17 +258,222 @@ static jobject Class_getDeclaredField(JNIEnv* env, jobject javaThis, jstring nam
return soa.AddLocalReference<jobject>(result);
}
+static jobject Class_getDeclaredConstructorInternal(
+ JNIEnv* env, jobject javaThis, jobjectArray args) {
+ ScopedFastNativeObjectAccess soa(env);
+ auto* klass = DecodeClass(soa, javaThis);
+ auto* params = soa.Decode<mirror::ObjectArray<mirror::Class>*>(args);
+ StackHandleScope<1> hs(soa.Self());
+ auto* declared_constructor = klass->GetDeclaredConstructor(soa.Self(), hs.NewHandle(params));
+ if (declared_constructor != nullptr) {
+ return soa.AddLocalReference<jobject>(
+ mirror::Constructor::CreateFromArtMethod(soa.Self(), declared_constructor));
+ }
+ return nullptr;
+}
+
+static ALWAYS_INLINE inline bool MethodMatchesConstructor(mirror::ArtMethod* m, bool public_only)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(m != nullptr);
+ return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor();
+}
+
+static jobjectArray Class_getDeclaredConstructorsInternal(
+ JNIEnv* env, jobject javaThis, jboolean publicOnly) {
+ ScopedFastNativeObjectAccess soa(env);
+ auto* klass = DecodeClass(soa, javaThis);
+ StackHandleScope<2> hs(soa.Self());
+ auto h_direct_methods = hs.NewHandle(klass->GetDirectMethods());
+ size_t constructor_count = 0;
+ auto count = h_direct_methods.Get() != nullptr ? h_direct_methods->GetLength() : 0u;
+ // Two pass approach for speed.
+ for (size_t i = 0; i < count; ++i) {
+ constructor_count += MethodMatchesConstructor(h_direct_methods->GetWithoutChecks(i),
+ publicOnly != JNI_FALSE) ? 1u : 0u;
+ }
+ auto h_constructors = hs.NewHandle(mirror::ObjectArray<mirror::Constructor>::Alloc(
+ soa.Self(), mirror::Constructor::ArrayClass(), constructor_count));
+ if (UNLIKELY(h_constructors.Get() == nullptr)) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ constructor_count = 0;
+ for (size_t i = 0; i < count; ++i) {
+ auto* method = h_direct_methods->GetWithoutChecks(i);
+ if (MethodMatchesConstructor(method, publicOnly != JNI_FALSE)) {
+ auto* constructor = mirror::Constructor::CreateFromArtMethod(soa.Self(), method);
+ if (UNLIKELY(constructor == nullptr)) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ h_constructors->SetWithoutChecks<false>(constructor_count++, constructor);
+ }
+ }
+ return soa.AddLocalReference<jobjectArray>(h_constructors.Get());
+}
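
As an aside, the two-pass shape above — count the matches, allocate the result array once, then fill it without reallocation — can be sketched in isolation. The Method struct and the predicate below are illustrative stand-ins for the ART mirror types, not runtime API:

#include <cstddef>
#include <vector>

struct Method { bool is_public; bool is_static; bool is_constructor; };

static bool MatchesConstructor(const Method& m, bool public_only) {
  return (!public_only || m.is_public) && !m.is_static && m.is_constructor;
}

std::vector<const Method*> GetConstructors(const std::vector<Method>& methods,
                                           bool public_only) {
  // Pass 1: count matches so the result can be sized exactly once,
  // mirroring the single ObjectArray::Alloc in the runtime code.
  size_t count = 0;
  for (const Method& m : methods) {
    count += MatchesConstructor(m, public_only) ? 1u : 0u;
  }
  std::vector<const Method*> result;
  result.reserve(count);
  // Pass 2: fill; no further allocation happens on this path.
  for (const Method& m : methods) {
    if (MatchesConstructor(m, public_only)) {
      result.push_back(&m);
    }
  }
  return result;
}
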
+
+static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
+ jobject name, jobjectArray args) {
+ // Covariant return types permit the class to define multiple
+ // methods with the same name and parameter types. Prefer to
+ // return a non-synthetic method in such situations. We may
+ // still return a synthetic method to handle situations like
+ // escalated visibility. We never return miranda methods that
+ // were synthesized by the runtime.
+ constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<5> hs(soa.Self());
+ auto h_method_name = hs.NewHandle(soa.Decode<mirror::String*>(name));
+ if (UNLIKELY(h_method_name.Get() == nullptr)) {
+ ThrowNullPointerException("name == null");
+ return nullptr;
+ }
+ auto h_args = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
+ auto* klass = DecodeClass(soa, javaThis);
+ mirror::ArtMethod* result = nullptr;
+ auto* virtual_methods = klass->GetVirtualMethods();
+ if (virtual_methods != nullptr) {
+ auto h_virtual_methods = hs.NewHandle(virtual_methods);
+ for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) {
+ auto* m = h_virtual_methods->GetWithoutChecks(i);
+ auto* np_method = m->GetInterfaceMethodIfProxy();
+ // May cause thread suspension.
+ mirror::String* np_name = np_method->GetNameAsString(soa.Self());
+ if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+ if (UNLIKELY(soa.Self()->IsExceptionPending())) {
+ return nullptr;
+ }
+ continue;
+ }
+ auto modifiers = m->GetAccessFlags();
+ if ((modifiers & kSkipModifiers) == 0) {
+ return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), m));
+ }
+ if ((modifiers & kAccMiranda) == 0) {
+ result = m; // Remember as potential result if it's not a miranda method.
+ }
+ }
+ }
+ if (result == nullptr) {
+ auto* direct_methods = klass->GetDirectMethods();
+ if (direct_methods != nullptr) {
+ auto h_direct_methods = hs.NewHandle(direct_methods);
+ for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) {
+ auto* m = h_direct_methods->GetWithoutChecks(i);
+ auto modifiers = m->GetAccessFlags();
+ if ((modifiers & kAccConstructor) != 0) {
+ continue;
+ }
+ auto* np_method = m->GetInterfaceMethodIfProxy();
+ // May cause thread suspension.
+ mirror::String* np_name = np_method->GetNameAsString(soa.Self());
+ if (np_name == nullptr) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+ if (UNLIKELY(soa.Self()->IsExceptionPending())) {
+ return nullptr;
+ }
+ continue;
+ }
+ if ((modifiers & kSkipModifiers) == 0) {
+ return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(
+ soa.Self(), m));
+ }
+ // Direct methods cannot be miranda methods, so this potential result must be synthetic.
+ result = m;
+ }
+ }
+ }
+ return result != nullptr ?
+ soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), result)) :
+ nullptr;
+}
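
The preference order implemented by both loops above — return a plain match immediately, but remember a synthetic non-miranda match as a fallback — can be sketched on its own. kSynthetic below matches the class-file flag value 0x1000; kMiranda is a placeholder, since ART's internal flag value differs:

#include <cstdint>
#include <vector>

constexpr uint32_t kSynthetic = 0x00001000;  // ACC_SYNTHETIC per the class-file format
constexpr uint32_t kMiranda   = 0x00008000;  // placeholder; not ART's actual value
constexpr uint32_t kSkip = kSynthetic | kMiranda;

struct Method { uint32_t access_flags; bool matches; };

const Method* FindPreferred(const std::vector<Method>& methods) {
  const Method* fallback = nullptr;
  for (const Method& m : methods) {
    if (!m.matches) {
      continue;
    }
    if ((m.access_flags & kSkip) == 0) {
      return &m;  // plain, non-synthetic match: take it immediately
    }
    if ((m.access_flags & kMiranda) == 0) {
      fallback = &m;  // synthetic (e.g. covariant bridge): keep as fallback
    }
  }
  return fallback;  // null if only miranda methods matched
}
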
+
+jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaThis,
+ jboolean publicOnly) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<5> hs(soa.Self());
+ auto* klass = DecodeClass(soa, javaThis);
+ auto virtual_methods = hs.NewHandle(klass->GetVirtualMethods());
+ auto direct_methods = hs.NewHandle(klass->GetDirectMethods());
+ size_t num_methods = 0;
+ if (virtual_methods.Get() != nullptr) {
+ for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) {
+ auto* m = virtual_methods->GetWithoutChecks(i);
+ auto modifiers = m->GetAccessFlags();
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccMiranda) == 0) {
+ ++num_methods;
+ }
+ }
+ }
+ if (direct_methods.Get() != nullptr) {
+ for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) {
+ auto* m = direct_methods->GetWithoutChecks(i);
+ auto modifiers = m->GetAccessFlags();
+ // Add non-constructor direct/static methods.
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccConstructor) == 0) {
+ ++num_methods;
+ }
+ }
+ }
+ auto ret = hs.NewHandle(mirror::ObjectArray<mirror::Method>::Alloc(
+ soa.Self(), mirror::Method::ArrayClass(), num_methods));
+ num_methods = 0;
+ if (virtual_methods.Get() != nullptr) {
+ for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) {
+ auto* m = virtual_methods->GetWithoutChecks(i);
+ auto modifiers = m->GetAccessFlags();
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccMiranda) == 0) {
+ auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), m);
+ if (method == nullptr) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ ret->SetWithoutChecks<false>(num_methods++, method);
+ }
+ }
+ }
+ if (direct_methods.Get() != nullptr) {
+ for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) {
+ auto* m = direct_methods->GetWithoutChecks(i);
+ auto modifiers = m->GetAccessFlags();
+ // Add non-constructor direct/static methods.
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccConstructor) == 0) {
+ auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), m);
+ if (method == nullptr) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ ret->SetWithoutChecks<false>(num_methods++, method);
+ }
+ }
+ }
+ return soa.AddLocalReference<jobjectArray>(ret.Get());
+}
+
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Class, classForName, "!(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;"),
- NATIVE_METHOD(Class, findOverriddenMethodIfProxy,
- "!(Ljava/lang/reflect/ArtMethod;)Ljava/lang/reflect/ArtMethod;"),
+ NATIVE_METHOD(Class, classForName,
+ "!(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;"),
+ NATIVE_METHOD(Class, getDeclaredConstructorInternal,
+ "!([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;"),
+ NATIVE_METHOD(Class, getDeclaredConstructorsInternal, "!(Z)[Ljava/lang/reflect/Constructor;"),
+ NATIVE_METHOD(Class, getDeclaredField, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getDeclaredFieldInternal, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getDeclaredFieldsUnchecked, "!(Z)[Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getDeclaredMethodInternal,
+ "!(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;"),
+ NATIVE_METHOD(Class, getDeclaredMethodsUnchecked,
+ "!(Z)[Ljava/lang/reflect/Method;"),
NATIVE_METHOD(Class, getNameNative, "!()Ljava/lang/String;"),
NATIVE_METHOD(Class, getProxyInterfaces, "!()[Ljava/lang/Class;"),
- NATIVE_METHOD(Class, getDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
NATIVE_METHOD(Class, getPublicDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
- NATIVE_METHOD(Class, getDeclaredFieldsUnchecked, "!(Z)[Ljava/lang/reflect/Field;"),
- NATIVE_METHOD(Class, getDeclaredFieldInternal, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
- NATIVE_METHOD(Class, getDeclaredField, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
};
void register_java_lang_Class(JNIEnv* env) {
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index 1198c2e9ba..b9f8d01405 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -31,14 +31,14 @@ static jobject DexCache_getDexNative(JNIEnv* env, jobject javaDexCache) {
// Should only be called while holding the lock on the dex cache.
DCHECK_EQ(dex_cache->GetLockOwnerThreadId(), soa.Self()->GetThreadId());
const DexFile* dex_file = dex_cache->GetDexFile();
- if (dex_file == NULL) {
- return NULL;
+ if (dex_file == nullptr) {
+ return nullptr;
}
void* address = const_cast<void*>(reinterpret_cast<const void*>(dex_file->Begin()));
jobject byte_buffer = env->NewDirectByteBuffer(address, dex_file->Size());
- if (byte_buffer == NULL) {
+ if (byte_buffer == nullptr) {
DCHECK(soa.Self()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
jvalue args[1];
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 6afe83bbb0..2d153d4490 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -28,7 +28,7 @@ namespace art {
static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) {
ScopedFastNativeObjectAccess soa(env);
- if (UNLIKELY(javaRhs == NULL)) {
+ if (UNLIKELY(javaRhs == nullptr)) {
ThrowNullPointerException("rhs == null");
return -1;
} else {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index d3b52ba47d..be7022e281 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -43,7 +43,7 @@ static jboolean Thread_isInterrupted(JNIEnv* env, jobject java_thread) {
ScopedFastNativeObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- return (thread != NULL) ? thread->IsInterrupted() : JNI_FALSE;
+ return (thread != nullptr) ? thread->IsInterrupted() : JNI_FALSE;
}
static void Thread_nativeCreate(JNIEnv* env, jclass, jobject java_thread, jlong stack_size,
@@ -64,7 +64,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha
ThreadState internal_thread_state = (has_been_started ? kTerminated : kStarting);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- if (thread != NULL) {
+ if (thread != nullptr) {
internal_thread_state = thread->GetState();
}
switch (internal_thread_state) {
@@ -99,7 +99,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha
static jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject java_thread, jobject java_object) {
ScopedObjectAccess soa(env);
mirror::Object* object = soa.Decode<mirror::Object*>(java_object);
- if (object == NULL) {
+ if (object == nullptr) {
ThrowNullPointerException("object == null");
return JNI_FALSE;
}
@@ -112,7 +112,7 @@ static void Thread_nativeInterrupt(JNIEnv* env, jobject java_thread) {
ScopedFastNativeObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- if (thread != NULL) {
+ if (thread != nullptr) {
thread->Interrupt(soa.Self());
}
}
@@ -133,7 +133,7 @@ static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) {
bool timed_out;
// Take suspend thread lock to avoid races with threads trying to suspend this one.
Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
- if (thread != NULL) {
+ if (thread != nullptr) {
{
ScopedObjectAccess soa(env);
thread->SetThreadName(name.c_str());
@@ -154,7 +154,7 @@ static void Thread_nativeSetPriority(JNIEnv* env, jobject java_thread, jint new_
ScopedObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- if (thread != NULL) {
+ if (thread != nullptr) {
thread->SetNativePriority(new_priority);
}
}
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index eddd7def10..beb953bd1b 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -27,13 +27,14 @@
namespace art {
-static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) {
+static jobject Array_createMultiArray(
+ JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) {
ScopedFastNativeObjectAccess soa(env);
- DCHECK(javaElementClass != NULL);
+ DCHECK(javaElementClass != nullptr);
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> element_class(hs.NewHandle(soa.Decode<mirror::Class*>(javaElementClass)));
DCHECK(element_class->IsClass());
- DCHECK(javaDimArray != NULL);
+ DCHECK(javaDimArray != nullptr);
mirror::Object* dimensions_obj = soa.Decode<mirror::Object*>(javaDimArray);
DCHECK(dimensions_obj->IsArrayInstance());
DCHECK_EQ(dimensions_obj->GetClass()->GetComponentType()->GetPrimitiveType(),
@@ -47,18 +48,18 @@ static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementCla
static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementClass, jint length) {
ScopedFastNativeObjectAccess soa(env);
- DCHECK(javaElementClass != NULL);
+ DCHECK(javaElementClass != nullptr);
if (UNLIKELY(length < 0)) {
ThrowNegativeArraySizeException(length);
- return NULL;
+ return nullptr;
}
mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
mirror::Class* array_class = class_linker->FindArrayClass(soa.Self(), &element_class);
- if (UNLIKELY(array_class == NULL)) {
+ if (UNLIKELY(array_class == nullptr)) {
CHECK(soa.Self()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
DCHECK(array_class->IsObjectArrayClass());
mirror::Array* new_array = mirror::ObjectArray<mirror::Object*>::Alloc(
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 5e1a4c59f7..c33f81a211 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -21,6 +21,7 @@
#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "reflection.h"
#include "scoped_fast_native_object_access.h"
@@ -28,17 +29,10 @@
namespace art {
-/*
- * We get here through Constructor.newInstance(). The Constructor object
- * would not be available if the constructor weren't public (per the
- * definition of Class.getConstructor), so we can skip the method access
- * check. We can also safely assume the constructor isn't associated
- * with an interface, array, or primitive class.
- */
-static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs,
- jboolean accessible) {
+static ALWAYS_INLINE inline jobject NewInstanceHelper(
+ JNIEnv* env, jobject javaMethod, jobjectArray javaArgs, size_t num_frames) {
ScopedFastNativeObjectAccess soa(env);
- mirror::ArtMethod* m = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod);
+ mirror::Method* m = soa.Decode<mirror::Method*>(javaMethod);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Class> c(hs.NewHandle(m->GetDeclaringClass()));
if (UNLIKELY(c->IsAbstract())) {
@@ -67,14 +61,31 @@ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectA
}
jobject javaReceiver = soa.AddLocalReference<jobject>(receiver);
- InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, (accessible == JNI_TRUE));
+ InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, num_frames);
// Constructors are ()V methods, so we shouldn't touch the result of InvokeMethod.
return javaReceiver;
}
+/*
+ * We get here through Constructor.newInstance(). The Constructor object
+ * would not be available if the constructor weren't public (per the
+ * definition of Class.getConstructor), so we can skip the method access
+ * check. We can also safely assume the constructor isn't associated
+ * with an interface, array, or primitive class.
+ */
+static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs) {
+ return NewInstanceHelper(env, javaMethod, javaArgs, 1);
+}
+
+static jobject Constructor_newInstanceTwoFrames(JNIEnv* env, jobject javaMethod,
+ jobjectArray javaArgs) {
+ return NewInstanceHelper(env, javaMethod, javaArgs, 2);
+}
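
A minimal sketch of why the two entry points differ only in a frame count: the access check in InvokeMethod must identify the class that called into reflection, so it skips a known number of reflection frames first. The stack layout here is an illustrative assumption (newInstanceTwoFrames presumably accounts for one extra Java-level wrapper frame between the user code and the native entry):

#include <cstddef>
#include <string>
#include <vector>

// stack[0] is the reflective entry point (e.g. Constructor.newInstance);
// the caller whose access is being checked sits num_frames above it.
const std::string* CallingFrame(const std::vector<std::string>& stack,
                                size_t num_frames) {
  return num_frames < stack.size() ? &stack[num_frames] : nullptr;
}
// newInstance:          CallingFrame(stack, 1)  // direct caller
// newInstanceTwoFrames: CallingFrame(stack, 2)  // skip one wrapper frame
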
+
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Constructor, newInstance, "!([Ljava/lang/Object;Z)Ljava/lang/Object;"),
+ NATIVE_METHOD(Constructor, newInstance, "!([Ljava/lang/Object;)Ljava/lang/Object;"),
+ NATIVE_METHOD(Constructor, newInstanceTwoFrames, "!([Ljava/lang/Object;)Ljava/lang/Object;"),
};
void register_java_lang_reflect_Constructor(JNIEnv* env) {
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 9859746563..c20d83245c 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -30,9 +30,9 @@
namespace art {
static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver,
- jobject javaArgs, jboolean accessible) {
+ jobject javaArgs) {
ScopedFastNativeObjectAccess soa(env);
- return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, (accessible == JNI_TRUE));
+ return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs);
}
static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) {
@@ -55,7 +55,7 @@ static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) {
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Method, invoke, "!(Ljava/lang/Object;[Ljava/lang/Object;Z)Ljava/lang/Object;"),
+ NATIVE_METHOD(Method, invoke, "!(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;"),
NATIVE_METHOD(Method, getExceptionTypesNative, "!()[Ljava/lang/Class;"),
};
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index baf8b24207..4a6ab404f2 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -30,13 +30,12 @@ static jclass Proxy_generateProxy(JNIEnv* env, jclass, jstring name, jobjectArra
jobject loader, jobjectArray methods, jobjectArray throws) {
ScopedFastNativeObjectAccess soa(env);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::Class* result = class_linker->CreateProxyClass(soa, name, interfaces, loader, methods,
- throws);
- return soa.AddLocalReference<jclass>(result);
+ return soa.AddLocalReference<jclass>(class_linker->CreateProxyClass(
+ soa, name, interfaces, loader, methods, throws));
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Proxy, generateProxy, "!(Ljava/lang/String;[Ljava/lang/Class;Ljava/lang/ClassLoader;[Ljava/lang/reflect/ArtMethod;[[Ljava/lang/Class;)Ljava/lang/Class;"),
+ NATIVE_METHOD(Proxy, generateProxy, "!(Ljava/lang/String;[Ljava/lang/Class;Ljava/lang/ClassLoader;[Ljava/lang/reflect/Method;[[Ljava/lang/Class;)Ljava/lang/Class;"),
};
void register_java_lang_reflect_Proxy(JNIEnv* env) {
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 987427ea79..b96ddc8102 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -43,7 +43,7 @@ static jboolean DdmVmInternal_getRecentAllocationStatus(JNIEnv*, jclass) {
/*
* Get a stack trace as an array of StackTraceElement objects. Returns
- * NULL on failure, e.g. if the threadId couldn't be found.
+ * null on failure, e.g. if the threadId couldn't be found.
*/
static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) {
jobjectArray trace = nullptr;
@@ -145,7 +145,7 @@ static jbyteArray DdmVmInternal_getThreadStats(JNIEnv* env, jclass) {
}
jbyteArray result = env->NewByteArray(bytes.size());
- if (result != NULL) {
+ if (result != nullptr) {
env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
}
return result;
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index a851f21e29..632ccdedc0 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -27,20 +27,20 @@ class Thread;
// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct NthCallerVisitor : public StackVisitor {
NthCallerVisitor(Thread* thread, size_t n_in, bool include_runtime_and_upcalls = false)
- : StackVisitor(thread, NULL), n(n_in),
- include_runtime_and_upcalls_(include_runtime_and_upcalls), count(0), caller(NULL) {}
+ : StackVisitor(thread, nullptr), n(n_in),
+ include_runtime_and_upcalls_(include_runtime_and_upcalls), count(0), caller(nullptr) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
bool do_count = false;
- if (m == NULL || m->IsRuntimeMethod()) {
+ if (m == nullptr || m->IsRuntimeMethod()) {
// Upcall.
do_count = include_runtime_and_upcalls_;
} else {
do_count = true;
}
if (do_count) {
- DCHECK(caller == NULL);
+ DCHECK(caller == nullptr);
if (count == n) {
caller = m;
return false;
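
The counting logic of NthCallerVisitor can be sketched over a flat stack, with plain strings standing in for ArtMethod; the Frame type below is an assumption for the sketch:

#include <cstddef>
#include <string>
#include <vector>

struct Frame { std::string method; bool is_runtime; };

const Frame* NthCaller(const std::vector<Frame>& stack, size_t n,
                       bool include_runtime_and_upcalls = false) {
  size_t count = 0;
  for (const Frame& f : stack) {
    if (f.is_runtime && !include_runtime_and_upcalls) {
      continue;  // upcall or runtime method: not counted by default
    }
    if (count == n) {
      return &f;  // found the n'th counted frame
    }
    ++count;
  }
  return nullptr;  // fewer than n+1 counted frames on the stack
}
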
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index eddbd8ad03..b0cbd0e3e2 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -497,7 +497,7 @@ const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
MutexLock mu(Thread::Current(), secondary_lookup_lock_);
auto secondary_lb = secondary_oat_dex_files_.lower_bound(key);
if (secondary_lb != secondary_oat_dex_files_.end() && key == secondary_lb->first) {
- oat_dex_file = secondary_lb->second; // May be nullptr.
+ oat_dex_file = secondary_lb->second; // May be null.
} else {
// We haven't seen this dex_location before, we must check the canonical location.
std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
@@ -506,8 +506,8 @@ const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
auto canonical_it = oat_dex_files_.find(canonical_key);
if (canonical_it != oat_dex_files_.end()) {
oat_dex_file = canonical_it->second;
- } // else keep nullptr.
- } // else keep nullptr.
+ } // else keep null.
+ } // else keep null.
// Copy the key to the string_cache_ and store the result in secondary map.
string_cache_.emplace_back(key.data(), key.length());
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 42c60dca9c..b32dd22490 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -48,7 +48,7 @@ class OatFile FINAL {
static OatFile* OpenWithElfFile(ElfFile* elf_file, const std::string& location,
const char* abs_dex_location,
std::string* error_msg);
- // Open an oat file. Returns NULL on failure. Requested base can
+ // Open an oat file. Returns null on failure. Requested base can
// optionally be used to request where the file should be loaded.
// See the ResolveRelativeEncodedDexLocation for a description of how the
// abs_dex_location argument is used.
@@ -149,7 +149,7 @@ class OatFile FINAL {
template<class T>
T GetOatPointer(uint32_t offset) const {
if (offset == 0) {
- return NULL;
+ return nullptr;
}
return reinterpret_cast<T>(begin_ + offset);
}
@@ -177,7 +177,7 @@ class OatFile FINAL {
const OatMethod GetOatMethod(uint32_t method_index) const;
// Return a pointer to the OatMethodOffsets for the requested
- // method_index, or nullptr if none is present. Note that most
+ // method_index, or null if none is present. Note that most
// callers should use GetOatMethod.
const OatMethodOffsets* GetOatMethodOffsets(uint32_t method_index) const;
@@ -238,7 +238,7 @@ class OatFile FINAL {
// Returns the absolute dex location for the encoded relative dex location.
//
- // If not nullptr, abs_dex_location is used to resolve the absolute dex
+ // If not null, abs_dex_location is used to resolve the absolute dex
// location of relative dex locations encoded in the oat file.
// For example, given absolute location "/data/app/foo/base.apk", encoded
// dex locations "base.apk", "base.apk:classes2.dex", etc. would be resolved
@@ -300,10 +300,10 @@ class OatFile FINAL {
// Pointer to end of oat region for bounds checking.
const uint8_t* end_;
- // Pointer to the .bss section, if present, otherwise nullptr.
+ // Pointer to the .bss section, if present, otherwise null.
const uint8_t* bss_begin_;
- // Pointer to the end of the .bss section, if present, otherwise nullptr.
+ // Pointer to the end of the .bss section, if present, otherwise null.
const uint8_t* bss_end_;
// Was this oat_file loaded executable?
@@ -331,7 +331,7 @@ class OatFile FINAL {
// Map each location and canonical location (if different) retrieved from the
// oat file to its OatDexFile. This map doesn't change after it's constructed in Setup()
// and therefore doesn't need any locking and provides the cheapest dex file lookup
- // for GetOatDexFile() for a very frequent use case. Never contains a nullptr value.
+ // for GetOatDexFile() for a very frequent use case. Never contains a null value.
Table oat_dex_files_;
// Lock guarding all members needed for secondary lookup in GetOatDexFile().
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index e5c27b2430..37e85ab373 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -233,7 +233,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileAssistant::LoadDexFiles(
for (int i = 1; ; i++) {
std::string secondary_dex_location = DexFile::GetMultiDexClassesDexName(i, dex_location);
oat_dex_file = oat_file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
- if (oat_dex_file == NULL) {
+ if (oat_dex_file == nullptr) {
// There are no more secondary dex files to load.
break;
}
@@ -393,12 +393,12 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile&
bool OatFileAssistant::GivenOatFileIsOutOfDate(const OatFile& file) {
// Verify the dex checksum.
- // Note: GetOatDexFile will return NULL if the dex checksum doesn't match
+ // Note: GetOatDexFile will return null if the dex checksum doesn't match
// what we provide, which verifies the primary dex checksum for us.
const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum();
const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(
dex_location_, dex_checksum_pointer, false);
- if (oat_dex_file == NULL) {
+ if (oat_dex_file == nullptr) {
return true;
}
@@ -408,7 +408,7 @@ bool OatFileAssistant::GivenOatFileIsOutOfDate(const OatFile& file) {
= DexFile::GetMultiDexClassesDexName(i, dex_location_);
const OatFile::OatDexFile* secondary_oat_dex_file
= file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
- if (secondary_oat_dex_file == NULL) {
+ if (secondary_oat_dex_file == nullptr) {
// There are no more secondary dex files to check.
break;
}
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 9e7c2efc45..a25ee31d0d 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -85,7 +85,7 @@ class OatFileAssistant {
// Constructs an OatFileAssistant object to assist the oat file
// corresponding to the given dex location with the target instruction set.
//
- // The dex_location must not be NULL and should remain available and
+ // The dex_location must not be null and should remain available and
// unchanged for the duration of the lifetime of the OatFileAssistant object.
// Typically the dex_location is the absolute path to the original,
// un-optimized dex file.
@@ -152,11 +152,11 @@ class OatFileAssistant {
// Returns true on success.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was failure. error_msg must not be null.
bool MakeUpToDate(std::string* error_msg);
// Returns an oat file that can be used for loading dex files.
- // Returns nullptr if no suitable oat file was found.
+ // Returns null if no suitable oat file was found.
//
// After this call, no other methods of the OatFileAssistant should be
// called, because access to the loaded oat file has been taken away from
@@ -244,7 +244,7 @@ class OatFileAssistant {
// This will fail if dex2oat is not enabled in the current runtime.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was failure. error_msg must not be null.
bool RelocateOatFile(const std::string* input_file, std::string* error_msg);
// Generate the oat file from the dex file.
@@ -254,7 +254,7 @@ class OatFileAssistant {
// This will fail if dex2oat is not enabled in the current runtime.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was failure. error_msg must not be null.
bool GenerateOatFile(std::string* error_msg);
// Executes dex2oat using the current runtime configuration overridden with
@@ -263,7 +263,7 @@ class OatFileAssistant {
// Returns true on success.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was failure. error_msg must not be null.
//
// TODO: The OatFileAssistant probably isn't the right place to have this
// function.
@@ -310,12 +310,12 @@ class OatFileAssistant {
// Gets the dex checksum required for an up-to-date oat file.
// Returns dex_checksum if a required checksum was located. Returns
- // nullptr if the required checksum was not found.
+ // null if the required checksum was not found.
// The caller shouldn't clean up or free the returned pointer.
const uint32_t* GetRequiredDexChecksum();
// Returns the loaded odex file.
- // Loads the file if needed. Returns nullptr if the file failed to load.
+ // Loads the file if needed. Returns null if the file failed to load.
// The caller shouldn't clean up or free the returned pointer.
const OatFile* GetOdexFile();
@@ -324,7 +324,7 @@ class OatFileAssistant {
void ClearOdexFileCache();
// Returns the loaded oat file.
- // Loads the file if needed. Returns nullptr if the file failed to load.
+ // Loads the file if needed. Returns null if the file failed to load.
// The caller shouldn't clean up or free the returned pointer.
const OatFile* GetOatFile();
@@ -333,19 +333,19 @@ class OatFileAssistant {
void ClearOatFileCache();
// Returns the loaded image info.
- // Loads the image info if needed. Returns nullptr if the image info failed
+ // Loads the image info if needed. Returns null if the image info failed
// to load.
// The caller shouldn't clean up or free the returned pointer.
const ImageInfo* GetImageInfo();
// Returns the loaded profile.
- // Loads the profile if needed. Returns nullptr if the profile failed
+ // Loads the profile if needed. Returns null if the profile failed
// to load.
// The caller shouldn't clean up or free the returned pointer.
ProfileFile* GetProfile();
// Returns the loaded old profile.
- // Loads the old profile if needed. Returns nullptr if the old profile
+ // Loads the old profile if needed. Returns null if the old profile
// failed to load.
// The caller shouldn't clean up or free the returned pointer.
ProfileFile* GetOldProfile();
@@ -357,7 +357,7 @@ class OatFileAssistant {
ScopedFlock flock_;
// In a properly constructed OatFileAssistant object, dex_location_ should
- // never be nullptr.
+ // never be null.
const char* dex_location_ = nullptr;
// In a properly constructed OatFileAssistant object, isa_ should be either
@@ -365,7 +365,7 @@ class OatFileAssistant {
const InstructionSet isa_ = kNone;
// The package name, used solely to find the profile file.
- // This may be nullptr in a properly constructed object. In this case,
+ // This may be null in a properly constructed object. In this case,
// profile_load_attempted_ and old_profile_load_attempted_ will be true, and
// profile_load_succeeded_ and old_profile_load_succeeded_ will be false.
const char* package_name_ = nullptr;
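
The accessor contract documented above — load on first use, cache the result (possibly null), never hand ownership to the caller, allow explicit invalidation — amounts to a load-once pattern. A minimal standalone sketch, with a string standing in for the loaded file and a stubbed Load():

#include <memory>
#include <string>

class LazyFile {
 public:
  const std::string* Get() {
    if (!load_attempted_) {
      load_attempted_ = true;
      cached_ = Load();  // may yield null on failure; failure is remembered
    }
    return cached_.get();  // caller must not free or clean this up
  }
  void ClearCache() {
    cached_.reset();
    load_attempted_ = false;  // next Get() retries the load
  }
 private:
  std::unique_ptr<std::string> Load() { return nullptr; }  // stub for the sketch
  bool load_attempted_ = false;
  std::unique_ptr<std::string> cached_;
};
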
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 0c942d22ed..3f6b2d2cc6 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -118,7 +118,7 @@ class OatFileAssistantTest : public CommonRuntimeTest {
std::string GetImageDirectory() {
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
- CHECK(host_dir != NULL);
+ CHECK(host_dir != nullptr);
return std::string(host_dir) + "/framework";
} else {
return std::string("/data/art-test");
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index cf81cc5093..8e99dbb286 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -41,9 +41,10 @@ typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED;
typedef void (MarkHeapReferenceCallback)(mirror::HeapReference<mirror::Object>* ref, void* arg);
-typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref, void* arg);
+typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref,
+ void* arg);
-// A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
+// A callback for testing if an object is marked, returns null if not marked, otherwise the new
// address the object (if the object didn't move, returns the object input parameter).
typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED;
diff --git a/runtime/os_linux.cc b/runtime/os_linux.cc
index e4403d7932..22827891b0 100644
--- a/runtime/os_linux.cc
+++ b/runtime/os_linux.cc
@@ -40,10 +40,10 @@ File* OS::CreateEmptyFile(const char* name) {
}
File* OS::OpenFileWithFlags(const char* name, int flags) {
- CHECK(name != NULL);
+ CHECK(name != nullptr);
std::unique_ptr<File> file(new File);
if (!file->Open(name, flags, 0666)) {
- return NULL;
+ return nullptr;
}
return file.release();
}
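
OpenFileWithFlags uses the usual unique_ptr ownership-transfer idiom: own the object until every step succeeds, then release it to the caller. A self-contained sketch with a stub File type (not art::File):

#include <memory>

struct File {
  bool Open(const char* /*name*/, int /*flags*/, int /*mode*/) { return true; }  // stub
};

File* OpenFileOrNull(const char* name, int flags) {
  std::unique_ptr<File> file(new File);
  if (!file->Open(name, flags, 0666)) {
    return nullptr;  // unique_ptr destroys the File on this failure path
  }
  return file.release();  // success: the caller takes ownership
}
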
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 620a4bd220..0bc834f67b 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -324,7 +324,7 @@ bool ParsedOptions::ProcessSpecialOptions(const RuntimeOptions& options,
} else if (option == "vfprintf") {
const void* hook = options[i].second;
if (hook == nullptr) {
- Usage("vfprintf argument was NULL");
+ Usage("vfprintf argument was nullptr");
return false;
}
int (*hook_vfprintf)(FILE *, const char*, va_list) =
@@ -337,7 +337,7 @@ bool ParsedOptions::ProcessSpecialOptions(const RuntimeOptions& options,
} else if (option == "exit") {
const void* hook = options[i].second;
if (hook == nullptr) {
- Usage("exit argument was NULL");
+ Usage("exit argument was nullptr");
return false;
}
void(*hook_exit)(jint) = reinterpret_cast<void(*)(jint)>(const_cast<void*>(hook));
@@ -348,7 +348,7 @@ bool ParsedOptions::ProcessSpecialOptions(const RuntimeOptions& options,
} else if (option == "abort") {
const void* hook = options[i].second;
if (hook == nullptr) {
- Usage("abort was NULL\n");
+ Usage("abort was nullptr\n");
return false;
}
void(*hook_abort)() = reinterpret_cast<void(*)()>(const_cast<void*>(hook));
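
For context, these three hooks arrive through the standard JNI invocation API: "vfprintf", "exit", and "abort" are reserved option strings whose extraInfo field carries the function pointer being validated above. A sketch of installing one (requires a JDK/NDK jni.h):

#include <cstdarg>
#include <cstdio>
#include <jni.h>

static jint MyVfprintf(FILE* stream, const char* format, va_list args) {
  return vfprintf(stream, format, args);  // forward runtime output to stdio
}

void InstallVfprintfHook(JavaVMOption* option) {
  option->optionString = const_cast<char*>("vfprintf");
  option->extraInfo = reinterpret_cast<void*>(MyVfprintf);
}
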
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 658b656796..a8575de425 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -33,7 +33,6 @@ TEST_F(ParsedOptionsTest, ParsedOptions) {
void* test_vfprintf = reinterpret_cast<void*>(0xa);
void* test_abort = reinterpret_cast<void*>(0xb);
void* test_exit = reinterpret_cast<void*>(0xc);
- void* null = reinterpret_cast<void*>(NULL);
std::string lib_core(CommonRuntimeTest::GetLibCoreDexFileName());
@@ -42,27 +41,27 @@ TEST_F(ParsedOptionsTest, ParsedOptions) {
boot_class_path += lib_core;
RuntimeOptions options;
- options.push_back(std::make_pair(boot_class_path.c_str(), null));
- options.push_back(std::make_pair("-classpath", null));
- options.push_back(std::make_pair(lib_core.c_str(), null));
- options.push_back(std::make_pair("-cp", null));
- options.push_back(std::make_pair(lib_core.c_str(), null));
- options.push_back(std::make_pair("-Ximage:boot_image", null));
- options.push_back(std::make_pair("-Xcheck:jni", null));
- options.push_back(std::make_pair("-Xms2048", null));
- options.push_back(std::make_pair("-Xmx4k", null));
- options.push_back(std::make_pair("-Xss1m", null));
- options.push_back(std::make_pair("-XX:HeapTargetUtilization=0.75", null));
- options.push_back(std::make_pair("-Dfoo=bar", null));
- options.push_back(std::make_pair("-Dbaz=qux", null));
- options.push_back(std::make_pair("-verbose:gc,class,jni", null));
+ options.push_back(std::make_pair(boot_class_path.c_str(), nullptr));
+ options.push_back(std::make_pair("-classpath", nullptr));
+ options.push_back(std::make_pair(lib_core.c_str(), nullptr));
+ options.push_back(std::make_pair("-cp", nullptr));
+ options.push_back(std::make_pair(lib_core.c_str(), nullptr));
+ options.push_back(std::make_pair("-Ximage:boot_image", nullptr));
+ options.push_back(std::make_pair("-Xcheck:jni", nullptr));
+ options.push_back(std::make_pair("-Xms2048", nullptr));
+ options.push_back(std::make_pair("-Xmx4k", nullptr));
+ options.push_back(std::make_pair("-Xss1m", nullptr));
+ options.push_back(std::make_pair("-XX:HeapTargetUtilization=0.75", nullptr));
+ options.push_back(std::make_pair("-Dfoo=bar", nullptr));
+ options.push_back(std::make_pair("-Dbaz=qux", nullptr));
+ options.push_back(std::make_pair("-verbose:gc,class,jni", nullptr));
options.push_back(std::make_pair("vfprintf", test_vfprintf));
options.push_back(std::make_pair("abort", test_abort));
options.push_back(std::make_pair("exit", test_exit));
RuntimeArgumentMap map;
std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, &map));
- ASSERT_TRUE(parsed.get() != NULL);
+ ASSERT_TRUE(parsed.get() != nullptr);
ASSERT_NE(0u, map.Size());
using Opt = RuntimeArgumentMap;
@@ -104,7 +103,7 @@ TEST_F(ParsedOptionsTest, ParsedOptionsGc) {
RuntimeArgumentMap map;
std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, &map));
- ASSERT_TRUE(parsed.get() != NULL);
+ ASSERT_TRUE(parsed.get() != nullptr);
ASSERT_NE(0u, map.Size());
using Opt = RuntimeArgumentMap;
diff --git a/runtime/prebuilt_tools_test.cc b/runtime/prebuilt_tools_test.cc
new file mode 100644
index 0000000000..53bc87665a
--- /dev/null
+++ b/runtime/prebuilt_tools_test.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_runtime_test.h"
+
+#include <cstdio>
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+// Run the tests only on host.
+#ifndef HAVE_ANDROID_OS
+
+class PrebuiltToolsTest : public CommonRuntimeTest {
+};
+
+static void CheckToolsExist(const std::string& tools_dir) {
+ const char* tools[] { "as", "objcopy", "objdump" }; // NOLINT
+ for (const char* tool : tools) {
+ struct stat exec_st;
+ std::string exec_path = tools_dir + tool;
+ if (stat(exec_path.c_str(), &exec_st) != 0) {
+ ADD_FAILURE() << "Can not find " << tool << " in " << tools_dir;
+ }
+ }
+}
+
+TEST_F(PrebuiltToolsTest, CheckHostTools) {
+ std::string tools_dir = GetAndroidHostToolsDir();
+ if (tools_dir.empty()) {
+ ADD_FAILURE() << "Can not find Android tools directory for host";
+ } else {
+ CheckToolsExist(tools_dir);
+ }
+}
+
+TEST_F(PrebuiltToolsTest, CheckTargetTools) {
+ // Other prebuilts are missing from the build server's repo manifest.
+ InstructionSet isas[] = { kThumb2 }; // NOLINT
+ for (InstructionSet isa : isas) {
+ std::string tools_dir = GetAndroidTargetToolsDir(isa);
+ if (tools_dir.empty()) {
+ ADD_FAILURE() << "Can not find Android tools directory for " << isa;
+ } else {
+ CheckToolsExist(tools_dir);
+ }
+ }
+}
+
+#endif // HAVE_ANDROID_OS
+
+} // namespace art
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 38184871af..0ac5f40d55 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -132,7 +132,7 @@ class Primitive {
return "V";
default:
LOG(FATAL) << "Primitive char conversion on invalid type " << static_cast<int>(type);
- return NULL;
+ return nullptr;
}
}
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index db372c3277..90a47b38c2 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -58,7 +58,7 @@ class BoundedStackVisitor : public StackVisitor {
BoundedStackVisitor(std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack,
Thread* thread, uint32_t max_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, NULL), stack_(stack), max_depth_(max_depth), depth_(0) {
+ : StackVisitor(thread, nullptr), stack_(stack), max_depth_(max_depth), depth_(0) {
}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -399,7 +399,7 @@ BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(
bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) {
if (method == nullptr) {
profile_table_.NullMethod();
- // Don't record a nullptr method.
+ // Don't record a null method.
return false;
}
@@ -820,7 +820,7 @@ bool ProfileFile::LoadFile(const std::string& fileName) {
// Bad summary info. It should be total/null/boot.
return false;
}
- // This is the number of hits in all profiled methods (without nullptr or boot methods)
+ // This is the number of hits in all profiled methods (without null or boot methods)
uint32_t total_count = strtoul(summary_info[0].c_str(), nullptr, 10);
// Now read each line until the end of file. Each line consists of 3 fields separated by '/'.
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 6061f73c19..b471293c09 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -20,6 +20,7 @@
#include "art_field-inl.h"
#include "class_linker-inl.h"
#include "common_compiler_test.h"
+#include "mirror/method.h"
#include "scoped_thread_state_change.h"
namespace art {
@@ -53,41 +54,34 @@ class ProxyTest : public CommonCompilerTest {
mirror::ObjectArray<mirror::ArtMethod>* virtual_methods = interface->GetVirtualMethods();
methods_count += (virtual_methods == nullptr) ? 0 : virtual_methods->GetLength();
}
- jclass javaLangReflectArtMethod =
- soa.AddLocalReference<jclass>(mirror::ArtMethod::GetJavaLangReflectArtMethod());
- jobjectArray proxyClassMethods = soa.Env()->NewObjectArray(methods_count,
- javaLangReflectArtMethod, nullptr);
+ jobjectArray proxyClassMethods = soa.Env()->NewObjectArray(
+ methods_count, soa.AddLocalReference<jclass>(mirror::Method::StaticClass()), nullptr);
soa.Self()->AssertNoPendingException();
- // Fill the method array
- mirror::ArtMethod* equalsMethod = javaLangObject->FindDeclaredVirtualMethod("equals",
- "(Ljava/lang/Object;)Z");
- mirror::ArtMethod* hashCodeMethod = javaLangObject->FindDeclaredVirtualMethod("hashCode",
- "()I");
- mirror::ArtMethod* toStringMethod = javaLangObject->FindDeclaredVirtualMethod("toString",
- "()Ljava/lang/String;");
- CHECK(equalsMethod != nullptr);
- CHECK(hashCodeMethod != nullptr);
- CHECK(toStringMethod != nullptr);
-
jsize array_index = 0;
- // Adds Object methods.
- soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
- soa.AddLocalReference<jobject>(equalsMethod));
- soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
- soa.AddLocalReference<jobject>(hashCodeMethod));
- soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
- soa.AddLocalReference<jobject>(toStringMethod));
-
+ // Fill the method array
+ mirror::ArtMethod* method = javaLangObject->FindDeclaredVirtualMethod(
+ "equals", "(Ljava/lang/Object;)Z");
+ CHECK(method != nullptr);
+ soa.Env()->SetObjectArrayElement(
+ proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
+ mirror::Method::CreateFromArtMethod(soa.Self(), method)));
+ method = javaLangObject->FindDeclaredVirtualMethod("hashCode", "()I");
+ CHECK(method != nullptr);
+ soa.Env()->SetObjectArrayElement(
+ proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
+ mirror::Method::CreateFromArtMethod(soa.Self(), method)));
+ method = javaLangObject->FindDeclaredVirtualMethod("toString", "()Ljava/lang/String;");
+ CHECK(method != nullptr);
+ soa.Env()->SetObjectArrayElement(
+ proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
+ mirror::Method::CreateFromArtMethod(soa.Self(), method)));
// Now adds all interfaces virtual methods.
for (mirror::Class* interface : interfaces) {
- mirror::ObjectArray<mirror::ArtMethod>* virtual_methods = interface->GetVirtualMethods();
- if (virtual_methods != nullptr) {
- for (int32_t mth_index = 0; mth_index < virtual_methods->GetLength(); ++mth_index) {
- mirror::ArtMethod* method = virtual_methods->Get(mth_index);
- soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
- soa.AddLocalReference<jobject>(method));
- }
+ for (int32_t i = 0, count = interface->NumVirtualMethods(); i < count; ++i) {
+ soa.Env()->SetObjectArrayElement(
+ proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
+ mirror::Method::CreateFromArtMethod(soa.Self(), interface->GetVirtualMethod(i))));
}
}
CHECK_EQ(array_index, methods_count);
@@ -96,10 +90,9 @@ class ProxyTest : public CommonCompilerTest {
jobjectArray proxyClassThrows = soa.Env()->NewObjectArray(0, javaLangClass, nullptr);
soa.Self()->AssertNoPendingException();
- mirror::Class* proxyClass = class_linker_->CreateProxyClass(soa,
- soa.Env()->NewStringUTF(className),
- proxyClassInterfaces, jclass_loader,
- proxyClassMethods, proxyClassThrows);
+ mirror::Class* proxyClass = class_linker_->CreateProxyClass(
+ soa, soa.Env()->NewStringUTF(className), proxyClassInterfaces, jclass_loader,
+ proxyClassMethods, proxyClassThrows);
soa.Self()->AssertNoPendingException();
return proxyClass;
}
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index beba64f158..a31d8ac5ba 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -40,7 +40,7 @@ ReferenceTable::~ReferenceTable() {
}
void ReferenceTable::Add(mirror::Object* obj) {
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
VerifyObject(obj);
if (entries_.size() >= max_size_) {
LOG(FATAL) << "ReferenceTable '" << name_ << "' "
@@ -79,8 +79,8 @@ static size_t GetElementCount(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::
static void DumpSummaryLine(std::ostream& os, mirror::Object* obj, size_t element_count,
int identical, int equiv)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (obj == NULL) {
- os << " NULL reference (count=" << equiv << ")\n";
+ if (obj == nullptr) {
+ os << " null reference (count=" << equiv << ")\n";
return;
}
if (Runtime::Current()->IsClearedJniWeakGlobal(obj)) {
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index db98e1fc9d..4ffebf2c5f 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -40,8 +40,8 @@ TEST_F(ReferenceTableTest, Basics) {
EXPECT_EQ(0U, rt.Size());
}
- // Check removal of all NULLs in a empty table is a no-op.
- rt.Remove(NULL);
+ // Check removal of all nulls in an empty table is a no-op.
+ rt.Remove(nullptr);
EXPECT_EQ(0U, rt.Size());
// Check removal of all o1 in an empty table is a no-op.
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 3e1315c73e..3099094ed4 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -22,6 +22,7 @@
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "jni_internal.h"
+#include "mirror/abstract_method.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
@@ -537,7 +538,7 @@ void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg
}
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaMethod,
- jobject javaReceiver, jobject javaArgs, bool accessible) {
+ jobject javaReceiver, jobject javaArgs, size_t num_frames) {
// We want to make sure that the stack is not within a small distance from the
// protected region in case we are calling into a leaf function whose stack
// check has been elided.
@@ -547,7 +548,9 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
return nullptr;
}
- mirror::ArtMethod* m = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod);
+ auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(javaMethod);
+ const bool accessible = abstract_method->IsAccessible();
+ mirror::ArtMethod* m = abstract_method->GetArtMethod();
mirror::Class* declaring_class = m->GetDeclaringClass();
if (UNLIKELY(!declaring_class->IsInitialized())) {
@@ -572,8 +575,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
}
// Get our arrays of arguments and their types, and check they're the same size.
- mirror::ObjectArray<mirror::Object>* objects =
- soa.Decode<mirror::ObjectArray<mirror::Object>*>(javaArgs);
+ auto* objects = soa.Decode<mirror::ObjectArray<mirror::Object>*>(javaArgs);
const DexFile::TypeList* classes = m->GetParameterTypeList();
uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
uint32_t arg_count = (objects != nullptr) ? objects->GetLength() : 0;
@@ -586,7 +588,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
// If method is not set to be accessible, verify it can be accessed by the caller.
mirror::Class* calling_class = nullptr;
if (!accessible && !VerifyAccess(soa.Self(), receiver, declaring_class, m->GetAccessFlags(),
- &calling_class, 2)) {
+ &calling_class, num_frames)) {
ThrowIllegalAccessException(
StringPrintf("Class %s cannot access %s method %s of class %s",
calling_class == nullptr ? "null" : PrettyClass(calling_class).c_str(),
@@ -613,11 +615,21 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
// Wrap any exception with "Ljava/lang/reflect/InvocationTargetException;" and return early.
if (soa.Self()->IsExceptionPending()) {
+ // If we get another exception when we are trying to wrap, then just use that instead.
jthrowable th = soa.Env()->ExceptionOccurred();
- soa.Env()->ExceptionClear();
+ soa.Self()->ClearException();
jclass exception_class = soa.Env()->FindClass("java/lang/reflect/InvocationTargetException");
+ if (exception_class == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
jmethodID mid = soa.Env()->GetMethodID(exception_class, "<init>", "(Ljava/lang/Throwable;)V");
+ CHECK(mid != nullptr);
jobject exception_instance = soa.Env()->NewObject(exception_class, mid, th);
+ if (exception_instance == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
soa.Env()->Throw(reinterpret_cast<jthrowable>(exception_instance));
return nullptr;
}
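
The wrap-and-rethrow sequence added above can also be expressed with plain JNI calls; this standalone sketch mirrors the control flow (clear the cause, build the wrapper, bail and keep the new pending exception if any step fails) without the ART-internal assertions:

#include <jni.h>

void WrapPendingException(JNIEnv* env) {
  jthrowable cause = env->ExceptionOccurred();
  if (cause == nullptr) {
    return;  // nothing pending, nothing to wrap
  }
  env->ExceptionClear();
  jclass ite = env->FindClass("java/lang/reflect/InvocationTargetException");
  if (ite == nullptr) {
    return;  // FindClass threw (e.g. OOM); keep that exception instead
  }
  jmethodID ctor = env->GetMethodID(ite, "<init>", "(Ljava/lang/Throwable;)V");
  if (ctor == nullptr) {
    return;  // GetMethodID threw; keep its exception pending
  }
  jobject wrapper = env->NewObject(ite, ctor, cause);
  if (wrapper == nullptr) {
    return;  // allocation failed; the OOM stays pending
  }
  env->Throw(static_cast<jthrowable>(wrapper));
}
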
diff --git a/runtime/reflection.h b/runtime/reflection.h
index c2d406a2db..c63f858129 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -65,8 +65,9 @@ void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg
JValue* result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+// num_frames is number of frames we look up for access check.
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject method, jobject receiver,
- jobject args, bool accessible)
+ jobject args, size_t num_frames = 1)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c)
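For the new parameter above: num_frames tells VerifyAccess how far up the stack the effective caller sits, so an entry point that interposes its own frame between the Java caller and InvokeMethod passes a larger value than the default of 1. A hypothetical forwarding helper, purely to illustrate the knob (the real call sites are the reflection native methods):

// Hypothetical: a helper that adds one frame above InvokeMethod and therefore
// asks VerifyAccess (via num_frames = 2) to check its caller, not itself.
static jobject InvokeThroughHelper(const ScopedObjectAccessAlreadyRunnable& soa,
                                   jobject method, jobject receiver, jobject args)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return InvokeMethod(soa, method, receiver, args, /* num_frames */ 2);
}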
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 7aefdaab5a..a62bc5ea62 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -37,35 +37,35 @@ class ReflectionTest : public CommonCompilerTest {
// Turn on -verbose:jni for the JNI tests.
// gLogVerbosity.jni = true;
- vm_->AttachCurrentThread(&env_, NULL);
+ vm_->AttachCurrentThread(&env_, nullptr);
ScopedLocalRef<jclass> aioobe(env_,
env_->FindClass("java/lang/ArrayIndexOutOfBoundsException"));
- CHECK(aioobe.get() != NULL);
+ CHECK(aioobe.get() != nullptr);
aioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(aioobe.get()));
ScopedLocalRef<jclass> ase(env_, env_->FindClass("java/lang/ArrayStoreException"));
- CHECK(ase.get() != NULL);
+ CHECK(ase.get() != nullptr);
ase_ = reinterpret_cast<jclass>(env_->NewGlobalRef(ase.get()));
ScopedLocalRef<jclass> sioobe(env_,
env_->FindClass("java/lang/StringIndexOutOfBoundsException"));
- CHECK(sioobe.get() != NULL);
+ CHECK(sioobe.get() != nullptr);
sioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(sioobe.get()));
}
void CleanUpJniEnv() {
- if (aioobe_ != NULL) {
+ if (aioobe_ != nullptr) {
env_->DeleteGlobalRef(aioobe_);
- aioobe_ = NULL;
+ aioobe_ = nullptr;
}
- if (ase_ != NULL) {
+ if (ase_ != nullptr) {
env_->DeleteGlobalRef(ase_);
- ase_ = NULL;
+ ase_ = nullptr;
}
- if (sioobe_ != NULL) {
+ if (sioobe_ != nullptr) {
env_->DeleteGlobalRef(sioobe_);
- sioobe_ = NULL;
+ sioobe_ = nullptr;
}
}
@@ -105,7 +105,7 @@ class ReflectionTest : public CommonCompilerTest {
mirror::Class* c = class_linker_->FindClass(self, DotToDescriptor(class_name).c_str(),
class_loader);
- CHECK(c != NULL);
+ CHECK(c != nullptr);
*method = is_static ? c->FindDirectMethod(method_name, method_signature)
: c->FindVirtualMethod(method_name, method_signature);
@@ -501,10 +501,10 @@ TEST_F(ReflectionTest, StaticMainMethod) {
CompileDirectMethod(class_loader, "Main", "main", "([Ljava/lang/String;)V");
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
- ASSERT_TRUE(klass != NULL);
+ ASSERT_TRUE(klass != nullptr);
mirror::ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V");
- ASSERT_TRUE(method != NULL);
+ ASSERT_TRUE(method != nullptr);
// Start runtime.
bool started = runtime_->Start();
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 7bebb965f4..48bca62fcf 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -77,6 +77,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field.h"
+#include "mirror/method.h"
#include "mirror/stack_trace_element.h"
#include "mirror/throwable.h"
#include "monitor.h"
@@ -288,7 +289,7 @@ struct AbortState {
}
gAborting++;
os << "Runtime aborting...\n";
- if (Runtime::Current() == NULL) {
+ if (Runtime::Current() == nullptr) {
os << "(Runtime does not yet exist!)\n";
return;
}
@@ -349,7 +350,7 @@ void Runtime::Abort() {
MutexLock mu(Thread::Current(), *Locks::abort_lock_);
// Get any pending output out of the way.
- fflush(NULL);
+ fflush(nullptr);
// Many people have difficulty distinguishing aborts from crashes,
// so be explicit.
@@ -357,7 +358,7 @@ void Runtime::Abort() {
LOG(INTERNAL_FATAL) << Dumpable<AbortState>(state);
// Call the abort hook if we have one.
- if (Runtime::Current() != NULL && Runtime::Current()->abort_ != NULL) {
+ if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
LOG(INTERNAL_FATAL) << "Calling abort hook...";
Runtime::Current()->abort_();
// notreached
@@ -385,7 +386,7 @@ void Runtime::PreZygoteFork() {
}
void Runtime::CallExitHook(jint status) {
- if (exit_ != NULL) {
+ if (exit_ != nullptr) {
ScopedThreadStateChange tsc(Thread::Current(), kNative);
exit_(status);
LOG(WARNING) << "Exit hook returned instead of exiting!";
@@ -400,16 +401,16 @@ void Runtime::SweepSystemWeaks(IsMarkedCallback* visitor, void* arg) {
bool Runtime::Create(const RuntimeOptions& options, bool ignore_unrecognized) {
// TODO: acquire a static mutex on Runtime to avoid racing.
- if (Runtime::instance_ != NULL) {
+ if (Runtime::instance_ != nullptr) {
return false;
}
- InitLogging(NULL); // Calls Locks::Init() as a side effect.
+ InitLogging(nullptr); // Calls Locks::Init() as a side effect.
instance_ = new Runtime;
if (!instance_->Init(options, ignore_unrecognized)) {
// TODO: Currently deleting the instance will abort the runtime on destruction. Now this will
// leak memory, instead. Fix the destructor. b/19100793.
// delete instance_;
- instance_ = NULL;
+ instance_ = nullptr;
return false;
}
return true;
@@ -430,7 +431,7 @@ static jobject CreateSystemClassLoader(Runtime* runtime) {
mirror::ArtMethod* getSystemClassLoader =
class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;");
- CHECK(getSystemClassLoader != NULL);
+ CHECK(getSystemClassLoader != nullptr);
JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
JNIEnv* env = soa.Self()->GetJniEnv();
@@ -446,7 +447,7 @@ static jobject CreateSystemClassLoader(Runtime* runtime) {
ArtField* contextClassLoader =
thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
- CHECK(contextClassLoader != NULL);
+ CHECK(contextClassLoader != nullptr);
// We can't run in a transaction yet.
contextClassLoader->SetObject<false>(soa.Self()->GetPeer(),
@@ -589,7 +590,7 @@ bool Runtime::InitZygote() {
// Mark rootfs as being a slave so that changes from default
// namespace only flow into our children.
- if (mount("rootfs", "/", NULL, (MS_SLAVE | MS_REC), NULL) == -1) {
+ if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) {
PLOG(WARNING) << "Failed to mount() rootfs as MS_SLAVE";
return false;
}
@@ -598,7 +599,7 @@ bool Runtime::InitZygote() {
// bind mount storage into their respective private namespaces, which
// are isolated from each other.
const char* target_base = getenv("EMULATED_STORAGE_TARGET");
- if (target_base != NULL) {
+ if (target_base != nullptr) {
if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV,
"uid=0,gid=1028,mode=0751") == -1) {
LOG(WARNING) << "Failed to mount tmpfs to " << target_base;
@@ -676,7 +677,7 @@ void Runtime::StartDaemonThreads() {
static bool OpenDexFilesFromImage(const std::string& image_location,
std::vector<std::unique_ptr<const DexFile>>* dex_files,
size_t* failures) {
- DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
std::string system_filename;
bool has_system = false;
std::string cache_filename_unused;
@@ -736,7 +737,7 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
const std::vector<std::string>& dex_locations,
const std::string& image_location,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
size_t failure_count = 0;
if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
return failure_count;
@@ -869,7 +870,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
// If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
// this case.
// If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
- // nullptr and we don't create the jit.
+ // null and we don't create the jit.
use_jit = false;
}
@@ -1128,26 +1129,26 @@ void Runtime::InitThreadGroups(Thread* self) {
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
- CHECK(main_thread_group_ != NULL || IsAotCompiler());
+ CHECK(main_thread_group_ != nullptr || IsAotCompiler());
system_thread_group_ =
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
- CHECK(system_thread_group_ != NULL || IsAotCompiler());
+ CHECK(system_thread_group_ != nullptr || IsAotCompiler());
}
jobject Runtime::GetMainThreadGroup() const {
- CHECK(main_thread_group_ != NULL || IsAotCompiler());
+ CHECK(main_thread_group_ != nullptr || IsAotCompiler());
return main_thread_group_;
}
jobject Runtime::GetSystemThreadGroup() const {
- CHECK(system_thread_group_ != NULL || IsAotCompiler());
+ CHECK(system_thread_group_ != nullptr || IsAotCompiler());
return system_thread_group_;
}
jobject Runtime::GetSystemClassLoader() const {
- CHECK(system_class_loader_ != NULL || IsAotCompiler());
+ CHECK(system_class_loader_ != nullptr || IsAotCompiler());
return system_class_loader_;
}
@@ -1273,12 +1274,12 @@ void Runtime::BlockSignals() {
bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer) {
- return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != NULL;
+ return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != nullptr;
}
void Runtime::DetachCurrentThread() {
Thread* self = Thread::Current();
- if (self == NULL) {
+ if (self == nullptr) {
LOG(FATAL) << "attempting to detach thread that is not attached";
}
if (self->HasManagedStack()) {
@@ -1308,7 +1309,9 @@ void Runtime::VisitConstantRoots(RootVisitor* visitor) {
// need to be visited once per GC since they never change.
mirror::ArtMethod::VisitRoots(visitor);
mirror::Class::VisitRoots(visitor);
+ mirror::Constructor::VisitRoots(visitor);
mirror::Reference::VisitRoots(visitor);
+ mirror::Method::VisitRoots(visitor);
mirror::StackTraceElement::VisitRoots(visitor);
mirror::String::VisitRoots(visitor);
mirror::Throwable::VisitRoots(visitor);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index d95640dea6..c35f4ca621 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -248,7 +248,7 @@ class Runtime {
}
InternTable* GetInternTable() const {
- DCHECK(intern_table_ != NULL);
+ DCHECK(intern_table_ != nullptr);
return intern_table_;
}
@@ -328,7 +328,7 @@ class Runtime {
void VisitNonConcurrentRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Sweep system weaks, the system weak is deleted if the visitor return nullptr. Otherwise, the
+ // Sweep system weaks: a system weak is deleted if the visitor returns null. Otherwise, the
// system weak is updated to be the visitor's returned value.
void SweepSystemWeaks(IsMarkedCallback* visitor, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -574,7 +574,7 @@ class Runtime {
void StartDaemonThreads();
void StartSignalCatcher();
- // A pointer to the active runtime or NULL.
+ // A pointer to the active runtime or null.
static Runtime* instance_;
// NOTE: these must match the gc::ProcessState values as they come directly from the framework.
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 35d944f1e6..d65e18e124 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -321,7 +321,7 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
OsInfo os_info;
const char* cmd_line = GetCmdLine();
- if (cmd_line == NULL) {
+ if (cmd_line == nullptr) {
cmd_line = "<unset>"; // Because no-one called InitLogging.
}
pid_t tid = GetTid();
@@ -353,9 +353,10 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
}
}
- if (getenv("debug_db_uid") != NULL || getenv("art_wait_for_gdb_on_crash") != NULL) {
+ if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
LOG(INTERNAL_FATAL) << "********************************************************\n"
- << "* Process " << getpid() << " thread " << tid << " \"" << thread_name << "\""
+ << "* Process " << getpid() << " thread " << tid << " \"" << thread_name
+ << "\""
<< " has been suspended while crashing.\n"
<< "* Attach gdb:\n"
<< "* gdb -p " << tid << "\n"
@@ -370,7 +371,7 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
memset(&action, 0, sizeof(action));
sigemptyset(&action.sa_mask);
action.sa_handler = SIG_DFL;
- sigaction(signal_number, &action, NULL);
+ sigaction(signal_number, &action, nullptr);
// ...and re-raise so we die with the appropriate status.
kill(getpid(), signal_number);
#else
@@ -390,19 +391,19 @@ void Runtime::InitPlatformSignalHandlers() {
action.sa_flags |= SA_ONSTACK;
int rc = 0;
- rc += sigaction(SIGABRT, &action, NULL);
- rc += sigaction(SIGBUS, &action, NULL);
- rc += sigaction(SIGFPE, &action, NULL);
- rc += sigaction(SIGILL, &action, NULL);
- rc += sigaction(SIGPIPE, &action, NULL);
- rc += sigaction(SIGSEGV, &action, NULL);
+ rc += sigaction(SIGABRT, &action, nullptr);
+ rc += sigaction(SIGBUS, &action, nullptr);
+ rc += sigaction(SIGFPE, &action, nullptr);
+ rc += sigaction(SIGILL, &action, nullptr);
+ rc += sigaction(SIGPIPE, &action, nullptr);
+ rc += sigaction(SIGSEGV, &action, nullptr);
#if defined(SIGSTKFLT)
- rc += sigaction(SIGSTKFLT, &action, NULL);
+ rc += sigaction(SIGSTKFLT, &action, nullptr);
#endif
- rc += sigaction(SIGTRAP, &action, NULL);
+ rc += sigaction(SIGTRAP, &action, nullptr);
// Special dump-all timeout.
if (GetTimeoutSignal() != -1) {
- rc += sigaction(GetTimeoutSignal(), &action, NULL);
+ rc += sigaction(GetTimeoutSignal(), &action, nullptr);
}
CHECK_EQ(rc, 0);
}
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 11b7df61f8..b93fcb4322 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -36,11 +36,11 @@ class ScopedThreadStateChange {
ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
: self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
- if (UNLIKELY(self_ == NULL)) {
- // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL.
+ if (UNLIKELY(self_ == nullptr)) {
+ // Value chosen arbitrarily and won't be used in the destructor since thread_ == null.
old_thread_state_ = kTerminated;
Runtime* runtime = Runtime::Current();
- CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
+ CHECK(runtime == nullptr || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
} else {
DCHECK_EQ(self, Thread::Current());
// Read state without locks, ok as state is effectively thread local and we're not interested
@@ -60,10 +60,10 @@ class ScopedThreadStateChange {
}
~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
- if (UNLIKELY(self_ == NULL)) {
+ if (UNLIKELY(self_ == nullptr)) {
if (!expected_has_no_thread_) {
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown(nullptr);
+ bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDown(nullptr);
CHECK(shutting_down);
}
} else {
@@ -87,7 +87,7 @@ class ScopedThreadStateChange {
protected:
// Constructor used by ScopedJniThreadState for an unattached thread that has access to the VM*.
ScopedThreadStateChange()
- : self_(NULL), thread_state_(kTerminated), old_thread_state_(kTerminated),
+ : self_(nullptr), thread_state_(kTerminated), old_thread_state_(kTerminated),
expected_has_no_thread_(true) {}
Thread* const self_;
@@ -124,7 +124,7 @@ class ScopedObjectAccessAlreadyRunnable {
* Add a local reference for an object to the indirect reference table associated with the
* current stack frame. When the native function returns, the reference will be discarded.
*
- * We need to allow the same reference to be added multiple times, and cope with NULL.
+ * We need to allow the same reference to be added multiple times, and cope with nullptr.
*
* This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and
* it's best if we don't grab a mutex.
@@ -133,8 +133,8 @@ class ScopedObjectAccessAlreadyRunnable {
T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- if (obj == NULL) {
- return NULL;
+ if (obj == nullptr) {
+ return nullptr;
}
DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
return Env()->AddLocalReference<T>(obj);
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 26bf655ca9..863d59bd66 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -53,7 +53,7 @@ static void DumpCmdLine(std::ostream& os) {
os << "Cmd line: " << current_cmd_line << "\n";
const char* stashed_cmd_line = GetCmdLine();
- if (stashed_cmd_line != NULL && current_cmd_line != stashed_cmd_line
+ if (stashed_cmd_line != nullptr && current_cmd_line != stashed_cmd_line
&& strcmp(stashed_cmd_line, "<unset>") != 0) {
os << "Original command line: " << stashed_cmd_line << "\n";
}
@@ -67,15 +67,15 @@ SignalCatcher::SignalCatcher(const std::string& stack_trace_file)
: stack_trace_file_(stack_trace_file),
lock_("SignalCatcher lock"),
cond_("SignalCatcher::cond_", lock_),
- thread_(NULL) {
+ thread_(nullptr) {
SetHaltFlag(false);
// Create a raw pthread; its start routine will attach to the runtime.
- CHECK_PTHREAD_CALL(pthread_create, (&pthread_, NULL, &Run, this), "signal catcher thread");
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread_, nullptr, &Run, this), "signal catcher thread");
Thread* self = Thread::Current();
MutexLock mu(self, lock_);
- while (thread_ == NULL) {
+ while (thread_ == nullptr) {
cond_.Wait(self);
}
}
@@ -85,7 +85,7 @@ SignalCatcher::~SignalCatcher() {
// to arrive, send it one.
SetHaltFlag(true);
CHECK_PTHREAD_CALL(pthread_kill, (pthread_, SIGQUIT), "signal catcher shutdown");
- CHECK_PTHREAD_CALL(pthread_join, (pthread_, NULL), "signal catcher shutdown");
+ CHECK_PTHREAD_CALL(pthread_join, (pthread_, nullptr), "signal catcher shutdown");
}
void SignalCatcher::SetHaltFlag(bool new_value) {
@@ -176,7 +176,7 @@ int SignalCatcher::WaitForSignal(Thread* self, SignalSet& signals) {
void* SignalCatcher::Run(void* arg) {
SignalCatcher* signal_catcher = reinterpret_cast<SignalCatcher*>(arg);
- CHECK(signal_catcher != NULL);
+ CHECK(signal_catcher != nullptr);
Runtime* runtime = Runtime::Current();
CHECK(runtime->AttachCurrentThread("Signal Catcher", true, runtime->GetSystemThreadGroup(),
@@ -199,7 +199,7 @@ void* SignalCatcher::Run(void* arg) {
int signal_number = signal_catcher->WaitForSignal(self, signals);
if (signal_catcher->ShouldHalt()) {
runtime->DetachCurrentThread();
- return NULL;
+ return nullptr;
}
switch (signal_number) {
diff --git a/runtime/signal_set.h b/runtime/signal_set.h
index 3b89e6e377..c272514f61 100644
--- a/runtime/signal_set.h
+++ b/runtime/signal_set.h
@@ -38,7 +38,7 @@ class SignalSet {
}
void Block() {
- if (sigprocmask(SIG_BLOCK, &set_, NULL) == -1) {
+ if (sigprocmask(SIG_BLOCK, &set_, nullptr) == -1) {
PLOG(FATAL) << "sigprocmask failed";
}
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 4ae49ddd7c..e49bc1d78f 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -36,12 +36,12 @@ namespace art {
mirror::Object* ShadowFrame::GetThisObject() const {
mirror::ArtMethod* m = GetMethod();
if (m->IsStatic()) {
- return NULL;
+ return nullptr;
} else if (m->IsNative()) {
return GetVRegReference(0);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- CHECK(code_item != NULL) << PrettyMethod(m);
+ CHECK(code_item != nullptr) << PrettyMethod(m);
uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
return GetVRegReference(reg);
}
@@ -50,7 +50,7 @@ mirror::Object* ShadowFrame::GetThisObject() const {
mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
mirror::ArtMethod* m = GetMethod();
if (m->IsStatic()) {
- return NULL;
+ return nullptr;
} else {
return GetVRegReference(NumberOfVRegs() - num_ins);
}
@@ -58,9 +58,9 @@ mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
size_t ManagedStack::NumJniShadowFrameReferences() const {
size_t count = 0;
- for (const ManagedStack* current_fragment = this; current_fragment != NULL;
+ for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
+ for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
current_frame = current_frame->GetLink()) {
if (current_frame->GetMethod()->IsNative()) {
// The JNI ShadowFrame only contains references. (For indirect reference.)
@@ -72,9 +72,9 @@ size_t ManagedStack::NumJniShadowFrameReferences() const {
}
bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
- for (const ManagedStack* current_fragment = this; current_fragment != NULL;
+ for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
+ for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
current_frame = current_frame->GetLink()) {
if (current_frame->Contains(shadow_frame_entry)) {
return true;
@@ -85,23 +85,23 @@ bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_fr
}
StackVisitor::StackVisitor(Thread* thread, Context* context)
- : thread_(thread), cur_shadow_frame_(NULL),
- cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0),
+ : thread_(thread), cur_shadow_frame_(nullptr),
+ cur_quick_frame_(nullptr), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0),
context_(context) {
DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}
StackVisitor::StackVisitor(Thread* thread, Context* context, size_t num_frames)
- : thread_(thread), cur_shadow_frame_(NULL),
- cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(num_frames), cur_depth_(0),
+ : thread_(thread), cur_shadow_frame_(nullptr),
+ cur_quick_frame_(nullptr), cur_quick_frame_pc_(0), num_frames_(num_frames), cur_depth_(0),
context_(context) {
DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}
uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
- if (cur_shadow_frame_ != NULL) {
+ if (cur_shadow_frame_ != nullptr) {
return cur_shadow_frame_->GetDexPC();
- } else if (cur_quick_frame_ != NULL) {
+ } else if (cur_quick_frame_ != nullptr) {
return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
} else {
return 0;
@@ -183,7 +183,7 @@ bool StackVisitor::GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRe
return GetRegisterIfAccessible(reg, kind, val);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
*val = *GetVRegAddrFromQuickCode(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
@@ -199,7 +199,7 @@ bool StackVisitor::GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
CodeInfo code_info = m->GetOptimizedCodeInfo();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
DCHECK_LT(vreg, code_item->registers_size_);
uint16_t number_of_dex_registers = code_item->registers_size_;
@@ -297,7 +297,7 @@ bool StackVisitor::GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg,
return GetRegisterPairIfAccessible(reg_lo, reg_hi, kind_lo, val);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint32_t* addr = GetVRegAddrFromQuickCode(
cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -345,7 +345,7 @@ bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_val
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
if (m->IsOptimized(sizeof(void*))) {
- return SetVRegFromOptimizedCode(m, vreg, new_value, kind);
+ return false;
} else {
return SetVRegFromQuickCode(m, vreg, new_value, kind);
}
@@ -372,7 +372,7 @@ bool StackVisitor::SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uin
return SetRegisterIfAccessible(reg, new_value, kind);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint32_t* addr = GetVRegAddrFromQuickCode(
cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -382,57 +382,6 @@ bool StackVisitor::SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uin
}
}
-bool StackVisitor::SetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
- VRegKind kind) {
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- uint32_t native_pc_offset = m->NativeQuickPcOffset(cur_quick_frame_pc_);
- CodeInfo code_info = m->GetOptimizedCodeInfo();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
- // its instructions?
- uint16_t number_of_dex_registers = code_item->registers_size_;
- DCHECK_LT(vreg, number_of_dex_registers);
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- DexRegisterLocation::Kind location_kind =
- dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
- uint32_t dex_pc = m->ToDexPc(cur_quick_frame_pc_, false);
- switch (location_kind) {
- case DexRegisterLocation::Kind::kInStack: {
- const int32_t offset =
- dex_register_map.GetStackOffsetInBytes(vreg, number_of_dex_registers, code_info);
- uint8_t* addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + offset;
- *reinterpret_cast<uint32_t*>(addr) = new_value;
- return true;
- }
- case DexRegisterLocation::Kind::kInRegister:
- case DexRegisterLocation::Kind::kInFpuRegister: {
- uint32_t reg = dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info);
- return SetRegisterIfAccessible(reg, new_value, kind);
- }
- case DexRegisterLocation::Kind::kConstant:
- LOG(ERROR) << StringPrintf("Cannot change value of DEX register v%u used as a constant at "
- "DEX pc 0x%x (native pc 0x%x) of method %s",
- vreg, dex_pc, native_pc_offset,
- PrettyMethod(cur_quick_frame_->AsMirrorPtr()).c_str());
- return false;
- case DexRegisterLocation::Kind::kNone:
- LOG(ERROR) << StringPrintf("No location for DEX register v%u at DEX pc 0x%x "
- "(native pc 0x%x) of method %s",
- vreg, dex_pc, native_pc_offset,
- PrettyMethod(cur_quick_frame_->AsMirrorPtr()).c_str());
- return false;
- default:
- LOG(FATAL) << StringPrintf("Unknown location for DEX register v%u at DEX pc 0x%x "
- "(native pc 0x%x) of method %s",
- vreg, dex_pc, native_pc_offset,
- PrettyMethod(cur_quick_frame_->AsMirrorPtr()).c_str());
- UNREACHABLE();
- }
-}
-
bool StackVisitor::SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind) {
const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
if (!IsAccessibleRegister(reg, is_float)) {
@@ -477,7 +426,7 @@ bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
if (m->IsOptimized(sizeof(void*))) {
- return SetVRegPairFromOptimizedCode(m, vreg, new_value, kind_lo, kind_hi);
+ return false;
} else {
return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
}
@@ -488,8 +437,8 @@ bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new
}
}
-bool StackVisitor::SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
- VRegKind kind_lo, VRegKind kind_hi) {
+bool StackVisitor::SetVRegPairFromQuickCode(
+ mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
@@ -505,7 +454,7 @@ bool StackVisitor::SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg,
return SetRegisterPairIfAccessible(reg_lo, reg_hi, new_value, is_float);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint32_t* addr = GetVRegAddrFromQuickCode(
cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -515,15 +464,6 @@ bool StackVisitor::SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg,
}
}
-bool StackVisitor::SetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
- VRegKind kind_lo, VRegKind kind_hi) {
- uint32_t low_32bits = Low32Bits(new_value);
- uint32_t high_32bits = High32Bits(new_value);
- bool success = SetVRegFromOptimizedCode(m, vreg, low_32bits, kind_lo);
- success &= SetVRegFromOptimizedCode(m, vreg + 1, high_32bits, kind_hi);
- return success;
-}
-
bool StackVisitor::SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
uint64_t new_value, bool is_float) {
if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
@@ -585,14 +525,14 @@ void StackVisitor::SetFPR(uint32_t reg, uintptr_t value) {
uintptr_t StackVisitor::GetReturnPc() const {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
- DCHECK(sp != NULL);
+ DCHECK(sp != nullptr);
uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
return *reinterpret_cast<uintptr_t*>(pc_addr);
}
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
- CHECK(sp != NULL);
+ CHECK(sp != nullptr);
uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
*reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}
@@ -600,7 +540,7 @@ void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
size_t StackVisitor::ComputeNumFrames(Thread* thread) {
struct NumFramesVisitor : public StackVisitor {
explicit NumFramesVisitor(Thread* thread_in)
- : StackVisitor(thread_in, NULL), frames(0) {}
+ : StackVisitor(thread_in, nullptr), frames(0) {}
bool VisitFrame() OVERRIDE {
frames++;
@@ -652,7 +592,7 @@ bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32
void StackVisitor::DescribeStack(Thread* thread) {
struct DescribeStackVisitor : public StackVisitor {
explicit DescribeStackVisitor(Thread* thread_in)
- : StackVisitor(thread_in, NULL) {}
+ : StackVisitor(thread_in, nullptr) {}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
@@ -666,7 +606,7 @@ void StackVisitor::DescribeStack(Thread* thread) {
std::string StackVisitor::DescribeLocation() const {
std::string result("Visiting method '");
mirror::ArtMethod* m = GetMethod();
- if (m == NULL) {
+ if (m == nullptr) {
return "upcall";
}
result += PrettyMethod(m);
@@ -713,24 +653,24 @@ void StackVisitor::WalkStack(bool include_transitions) {
bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
uint32_t instrumentation_stack_depth = 0;
- for (const ManagedStack* current_fragment = thread_->GetManagedStack(); current_fragment != NULL;
- current_fragment = current_fragment->GetLink()) {
+ for (const ManagedStack* current_fragment = thread_->GetManagedStack();
+ current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
cur_quick_frame_ = current_fragment->GetTopQuickFrame();
cur_quick_frame_pc_ = 0;
- if (cur_quick_frame_ != NULL) { // Handle quick stack frames.
+ if (cur_quick_frame_ != nullptr) { // Handle quick stack frames.
// Can't be both a shadow and a quick fragment.
- DCHECK(current_fragment->GetTopShadowFrame() == NULL);
+ DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr();
- while (method != NULL) {
+ while (method != nullptr) {
SanityCheckFrame();
bool should_continue = VisitFrame();
if (UNLIKELY(!should_continue)) {
return;
}
- if (context_ != NULL) {
+ if (context_ != nullptr) {
context_->FillCalleeSaves(*this);
}
size_t frame_size = method->GetFrameSizeInBytes();
@@ -748,7 +688,8 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) {
// Skip runtime save all callee frames which are used to deliver exceptions.
} else if (instrumentation_frame.interpreter_entry_) {
- mirror::ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+ mirror::ArtMethod* callee =
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
<< PrettyMethod(GetMethod());
} else if (instrumentation_frame.method_ != GetMethod()) {
@@ -771,7 +712,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
cur_depth_++;
method = cur_quick_frame_->AsMirrorPtr();
}
- } else if (cur_shadow_frame_ != NULL) {
+ } else if (cur_shadow_frame_ != nullptr) {
do {
SanityCheckFrame();
bool should_continue = VisitFrame();
@@ -780,7 +721,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
}
cur_depth_++;
cur_shadow_frame_ = cur_shadow_frame_->GetLink();
- } while (cur_shadow_frame_ != NULL);
+ } while (cur_shadow_frame_ != nullptr);
}
if (include_transitions) {
bool should_continue = VisitFrame();
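Buried in the rename churn above is a behavioral change: SetVRegFromOptimizedCode and SetVRegPairFromOptimizedCode are deleted, so SetVReg and SetVRegPair now return false outright for frames compiled by the optimizing compiler. A sketch of what that means for a caller (the helper is hypothetical; only the false return is per the change above):

// Debugger-style write into a stack frame. Writes into optimized frames are
// now refused rather than attempted, so a false return means "not modified".
static bool TrySetLocal(StackVisitor* visitor, mirror::ArtMethod* m,
                        uint16_t vreg, uint32_t new_value, VRegKind kind)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (!visitor->SetVReg(m, vreg, new_value, kind)) {
    // Optimized frame or inaccessible register: report failure upward.
    LOG(WARNING) << "Could not write v" << vreg << " in " << PrettyMethod(m);
    return false;
  }
  return true;
}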
diff --git a/runtime/stack.h b/runtime/stack.h
index fbb0aa4c0f..e2af5eefd2 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -295,11 +295,12 @@ class ShadowFrame {
}
StackReference<mirror::Object>* References() {
- return const_cast<StackReference<mirror::Object>*>(const_cast<const ShadowFrame*>(this)->References());
+ return const_cast<StackReference<mirror::Object>*>(
+ const_cast<const ShadowFrame*>(this)->References());
}
const uint32_t number_of_vregs_;
- // Link to previous shadow frame or NULL.
+ // Link to previous shadow frame or null.
ShadowFrame* link_;
mirror::ArtMethod* method_;
uint32_t dex_pc_;
@@ -571,7 +572,8 @@ class StackVisitor {
* Special temporaries may have custom locations and the logic above deals with that.
* However, non-special temporaries are placed relative to the outs.
*/
- int temps_start = sizeof(StackReference<mirror::ArtMethod>) + code_item->outs_size_ * sizeof(uint32_t);
+ int temps_start = sizeof(StackReference<mirror::ArtMethod>) +
+ code_item->outs_size_ * sizeof(uint32_t);
int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
return temps_start + relative_offset;
} else if (reg < num_regs) {
@@ -666,18 +668,12 @@ class StackVisitor {
bool SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
- VRegKind kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
- VRegKind kind_lo, VRegKind kind_hi)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value,
bool is_float)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 16add796c1..f7ef8942e6 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -35,10 +35,10 @@ static inline Thread* ThreadForEnv(JNIEnv* env) {
}
inline Thread* Thread::Current() {
- // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
+ // We rely on Thread::Current returning null for a detached thread, so it's not obvious
// that we can replace this with a direct %fs access on x86.
if (!is_started_) {
- return NULL;
+ return nullptr;
} else {
void* thread = pthread_getspecific(Thread::pthread_key_self_);
return reinterpret_cast<Thread*>(thread);
@@ -92,7 +92,7 @@ inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
// We expect no locks except the mutator_lock_ or thread list suspend thread lock.
if (i != kMutatorLock) {
BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
+ if (held_mutex != nullptr) {
LOG(ERROR) << "holding \"" << held_mutex->GetName()
<< "\" at point where thread suspension is expected";
bad_mutexes_held = true;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 5ca51fbdd8..b27ad4ae3e 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -588,7 +588,8 @@ void Thread::Dump(std::ostream& os) const {
mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
- return (tlsPtr_.opeer != nullptr) ? reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
+ return (tlsPtr_.opeer != nullptr) ?
+ reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
}
void Thread::GetThreadName(std::string& name) const {
@@ -713,9 +714,8 @@ bool Thread::RequestCheckpoint(Closure* function) {
union StateAndFlags new_state_and_flags;
new_state_and_flags.as_int = old_state_and_flags.as_int;
new_state_and_flags.as_struct.flags |= kCheckpointRequest;
- bool success =
- tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(old_state_and_flags.as_int,
- new_state_and_flags.as_int);
+ bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+ old_state_and_flags.as_int, new_state_and_flags.as_int);
if (UNLIKELY(!success)) {
// The thread changed state before the checkpoint was installed.
CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
@@ -1005,8 +1005,8 @@ static bool ShouldShowNativeStack(const Thread* thread)
// Threads with no managed stack frames should be shown.
const ManagedStack* managed_stack = thread->GetManagedStack();
- if (managed_stack == NULL || (managed_stack->GetTopQuickFrame() == NULL &&
- managed_stack->GetTopShadowFrame() == NULL)) {
+ if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
+ managed_stack->GetTopShadowFrame() == nullptr)) {
return true;
}
@@ -1097,7 +1097,7 @@ void Thread::Startup() {
{
// MutexLock to keep annotalysis happy.
//
- // Note we use nullptr for the thread because Thread::Current can
+ // Note we use null for the thread because Thread::Current can
// return garbage since (is_started_ == true) and
// Thread::pthread_key_self_ is not yet initialized.
// This was seen on glibc.
@@ -1162,7 +1162,7 @@ Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupte
bool Thread::IsStillStarting() const {
// You might think you can check whether the state is kStarting, but for much of thread startup,
// the thread is in kNative; it might also be in kVmWait.
- // You might think you can check whether the peer is nullptr, but the peer is actually created and
+ // You might think you can check whether the peer is null, but the peer is actually created and
// assigned fairly early on, and needs to be.
// It turns out that the last thing to change is the thread name; that's a good proxy for "has
// this thread _ever_ entered kRunnable".
@@ -1171,9 +1171,14 @@ bool Thread::IsStillStarting() const {
}
void Thread::AssertPendingException() const {
- if (UNLIKELY(!IsExceptionPending())) {
- LOG(FATAL) << "Pending exception expected.";
- }
+ CHECK(IsExceptionPending()) << "Pending exception expected.";
+}
+
+void Thread::AssertPendingOOMException() const {
+ AssertPendingException();
+ auto* e = GetException();
+ CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass())
+ << e->Dump();
}
void Thread::AssertNoPendingException() const {
@@ -1424,7 +1429,7 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
DCHECK_EQ(kind, kWeakGlobal);
result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
- // This is a special case where it's okay to return nullptr.
+ // This is a special case where it's okay to return null.
expect_null = true;
result = nullptr;
}
@@ -2197,7 +2202,7 @@ class ReferenceMapVisitor : public StackVisitor {
const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
CHECK(native_gc_map != nullptr) << PrettyMethod(m);
const DexFile::CodeItem* code_item = m->GetCodeItem();
- // Can't be nullptr or how would we compile its instructions?
+ // Can't be null or how would we compile its instructions?
DCHECK(code_item != nullptr) << PrettyMethod(m);
NativePcOffsetToReferenceMap map(native_gc_map);
size_t num_regs = std::min(map.RegWidth() * 8,
diff --git a/runtime/thread.h b/runtime/thread.h
index b095e22163..35b785df63 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -25,6 +25,7 @@
#include <setjmp.h>
#include <string>
+#include "arch/context.h"
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/macros.h"
@@ -185,7 +186,7 @@ class Thread {
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
+ // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
// case we use 'tid' to identify the thread, and we'll include as much information as we can.
static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
@@ -245,7 +246,7 @@ class Thread {
// Once called thread suspension will cause an assertion failure.
const char* StartAssertNoThreadSuspension(const char* cause) {
if (kIsDebugBuild) {
- CHECK(cause != NULL);
+ CHECK(cause != nullptr);
const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
tls32_.no_thread_suspension++;
tlsPtr_.last_no_thread_suspension_cause = cause;
@@ -297,7 +298,7 @@ class Thread {
return tls32_.tid;
}
- // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
+ // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -335,12 +336,13 @@ class Thread {
}
void AssertPendingException() const;
+ void AssertPendingOOMException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AssertNoPendingException() const;
void AssertNoPendingExceptionForNewException(const char* msg) const;
void SetException(mirror::Throwable* new_exception)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(new_exception != NULL);
+ CHECK(new_exception != nullptr);
// TODO: DCHECK(!IsExceptionPending());
tlsPtr_.exception = new_exception;
}
@@ -354,7 +356,18 @@ class Thread {
Context* GetLongJumpContext();
void ReleaseLongJumpContext(Context* context) {
- DCHECK(tlsPtr_.long_jump_context == nullptr);
+ if (tlsPtr_.long_jump_context != nullptr) {
+ // Each QuickExceptionHandler gets a long jump context and uses
+ // it for the long jump, after finding catch blocks/doing deoptimization.
+ // Both finding catch blocks and deoptimization can trigger another
+ // exception, e.g. as a result of class loading. So there can be nested
+ // cases of exception handling, with multiple contexts in use.
+ // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
+ // for reuse, so a new one need not be allocated each time a context is
+ // requested. Since we keep only one context for reuse, delete the existing
+ // one, as the passed-in context has yet to be used for the long jump.
+ delete tlsPtr_.long_jump_context;
+ }
tlsPtr_.long_jump_context = context;
}
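Stripped of the Thread plumbing, the comment above describes a one-slot object cache: handing out the cached instance when one exists, and on release keeping at most one instance while deleting any surplus produced by nested use. A self-contained sketch of the pattern (generic names, not ART API):

// One-slot cache mirroring the long_jump_context handling: Get() reuses the
// cached instance when present, Release() keeps at most one and deletes any
// extra instance handed back by nested users.
template <typename T>
class OneSlotCache {
 public:
  ~OneSlotCache() { delete cached_; }
  T* Get() {
    T* result = cached_;
    cached_ = nullptr;
    return result != nullptr ? result : new T();
  }
  void Release(T* instance) {
    if (cached_ != nullptr) {
      delete cached_;  // Nested use returned a second instance; keep only one.
    }
    cached_ = instance;
  }
 private:
  T* cached_ = nullptr;
};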
@@ -381,11 +394,11 @@ class Thread {
(tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
}
- // If 'msg' is NULL, no detail message is set.
+ // If 'msg' is null, no detail message is set.
void ThrowNewException(const char* exception_class_descriptor, const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
+ // If 'msg' is null, no detail message is set. An exception must be pending, and will be
// used as the new exception's cause.
void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -477,8 +490,8 @@ class Thread {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
- // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
- // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
+ // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
+ // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
// with the number of valid frames in the returned array.
static jobjectArray InternalStackTraceToStackTraceElementArray(
const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
@@ -1085,7 +1098,7 @@ class Thread {
// The biased card table, see CardTable for details.
uint8_t* card_table;
- // The pending exception or NULL.
+ // The pending exception or null.
mirror::Throwable* exception;
// The end of this thread's stack. This is the lowest safely-addressable address on the stack.
@@ -1121,13 +1134,13 @@ class Thread {
// Pointer to previous stack trace captured by sampling profiler.
std::vector<mirror::ArtMethod*>* stack_trace_sample;
- // The next thread in the wait set this thread is part of or NULL if not waiting.
+ // The next thread in the wait set this thread is part of or null if not waiting.
Thread* wait_next;
// If we're blocked in MonitorEnter, this is the object we're trying to lock.
mirror::Object* monitor_enter_object;
- // Top of linked list of handle scopes or nullptr for none.
+ // Top of linked list of handle scopes or null for none.
HandleScope* top_handle_scope;
// Needed to get the right ClassLoader in JNI_OnLoad, but also
@@ -1162,7 +1175,7 @@ class Thread {
// If no_thread_suspension_ is > 0, what is causing that assertion.
const char* last_no_thread_suspension_cause;
- // Pending checkpoint function or NULL if non-pending. Installation guarding by
+ // Pending checkpoint function or null if non-pending. Installation guarded by
// Locks::thread_suspend_count_lock_.
Closure* checkpoint_functions[kMaxCheckpoints];
@@ -1203,7 +1216,7 @@ class Thread {
// Condition variable waited upon during a wait.
ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
- // Pointer to the monitor lock we're currently waiting on or NULL if not waiting.
+ // Pointer to the monitor lock we're currently waiting on or null if not waiting.
Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
// Thread "interrupted" status; stays raised until queried or thrown.
diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc
index 0284364de7..0526f49913 100644
--- a/runtime/thread_linux.cc
+++ b/runtime/thread_linux.cc
@@ -50,26 +50,26 @@ void Thread::SetUpAlternateSignalStack() {
ss.ss_sp = new uint8_t[kHostAltSigStackSize];
ss.ss_size = kHostAltSigStackSize;
ss.ss_flags = 0;
- CHECK(ss.ss_sp != NULL);
- SigAltStack(&ss, NULL);
+ CHECK(ss.ss_sp != nullptr);
+ SigAltStack(&ss, nullptr);
// Double-check that it worked.
- ss.ss_sp = NULL;
- SigAltStack(NULL, &ss);
+ ss.ss_sp = nullptr;
+ SigAltStack(nullptr, &ss);
VLOG(threads) << "Alternate signal stack is " << PrettySize(ss.ss_size) << " at " << ss.ss_sp;
}
void Thread::TearDownAlternateSignalStack() {
// Get the pointer so we can free the memory.
stack_t ss;
- SigAltStack(NULL, &ss);
+ SigAltStack(nullptr, &ss);
uint8_t* allocated_signal_stack = reinterpret_cast<uint8_t*>(ss.ss_sp);
// Tell the kernel to stop using it.
- ss.ss_sp = NULL;
+ ss.ss_sp = nullptr;
ss.ss_flags = SS_DISABLE;
ss.ss_size = kHostAltSigStackSize; // Avoid ENOMEM failure with Mac OS' buggy libc.
- SigAltStack(&ss, NULL);
+ SigAltStack(&ss, nullptr);
// Free it.
delete[] allocated_signal_stack;
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 560bcc1a1d..cc54bbdae0 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -116,9 +116,9 @@ void ThreadList::DumpForSigQuit(std::ostream& os) {
}
static void DumpUnattachedThread(std::ostream& os, pid_t tid) NO_THREAD_SAFETY_ANALYSIS {
- // TODO: No thread safety analysis as DumpState with a NULL thread won't access fields, should
+ // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
// refactor DumpState to avoid skipping analysis.
- Thread::DumpState(os, NULL, tid);
+ Thread::DumpState(os, nullptr, tid);
DumpKernelStack(os, tid, " kernel: ", false);
// TODO: Reenable this when the native code in system_server can handle it.
// Currently "adb shell kill -3 `pid system_server`" will cause it to exit.
@@ -136,7 +136,7 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os) {
Thread* self = Thread::Current();
dirent* e;
- while ((e = readdir(d)) != NULL) {
+ while ((e = readdir(d)) != nullptr) {
char* end;
pid_t tid = strtol(e->d_name, &end, 10);
if (!*end) {
@@ -602,7 +602,7 @@ static void ThreadSuspendByPeerWarning(Thread* self, LogSeverity severity, const
scoped_name_string(env, (jstring)env->GetObjectField(peer,
WellKnownClasses::java_lang_Thread_name));
ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
- if (scoped_name_chars.c_str() == NULL) {
+ if (scoped_name_chars.c_str() == nullptr) {
LOG(severity) << message << ": " << peer;
env->ExceptionClear();
} else {
@@ -813,7 +813,7 @@ Thread* ThreadList::FindThreadByThreadId(uint32_t thin_lock_id) {
return thread;
}
}
- return NULL;
+ return nullptr;
}
void ThreadList::SuspendAllForDebugger() {
@@ -865,7 +865,7 @@ void ThreadList::SuspendSelfForDebugger() {
// The debugger thread must not suspend itself due to debugger activity!
Thread* debug_thread = Dbg::GetDebugThread();
- CHECK(debug_thread != NULL);
+ CHECK(debug_thread != nullptr);
CHECK(self != debug_thread);
CHECK_NE(self->GetState(), kRunnable);
Locks::mutator_lock_->AssertNotHeld(self);
@@ -1142,7 +1142,7 @@ void ThreadList::Unregister(Thread* self) {
// Clear the TLS data, so that the underlying native thread is recognizably detached.
// (It may wish to reattach later.)
- CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, NULL), "detach self");
+ CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
// Signal that a thread just detached.
MutexLock mu(nullptr, *Locks::thread_list_lock_);
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index fa747b86d6..0f094cc08a 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -68,7 +68,7 @@ class ThreadList {
// Suspend a thread using a peer, typically used by the debugger. Returns the thread on success,
- // else NULL. The peer is used to identify the thread to avoid races with the thread terminating.
+ // else null. The peer is used to identify the thread to avoid races with the thread terminating.
// If the thread should be suspended then request_suspension should be true; otherwise
// the routine will wait for a previous suspend request. If the suspension times out then *timeout
// is set to true.
@@ -79,7 +79,7 @@ class ThreadList {
Locks::thread_suspend_count_lock_);
// Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
- // thread on success else NULL. The thread id is used to identify the thread to avoid races with
+ // thread on success else null. The thread id is used to identify the thread to avoid races with
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
// thread, which may be terminating. If the suspension times out then *timeout is set to true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
@@ -164,7 +164,7 @@ class ThreadList {
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
- void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = NULL)
+ void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr)
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 2a82285bbe..ce76eae95a 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -158,7 +158,7 @@ Task* ThreadPool::GetTask(Thread* self) {
--waiting_count_;
}
- // We are shutting down, return nullptr to tell the worker thread to stop looping.
+ // We are shutting down, return null to tell the worker thread to stop looping.
return nullptr;
}
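The null return above is the pool's shutdown signal to its workers. A sketch of the consuming loop under that protocol (illustrative; it assumes GetTask is reachable from the worker, with Task::Run and Task::Finalize as declared in thread_pool.h):

// Worker loop driven by the null-means-shutdown protocol: GetTask() blocks
// until work arrives and returns nullptr only once the pool is shutting down.
void RunWorkerLoop(ThreadPool* pool, Thread* self) {
  while (true) {
    Task* task = pool->GetTask(self);
    if (task == nullptr) {
      break;  // Pool is shutting down: leave the loop so the thread can exit.
    }
    task->Run(self);
    task->Finalize();  // Allows a self-owned task to delete itself.
  }
}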
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 79b57afedd..0557708fd9 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -112,7 +112,7 @@ class ThreadPool {
// get a task to run, blocks if there are no tasks left
virtual Task* GetTask(Thread* self);
- // Try to get a task, returning NULL if there is none available.
+ // Try to get a task, returning null if there is none available.
Task* TryGetTask(Thread* self);
Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
@@ -166,7 +166,7 @@ class WorkStealingWorker : public ThreadPoolWorker {
virtual ~WorkStealingWorker();
bool IsRunningTask() const {
- return task_ != NULL;
+ return task_ != nullptr;
}
protected:
diff --git a/runtime/trace.h b/runtime/trace.h
index b8329ff5c5..1ecd4d8fff 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -172,7 +172,7 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
void WriteToBuf(const uint8_t* src, size_t src_size)
EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
- // Singleton instance of the Trace or NULL when no method tracing is active.
+ // Singleton instance of the Trace or null when no method tracing is active.
static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
// The default profiler clock source.
@@ -184,7 +184,7 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
// Used to remember an unused stack trace to avoid re-allocation during sampling.
static std::unique_ptr<std::vector<mirror::ArtMethod*>> temp_stack_trace_;
- // File to write trace data out to, NULL if direct to ddms.
+ // File to write trace data out to, null if direct to ddms.
std::unique_ptr<File> trace_file_;
// Buffer to store trace data.
diff --git a/runtime/utils.cc b/runtime/utils.cc
index a303aa4155..ec7131dd2c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -60,7 +60,7 @@ static constexpr bool kUseAddr2line = !kIsTargetBuild;
pid_t GetTid() {
#if defined(__APPLE__)
uint64_t owner;
- CHECK_PTHREAD_CALL(pthread_threadid_np, (NULL, &owner), __FUNCTION__); // Requires Mac OS 10.6
+ CHECK_PTHREAD_CALL(pthread_threadid_np, (nullptr, &owner), __FUNCTION__); // Requires Mac OS 10.6
return owner;
#elif defined(__BIONIC__)
return gettid();
@@ -205,7 +205,7 @@ bool PrintFileToLog(const std::string& file_name, LogSeverity level) {
}
std::string GetIsoDate() {
- time_t now = time(NULL);
+ time_t now = time(nullptr);
tm tmbuf;
tm* ptm = localtime_r(&now, &tmbuf);
return StringPrintf("%04d-%02d-%02d %02d:%02d:%02d",
@@ -220,7 +220,7 @@ uint64_t MilliTime() {
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_nsec / UINT64_C(1000000);
#else // __APPLE__
timeval now;
- gettimeofday(&now, NULL);
+ gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_usec / UINT64_C(1000);
#endif
}
@@ -232,7 +232,7 @@ uint64_t MicroTime() {
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
#else // __APPLE__
timeval now;
- gettimeofday(&now, NULL);
+ gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_usec;
#endif
}
@@ -244,7 +244,7 @@ uint64_t NanoTime() {
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
#else // __APPLE__
timeval now;
- gettimeofday(&now, NULL);
+ gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_usec * UINT64_C(1000);
#endif
}
@@ -264,7 +264,7 @@ void NanoSleep(uint64_t ns) {
timespec tm;
tm.tv_sec = 0;
tm.tv_nsec = ns;
- nanosleep(&tm, NULL);
+ nanosleep(&tm, nullptr);
}
void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts) {
@@ -276,7 +276,7 @@ void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts
#else
UNUSED(clock);
timeval tv;
- gettimeofday(&tv, NULL);
+ gettimeofday(&tv, nullptr);
ts->tv_sec = tv.tv_sec;
ts->tv_nsec = tv.tv_usec * 1000;
#endif
@@ -301,14 +301,14 @@ void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts
}
std::string PrettyDescriptor(mirror::String* java_descriptor) {
- if (java_descriptor == NULL) {
+ if (java_descriptor == nullptr) {
return "null";
}
return PrettyDescriptor(java_descriptor->ToModifiedUtf8().c_str());
}
std::string PrettyDescriptor(mirror::Class* klass) {
- if (klass == NULL) {
+ if (klass == nullptr) {
return "null";
}
std::string temp;
@@ -365,7 +365,7 @@ std::string PrettyDescriptor(const char* descriptor) {
}
std::string PrettyField(ArtField* f, bool with_type) {
- if (f == NULL) {
+ if (f == nullptr) {
return "null";
}
std::string result;
@@ -436,7 +436,7 @@ std::string PrettyArguments(const char* signature) {
std::string PrettyReturnType(const char* signature) {
const char* return_type = strchr(signature, ')');
- CHECK(return_type != NULL);
+ CHECK(return_type != nullptr);
++return_type; // Skip ')'.
return PrettyDescriptor(return_type);
}
@@ -484,10 +484,10 @@ std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with
}
std::string PrettyTypeOf(mirror::Object* obj) {
- if (obj == NULL) {
+ if (obj == nullptr) {
return "null";
}
- if (obj->GetClass() == NULL) {
+ if (obj->GetClass() == nullptr) {
return "(raw)";
}
std::string temp;
@@ -499,7 +499,7 @@ std::string PrettyTypeOf(mirror::Object* obj) {
}
std::string PrettyClass(mirror::Class* c) {
- if (c == NULL) {
+ if (c == nullptr) {
return "null";
}
std::string result;
@@ -510,7 +510,7 @@ std::string PrettyClass(mirror::Class* c) {
}
std::string PrettyClassAndClassLoader(mirror::Class* c) {
- if (c == NULL) {
+ if (c == nullptr) {
return "null";
}
std::string result;
@@ -1158,9 +1158,9 @@ void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu)
std::vector<std::string> fields;
Split(stats, ' ', &fields);
*state = fields[0][0];
- *utime = strtoull(fields[11].c_str(), NULL, 10);
- *stime = strtoull(fields[12].c_str(), NULL, 10);
- *task_cpu = strtoull(fields[36].c_str(), NULL, 10);
+ *utime = strtoull(fields[11].c_str(), nullptr, 10);
+ *stime = strtoull(fields[12].c_str(), nullptr, 10);
+ *task_cpu = strtoull(fields[36].c_str(), nullptr, 10);
}
std::string GetSchedulerGroupName(pid_t tid) {
@@ -1358,7 +1358,7 @@ void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix, bool inclu
// into "futex_wait_queue_me+0xcd/0x110".
const char* text = kernel_stack_frames[i].c_str();
const char* close_bracket = strchr(text, ']');
- if (close_bracket != NULL) {
+ if (close_bracket != nullptr) {
text = close_bracket + 2;
}
os << prefix;
@@ -1373,7 +1373,7 @@ void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix, bool inclu
const char* GetAndroidRoot() {
const char* android_root = getenv("ANDROID_ROOT");
- if (android_root == NULL) {
+ if (android_root == nullptr) {
if (OS::DirectoryExists("/system")) {
android_root = "/system";
} else {
@@ -1401,7 +1401,7 @@ const char* GetAndroidData() {
const char* GetAndroidDataSafe(std::string* error_msg) {
const char* android_data = getenv("ANDROID_DATA");
- if (android_data == NULL) {
+ if (android_data == nullptr) {
if (OS::DirectoryExists("/data")) {
android_data = "/data";
} else {
@@ -1563,7 +1563,7 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
CHECK(arg_str != nullptr) << i;
args.push_back(arg_str);
}
- args.push_back(NULL);
+ args.push_back(nullptr);
// fork and exec
pid_t pid = fork();
diff --git a/runtime/utils.h b/runtime/utils.h
index 6708c673e6..853fa08251 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -508,7 +508,7 @@ const char* GetAndroidRoot();
// Find $ANDROID_DATA, /data, or abort.
const char* GetAndroidData();
-// Find $ANDROID_DATA, /data, or return nullptr.
+// Find $ANDROID_DATA, /data, or return null.
const char* GetAndroidDataSafe(std::string* error_msg);
// Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 6ccbd131b1..d8f8950fbd 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -106,7 +106,7 @@ TEST_F(UtilsTest, PrettyReturnType) {
TEST_F(UtilsTest, PrettyTypeOf) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyTypeOf(NULL));
+ EXPECT_EQ("null", PrettyTypeOf(nullptr));
StackHandleScope<2> hs(soa.Self());
Handle<mirror::String> s(hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "")));
@@ -116,7 +116,7 @@ TEST_F(UtilsTest, PrettyTypeOf) {
EXPECT_EQ("short[]", PrettyTypeOf(a.Get()));
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
EXPECT_EQ("java.lang.String[]", PrettyTypeOf(o));
EXPECT_EQ("java.lang.Class<java.lang.String[]>", PrettyTypeOf(o->GetClass()));
@@ -124,25 +124,25 @@ TEST_F(UtilsTest, PrettyTypeOf) {
TEST_F(UtilsTest, PrettyClass) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyClass(NULL));
+ EXPECT_EQ("null", PrettyClass(nullptr));
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
EXPECT_EQ("java.lang.Class<java.lang.String[]>", PrettyClass(o->GetClass()));
}
TEST_F(UtilsTest, PrettyClassAndClassLoader) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyClassAndClassLoader(NULL));
+ EXPECT_EQ("null", PrettyClassAndClassLoader(nullptr));
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
EXPECT_EQ("java.lang.Class<java.lang.String[],null>", PrettyClassAndClassLoader(o->GetClass()));
}
TEST_F(UtilsTest, PrettyField) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyField(NULL));
+ EXPECT_EQ("null", PrettyField(nullptr));
mirror::Class* java_lang_String = class_linker_->FindSystemClass(soa.Self(),
"Ljava/lang/String;");
@@ -216,21 +216,21 @@ TEST_F(UtilsTest, MangleForJni) {
TEST_F(UtilsTest, JniShortName_JniLongName) {
ScopedObjectAccess soa(Thread::Current());
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::ArtMethod* m;
m = c->FindVirtualMethod("charAt", "(I)C");
- ASSERT_TRUE(m != NULL);
+ ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_charAt", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_charAt__I", JniLongName(m));
m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I");
- ASSERT_TRUE(m != NULL);
+ ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_indexOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_indexOf__Ljava_lang_String_2I", JniLongName(m));
m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;");
- ASSERT_TRUE(m != NULL);
+ ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_copyValueOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_copyValueOf___3CII", JniLongName(m));
}
@@ -384,7 +384,8 @@ TEST_F(UtilsTest, GetSystemImageFilename) {
TEST_F(UtilsTest, ExecSuccess) {
std::vector<std::string> command;
if (kIsTargetBuild) {
- command.push_back("/system/bin/id");
+ std::string android_root(GetAndroidRoot());
+ command.push_back(android_root + "/bin/id");
} else {
command.push_back("/usr/bin/id");
}
diff --git a/runtime/verifier/dex_gc_map.cc b/runtime/verifier/dex_gc_map.cc
index cd0b1371e1..c435f9f82e 100644
--- a/runtime/verifier/dex_gc_map.cc
+++ b/runtime/verifier/dex_gc_map.cc
@@ -49,7 +49,7 @@ const uint8_t* DexPcToReferenceMap::FindBitMap(uint16_t dex_pc, bool error_if_no
if (error_if_not_present) {
LOG(ERROR) << "Didn't find reference bit map for dex_pc " << dex_pc;
}
- return NULL;
+ return nullptr;
}
} // namespace verifier
diff --git a/runtime/verifier/dex_gc_map.h b/runtime/verifier/dex_gc_map.h
index d77ea650fe..03a7821ee6 100644
--- a/runtime/verifier/dex_gc_map.h
+++ b/runtime/verifier/dex_gc_map.h
@@ -39,7 +39,7 @@ enum RegisterMapFormat {
class DexPcToReferenceMap {
public:
explicit DexPcToReferenceMap(const uint8_t* data) : data_(data) {
- CHECK(data_ != NULL);
+ CHECK(data_ != nullptr);
}
// The total size of the reference bit map including header.
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index cd414c28cd..2914b7cb57 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -244,11 +244,11 @@ class MethodVerifier {
bool HasFailures() const;
const RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the method of a quick invoke or nullptr if it cannot be found.
+ // Returns the method of a quick invoke or null if it cannot be found.
mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
bool is_range, bool allow_failure)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the access field of a quick field access (iget/iput-quick) or nullptr
+ // Returns the access field of a quick field access (iget/iput-quick) or null
// if it cannot be found.
ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -585,7 +585,7 @@ class MethodVerifier {
* Widening conversions on integers and references are allowed, but
* narrowing conversions are not.
*
- * Returns the resolved method on success, nullptr on failure (with *failure
+ * Returns the resolved method on success, null on failure (with *failure
* set appropriately).
*/
mirror::ArtMethod* VerifyInvocationArgs(const Instruction* inst,
@@ -686,7 +686,7 @@ class MethodVerifier {
// The dex PC of a FindLocksAtDexPc request, -1 otherwise.
uint32_t interesting_dex_pc_;
// The container into which FindLocksAtDexPc should write the registers containing held locks,
- // nullptr if we're not doing FindLocksAtDexPc.
+ // null if we're not doing FindLocksAtDexPc.
std::vector<uint32_t>* monitor_enter_dex_pcs_;
// The types of any error that occurs.
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index f67adc1b0e..3994536cca 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -31,7 +31,7 @@ class MethodVerifierTest : public CommonRuntimeTest {
protected:
void VerifyClass(const std::string& descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ASSERT_TRUE(descriptor != NULL);
+ ASSERT_TRUE(descriptor != nullptr);
Thread* self = Thread::Current();
mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str());
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index e4d2c3ea1f..d08c937a64 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -707,7 +707,7 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType {
UnresolvedUninitializedRefType(const std::string& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : UninitializedType(NULL, descriptor, allocation_pc, cache_id) {
+ : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -752,7 +752,7 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
UnresolvedUninitializedThisRefType(const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : UninitializedType(NULL, descriptor, 0, cache_id) {
+ : UninitializedType(nullptr, descriptor, 0, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -808,7 +808,7 @@ class UnresolvedType : public RegType {
public:
UnresolvedType(const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : RegType(NULL, descriptor, cache_id) {}
+ : RegType(nullptr, descriptor, cache_id) {}
bool IsNonZeroReferenceTypes() const OVERRIDE;
};
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index 9024a7dd03..b6f253bb7b 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -30,7 +30,7 @@ namespace verifier {
inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
DCHECK_LT(id, entries_.size());
const RegType* result = entries_[id];
- DCHECK(result != NULL);
+ DCHECK(result != nullptr);
return *result;
}
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index d389244f47..a2d0427244 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -39,6 +39,7 @@ jclass WellKnownClasses::java_lang_ClassNotFoundException;
jclass WellKnownClasses::java_lang_Daemons;
jclass WellKnownClasses::java_lang_Error;
jclass WellKnownClasses::java_lang_Object;
+jclass WellKnownClasses::java_lang_OutOfMemoryError;
jclass WellKnownClasses::java_lang_reflect_AbstractMethod;
jclass WellKnownClasses::java_lang_reflect_ArtMethod;
jclass WellKnownClasses::java_lang_reflect_Constructor;
@@ -78,7 +79,7 @@ jmethodID WellKnownClasses::java_lang_ref_ReferenceQueue_add;
jmethodID WellKnownClasses::java_lang_reflect_Proxy_invoke;
jmethodID WellKnownClasses::java_lang_Runtime_nativeLoad;
jmethodID WellKnownClasses::java_lang_Short_valueOf;
-jmethodID WellKnownClasses::java_lang_System_runFinalization = NULL;
+jmethodID WellKnownClasses::java_lang_System_runFinalization = nullptr;
jmethodID WellKnownClasses::java_lang_Thread_init;
jmethodID WellKnownClasses::java_lang_Thread_run;
jmethodID WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException;
@@ -123,7 +124,7 @@ jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type;
static jclass CacheClass(JNIEnv* env, const char* jni_class_name) {
ScopedLocalRef<jclass> c(env, env->FindClass(jni_class_name));
- if (c.get() == NULL) {
+ if (c.get() == nullptr) {
LOG(FATAL) << "Couldn't find class: " << jni_class_name;
}
return reinterpret_cast<jclass>(env->NewGlobalRef(c.get()));
@@ -134,7 +135,7 @@ static jfieldID CacheField(JNIEnv* env, jclass c, bool is_static,
jfieldID fid = (is_static ?
env->GetStaticFieldID(c, name, signature) :
env->GetFieldID(c, name, signature));
- if (fid == NULL) {
+ if (fid == nullptr) {
ScopedObjectAccess soa(env);
std::ostringstream os;
WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
@@ -149,7 +150,7 @@ jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static,
jmethodID mid = (is_static ?
env->GetStaticMethodID(c, name, signature) :
env->GetMethodID(c, name, signature));
- if (mid == NULL) {
+ if (mid == nullptr) {
ScopedObjectAccess soa(env);
std::ostringstream os;
WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
@@ -176,6 +177,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_ClassNotFoundException = CacheClass(env, "java/lang/ClassNotFoundException");
java_lang_Daemons = CacheClass(env, "java/lang/Daemons");
java_lang_Object = CacheClass(env, "java/lang/Object");
+ java_lang_OutOfMemoryError = CacheClass(env, "java/lang/OutOfMemoryError");
java_lang_Error = CacheClass(env, "java/lang/Error");
java_lang_reflect_AbstractMethod = CacheClass(env, "java/lang/reflect/AbstractMethod");
java_lang_reflect_ArtMethod = CacheClass(env, "java/lang/reflect/ArtMethod");
@@ -212,7 +214,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
ScopedLocalRef<jclass> java_lang_ref_ReferenceQueue(env, env->FindClass("java/lang/ref/ReferenceQueue"));
java_lang_ref_ReferenceQueue_add = CacheMethod(env, java_lang_ref_ReferenceQueue.get(), true, "add", "(Ljava/lang/ref/Reference;)V");
- java_lang_reflect_Proxy_invoke = CacheMethod(env, java_lang_reflect_Proxy, true, "invoke", "(Ljava/lang/reflect/Proxy;Ljava/lang/reflect/ArtMethod;[Ljava/lang/Object;)Ljava/lang/Object;");
+ java_lang_reflect_Proxy_invoke = CacheMethod(env, java_lang_reflect_Proxy, true, "invoke", "(Ljava/lang/reflect/Proxy;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;");
java_lang_Thread_init = CacheMethod(env, java_lang_Thread, false, "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
java_lang_Thread_run = CacheMethod(env, java_lang_Thread, false, "run", "()V");
java_lang_Thread__UncaughtExceptionHandler_uncaughtException = CacheMethod(env, java_lang_Thread__UncaughtExceptionHandler, false, "uncaughtException", "(Ljava/lang/Thread;Ljava/lang/Throwable;)V");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 2df1c0e6b0..cef9d55524 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -50,6 +50,7 @@ struct WellKnownClasses {
static jclass java_lang_Daemons;
static jclass java_lang_Error;
static jclass java_lang_Object;
+ static jclass java_lang_OutOfMemoryError;
static jclass java_lang_reflect_AbstractMethod;
static jclass java_lang_reflect_ArtMethod;
static jclass java_lang_reflect_Constructor;
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index ffab674be1..88c1f69bf5 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -56,7 +56,7 @@ MemMap* ZipEntry::ExtractToMemMap(const char* zip_filename, const char* entry_fi
name += " extracted in memory from ";
name += zip_filename;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
- NULL, GetUncompressedLength(),
+ nullptr, GetUncompressedLength(),
PROT_READ | PROT_WRITE, false, false,
error_msg));
if (map.get() == nullptr) {
diff --git a/runtime/zip_archive.h b/runtime/zip_archive.h
index 865af515d8..717eb8c82e 100644
--- a/runtime/zip_archive.h
+++ b/runtime/zip_archive.h
@@ -57,7 +57,7 @@ class ZipEntry {
class ZipArchive {
public:
- // return new ZipArchive instance on success, NULL on error.
+ // return new ZipArchive instance on success, null on error.
static ZipArchive* Open(const char* filename, std::string* error_msg);
static ZipArchive* OpenFromFd(int fd, const char* filename, std::string* error_msg);
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 70a4ddaabf..aded30cd86 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -42,11 +42,11 @@ TEST_F(ZipArchiveTest, FindAndExtract) {
ScratchFile tmp;
ASSERT_NE(-1, tmp.GetFd());
std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename(), false));
- ASSERT_TRUE(file.get() != NULL);
+ ASSERT_TRUE(file.get() != nullptr);
bool success = zip_entry->ExtractToFile(*file, &error_msg);
ASSERT_TRUE(success) << error_msg;
ASSERT_TRUE(error_msg.empty());
- file.reset(NULL);
+ file.reset(nullptr);
uint32_t computed_crc = crc32(0L, Z_NULL, 0);
int fd = open(tmp.GetFilename().c_str(), O_RDONLY);
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index e61fcd880a..0359ed3dbb 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -51,7 +51,7 @@ class SignalAction {
// Unclaim the signal and restore the old action.
void Unclaim(int signal) {
claimed_ = false;
- sigaction(signal, &action_, NULL); // Restore old action.
+ sigaction(signal, &action_, nullptr); // Restore old action.
}
// Get the action associated with this signal.
@@ -133,14 +133,14 @@ extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context)
const struct sigaction& action = user_sigactions[sig].GetAction();
if (user_sigactions[sig].OldStyle()) {
- if (action.sa_handler != NULL) {
+ if (action.sa_handler != nullptr) {
action.sa_handler(sig);
} else {
signal(sig, SIG_DFL);
raise(sig);
}
} else {
- if (action.sa_sigaction != NULL) {
+ if (action.sa_sigaction != nullptr) {
action.sa_sigaction(sig, info, context);
} else {
signal(sig, SIG_DFL);
@@ -172,10 +172,10 @@ extern "C" int sigaction(int signal, const struct sigaction* new_action, struct
if (signal > 0 && signal < _NSIG && user_sigactions[signal].IsClaimed() &&
(new_action == nullptr || new_action->sa_handler != SIG_DFL)) {
struct sigaction saved_action = user_sigactions[signal].GetAction();
- if (new_action != NULL) {
+ if (new_action != nullptr) {
user_sigactions[signal].SetAction(*new_action, false);
}
- if (old_action != NULL) {
+ if (old_action != nullptr) {
*old_action = saved_action;
}
return 0;
@@ -242,7 +242,7 @@ extern "C" sighandler_t signal(int signal, sighandler_t handler) {
extern "C" int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
const sigset_t* new_set_ptr = bionic_new_set;
sigset_t tmpset;
- if (bionic_new_set != NULL) {
+ if (bionic_new_set != nullptr) {
tmpset = *bionic_new_set;
if (how == SIG_BLOCK) {
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 544cbc503e..b23b97b7c3 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -25,7 +25,7 @@
#error test code compiled without NDEBUG
#endif
-static JavaVM* jvm = NULL;
+static JavaVM* jvm = nullptr;
extern "C" JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *) {
assert(vm != nullptr);
@@ -38,7 +38,7 @@ static void* AttachHelper(void* arg) {
assert(jvm != nullptr);
JNIEnv* env = nullptr;
- JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
+ JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, nullptr };
int attach_result = jvm->AttachCurrentThread(&env, &args);
assert(attach_result == 0);
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index 876d27ec2e..1414715bbf 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -89,7 +89,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_terminateSignalTest(JNIEnv*, jclass)
}
// Prevent the compiler being a smart-alec and optimizing out the assignment
-// to nullptr.
+// to null.
char *go_away_compiler = nullptr;
extern "C" JNIEXPORT jint JNICALL Java_Main_testSignal(JNIEnv*, jclass) {
diff --git a/test/008-exceptions/expected.txt b/test/008-exceptions/expected.txt
index ef6eaff59a..92c79dc2a0 100644
--- a/test/008-exceptions/expected.txt
+++ b/test/008-exceptions/expected.txt
@@ -1,9 +1,12 @@
Got an NPE: second throw
java.lang.NullPointerException: second throw
- at Main.catchAndRethrow(Main.java:39)
- at Main.exceptions_007(Main.java:23)
- at Main.main(Main.java:31)
+ at Main.catchAndRethrow(Main.java:58)
+ at Main.exceptions_007(Main.java:41)
+ at Main.main(Main.java:49)
Caused by: java.lang.NullPointerException: first throw
- at Main.throwNullPointerException(Main.java:46)
- at Main.catchAndRethrow(Main.java:36)
+ at Main.throwNullPointerException(Main.java:65)
+ at Main.catchAndRethrow(Main.java:55)
... 2 more
+Static Init
+BadError: This is bad by convention
+BadError: This is bad by convention
diff --git a/test/008-exceptions/src/Main.java b/test/008-exceptions/src/Main.java
index 1f76f12460..7f6d0c5956 100644
--- a/test/008-exceptions/src/Main.java
+++ b/test/008-exceptions/src/Main.java
@@ -14,6 +14,24 @@
* limitations under the License.
*/
+// An exception that doesn't have a <init>(String) method.
+class BadError extends Error {
+ public BadError() {
+ super("This is bad by convention");
+ }
+}
+
+// A class that throws BadError during static initialization.
+class BadInit {
+ static int dummy;
+ static {
+ System.out.println("Static Init");
+ if (true) {
+ throw new BadError();
+ }
+ }
+}
+
/**
* Exceptions across method calls
*/
@@ -29,6 +47,7 @@ public class Main {
}
public static void main (String args[]) {
exceptions_007();
+ exceptionsRethrowClassInitFailure();
}
private static void catchAndRethrow() {
@@ -45,4 +64,26 @@ public class Main {
private static void throwNullPointerException() {
throw new NullPointerException("first throw");
}
+
+ private static void exceptionsRethrowClassInitFailure() {
+ try {
+ try {
+ BadInit.dummy = 1;
+ throw new IllegalStateException("Should not reach here.");
+ } catch (BadError e) {
+ System.out.println(e);
+ }
+
+ // Check if it works a second time.
+
+ try {
+ BadInit.dummy = 1;
+ throw new IllegalStateException("Should not reach here.");
+ } catch (BadError e) {
+ System.out.println(e);
+ }
+ } catch (Exception error) {
+ error.printStackTrace();
+ }
+ }
}
diff --git a/test/068-classloader/expected.txt b/test/068-classloader/expected.txt
index bf131eee63..8725799fe1 100644
--- a/test/068-classloader/expected.txt
+++ b/test/068-classloader/expected.txt
@@ -11,3 +11,5 @@ Ctor: doubled implement, type 1
DoubledImplement one
Got LinkageError on DI (early)
Got LinkageError on IDI (early)
+class Main
+Got expected ClassNotFoundException
diff --git a/test/068-classloader/src-ex/MutationTarget.java b/test/068-classloader/src-ex/MutationTarget.java
new file mode 100644
index 0000000000..b02a236aa0
--- /dev/null
+++ b/test/068-classloader/src-ex/MutationTarget.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Mutator target, see Mutator.java.
+ */
+public class MutationTarget {
+ public static int value = 0;
+}
\ No newline at end of file
diff --git a/test/068-classloader/src-ex/Mutator.java b/test/068-classloader/src-ex/Mutator.java
new file mode 100644
index 0000000000..6bcd5b8554
--- /dev/null
+++ b/test/068-classloader/src-ex/Mutator.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Simple mutator to change a static field of the mutator target. This will require a dex-cache
+ * access, so this setup allows the correct disambiguation between multiple class-loaders.
+ */
+public class Mutator {
+ public static void mutate(int v) {
+ MutationTarget.value = v;
+ }
+}
\ No newline at end of file
diff --git a/test/068-classloader/src/Main.java b/test/068-classloader/src/Main.java
index 7dfb6f507d..361e2938e3 100644
--- a/test/068-classloader/src/Main.java
+++ b/test/068-classloader/src/Main.java
@@ -21,7 +21,7 @@ public class Main {
/**
* Main entry point.
*/
- public static void main(String[] args) {
+ public static void main(String[] args) throws Exception {
FancyLoader loader;
loader = new FancyLoader(ClassLoader.getSystemClassLoader());
@@ -58,6 +58,65 @@ public class Main {
testAbstract(loader);
testImplement(loader);
testIfaceImplement(loader);
+
+ testSeparation();
+
+ testClassForName();
+ }
+
+ static void testSeparation() {
+ FancyLoader loader1 = new FancyLoader(ClassLoader.getSystemClassLoader());
+ FancyLoader loader2 = new FancyLoader(ClassLoader.getSystemClassLoader());
+
+ try {
+ Class target1 = loader1.loadClass("MutationTarget");
+ Class target2 = loader2.loadClass("MutationTarget");
+
+ if (target1 == target2) {
+ throw new RuntimeException("target1 should not be equal to target2");
+ }
+
+ Class mutator1 = loader1.loadClass("Mutator");
+ Class mutator2 = loader2.loadClass("Mutator");
+
+ if (mutator1 == mutator2) {
+ throw new RuntimeException("mutator1 should not be equal to mutator2");
+ }
+
+ runMutator(mutator1, 1);
+
+ int value = getMutationTargetValue(target1);
+ if (value != 1) {
+ throw new RuntimeException("target 1 has unexpected value " + value);
+ }
+ value = getMutationTargetValue(target2);
+ if (value != 0) {
+ throw new RuntimeException("target 2 has unexpected value " + value);
+ }
+
+ runMutator(mutator2, 2);
+
+ value = getMutationTargetValue(target1);
+ if (value != 1) {
+ throw new RuntimeException("target 1 has unexpected value " + value);
+ }
+ value = getMutationTargetValue(target2);
+ if (value != 2) {
+ throw new RuntimeException("target 2 has unexpected value " + value);
+ }
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ }
+ }
+
+ private static void runMutator(Class c, int v) throws Exception {
+ java.lang.reflect.Method m = c.getDeclaredMethod("mutate", int.class);
+ m.invoke(null, v);
+ }
+
+ private static int getMutationTargetValue(Class c) throws Exception {
+ java.lang.reflect.Field f = c.getDeclaredField("value");
+ return f.getInt(null);
}
/**
@@ -422,4 +481,13 @@ public class Main {
DoubledImplement2 di2 = ifaceSuper.getDoubledInstance2();
di2.one();
}
+
+ static void testClassForName() throws Exception {
+ System.out.println(Class.forName("Main").toString());
+ try {
+ System.out.println(Class.forName("Main", false, null).toString());
+ } catch (ClassNotFoundException expected) {
+ System.out.println("Got expected ClassNotFoundException");
+ }
+ }
}
diff --git a/test/080-oom-throw/expected.txt b/test/080-oom-throw/expected.txt
index 73cc0d8b3e..904393bc3b 100644
--- a/test/080-oom-throw/expected.txt
+++ b/test/080-oom-throw/expected.txt
@@ -1,2 +1,3 @@
+Test reflection correctly threw
NEW_ARRAY correctly threw OOME
NEW_INSTANCE correctly threw OOME
diff --git a/test/080-oom-throw/src/Main.java b/test/080-oom-throw/src/Main.java
index c93f8bbc54..f007b2535c 100644
--- a/test/080-oom-throw/src/Main.java
+++ b/test/080-oom-throw/src/Main.java
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
public class Main {
static class ArrayMemEater {
static boolean sawOome;
@@ -68,6 +71,10 @@ public class Main {
}
public static void main(String[] args) {
+ if (triggerReflectionOOM()) {
+ System.out.println("Test reflection correctly threw");
+ }
+
if (triggerArrayOOM()) {
System.out.println("NEW_ARRAY correctly threw OOME");
}
@@ -76,4 +83,46 @@ public class Main {
System.out.println("NEW_INSTANCE correctly threw OOME");
}
}
+
+ static Object[] holder;
+
+ public static void blowup() throws Exception {
+ int size = 32 * 1024 * 1024;
+ for (int i = 0; i < holder.length; ) {
+ try {
+ holder[i] = new char[size];
+ i++;
+ } catch (OutOfMemoryError oome) {
+ size = size / 2;
+ if (size == 0) {
+ break;
+ }
+ }
+ }
+ holder[0] = new char[100000];
+ }
+
+ static boolean triggerReflectionOOM() {
+ try {
+ Class<?> c = Main.class;
+ Method m = c.getMethod("blowup", (Class[]) null);
+ holder = new Object[1000000];
+ m.invoke(null);
+ holder = null;
+ System.out.println("Didn't throw from blowup");
+ } catch (OutOfMemoryError e) {
+ holder = null;
+ } catch (InvocationTargetException e) {
+ holder = null;
+ if (!(e.getCause() instanceof OutOfMemoryError)) {
+ System.out.println("InvocationTargetException cause not OOME " + e.getCause());
+ return false;
+ }
+ } catch (Exception e) {
+ holder = null;
+ System.out.println("Unexpected exception " + e);
+ return false;
+ }
+ return true;
+ }
}
diff --git a/test/104-growth-limit/src/Main.java b/test/104-growth-limit/src/Main.java
index d666377b54..d31cbf1fe1 100644
--- a/test/104-growth-limit/src/Main.java
+++ b/test/104-growth-limit/src/Main.java
@@ -29,26 +29,28 @@ public class Main {
final Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
final Object runtime = get_runtime.invoke(null);
final Method clear_growth_limit = vm_runtime.getDeclaredMethod("clearGrowthLimit");
+ List<byte[]> l = new ArrayList<byte[]>();
try {
- List<byte[]> l = new ArrayList<byte[]>();
while (true) {
// Allocate a MB at a time
l.add(new byte[1048576]);
alloc1++;
}
} catch (OutOfMemoryError e) {
+ l = null;
}
// Expand the heap to the maximum size.
clear_growth_limit.invoke(runtime);
int alloc2 = 1;
+ l = new ArrayList<byte[]>();
try {
- List<byte[]> l = new ArrayList<byte[]>();
while (true) {
// Allocate a MB at a time
l.add(new byte[1048576]);
alloc2++;
}
} catch (OutOfMemoryError e2) {
+ l = null;
if (alloc1 > alloc2) {
System.out.println("ERROR: Allocated less memory after growth" +
"limit cleared (" + alloc1 + " MBs > " + alloc2 + " MBs");
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
index 56ceadfa6c..f0578ef1ea 100644
--- a/test/458-checker-instruction-simplification/src/Main.java
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -16,6 +16,12 @@
public class Main {
+ public static void assertBooleanEquals(boolean expected, boolean result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
public static void assertIntEquals(int expected, int result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -41,7 +47,7 @@ public class Main {
// CHECK-START: long Main.Add0(long) instruction_simplifier (after)
// CHECK-DAG: [[Arg:j\d+]] ParameterValue
// CHECK-DAG: Return [ [[Arg]] ]
- //
+
// CHECK-START: long Main.Add0(long) instruction_simplifier (after)
// CHECK-NOT: Add
@@ -775,6 +781,147 @@ public class Main {
return res;
}
+ // CHECK-START: int Main.EqualTrueRhs(boolean) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[Cond:z\d+]] Equal [ [[Arg]] [[Const1]] ]
+ // CHECK-DAG: If [ [[Cond]] ]
+
+ // CHECK-START: int Main.EqualTrueRhs(boolean) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: If [ [[Arg]] ]
+
+ public static int EqualTrueRhs(boolean arg) {
+ return (arg != true) ? 3 : 5;
+ }
+
+ // CHECK-START: int Main.EqualTrueLhs(boolean) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[Cond:z\d+]] Equal [ [[Const1]] [[Arg]] ]
+ // CHECK-DAG: If [ [[Cond]] ]
+
+ // CHECK-START: int Main.EqualTrueLhs(boolean) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: If [ [[Arg]] ]
+
+ public static int EqualTrueLhs(boolean arg) {
+ return (true != arg) ? 3 : 5;
+ }
+
+ // CHECK-START: int Main.EqualFalseRhs(boolean) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Cond:z\d+]] Equal [ [[Arg]] [[Const0]] ]
+ // CHECK-DAG: If [ [[Cond]] ]
+
+ // CHECK-START: int Main.EqualFalseRhs(boolean) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[NotArg:z\d+]] BooleanNot [ [[Arg]] ]
+ // CHECK-DAG: If [ [[NotArg]] ]
+
+ public static int EqualFalseRhs(boolean arg) {
+ return (arg != false) ? 3 : 5;
+ }
+
+ // CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Cond:z\d+]] Equal [ [[Const0]] [[Arg]] ]
+ // CHECK-DAG: If [ [[Cond]] ]
+
+ // CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[NotArg:z\d+]] BooleanNot [ [[Arg]] ]
+ // CHECK-DAG: If [ [[NotArg]] ]
+
+ public static int EqualFalseLhs(boolean arg) {
+ return (false != arg) ? 3 : 5;
+ }
+
+ // CHECK-START: int Main.NotEqualTrueRhs(boolean) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[Cond:z\d+]] NotEqual [ [[Arg]] [[Const1]] ]
+ // CHECK-DAG: If [ [[Cond]] ]
+
+ // CHECK-START: int Main.NotEqualTrueRhs(boolean) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[NotArg:z\d+]] BooleanNot [ [[Arg]] ]
+ // CHECK-DAG: If [ [[NotArg]] ]
+
+ public static int NotEqualTrueRhs(boolean arg) {
+ return (arg == true) ? 3 : 5;
+ }
+
+ // CHECK-START: int Main.NotEqualTrueLhs(boolean) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[Cond:z\d+]] NotEqual [ [[Const1]] [[Arg]] ]
+ // CHECK-DAG: If [ [[Cond]] ]
+
+ // CHECK-START: int Main.NotEqualTrueLhs(boolean) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[NotArg:z\d+]] BooleanNot [ [[Arg]] ]
+ // CHECK-DAG: If [ [[NotArg]] ]
+
+ public static int NotEqualTrueLhs(boolean arg) {
+ return (true == arg) ? 3 : 5;
+ }
+
+ // CHECK-START: int Main.NotEqualFalseRhs(boolean) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Cond:z\d+]] NotEqual [ [[Arg]] [[Const0]] ]
+ // CHECK-DAG: If [ [[Cond]] ]
+
+ // CHECK-START: int Main.NotEqualFalseRhs(boolean) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: If [ [[Arg]] ]
+
+ public static int NotEqualFalseRhs(boolean arg) {
+ return (arg == false) ? 3 : 5;
+ }
+
+ // CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Cond:z\d+]] NotEqual [ [[Const0]] [[Arg]] ]
+ // CHECK-DAG: If [ [[Cond]] ]
+
+ // CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: If [ [[Arg]] ]
+
+ public static int NotEqualFalseLhs(boolean arg) {
+ return (false == arg) ? 3 : 5;
+ }
+
+ /*
+ * Test simplification of double Boolean negation. Note that sometimes
+ * both negations can be removed but we only expect the simplifier to
+ * remove the second.
+ */
+
+ // CHECK-START: boolean Main.NotNotBool(boolean) instruction_simplifier_after_types (before)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: [[NotArg:z\d+]] BooleanNot [ [[Arg]] ]
+ // CHECK-DAG: [[NotNotArg:z\d+]] BooleanNot [ [[NotArg]] ]
+ // CHECK-DAG: Return [ [[NotNotArg]] ]
+
+ // CHECK-START: boolean Main.NotNotBool(boolean) instruction_simplifier_after_types (after)
+ // CHECK-DAG: [[Arg:z\d+]] ParameterValue
+ // CHECK-DAG: BooleanNot [ [[Arg]] ]
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ // CHECK-START: boolean Main.NotNotBool(boolean) instruction_simplifier_after_types (after)
+ // CHECK: BooleanNot
+ // CHECK-NOT: BooleanNot
+
+ public static boolean NotNotBool(boolean arg) {
+ return !(!arg);
+ }
+
public static void main(String[] args) {
int arg = 123456;
@@ -809,5 +956,16 @@ public class Main {
assertIntEquals(SubNeg1(arg, arg + 1), -(arg + arg + 1));
assertIntEquals(SubNeg2(arg, arg + 1), -(arg + arg + 1));
assertLongEquals(SubNeg3(arg, arg + 1), -(2 * arg + 1));
+
+ assertIntEquals(EqualTrueRhs(true), 5);
+ assertIntEquals(EqualTrueLhs(true), 5);
+ assertIntEquals(EqualFalseRhs(true), 3);
+ assertIntEquals(EqualFalseLhs(true), 3);
+ assertIntEquals(NotEqualTrueRhs(true), 3);
+ assertIntEquals(NotEqualTrueLhs(true), 3);
+ assertIntEquals(NotEqualFalseRhs(true), 5);
+ assertIntEquals(NotEqualFalseLhs(true), 5);
+ assertBooleanEquals(NotNotBool(true), true);
+ assertBooleanEquals(NotNotBool(false), false);
}
}
diff --git a/test/463-checker-boolean-simplifier/src/Main.java b/test/463-checker-boolean-simplifier/src/Main.java
index efe0d3f729..3daf6934fa 100644
--- a/test/463-checker-boolean-simplifier/src/Main.java
+++ b/test/463-checker-boolean-simplifier/src/Main.java
@@ -27,16 +27,15 @@ public class Main {
}
/*
- * Elementary test negating a boolean. Verifies that the condition is replaced,
- * blocks merged and empty branches removed.
+ * Elementary test negating a boolean. Verifies that blocks are merged and
+ * empty branches removed.
*/
// CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (before)
// CHECK-DAG: [[Param:z\d+]] ParameterValue
// CHECK-DAG: [[Const0:i\d+]] IntConstant 0
// CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[NotEq:z\d+]] NotEqual [ [[Param]] [[Const0]] ]
- // CHECK-DAG: If [ [[NotEq]] ]
+ // CHECK-DAG: If [ [[Param]] ]
// CHECK-DAG: [[Phi:i\d+]] Phi [ [[Const1]] [[Const0]] ]
// CHECK-DAG: Return [ [[Phi]] ]
@@ -49,11 +48,10 @@ public class Main {
// CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (after)
// CHECK-DAG: [[Param:z\d+]] ParameterValue
// CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Eq:z\d+]] Equal [ [[Param]] [[Const0]] ]
- // CHECK-DAG: Return [ [[Eq]] ]
+ // CHECK-DAG: [[NotParam:z\d+]] BooleanNot [ [[Param]] ]
+ // CHECK-DAG: Return [ [[NotParam]] ]
// CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (after)
- // CHECK-NOT: NotEqual
// CHECK-NOT: If
// CHECK-NOT: Phi
@@ -115,6 +113,9 @@ public class Main {
// CHECK-DAG: [[Cond:z\d+]] LessThan [ [[ParamX]] [[ParamY]] ]
// CHECK-DAG: Return [ [[Cond]] ]
+ // CHECK-START: boolean Main.LessThan(int, int) boolean_simplifier (after)
+ // CHECK-NOT: GreaterThanOrEqual
+
public static boolean LessThan(int x, int y) {
return (x < y) ? true : false;
}
diff --git a/test/474-checker-boolean-input/src/Main.java b/test/474-checker-boolean-input/src/Main.java
index 91e8d4f9df..9151986ca2 100644
--- a/test/474-checker-boolean-input/src/Main.java
+++ b/test/474-checker-boolean-input/src/Main.java
@@ -23,47 +23,74 @@ public class Main {
}
/*
- * Test that zero/one constants are accepted as boolean inputs.
+ * Test that integer Phis are accepted as Boolean inputs until
+ * we implement a suitable type analysis.
*/
- // CHECK-START: boolean Main.TestIntAsBoolean() inliner (before)
- // CHECK-DAG: [[Invoke:z\d+]] InvokeStaticOrDirect
- // CHECK-DAG: BooleanNot [ [[Invoke]] ]
+ // CHECK-START: boolean Main.TestPhiAsBoolean(int) boolean_simplifier (after)
+ // CHECK-DAG: [[Phi:i\d+]] Phi
+ // CHECK-DAG: BooleanNot [ [[Phi]] ]
- // CHECK-START: boolean Main.TestIntAsBoolean() inliner (after)
- // CHECK-DAG: [[Const:i\d+]] IntConstant 1
- // CHECK-DAG: BooleanNot [ [[Const]] ]
+ public static boolean f1;
+ public static boolean f2;
- public static boolean InlineConst() {
- return true;
+ public static boolean InlinePhi(int x) {
+ return (x == 42) ? f1 : f2;
}
- public static boolean TestIntAsBoolean() {
- return InlineConst() != true ? true : false;
+ public static boolean TestPhiAsBoolean(int x) {
+ return InlinePhi(x) != true ? true : false;
}
/*
- * Test that integer Phis are accepted as boolean inputs until we implement
- * a suitable type analysis.
+ * Test that integer And is accepted as a Boolean input until
+ * we implement a suitable type analysis.
*/
- // CHECK-START: boolean Main.TestPhiAsBoolean(int) inliner (before)
- // CHECK-DAG: [[Invoke:z\d+]] InvokeStaticOrDirect
- // CHECK-DAG: BooleanNot [ [[Invoke]] ]
+ // CHECK-START: boolean Main.TestAndAsBoolean(boolean, boolean) boolean_simplifier (after)
+ // CHECK-DAG: [[And:i\d+]] And
+ // CHECK-DAG: BooleanNot [ [[And]] ]
- // CHECK-START: boolean Main.TestPhiAsBoolean(int) inliner (after)
- // CHECK-DAG: [[Phi:i\d+]] Phi
- // CHECK-DAG: BooleanNot [ [[Phi]] ]
+ public static boolean InlineAnd(boolean x, boolean y) {
+ return x & y;
+ }
- public static boolean f1;
- public static boolean f2;
+ public static boolean TestAndAsBoolean(boolean x, boolean y) {
+ return InlineAnd(x, y) != true ? true : false;
+ }
- public static boolean InlinePhi(int x) {
- return (x == 42) ? f1 : f2;
+ /*
+ * Test that integer Or is accepted as a Boolean input until
+ * we implement a suitable type analysis.
+ */
+
+ // CHECK-START: boolean Main.TestOrAsBoolean(boolean, boolean) boolean_simplifier (after)
+ // CHECK-DAG: [[Or:i\d+]] Or
+ // CHECK-DAG: BooleanNot [ [[Or]] ]
+
+ public static boolean InlineOr(boolean x, boolean y) {
+ return x | y;
}
- public static boolean TestPhiAsBoolean(int x) {
- return InlinePhi(x) != true ? true : false;
+ public static boolean TestOrAsBoolean(boolean x, boolean y) {
+ return InlineOr(x, y) != true ? true : false;
+ }
+
+ /*
+ * Test that integer Xor is accepted as a Boolean input until
+ * we implement a suitable type analysis.
+ */
+
+ // CHECK-START: boolean Main.TestXorAsBoolean(boolean, boolean) boolean_simplifier (after)
+ // CHECK-DAG: [[Xor:i\d+]] Xor
+ // CHECK-DAG: BooleanNot [ [[Xor]] ]
+
+ public static boolean InlineXor(boolean x, boolean y) {
+ return x ^ y;
+ }
+
+ public static boolean TestXorAsBoolean(boolean x, boolean y) {
+ return InlineXor(x, y) != true ? true : false;
}
public static void main(String[] args) {
@@ -71,5 +98,11 @@ public class Main {
f2 = false;
assertBoolEquals(true, TestPhiAsBoolean(0));
assertBoolEquals(false, TestPhiAsBoolean(42));
+ assertBoolEquals(true, TestAndAsBoolean(true, false));
+ assertBoolEquals(false, TestAndAsBoolean(true, true));
+ assertBoolEquals(true, TestOrAsBoolean(false, false));
+ assertBoolEquals(false, TestOrAsBoolean(true, true));
+ assertBoolEquals(true, TestXorAsBoolean(true, true));
+ assertBoolEquals(false, TestXorAsBoolean(true, false));
}
}
diff --git a/test/474-fp-sub-neg/expected.txt b/test/474-fp-sub-neg/expected.txt
new file mode 100644
index 0000000000..e6ffe0d430
--- /dev/null
+++ b/test/474-fp-sub-neg/expected.txt
@@ -0,0 +1,2 @@
+-0.0
+-0.0
diff --git a/test/474-fp-sub-neg/info.txt b/test/474-fp-sub-neg/info.txt
new file mode 100644
index 0000000000..eced93fef5
--- /dev/null
+++ b/test/474-fp-sub-neg/info.txt
@@ -0,0 +1,5 @@
+Regression check for the optimizing compiler's instruction simplification pass.
+A pair (sub, neg) should not be transformed to (sub) for
+fp calculations because we can lose the sign of zero for
+the following expression:
+ - ( A - B ) != B - A ; if B == A
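The signed-zero behaviour is easy to reproduce in isolation; a minimal sketch of the case this test guards against (hypothetical demo class, assuming standard Java/IEEE-754 semantics):

    public class SignedZeroDemo {
        public static void main(String[] args) {
            double a = 1.0;
            double b = 1.0;
            System.out.println(-(a - b)); // -0.0
            System.out.println(b - a);    // 0.0
            // When a == b, -(a - b) and (b - a) differ in the sign of zero,
            // so rewriting neg(sub(a, b)) as sub(b, a) is invalid for fp types.
        }
    }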
diff --git a/test/474-fp-sub-neg/src/Main.java b/test/474-fp-sub-neg/src/Main.java
new file mode 100644
index 0000000000..e6bce6793f
--- /dev/null
+++ b/test/474-fp-sub-neg/src/Main.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void floatTest() {
+ float f = 0;
+ float fc = 1f;
+ for (int i = 0; i < 2; i++) {
+ f -= fc;
+ f = -f;
+ }
+
+ System.out.println(f);
+ }
+
+ public static void doubleTest() {
+ double d = 0;
+ double dc = 1f;
+ for (int i = 0; i < 2; i++) {
+ d -= dc;
+ d = -d;
+ }
+
+ System.out.println(d);
+ }
+
+ public static void main(String[] args) {
+ doubleTest();
+ floatTest();
+ }
+
+}
diff --git a/test/475-simplify-mul-zero/expected.txt b/test/475-simplify-mul-zero/expected.txt
new file mode 100644
index 0000000000..7ed6ff82de
--- /dev/null
+++ b/test/475-simplify-mul-zero/expected.txt
@@ -0,0 +1 @@
+5
diff --git a/test/475-simplify-mul-zero/info.txt b/test/475-simplify-mul-zero/info.txt
new file mode 100644
index 0000000000..0db11f2823
--- /dev/null
+++ b/test/475-simplify-mul-zero/info.txt
@@ -0,0 +1,2 @@
+Regression check for the optimizing compiler's instruction simplification pass.
+Mul should expect a zero constant as input.
\ No newline at end of file
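As a rough illustration, the regression input reduces to a multiplication whose operand constant-folds to zero (hypothetical standalone class mirroring the test below):

    public class MulZeroSketch {
        public static void main(String[] args) {
            long l = 2207693990L;
            // (l - l) constant-folds to 0, so the Mul below reaches the
            // simplifier with a zero constant input, which it must accept.
            int r = 5 ^ (int) (-(-(-(l - l)) * 1));
            System.out.println(r); // 5
        }
    }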
diff --git a/test/475-simplify-mul-zero/src/Main.java b/test/475-simplify-mul-zero/src/Main.java
new file mode 100644
index 0000000000..57adcff01e
--- /dev/null
+++ b/test/475-simplify-mul-zero/src/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ long l3 = 2207693990L;
+ int i12 = 5;
+
+ for (int i = 1; i < 2; ++i) {
+ i12 ^= (int)(-((-(-(l3 - l3))) * i));
+ }
+
+ System.out.println(i12);
+ }
+}
diff --git a/test/476-checker-ctor-memory-barrier/expected.txt b/test/476-checker-ctor-memory-barrier/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/476-checker-ctor-memory-barrier/expected.txt
diff --git a/test/476-checker-ctor-memory-barrier/info.txt b/test/476-checker-ctor-memory-barrier/info.txt
new file mode 100644
index 0000000000..9bd311f784
--- /dev/null
+++ b/test/476-checker-ctor-memory-barrier/info.txt
@@ -0,0 +1,2 @@
+Tests if we add memory barriers on constructors when needed (i.e. when the
+class has final fields).
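The barrier matters for unsafe publication of objects with final fields; a rough sketch of the pattern it protects (hypothetical classes, not part of the test, assuming Java-memory-model final-field semantics):

    class Box {
        final int v;
        Box() {
            v = 42;
            // A StoreStore barrier is required here, before 'this' escapes,
            // so that 'v' is seen as 42 by any thread that sees the Box.
        }
    }

    class Publisher {
        static Box shared; // intentionally non-volatile

        static void writer() { shared = new Box(); }

        static void reader() {
            Box b = shared;
            // Without the constructor barrier, a racy reader could observe
            // b.v == 0; the final-field guarantee forbids that.
            if (b != null && b.v != 42) throw new AssertionError();
        }
    }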
diff --git a/test/476-checker-ctor-memory-barrier/src/Main.java b/test/476-checker-ctor-memory-barrier/src/Main.java
new file mode 100644
index 0000000000..10aa2ab164
--- /dev/null
+++ b/test/476-checker-ctor-memory-barrier/src/Main.java
@@ -0,0 +1,147 @@
+/*
+* Copyright (C) 2015 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+
+class ClassWithoutFinals {
+ // CHECK-START: void ClassWithoutFinals.<init>() register (after)
+ // CHECK-NOT: MemoryBarrier {{StoreStore}}
+ public ClassWithoutFinals() {}
+}
+
+class ClassWithFinals {
+ public final int x;
+ public ClassWithFinals obj;
+
+ // CHECK-START: void ClassWithFinals.<init>(boolean) register (after)
+ // CHECK: MemoryBarrier {{StoreStore}}
+ // CHECK-NOT: {{.*}}
+ // CHECK: ReturnVoid
+ public ClassWithFinals(boolean cond) {
+ x = 0;
+ if (cond) {
+ // avoid inlining
+ throw new RuntimeException();
+ }
+ }
+
+ // CHECK-START: void ClassWithFinals.<init>() register (after)
+ // CHECK: MemoryBarrier {{StoreStore}}
+ // CHECK-NOT: {{.*}}
+ // CHECK: ReturnVoid
+ public ClassWithFinals() {
+ x = 0;
+ }
+
+ // CHECK-START: void ClassWithFinals.<init>(int) register (after)
+ // CHECK: MemoryBarrier {{StoreStore}}
+ // CHECK: MemoryBarrier {{StoreStore}}
+ // CHECK-NOT: {{.*}}
+ // CHECK: ReturnVoid
+ public ClassWithFinals(int x) {
+ // This should have two barriers:
+ // - one for the constructor
+ // - one for the `new` which should be inlined.
+ obj = new ClassWithFinals();
+ this.x = x;
+ }
+}
+
+class InheritFromClassWithFinals extends ClassWithFinals {
+ // CHECK-START: void InheritFromClassWithFinals.<init>() register (after)
+ // CHECK: MemoryBarrier {{StoreStore}}
+ // CHECK-NOT: {{.*}}
+ // CHECK: ReturnVoid
+
+ // CHECK-START: void InheritFromClassWithFinals.<init>() register (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+ public InheritFromClassWithFinals() {
+ // Should inline the super constructor.
+ }
+
+ // CHECK-START: void InheritFromClassWithFinals.<init>(boolean) register (after)
+ // CHECK: InvokeStaticOrDirect
+
+ // CHECK-START: void InheritFromClassWithFinals.<init>(boolean) register (after)
+ // CHECK-NOT: MemoryBarrier {{StoreStore}}
+ public InheritFromClassWithFinals(boolean cond) {
+ super(cond);
+ // should not inline the super constructor
+ }
+}
+
+class HaveFinalsAndInheritFromClassWithFinals extends ClassWithFinals {
+ final int y;
+
+ // CHECK-START: void HaveFinalsAndInheritFromClassWithFinals.<init>() register (after)
+ // CHECK: MemoryBarrier {{StoreStore}}
+ // CHECK: MemoryBarrier {{StoreStore}}
+ // CHECK-NOT: {{.*}}
+ // CHECK: ReturnVoid
+
+ // CHECK-START: void HaveFinalsAndInheritFromClassWithFinals.<init>() register (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+ public HaveFinalsAndInheritFromClassWithFinals() {
+ // Should inline the super constructor.
+ y = 0;
+ }
+
+ // CHECK-START: void HaveFinalsAndInheritFromClassWithFinals.<init>(boolean) register (after)
+ // CHECK: InvokeStaticOrDirect
+ // CHECK: MemoryBarrier {{StoreStore}}
+ // CHECK-NOT: {{.*}}
+ // CHECK: ReturnVoid
+ public HaveFinalsAndInheritFromClassWithFinals(boolean cond) {
+ super(cond);
+ // should not inline the super constructor
+ y = 0;
+ }
+}
+
+public class Main {
+
+ // CHECK-START: ClassWithFinals Main.noInlineNoConstructorBarrier() register (after)
+ // CHECK: InvokeStaticOrDirect
+
+ // CHECK-START: ClassWithFinals Main.noInlineNoConstructorBarrier() register (after)
+ // CHECK-NOT: MemoryBarrier {{StoreStore}}
+ public static ClassWithFinals noInlineNoConstructorBarrier() {
+ return new ClassWithFinals(false);
+ }
+
+ // CHECK-START: ClassWithFinals Main.inlineConstructorBarrier() register (after)
+ // CHECK: MemoryBarrier {{StoreStore}}
+ // CHECK-NOT: {{.*}}
+ // CHECK: Return
+
+ // CHECK-START: ClassWithFinals Main.inlineConstructorBarrier() register (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+ public static ClassWithFinals inlineConstructorBarrier() {
+ return new ClassWithFinals();
+ }
+
+ // CHECK-START: InheritFromClassWithFinals Main.doubleInlineConstructorBarrier() register (after)
+ // CHECK: MemoryBarrier {{StoreStore}}
+ // CHECK-NOT: {{.*}}
+ // CHECK: Return
+
+ // CHECK-START: InheritFromClassWithFinals Main.doubleInlineConstructorBarrier() register (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+ public static InheritFromClassWithFinals doubleInlineConstructorBarrier() {
+ return new InheritFromClassWithFinals();
+ }
+
+ public static void main(String[] args) { }
+}
diff --git a/test/477-checker-bound-type/expected.txt b/test/477-checker-bound-type/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/477-checker-bound-type/expected.txt
diff --git a/test/477-checker-bound-type/info.txt b/test/477-checker-bound-type/info.txt
new file mode 100644
index 0000000000..68c774a5fb
--- /dev/null
+++ b/test/477-checker-bound-type/info.txt
@@ -0,0 +1,3 @@
+Tests that we only generate a bound type if we have relevant users.
+It also tests a code generator regression for GenerateTestAndBranch which
+didn't take into account NullConstants.
diff --git a/test/477-checker-bound-type/src/Main.java b/test/477-checker-bound-type/src/Main.java
new file mode 100644
index 0000000000..b30028ddd4
--- /dev/null
+++ b/test/477-checker-bound-type/src/Main.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+
+ // CHECK-START: java.lang.Object Main.boundTypeForIf(java.lang.Object) reference_type_propagation (after)
+ // CHECK: BoundType
+ public static Object boundTypeForIf(Object a) {
+ if (a != null) {
+ return a.toString();
+ } else {
+ return null;
+ }
+ }
+
+ // CHECK-START: java.lang.Object Main.boundTypeForInstanceOf(java.lang.Object) reference_type_propagation (after)
+ // CHECK: BoundType
+ public static Object boundTypeForInstanceOf(Object a) {
+ if (a instanceof Main) {
+ return (Main)a;
+ } else {
+ return null;
+ }
+ }
+
+ // CHECK-START: java.lang.Object Main.noBoundTypeForIf(java.lang.Object) reference_type_propagation (after)
+ // CHECK-NOT: BoundType
+ public static Object noBoundTypeForIf(Object a) {
+ if (a == null) {
+ return new Object();
+ } else {
+ return null;
+ }
+ }
+
+ // CHECK-START: java.lang.Object Main.noBoundTypeForInstanceOf(java.lang.Object) reference_type_propagation (after)
+ // CHECK-NOT: BoundType
+ public static Object noBoundTypeForInstanceOf(Object a) {
+ if (a instanceof Main) {
+ return new Object();
+ } else {
+ return null;
+ }
+ }
+
+ public static void main(String[] args) { }
+}
diff --git a/test/477-long-to-float-conversion-precision/expected.txt b/test/477-long-to-float-conversion-precision/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/477-long-to-float-conversion-precision/expected.txt
diff --git a/test/477-long-to-float-conversion-precision/info.txt b/test/477-long-to-float-conversion-precision/info.txt
new file mode 100644
index 0000000000..d9d41d70ba
--- /dev/null
+++ b/test/477-long-to-float-conversion-precision/info.txt
@@ -0,0 +1 @@
+Tests the precision of type conversions.
diff --git a/test/477-long-to-float-conversion-precision/src/Main.java b/test/477-long-to-float-conversion-precision/src/Main.java
new file mode 100644
index 0000000000..bc17053e20
--- /dev/null
+++ b/test/477-long-to-float-conversion-precision/src/Main.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void assertFloatEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main(String[] args) {
+ // Generate, compile and check long-to-float Dex instructions.
+ longToFloat();
+ }
+
+ private static void longToFloat() {
+ // The result for this test case is slightly less accurate on ARM,
+ // due to the implementation of long-to-float type conversions for
+ // this architecture (both in Quick and Optimizing).
+ assertFloatEquals(Float.intBitsToFloat(-555858671), $opt$LongToFloat(-8008112895877447681L));
+ }
+
+ // This method produces a long-to-float Dex instruction.
+ static float $opt$LongToFloat(long a) { return (float)a; }
+}
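
A minimal standalone sketch (not part of this patch) of why long-to-float conversion is inherently lossy, which is what makes the last-bit rounding architecture-sensitive in the test above: a float carries only a 24-bit significand, so any long with more than 24 significant bits must be rounded.

    public class PrecisionSketch {
      public static void main(String[] args) {
        long big = (1L << 24) + 1;              // 16777217 needs 25 bits
        float f = (float) big;                  // rounded to the nearest float
        System.out.println(f == 16777216.0f);   // true: the low bit is lost
      }
    }
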
diff --git a/test/478-checker-inliner-nested-loop/expected.txt b/test/478-checker-inliner-nested-loop/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/478-checker-inliner-nested-loop/expected.txt
diff --git a/test/478-checker-inliner-nested-loop/info.txt b/test/478-checker-inliner-nested-loop/info.txt
new file mode 100644
index 0000000000..c221e37285
--- /dev/null
+++ b/test/478-checker-inliner-nested-loop/info.txt
@@ -0,0 +1,2 @@
+Tests inlining into a nested loop. SSAChecker should verify that
+loop information was updated correctly.
diff --git a/test/478-checker-inliner-nested-loop/src/Main.java b/test/478-checker-inliner-nested-loop/src/Main.java
new file mode 100644
index 0000000000..df583d9302
--- /dev/null
+++ b/test/478-checker-inliner-nested-loop/src/Main.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static int Inline(int x, int y) {
+ int result;
+ if (x <= y) {
+ result = x * y;
+ } else {
+ result = 0;
+ }
+ return result;
+ }
+
+ // CHECK-START: int Main.NestedLoop(int, int) inliner (before)
+ // CHECK-NOT: Mul
+
+ // CHECK-START: int Main.NestedLoop(int, int) inliner (after)
+ // CHECK: Mul
+ // CHECK-NOT: Mul
+
+ public static int NestedLoop(int max_x, int max_y) {
+ int total = 0;
+ for (int x = 0; x < max_x; ++x) {
+ for (int y = 0; y < max_y; ++y) {
+ total += Inline(x, y);
+ }
+ }
+ return total;
+ }
+
+ public static void main(String[] args) {
+ assertIntEquals(0, NestedLoop(1, 1));
+ assertIntEquals(3, NestedLoop(2, 3));
+ }
+}
diff --git a/test/479-regression-implicit-null-check/expected.txt b/test/479-regression-implicit-null-check/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/479-regression-implicit-null-check/expected.txt
diff --git a/test/479-regression-implicit-null-check/info.txt b/test/479-regression-implicit-null-check/info.txt
new file mode 100644
index 0000000000..0bfca8cbad
--- /dev/null
+++ b/test/479-regression-implicit-null-check/info.txt
@@ -0,0 +1,2 @@
+Tests a regression in which we moved the null check to an instruction that
+checked a different object. This led to valid null checks being elided.
diff --git a/test/479-regression-implicit-null-check/src/Main.java b/test/479-regression-implicit-null-check/src/Main.java
new file mode 100644
index 0000000000..6b6f2e4d2a
--- /dev/null
+++ b/test/479-regression-implicit-null-check/src/Main.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+ public int x = 0;
+
+ public Main(Main c) {
+ // After inlining the graph will look like:
+ // NullCheck c
+ // InstanceFieldGet c
+ // InstanceFieldSet this 3
+    // Dead code elimination will remove the InstanceFieldGet and we'll end up with:
+ // NullCheck c
+ // InstanceFieldSet this 3
+    // At codegen, when verifying whether we can move the null check to the user,
+ // we should check that we actually have the same user (not only that the
+ // next instruction can do implicit null checks).
+ // In this case we should generate code for the NullCheck since the next
+ // instruction checks a different object.
+ c.willBeInlined();
+ x = 3;
+ }
+
+ private int willBeInlined() {
+ return x;
+ }
+
+ public static void main(String[] args) {
+ try {
+ new Main(null);
+ throw new RuntimeException("Failed to throw NullPointerException");
+ } catch (NullPointerException e) {
+ // expected
+ }
+ }
+}
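
A minimal standalone sketch (hypothetical names, not part of this patch) of the invariant the regression test above protects: a call on a null receiver must still throw, even when inlining plus dead code elimination discard everything the callee computed.

    class NullCheckSketch {
      int f;

      int get() { return f; }        // trivially inlineable

      static void use(NullCheckSketch s) {
        s.get();                     // result unused; the null check must survive
      }

      public static void main(String[] args) {
        try {
          use(null);
          throw new AssertionError("expected NullPointerException");
        } catch (NullPointerException expected) {
          // The null check on 's' was kept even though the callee body is dead.
        }
      }
    }
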
diff --git a/test/480-checker-dead-blocks/expected.txt b/test/480-checker-dead-blocks/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/480-checker-dead-blocks/expected.txt
diff --git a/test/480-checker-dead-blocks/info.txt b/test/480-checker-dead-blocks/info.txt
new file mode 100644
index 0000000000..5aeafac744
--- /dev/null
+++ b/test/480-checker-dead-blocks/info.txt
@@ -0,0 +1 @@
+Tests removal of dead blocks.
\ No newline at end of file
diff --git a/test/480-checker-dead-blocks/src/Main.java b/test/480-checker-dead-blocks/src/Main.java
new file mode 100644
index 0000000000..560ce952a4
--- /dev/null
+++ b/test/480-checker-dead-blocks/src/Main.java
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static boolean inlineTrue() {
+ return true;
+ }
+
+ public static boolean inlineFalse() {
+ return false;
+ }
+
+ // CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination_final (before)
+ // CHECK-DAG: [[ArgX:i\d+]] ParameterValue
+ // CHECK-DAG: [[ArgY:i\d+]] ParameterValue
+ // CHECK-DAG: If
+ // CHECK-DAG: [[Add:i\d+]] Add [ [[ArgX]] [[ArgY]] ]
+ // CHECK-DAG: [[Sub:i\d+]] Sub [ [[ArgX]] [[ArgY]] ]
+ // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Add]] [[Sub]] ]
+ // CHECK-DAG: Return [ [[Phi]] ]
+
+ // CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination_final (after)
+ // CHECK-DAG: [[ArgX:i\d+]] ParameterValue
+ // CHECK-DAG: [[ArgY:i\d+]] ParameterValue
+ // CHECK-DAG: [[Add:i\d+]] Add [ [[ArgX]] [[ArgY]] ]
+ // CHECK-DAG: Return [ [[Add]] ]
+
+ // CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination_final (after)
+ // CHECK-NOT: If
+ // CHECK-NOT: Sub
+ // CHECK-NOT: Phi
+
+ public static int testTrueBranch(int x, int y) {
+ int z;
+ if (inlineTrue()) {
+ z = x + y;
+ } else {
+ z = x - y;
+ }
+ return z;
+ }
+
+ // CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination_final (before)
+ // CHECK-DAG: [[ArgX:i\d+]] ParameterValue
+ // CHECK-DAG: [[ArgY:i\d+]] ParameterValue
+ // CHECK-DAG: If
+ // CHECK-DAG: [[Add:i\d+]] Add [ [[ArgX]] [[ArgY]] ]
+ // CHECK-DAG: [[Sub:i\d+]] Sub [ [[ArgX]] [[ArgY]] ]
+ // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Add]] [[Sub]] ]
+ // CHECK-DAG: Return [ [[Phi]] ]
+
+ // CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination_final (after)
+ // CHECK-DAG: [[ArgX:i\d+]] ParameterValue
+ // CHECK-DAG: [[ArgY:i\d+]] ParameterValue
+ // CHECK-DAG: [[Sub:i\d+]] Sub [ [[ArgX]] [[ArgY]] ]
+ // CHECK-DAG: Return [ [[Sub]] ]
+
+ // CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination_final (after)
+ // CHECK-NOT: If
+ // CHECK-NOT: Add
+ // CHECK-NOT: Phi
+
+ public static int testFalseBranch(int x, int y) {
+ int z;
+ if (inlineFalse()) {
+ z = x + y;
+ } else {
+ z = x - y;
+ }
+ return z;
+ }
+
+ // CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination_final (before)
+ // CHECK: Mul
+
+ // CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination_final (after)
+ // CHECK-NOT: Mul
+
+ public static int testRemoveLoop(int x) {
+ if (inlineFalse()) {
+ for (int i = 0; i < x; ++i) {
+ x *= x;
+ }
+ }
+ return x;
+ }
+
+ // CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination_final (before)
+ // CHECK-DAG: Return
+ // CHECK-DAG: Exit
+
+ // CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination_final (after)
+ // CHECK-NOT: Return
+ // CHECK-NOT: Exit
+
+ public static int testInfiniteLoop(int x) {
+ while (inlineTrue()) {
+ x++;
+ }
+ return x;
+ }
+
+ // CHECK-START: int Main.testDeadLoop(int) dead_code_elimination_final (before)
+ // CHECK-DAG: If
+ // CHECK-DAG: Add
+
+ // CHECK-START: int Main.testDeadLoop(int) dead_code_elimination_final (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+  // CHECK-START: int Main.testDeadLoop(int) dead_code_elimination_final (after)
+ // CHECK-NOT: If
+ // CHECK-NOT: Add
+
+ public static int testDeadLoop(int x) {
+ while (inlineFalse()) {
+ x++;
+ }
+ return x;
+ }
+
+ public static void main(String[] args) {
+ assertIntEquals(7, testTrueBranch(4, 3));
+ assertIntEquals(1, testFalseBranch(4, 3));
+ assertIntEquals(42, testRemoveLoop(42));
+ }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 39afc6785b..c5abd4625c 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -335,6 +335,29 @@ endif
TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
+# Known broken tests for Quick's and Optimizing's ARM back ends.
+TEST_ART_BROKEN_ARM_RUN_TESTS := 477-long-to-float-conversion-precision # b/20413424
+
+ifeq ($(TARGET_ARCH),arm)
+ ifneq (,$(filter 32,$(ALL_ADDRESS_SIZES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_ARM_RUN_TESTS),32)
+ endif
+endif
+
+ifdef TARGET_2ND_ARCH
+ ifeq ($(TARGET_2ND_ARCH),arm)
+ ifneq (,$(filter 32,$(ALL_ADDRESS_SIZES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_ARM_RUN_TESTS),32)
+ endif
+ endif
+endif
+
+TEST_ART_BROKEN_ARM_RUN_TESTS :=
+
# Known broken tests for the arm64 optimizing compiler backend.
TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS :=
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 414e4df9f5..1c44958eea 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -225,7 +225,8 @@ if [ "$DEBUGGER" = "y" ]; then
fi
if [ "$USE_JVM" = "y" ]; then
- ${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -classpath classes $MAIN "$@"
+  # -Xmx is necessary since we don't pass the ART flags down to the JVM.
+ ${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -Xmx256m -classpath classes $MAIN "$@"
exit
fi
@@ -363,6 +364,7 @@ if [ "$HOST" = "n" ]; then
export ANDROID_ROOT=$ANDROID_ROOT && \
$mkdir_cmdline && \
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH && \
+ export PATH=$ANDROID_ROOT/bin:$PATH && \
$dex2oat_cmdline && \
$dalvikvm_cmdline"
diff --git a/tools/art b/tools/art
index 6c89a60b6a..85e6e2fae6 100644
--- a/tools/art
+++ b/tools/art
@@ -92,6 +92,7 @@ fi
ANDROID_DATA=$ANDROID_DATA \
ANDROID_ROOT=$ANDROID_ROOT \
LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
+ PATH=$ANDROID_ROOT/bin:$PATH \
$invoke_with $ANDROID_ROOT/bin/$DALVIKVM $lib \
-XXlib:$LIBART \
-Ximage:$ANDROID_ROOT/framework/core.art \
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index e87ae0860d..a007fa2171 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -19,6 +19,11 @@ if [ ! -d libcore ]; then
exit 1
fi
+if [[ $ANDROID_SERIAL == HT4CTJT03670 ]] || [[ $ANDROID_SERIAL == HT49CJT00070 ]]; then
+ echo "Not running on buildbot because of failures on volantis. Investigating."
+ exit 0
+fi
+
# Jar containing all the tests.
test_jar=out/host/linux-x86/framework/apache-harmony-jdwp-tests-hostdex.jar
junit_jar=out/host/linux-x86/framework/junit.jar
@@ -30,6 +35,7 @@ if [ ! -f $test_jar -o ! -f $junit_jar ]; then
fi
art="/data/local/tmp/system/bin/art"
+art_debugee="sh /data/local/tmp/system/bin/art"
# We use Quick's image on target because optimizing's image is not compiled debuggable.
image="-Ximage:/data/art-test/core.art"
args=$@
@@ -45,6 +51,7 @@ while true; do
# Specify bash explicitly since the art script cannot, since it has to run on the device
# with mksh.
art="bash out/host/linux-x86/bin/art"
+ art_debugee="bash out/host/linux-x86/bin/art"
# We force generation of a new image to avoid build-time and run-time classpath differences.
image="-Ximage:/system/non/existent"
# We do not need a device directory on host.
@@ -76,7 +83,7 @@ vogar $vm_command \
--vm-arg -Djpda.settings.verbose=true \
--vm-arg -Djpda.settings.syncPort=34016 \
--vm-arg -Djpda.settings.transportAddress=127.0.0.1:55107 \
- --vm-arg -Djpda.settings.debuggeeJavaPath="$art $image $debuggee_args" \
+ --vm-arg -Djpda.settings.debuggeeJavaPath="\"$art_debugee $image $debuggee_args\"" \
--classpath $test_jar \
--classpath $junit_jar \
--vm-arg -Xcompiler-option --vm-arg --compiler-backend=Optimizing \