-rw-r--r--build/Android.bp13
-rw-r--r--build/Android.gtest.mk2
-rw-r--r--build/art.go13
-rw-r--r--cmdline/cmdline_parser_test.cc9
-rw-r--r--cmdline/cmdline_types.h9
-rw-r--r--compiler/Android.bp3
-rw-r--r--compiler/common_compiler_test.cc7
-rw-r--r--compiler/compiled_method.h41
-rw-r--r--compiler/compiled_method_test.cc32
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc2
-rw-r--r--compiler/dex/inline_method_analyser.cc7
-rw-r--r--compiler/dex/verification_results.cc17
-rw-r--r--compiler/driver/compiled_method_storage.cc2
-rw-r--r--compiler/driver/compiler_driver.cc149
-rw-r--r--compiler/driver/compiler_driver_test.cc10
-rw-r--r--compiler/elf_builder.h12
-rw-r--r--compiler/elf_writer.h1
-rw-r--r--compiler/elf_writer_quick.cc27
-rw-r--r--compiler/exception_test.cc3
-rw-r--r--compiler/image_test.h7
-rw-r--r--compiler/image_writer.cc22
-rw-r--r--compiler/image_writer.h16
-rw-r--r--compiler/jit/jit_compiler.cc10
-rw-r--r--compiler/linker/arm/relative_patcher_arm_base.cc1
-rw-r--r--compiler/linker/arm64/relative_patcher_arm64.cc14
-rw-r--r--compiler/linker/method_bss_mapping_encoder.h79
-rw-r--r--compiler/linker/method_bss_mapping_encoder_test.cc50
-rw-r--r--compiler/linker/mips/relative_patcher_mips.cc20
-rw-r--r--compiler/linker/mips/relative_patcher_mips.h3
-rw-r--r--compiler/linker/mips/relative_patcher_mips_test.cc1
-rw-r--r--compiler/linker/output_stream_test.cc5
-rw-r--r--compiler/oat_test.cc8
-rw-r--r--compiler/oat_writer.cc592
-rw-r--r--compiler/oat_writer.h64
-rw-r--r--compiler/optimizing/code_generator.cc55
-rw-r--r--compiler/optimizing/code_generator.h14
-rw-r--r--compiler/optimizing/code_generator_arm.cc126
-rw-r--r--compiler/optimizing/code_generator_arm.h14
-rw-r--r--compiler/optimizing/code_generator_arm64.cc118
-rw-r--r--compiler/optimizing/code_generator_arm64.h30
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc140
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.h14
-rw-r--r--compiler/optimizing/code_generator_mips.cc140
-rw-r--r--compiler/optimizing/code_generator_mips.h13
-rw-r--r--compiler/optimizing/code_generator_mips64.cc90
-rw-r--r--compiler/optimizing/code_generator_mips64.h16
-rw-r--r--compiler/optimizing/code_generator_vector_arm64.cc52
-rw-r--r--compiler/optimizing/code_generator_vector_mips64.cc752
-rw-r--r--compiler/optimizing/code_generator_x86.cc97
-rw-r--r--compiler/optimizing/code_generator_x86.h16
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc80
-rw-r--r--compiler/optimizing/code_generator_x86_64.h13
-rw-r--r--compiler/optimizing/common_arm64.h21
-rw-r--r--compiler/optimizing/dex_cache_array_fixups_arm.cc116
-rw-r--r--compiler/optimizing/dex_cache_array_fixups_arm.h46
-rw-r--r--compiler/optimizing/dex_cache_array_fixups_mips.cc111
-rw-r--r--compiler/optimizing/dex_cache_array_fixups_mips.h46
-rw-r--r--compiler/optimizing/graph_visualizer.cc12
-rw-r--r--compiler/optimizing/induction_var_analysis.cc4
-rw-r--r--compiler/optimizing/induction_var_range.cc24
-rw-r--r--compiler/optimizing/inliner.cc2
-rw-r--r--compiler/optimizing/instruction_builder.cc2
-rw-r--r--compiler/optimizing/intrinsics.cc2
-rw-r--r--compiler/optimizing/intrinsics_arm.cc55
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc74
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc62
-rw-r--r--compiler/optimizing/intrinsics_mips.cc9
-rw-r--r--compiler/optimizing/intrinsics_mips64.cc9
-rw-r--r--compiler/optimizing/intrinsics_utils.h5
-rw-r--r--compiler/optimizing/intrinsics_x86.cc63
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc63
-rw-r--r--compiler/optimizing/loop_optimization.cc48
-rw-r--r--compiler/optimizing/loop_optimization.h1
-rw-r--r--compiler/optimizing/nodes.cc24
-rw-r--r--compiler/optimizing/nodes.h92
-rw-r--r--compiler/optimizing/nodes_arm.h61
-rw-r--r--compiler/optimizing/nodes_mips.h32
-rw-r--r--compiler/optimizing/optimizing_compiler.cc15
-rw-r--r--compiler/optimizing/pc_relative_fixups_mips.cc5
-rw-r--r--compiler/optimizing/register_allocation_resolver.cc67
-rw-r--r--compiler/optimizing/register_allocator_graph_color.cc56
-rw-r--r--compiler/optimizing/register_allocator_test.cc12
-rw-r--r--compiler/optimizing/scheduler_arm.cc10
-rw-r--r--compiler/optimizing/scheduler_arm.h5
-rw-r--r--compiler/optimizing/scheduler_arm64.cc151
-rw-r--r--compiler/optimizing/scheduler_arm64.h79
-rw-r--r--compiler/optimizing/sharpening.cc25
-rw-r--r--compiler/optimizing/ssa_liveness_analysis.cc17
-rw-r--r--compiler/optimizing/ssa_liveness_analysis.h242
-rw-r--r--compiler/utils/arm/assembler_arm.h2
-rw-r--r--compiler/utils/arm/assembler_arm_vixl.cc2
-rw-r--r--compiler/utils/dedupe_set_test.cc2
-rw-r--r--compiler/utils/intrusive_forward_list.h30
-rw-r--r--compiler/utils/intrusive_forward_list_test.cc418
-rw-r--r--compiler/utils/mips/assembler_mips.cc1201
-rw-r--r--compiler/utils/mips/assembler_mips.h176
-rw-r--r--compiler/utils/mips/assembler_mips32r6_test.cc616
-rw-r--r--compiler/utils/mips/constants_mips.h29
-rw-r--r--compiler/utils/mips64/assembler_mips64.cc111
-rw-r--r--compiler/utils/mips64/assembler_mips64.h24
-rw-r--r--compiler/utils/mips64/assembler_mips64_test.cc100
-rw-r--r--compiler/utils/swap_space.cc3
-rw-r--r--dex2oat/dex2oat.cc44
-rw-r--r--dex2oat/dex2oat_test.cc5
-rw-r--r--dexdump/dexdump.cc3
-rw-r--r--dexlayout/dex_ir.cc27
-rw-r--r--dexlayout/dex_ir.h1
-rw-r--r--dexlayout/dex_visualize.cc2
-rw-r--r--dexlayout/dexlayout.cc27
-rw-r--r--dexlayout/dexlayout_test.cc20
-rw-r--r--disassembler/disassembler_mips.cc6
-rw-r--r--imgdiag/imgdiag_test.cc1
-rw-r--r--oatdump/oatdump.cc6
-rw-r--r--patchoat/patchoat.cc1
-rw-r--r--profman/profile_assistant_test.cc144
-rw-r--r--profman/profman.cc97
-rw-r--r--runtime/Android.bp5
-rw-r--r--runtime/arch/arch_test.cc38
-rw-r--r--runtime/arch/arm/context_arm.cc3
-rw-r--r--runtime/arch/arm/entrypoints_init_arm.cc1
-rw-r--r--runtime/arch/arm/fault_handler_arm.cc2
-rw-r--r--runtime/arch/arm/quick_entrypoints_arm.S165
-rw-r--r--runtime/arch/arm/quick_method_frame_info_arm.h30
-rw-r--r--runtime/arch/arm64/context_arm64.cc3
-rw-r--r--runtime/arch/arm64/entrypoints_init_arm64.cc1
-rw-r--r--runtime/arch/arm64/fault_handler_arm64.cc2
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S136
-rw-r--r--runtime/arch/arm64/quick_method_frame_info_arm64.h31
-rw-r--r--runtime/arch/mips/context_mips.cc1
-rw-r--r--runtime/arch/mips/fault_handler_mips.cc5
-rw-r--r--runtime/arch/mips/instruction_set_features_mips.cc63
-rw-r--r--runtime/arch/mips/instruction_set_features_mips.h12
-rw-r--r--runtime/arch/mips/instruction_set_features_mips_test.cc98
-rw-r--r--runtime/arch/mips/quick_method_frame_info_mips.h24
-rw-r--r--runtime/arch/mips/registers_mips.cc9
-rw-r--r--runtime/arch/mips/registers_mips.h39
-rw-r--r--runtime/arch/mips64/context_mips64.cc1
-rw-r--r--runtime/arch/mips64/entrypoints_init_mips64.cc1
-rw-r--r--runtime/arch/mips64/fault_handler_mips64.cc5
-rw-r--r--runtime/arch/mips64/quick_method_frame_info_mips64.h24
-rw-r--r--runtime/arch/stub_test.cc5
-rw-r--r--runtime/arch/x86/context_x86.cc1
-rw-r--r--runtime/arch/x86/fault_handler_x86.cc2
-rw-r--r--runtime/arch/x86/instruction_set_features_x86.cc5
-rw-r--r--runtime/arch/x86/instruction_set_features_x86_test.cc37
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S46
-rw-r--r--runtime/arch/x86/quick_method_frame_info_x86.h20
-rw-r--r--runtime/arch/x86/thread_x86.cc2
-rw-r--r--runtime/arch/x86_64/context_x86_64.cc1
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S27
-rw-r--r--runtime/arch/x86_64/quick_method_frame_info_x86_64.h20
-rw-r--r--runtime/arch/x86_64/thread_x86_64.cc2
-rw-r--r--runtime/art_field-inl.h2
-rw-r--r--runtime/art_method-inl.h22
-rw-r--r--runtime/art_method.cc4
-rw-r--r--runtime/art_method.h23
-rw-r--r--runtime/asm_support.h32
-rw-r--r--runtime/asm_support_check.h65
-rw-r--r--runtime/atomic.cc2
-rw-r--r--runtime/atomic.h7
-rw-r--r--runtime/barrier_test.cc2
-rw-r--r--runtime/base/allocator.h28
-rw-r--r--runtime/base/arena_allocator.cc2
-rw-r--r--runtime/base/bit_utils.h83
-rw-r--r--runtime/base/bit_utils_iterator.h112
-rw-r--r--runtime/base/bit_utils_test.cc1
-rw-r--r--runtime/base/callee_save_type.h37
-rw-r--r--runtime/base/dumpable-inl.h2
-rw-r--r--runtime/base/histogram-inl.h2
-rw-r--r--runtime/base/logging.cc2
-rw-r--r--runtime/base/mutex-inl.h33
-rw-r--r--runtime/base/mutex.h32
-rw-r--r--runtime/base/mutex_test.cc4
-rw-r--r--runtime/base/stl_util.h52
-rw-r--r--runtime/base/stl_util_identity.h41
-rw-r--r--runtime/base/strlcpy.h38
-rw-r--r--runtime/base/timing_logger.cc3
-rw-r--r--runtime/base/variant_map.h2
-rw-r--r--runtime/check_reference_map_visitor.h1
-rw-r--r--runtime/class_linker.cc143
-rw-r--r--runtime/class_linker.h11
-rw-r--r--runtime/class_linker_test.cc2
-rw-r--r--runtime/class_table-inl.h6
-rw-r--r--runtime/class_table.cc3
-rw-r--r--runtime/common_runtime_test.cc10
-rw-r--r--runtime/common_runtime_test.h11
-rw-r--r--runtime/debugger.cc58
-rw-r--r--runtime/dex2oat_environment_test.h1
-rw-r--r--runtime/dex_cache_resolved_classes.h11
-rw-r--r--runtime/dex_file.cc27
-rw-r--r--runtime/dex_file.h3
-rw-r--r--runtime/dex_file_test.cc2
-rw-r--r--runtime/dex_file_tracking_registrar.cc206
-rw-r--r--runtime/dex_file_tracking_registrar.h72
-rw-r--r--runtime/dex_file_verifier.h11
-rw-r--r--runtime/dex_file_verifier_test.cc2
-rw-r--r--runtime/dex_method_iterator_test.cc2
-rw-r--r--runtime/dex_reference_collection.h85
-rw-r--r--runtime/entrypoints/entrypoint_utils.cc11
-rw-r--r--runtime/entrypoints/entrypoint_utils.h9
-rw-r--r--runtime/entrypoints/jni/jni_entrypoints.cc7
-rw-r--r--runtime/entrypoints/quick/callee_save_frame.h14
-rw-r--r--runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc1
-rw-r--r--runtime/entrypoints/quick/quick_dexcache_entrypoints.cc13
-rw-r--r--runtime/entrypoints/quick/quick_field_entrypoints.cc5
-rw-r--r--runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc74
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc136
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc39
-rw-r--r--runtime/exec_utils.cc1
-rw-r--r--runtime/fault_handler.cc2
-rw-r--r--runtime/gc/accounting/atomic_stack.h9
-rw-r--r--runtime/gc/accounting/bitmap.h1
-rw-r--r--runtime/gc/accounting/heap_bitmap.h1
-rw-r--r--runtime/gc/accounting/mod_union_table.cc2
-rw-r--r--runtime/gc/accounting/mod_union_table_test.cc2
-rw-r--r--runtime/gc/accounting/remembered_set.h1
-rw-r--r--runtime/gc/accounting/space_bitmap.h4
-rw-r--r--runtime/gc/allocation_listener.h1
-rw-r--r--runtime/gc/allocation_record.cc1
-rw-r--r--runtime/gc/allocation_record.h2
-rw-r--r--runtime/gc/allocator/rosalloc.cc2
-rw-r--r--runtime/gc/allocator/rosalloc.h3
-rw-r--r--runtime/gc/collector/concurrent_copying.cc9
-rw-r--r--runtime/gc/collector/concurrent_copying.h6
-rw-r--r--runtime/gc/collector/garbage_collector.cc3
-rw-r--r--runtime/gc/collector/immune_spaces_test.cc2
-rw-r--r--runtime/gc/collector/mark_compact.cc2
-rw-r--r--runtime/gc/collector/mark_compact.h1
-rw-r--r--runtime/gc/collector/mark_sweep.cc4
-rw-r--r--runtime/gc/collector/mark_sweep.h1
-rw-r--r--runtime/gc/collector/partial_mark_sweep.cc2
-rw-r--r--runtime/gc/collector/semi_space.h1
-rw-r--r--runtime/gc/collector/sticky_mark_sweep.cc3
-rw-r--r--runtime/gc/heap.cc2
-rw-r--r--runtime/gc/heap.h5
-rw-r--r--runtime/gc/heap_verification_test.cc21
-rw-r--r--runtime/gc/reference_processor-inl.h2
-rw-r--r--runtime/gc/reference_processor.cc1
-rw-r--r--runtime/gc/reference_processor.h2
-rw-r--r--runtime/gc/reference_queue.cc1
-rw-r--r--runtime/gc/reference_queue.h4
-rw-r--r--runtime/gc/scoped_gc_critical_section.cc2
-rw-r--r--runtime/gc/space/bump_pointer_space.h9
-rw-r--r--runtime/gc/space/dlmalloc_space.cc1
-rw-r--r--runtime/gc/space/image_space.cc19
-rw-r--r--runtime/gc/space/large_object_space.cc2
-rw-r--r--runtime/gc/space/region_space-inl.h2
-rw-r--r--runtime/gc/space/region_space.cc27
-rw-r--r--runtime/gc/space/region_space.h6
-rw-r--r--runtime/gc/space/rosalloc_space.cc1
-rw-r--r--runtime/gc/space/space.cc2
-rw-r--r--runtime/gc/space/zygote_space.cc4
-rw-r--r--runtime/gc/task_processor_test.cc2
-rw-r--r--runtime/gc/verification.cc88
-rw-r--r--runtime/gc/verification.h8
-rw-r--r--runtime/generated/asm_support_gen.h8
-rw-r--r--runtime/handle_scope-inl.h2
-rw-r--r--runtime/imtable_test.cc2
-rw-r--r--runtime/indirect_reference_table.cc55
-rw-r--r--runtime/indirect_reference_table.h8
-rw-r--r--runtime/instrumentation.cc183
-rw-r--r--runtime/instrumentation.h114
-rw-r--r--runtime/instrumentation_test.cc182
-rw-r--r--runtime/intern_table.cc2
-rw-r--r--runtime/intern_table.h3
-rw-r--r--runtime/intern_table_test.cc1
-rw-r--r--runtime/interpreter/interpreter.cc14
-rw-r--r--runtime/interpreter/interpreter_common.cc1
-rw-r--r--runtime/interpreter/interpreter_switch_impl.cc28
-rw-r--r--runtime/interpreter/unstarted_runtime.cc2
-rw-r--r--runtime/invoke_type.h2
-rw-r--r--runtime/java_vm_ext.cc81
-rw-r--r--runtime/java_vm_ext.h1
-rw-r--r--runtime/jdwp/jdwp_adb.cc2
-rw-r--r--runtime/jdwp/jdwp_event.cc4
-rw-r--r--runtime/jdwp/jdwp_expand_buf.cc4
-rw-r--r--runtime/jdwp/jdwp_handler.cc2
-rw-r--r--runtime/jit/debugger_interface.cc2
-rw-r--r--runtime/jit/jit.cc2
-rw-r--r--runtime/jit/jit.h1
-rw-r--r--runtime/jit/jit_code_cache.cc9
-rw-r--r--runtime/jit/jit_code_cache.h3
-rw-r--r--runtime/jit/profile_compilation_info-inl.h58
-rw-r--r--runtime/jit/profile_compilation_info.cc206
-rw-r--r--runtime/jit/profile_compilation_info.h147
-rw-r--r--runtime/jit/profile_compilation_info_test.cc135
-rw-r--r--runtime/jit/profile_saver.cc176
-rw-r--r--runtime/jit/profile_saver_options.h44
-rw-r--r--runtime/jni_env_ext.cc6
-rw-r--r--runtime/jni_env_ext.h1
-rw-r--r--runtime/jni_internal.cc56
-rw-r--r--runtime/jni_internal.h8
-rw-r--r--runtime/jni_internal_test.cc25
-rw-r--r--runtime/linear_alloc.cc2
-rw-r--r--runtime/managed_stack-inl.h48
-rw-r--r--runtime/managed_stack.cc57
-rw-r--r--runtime/managed_stack.h107
-rw-r--r--runtime/mem_map.cc11
-rw-r--r--runtime/method_bss_mapping.h57
-rw-r--r--runtime/method_handles.h1
-rw-r--r--runtime/method_reference.h50
-rw-r--r--runtime/mirror/accessible_object.h1
-rw-r--r--runtime/mirror/array.h11
-rw-r--r--runtime/mirror/class-inl.h5
-rw-r--r--runtime/mirror/class.cc4
-rw-r--r--runtime/mirror/class.h2
-rw-r--r--runtime/mirror/class_ext.h1
-rw-r--r--runtime/mirror/class_loader-inl.h2
-rw-r--r--runtime/mirror/class_loader.h3
-rw-r--r--runtime/mirror/dex_cache.cc1
-rw-r--r--runtime/mirror/executable.h1
-rw-r--r--runtime/mirror/field.h1
-rw-r--r--runtime/mirror/object-inl.h33
-rw-r--r--runtime/mirror/object-readbarrier-inl.h30
-rw-r--r--runtime/mirror/object-refvisitor-inl.h2
-rw-r--r--runtime/mirror/object.h15
-rw-r--r--runtime/mirror/reference-inl.h8
-rw-r--r--runtime/mirror/reference.h7
-rw-r--r--runtime/mirror/stack_trace_element.cc1
-rw-r--r--runtime/mirror/stack_trace_element.h1
-rw-r--r--runtime/mirror/string-inl.h5
-rw-r--r--runtime/mirror/string.cc7
-rw-r--r--runtime/mirror/string.h1
-rw-r--r--runtime/mirror/throwable.cc14
-rw-r--r--runtime/mirror/throwable.h17
-rw-r--r--runtime/monitor.cc12
-rw-r--r--runtime/monitor.h9
-rw-r--r--runtime/monitor_android.cc130
-rw-r--r--runtime/monitor_linux.cc2
-rw-r--r--runtime/monitor_pool.cc2
-rw-r--r--runtime/monitor_pool_test.cc2
-rw-r--r--runtime/native/dalvik_system_DexFile.cc1
-rw-r--r--runtime/native/dalvik_system_VMDebug.cc1
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc1
-rw-r--r--runtime/native/dalvik_system_VMStack.cc1
-rw-r--r--runtime/native/dalvik_system_ZygoteHooks.cc4
-rw-r--r--runtime/native/java_lang_Class.cc1
-rw-r--r--runtime/native/java_lang_Object.cc1
-rw-r--r--runtime/native/java_lang_String.cc1
-rw-r--r--runtime/native/java_lang_StringFactory.cc1
-rw-r--r--runtime/native/java_lang_System.cc1
-rw-r--r--runtime/native/java_lang_Thread.cc1
-rw-r--r--runtime/native/java_lang_Throwable.cc1
-rw-r--r--runtime/native/java_lang_VMClassLoader.cc1
-rw-r--r--runtime/native/java_lang_Void.cc1
-rw-r--r--runtime/native/java_lang_invoke_MethodHandleImpl.cc1
-rw-r--r--runtime/native/java_lang_ref_FinalizerReference.cc1
-rw-r--r--runtime/native/java_lang_ref_Reference.cc1
-rw-r--r--runtime/native/java_lang_reflect_Array.cc3
-rw-r--r--runtime/native/java_lang_reflect_Constructor.cc1
-rw-r--r--runtime/native/java_lang_reflect_Executable.cc1
-rw-r--r--runtime/native/java_lang_reflect_Field.cc1
-rw-r--r--runtime/native/java_lang_reflect_Method.cc1
-rw-r--r--runtime/native/java_lang_reflect_Parameter.cc1
-rw-r--r--runtime/native/java_lang_reflect_Proxy.cc1
-rw-r--r--runtime/native/java_util_concurrent_atomic_AtomicLong.cc1
-rw-r--r--runtime/native/libcore_util_CharsetUtils.cc1
-rw-r--r--runtime/native/native_util.h45
-rw-r--r--runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc1
-rw-r--r--runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc1
-rw-r--r--runtime/native/sun_misc_Unsafe.cc2
-rw-r--r--runtime/native_stack_dump.cc2
-rw-r--r--runtime/non_debuggable_classes.cc2
-rw-r--r--runtime/oat.cc20
-rw-r--r--runtime/oat.h5
-rw-r--r--runtime/oat_file.cc123
-rw-r--r--runtime/oat_file.h26
-rw-r--r--runtime/oat_file_assistant.cc6
-rw-r--r--runtime/oat_file_assistant.h6
-rw-r--r--runtime/oat_file_assistant_test.cc21
-rw-r--r--runtime/oat_file_manager.cc13
-rw-r--r--runtime/obj_ptr-inl.h3
-rw-r--r--runtime/openjdkjvmti/OpenjdkJvmTi.cc3
-rw-r--r--runtime/openjdkjvmti/art_jvmti.h7
-rw-r--r--runtime/openjdkjvmti/events-inl.h25
-rw-r--r--runtime/openjdkjvmti/events.cc238
-rw-r--r--runtime/openjdkjvmti/events.h11
-rw-r--r--runtime/openjdkjvmti/jvmti_weak_table.h2
-rw-r--r--runtime/openjdkjvmti/ti_class.cc5
-rw-r--r--runtime/openjdkjvmti/ti_dump.cc2
-rw-r--r--runtime/openjdkjvmti/ti_field.cc2
-rw-r--r--runtime/openjdkjvmti/ti_heap.cc1
-rw-r--r--runtime/openjdkjvmti/ti_jni.cc2
-rw-r--r--runtime/openjdkjvmti/ti_method.cc2
-rw-r--r--runtime/openjdkjvmti/ti_monitor.cc2
-rw-r--r--runtime/openjdkjvmti/ti_object.cc2
-rw-r--r--runtime/openjdkjvmti/ti_phase.cc2
-rw-r--r--runtime/openjdkjvmti/ti_properties.cc2
-rw-r--r--runtime/openjdkjvmti/ti_redefine.cc30
-rw-r--r--runtime/openjdkjvmti/ti_search.cc2
-rw-r--r--runtime/openjdkjvmti/ti_stack.cc116
-rw-r--r--runtime/openjdkjvmti/ti_thread.cc2
-rw-r--r--runtime/openjdkjvmti/ti_threadgroup.cc2
-rw-r--r--runtime/parsed_options.cc2
-rw-r--r--runtime/read_barrier-inl.h4
-rw-r--r--runtime/reference_table.h1
-rw-r--r--runtime/reference_table_test.cc2
-rw-r--r--runtime/reflection.cc5
-rw-r--r--runtime/runtime-inl.h19
-rw-r--r--runtime/runtime.cc32
-rw-r--r--runtime/runtime.h23
-rw-r--r--runtime/runtime_common.cc3
-rw-r--r--runtime/safe_map.h6
-rw-r--r--runtime/scoped_thread_state_change-inl.h1
-rw-r--r--runtime/stack.cc49
-rw-r--r--runtime/stack.h82
-rw-r--r--runtime/stride_iterator.h77
-rw-r--r--runtime/thread-current-inl.h47
-rw-r--r--runtime/thread-inl.h38
-rw-r--r--runtime/thread.cc5
-rw-r--r--runtime/thread.h29
-rw-r--r--runtime/thread_list.cc10
-rw-r--r--runtime/thread_list.h4
-rw-r--r--runtime/thread_pool.cc2
-rw-r--r--runtime/ti/agent.cc3
-rw-r--r--runtime/trace.cc29
-rw-r--r--runtime/trace.h48
-rw-r--r--runtime/transaction.cc2
-rw-r--r--runtime/transaction.h1
-rw-r--r--runtime/utils.cc4
-rw-r--r--runtime/verifier/method_verifier.cc17
-rw-r--r--runtime/verifier/method_verifier.h7
-rw-r--r--runtime/verifier/reg_type.h1
-rw-r--r--runtime/verifier/reg_type_test.cc2
-rw-r--r--runtime/verifier/verifier_deps.h1
-rw-r--r--runtime/verify_object.h1
-rw-r--r--runtime/well_known_classes.cc2
-rw-r--r--runtime/zip_archive.cc1
-rw-r--r--test/003-omnibus-opcodes/src/Main.java2
-rw-r--r--test/008-exceptions/src/Main.java12
-rw-r--r--test/023-many-interfaces/src/ManyInterfaces.java6
-rw-r--r--test/024-illegal-access/src/Main.java10
-rw-r--r--test/031-class-attributes/src/ClassAttrs.java10
-rw-r--r--test/032-concrete-sub/src/ConcreteSub.java2
-rw-r--r--test/032-concrete-sub/src/Main.java2
-rw-r--r--test/036-finalizer/src/Main.java2
-rw-r--r--test/042-new-instance/src/Main.java60
-rw-r--r--test/044-proxy/src/BasicTest.java12
-rw-r--r--test/044-proxy/src/Clash.java4
-rw-r--r--test/044-proxy/src/Clash2.java2
-rw-r--r--test/044-proxy/src/Clash3.java2
-rw-r--r--test/044-proxy/src/Clash4.java2
-rw-r--r--test/044-proxy/src/ConstructorProxy.java2
-rw-r--r--test/044-proxy/src/WrappedThrow.java32
-rw-r--r--test/045-reflect-array/src/Main.java4
-rw-r--r--test/046-reflect/src/Main.java28
-rw-r--r--test/048-reflect-v8/src/DefaultDeclared.java2
-rw-r--r--test/050-sync-test/src/Main.java8
-rw-r--r--test/050-sync-test/src/ThreadDeathHandler.java4
-rw-r--r--test/051-thread/src/Main.java2
-rw-r--r--test/053-wait-some/src/Main.java4
-rw-r--r--test/054-uncaught/src/Main.java4
-rw-r--r--test/054-uncaught/src/ThreadDeathHandler.java4
-rw-r--r--test/059-finalizer-throw/src/Main.java4
-rw-r--r--test/064-field-access/src/Main.java32
-rw-r--r--test/065-mismatched-implements/src/Main.java2
-rw-r--r--test/066-mismatched-super/src/Main.java2
-rw-r--r--test/068-classloader/src/Main.java76
-rw-r--r--test/069-field-type/src/Main.java2
-rw-r--r--test/070-nio-buffer/src/Main.java6
-rw-r--r--test/073-mismatched-field/src/Main.java2
-rw-r--r--test/074-gc-thrash/src/Main.java16
-rw-r--r--test/075-verification-error/src/Main.java18
-rw-r--r--test/077-method-override/src/Main.java8
-rw-r--r--test/079-phantom/src/Main.java2
-rw-r--r--test/084-class-init/src/Main.java14
-rw-r--r--test/086-null-super/src/Main.java8
-rw-r--r--test/088-monitor-verification/src/Main.java2
-rw-r--r--test/092-locale/src/Main.java16
-rw-r--r--test/095-switch-MAX_INT/src/Main.java2
-rw-r--r--test/100-reflect2/src/Main.java4
-rw-r--r--test/101-fibonacci/src/Main.java2
-rw-r--r--test/109-suspend-check/src/Main.java2
-rw-r--r--test/114-ParallelGC/src/Main.java2
-rwxr-xr-xtest/115-native-bridge/check20
-rw-r--r--test/120-hashcode/src/Main.java2
-rw-r--r--test/130-hprof/src/Main.java6
-rw-r--r--test/1337-gc-coverage/gc_coverage.cc2
-rw-r--r--test/135-MirandaDispatch/src/Main.java2
-rw-r--r--test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc2
-rw-r--r--test/138-duplicate-classes-check/src/Main.java2
-rw-r--r--test/138-duplicate-classes-check2/src/Main.java2
-rw-r--r--test/141-class-unload/jni_unload.cc2
-rw-r--r--test/141-class-unload/src/Main.java2
-rw-r--r--test/142-classloader2/src/Main.java2
-rw-r--r--test/146-bad-interface/src/Main.java2
-rw-r--r--test/148-multithread-gc-annotations/gc_coverage.cc2
-rw-r--r--test/155-java-set-resolved-type/src/Main.java2
-rw-r--r--test/156-register-dex-file-multi-loader/src/Main.java2
-rw-r--r--test/158-app-image-class-table/src/Main.java2
-rw-r--r--test/159-app-image-fields/src/Main.java2
-rw-r--r--test/301-abstract-protected/src/Main.java2
-rw-r--r--test/487-checker-inline-calls/src/Main.java2
-rw-r--r--test/488-checker-inline-recursive-calls/src/Main.java2
-rw-r--r--test/492-checker-inline-invoke-interface/src/Main.java2
-rw-r--r--test/493-checker-inline-invoke-interface/src/Main.java2
-rw-r--r--test/497-inlining-and-class-loader/src/Main.java2
-rw-r--r--test/522-checker-regression-monitor-exit/src/Main.java6
-rw-r--r--test/551-checker-shifter-operand/src/Main.java11
-rw-r--r--test/552-checker-sharpening/src/Main.java122
-rw-r--r--test/564-checker-irreducible-loop/smali/IrreducibleLoop.smali4
-rw-r--r--test/570-checker-osr/osr.cc1
-rw-r--r--test/570-checker-osr/src/DeoptimizationController.java4
-rw-r--r--test/588-checker-irreducib-lifetime-hole/smali/IrreducibleLoop.smali8
-rw-r--r--test/595-profile-saving/profile-saving.cc53
-rw-r--r--test/595-profile-saving/run1
-rw-r--r--test/595-profile-saving/src/Main.java32
-rw-r--r--test/596-app-images/src/Main.java88
-rw-r--r--test/596-monitor-inflation/monitor_inflation.cc2
-rw-r--r--test/602-deoptimizeable/src/Main.java4
-rw-r--r--test/617-clinit-oome/src/Main.java2
-rw-r--r--test/623-checker-loop-regressions/src/Main.java23
-rw-r--r--test/626-const-class-linking/src/RacyMisbehavingHelper.java2
-rw-r--r--test/638-no-line-number/src/Main.java4
-rw-r--r--test/640-checker-boolean-simd/src/Main.java24
-rw-r--r--test/640-checker-byte-simd/src/Main.java42
-rw-r--r--test/640-checker-char-simd/src/Main.java42
-rw-r--r--test/640-checker-double-simd/src/Main.java40
-rw-r--r--test/640-checker-float-simd/src/Main.java42
-rw-r--r--test/640-checker-int-simd/src/Main.java69
-rw-r--r--test/640-checker-long-simd/src/Main.java67
-rw-r--r--test/640-checker-short-simd/src/Main.java42
-rw-r--r--test/645-checker-abs-simd/src/Main.java72
-rw-r--r--test/646-checker-hadd-alt-byte/src/Main.java44
-rw-r--r--test/646-checker-hadd-alt-char/src/Main.java44
-rw-r--r--test/646-checker-hadd-alt-short/src/Main.java44
-rw-r--r--test/646-checker-hadd-byte/src/Main.java44
-rw-r--r--test/646-checker-hadd-char/src/Main.java44
-rw-r--r--test/646-checker-hadd-short/src/Main.java72
-rw-r--r--test/651-checker-byte-simd-minmax/src/Main.java28
-rw-r--r--test/651-checker-char-simd-minmax/src/Main.java14
-rw-r--r--test/651-checker-double-simd-minmax/src/Main.java2
-rw-r--r--test/651-checker-float-simd-minmax/src/Main.java2
-rw-r--r--test/651-checker-int-simd-minmax/src/Main.java14
-rw-r--r--test/651-checker-long-simd-minmax/src/Main.java19
-rw-r--r--test/651-checker-short-simd-minmax/src/Main.java28
-rw-r--r--test/654-checker-periodic/expected.txt1
-rw-r--r--test/654-checker-periodic/info.txt1
-rw-r--r--test/654-checker-periodic/src/Main.java173
-rw-r--r--test/655-checker-simd-arm-opt/expected.txt1
-rw-r--r--test/655-checker-simd-arm-opt/info.txt1
-rw-r--r--test/655-checker-simd-arm-opt/src/Main.java99
-rw-r--r--test/655-jit-clinit/expected.txt1
-rw-r--r--test/655-jit-clinit/info.txt3
-rw-r--r--test/655-jit-clinit/src/Main.java51
-rw-r--r--test/802-deoptimization/src/DeoptimizationController.java4
-rw-r--r--test/909-attach-agent/attach.cc18
-rw-r--r--test/909-attach-agent/src/Main.java6
-rw-r--r--test/916-obsolete-jit/src/Main.java2
-rw-r--r--test/934-load-transform/src/Main.java2
-rw-r--r--test/935-non-retransformable/src/Main.java2
-rw-r--r--test/938-load-transform-bcp/src-ex/TestMain.java2
-rw-r--r--test/938-load-transform-bcp/src/Main.java2
-rw-r--r--test/941-recurive-obsolete-jit/src/Main.java2
-rw-r--r--test/943-private-recursive-jit/src/Main.java2
-rw-r--r--test/947-reflect-method/src/art/Test947.java2
-rw-r--r--test/953-invoke-polymorphic-compiler/src/Main.java34
-rw-r--r--test/972-default-imt-collision/src/Main.java2
-rw-r--r--test/972-iface-super-multidex/src/Main.java10
-rw-r--r--test/973-default-multidex/src/Main.java2
-rw-r--r--test/983-source-transform-verify/source_transform.cc2
-rw-r--r--test/988-method-trace/expected.txt276
-rw-r--r--test/988-method-trace/info.txt15
-rwxr-xr-xtest/988-method-trace/run18
-rw-r--r--test/988-method-trace/src/Main.java21
-rw-r--r--test/988-method-trace/src/art/Test988.java301
-rw-r--r--test/988-method-trace/src/art/Trace.java25
-rw-r--r--test/988-redefine-use-after-free/expected.txt0
-rw-r--r--test/988-redefine-use-after-free/info.txt13
-rwxr-xr-xtest/988-redefine-use-after-free/run17
-rw-r--r--test/988-redefine-use-after-free/src-ex/DexCacheSmash.java155
-rw-r--r--test/988-redefine-use-after-free/src-ex/art/Redefinition.java91
-rw-r--r--test/988-redefine-use-after-free/src/Main.java54
-rw-r--r--test/989-method-trace-throw/expected.txt188
-rw-r--r--test/989-method-trace-throw/info.txt15
-rw-r--r--test/989-method-trace-throw/method_trace.cc74
-rwxr-xr-xtest/989-method-trace-throw/run18
-rw-r--r--test/989-method-trace-throw/src/Main.java21
-rw-r--r--test/989-method-trace-throw/src/art/Test989.java465
-rw-r--r--test/989-method-trace-throw/src/art/Trace.java25
-rw-r--r--test/Android.bp1
-rw-r--r--test/Instrumentation/Instrumentation.java13
-rw-r--r--test/common/runtime_state.cc24
-rw-r--r--test/common/stack_inspect.cc2
-rwxr-xr-xtest/etc/run-test-jar35
-rw-r--r--test/knownfailures.json44
-rwxr-xr-xtest/run-test27
-rw-r--r--test/testrunner/target_config.py23
-rwxr-xr-xtest/testrunner/testrunner.py12
-rw-r--r--test/ti-agent/common_helper.cc208
-rw-r--r--test/ti-stress/stress.cc240
-rw-r--r--tools/add_package_property.sh29
-rw-r--r--tools/asan.sh21
-rwxr-xr-xtools/buildbot-build.sh11
-rw-r--r--tools/cpp-define-generator/offset_runtime.def16
-rw-r--r--tools/dexfuzz/src/dexfuzz/program/Mutation.java15
-rw-r--r--tools/libcore_failures.txt7
-rwxr-xr-xtools/run-jdwp-tests.sh29
-rwxr-xr-xtools/run-libcore-tests.sh28
599 files changed, 14315 insertions, 4525 deletions
diff --git a/build/Android.bp b/build/Android.bp
index c54f436b35..ed6de3546f 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -59,10 +59,8 @@ art_global_defaults {
"-Wunreachable-code-break",
"-Wunreachable-code-return",
- // Bug: http://b/29823425 Disable -Wconstant-conversion and
- // -Wundefined-var-template for Clang update to r271374
+ // Bug: http://b/29823425 Disable -Wconstant-conversion for Clang update to r271374
"-Wno-constant-conversion",
- "-Wno-undefined-var-template",
// Enable thread annotations for std::mutex, etc.
"-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
@@ -145,6 +143,15 @@ art_global_defaults {
tidy_checks: [
"-google-default-arguments",
+ // We have local stores that are only used for debug checks.
+ "-clang-analyzer-deadcode.DeadStores",
+ // We are OK with some static globals and that they can, in theory, throw.
+ "-cert-err58-cpp",
+ // We have lots of C-style variadic functions, and are OK with them. JNI ensures
+ // that working around this warning would be extra-painful.
+ "-cert-dcl50-cpp",
+ // No exceptions.
+ "-misc-noexcept-move-constructor",
],
}
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index bcf48fd891..b6d4ef6dfb 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -121,7 +121,7 @@ ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode Xan
ART_GTEST_proxy_test_DEX_DEPS := Interfaces
ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
ART_GTEST_profile_assistant_test_DEX_DEPS := ProfileTestMultiDex
-ART_GTEST_profile_compilation_info_test_DEX_DEPS := ProfileTestMultiDex
+ART_GTEST_profile_compilation_info_test_DEX_DEPS := ManyMethods ProfileTestMultiDex
ART_GTEST_runtime_callbacks_test_DEX_DEPS := XandY
ART_GTEST_stub_test_DEX_DEPS := AllFields
ART_GTEST_transaction_test_DEX_DEPS := Transaction
diff --git a/build/art.go b/build/art.go
index b33b565899..f52c63525a 100644
--- a/build/art.go
+++ b/build/art.go
@@ -87,7 +87,7 @@ func globalFlags(ctx android.BaseContext) ([]string, []string) {
"-DART_STACK_OVERFLOW_GAP_arm64=8192",
"-DART_STACK_OVERFLOW_GAP_mips=16384",
"-DART_STACK_OVERFLOW_GAP_mips64=16384",
- "-DART_STACK_OVERFLOW_GAP_x86=12288",
+ "-DART_STACK_OVERFLOW_GAP_x86=16384",
"-DART_STACK_OVERFLOW_GAP_x86_64=20480")
} else {
cflags = append(cflags,
@@ -170,12 +170,23 @@ func globalDefaults(ctx android.LoadHookContext) {
}
Cflags []string
Asflags []string
+ Sanitize struct {
+ Recover []string
+ }
}
p := &props{}
p.Cflags, p.Asflags = globalFlags(ctx)
p.Target.Android.Cflags = deviceFlags(ctx)
p.Target.Host.Cflags = hostFlags(ctx)
+
+ if envTrue(ctx, "ART_DEX_FILE_ACCESS_TRACKING") {
+ p.Cflags = append(p.Cflags, "-DART_DEX_FILE_ACCESS_TRACKING")
+ p.Sanitize.Recover = []string {
+ "address",
+ }
+ }
+
ctx.AppendProperties(p)
}
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 9f12f64a31..b224ec72de 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -35,7 +35,7 @@ namespace art {
return lhs.enabled_ == rhs.enabled_ &&
lhs.min_save_period_ms_ == rhs.min_save_period_ms_ &&
lhs.save_resolved_classes_delay_ms_ == rhs.save_resolved_classes_delay_ms_ &&
- lhs.startup_method_samples_ == rhs.startup_method_samples_ &&
+ lhs.hot_startup_method_samples_ == rhs.hot_startup_method_samples_ &&
lhs.min_methods_to_save_ == rhs.min_methods_to_save_ &&
lhs.min_classes_to_save_ == rhs.min_classes_to_save_ &&
lhs.min_notification_before_wake_ == rhs.min_notification_before_wake_ &&
@@ -484,18 +484,19 @@ TEST_F(CmdlineParserTest, TestJitOptions) {
* -Xps-*
*/
TEST_F(CmdlineParserTest, ProfileSaverOptions) {
- ProfileSaverOptions opt = ProfileSaverOptions(true, 1, 2, 3, 4, 5, 6, 7, "abc");
+ ProfileSaverOptions opt = ProfileSaverOptions(true, 1, 2, 3, 4, 5, 6, 7, "abc", true);
EXPECT_SINGLE_PARSE_VALUE(opt,
"-Xjitsaveprofilinginfo "
"-Xps-min-save-period-ms:1 "
"-Xps-save-resolved-classes-delay-ms:2 "
- "-Xps-startup-method-samples:3 "
+ "-Xps-hot-startup-method-samples:3 "
"-Xps-min-methods-to-save:4 "
"-Xps-min-classes-to-save:5 "
"-Xps-min-notification-before-wake:6 "
"-Xps-max-notification-before-wake:7 "
- "-Xps-profile-path:abc",
+ "-Xps-profile-path:abc "
+ "-Xps-profile-boot-class-path",
M::ProfileSaverOpts);
} // TEST_F
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 0d2aed8ad1..4de8a48d45 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -712,6 +712,11 @@ struct CmdlineType<ProfileSaverOptions> : CmdlineTypeParser<ProfileSaverOptions>
return Result::SuccessNoValue();
}
+ if (option == "profile-boot-class-path") {
+ existing.profile_boot_class_path_ = true;
+ return Result::SuccessNoValue();
+ }
+
// The rest of these options are always the wildcard from '-Xps-*'
std::string suffix = RemovePrefix(option);
@@ -727,10 +732,10 @@ struct CmdlineType<ProfileSaverOptions> : CmdlineTypeParser<ProfileSaverOptions>
&ProfileSaverOptions::save_resolved_classes_delay_ms_,
type_parser.Parse(suffix));
}
- if (android::base::StartsWith(option, "startup-method-samples:")) {
+ if (android::base::StartsWith(option, "hot-startup-method-samples:")) {
CmdlineType<unsigned int> type_parser;
return ParseInto(existing,
- &ProfileSaverOptions::startup_method_samples_,
+ &ProfileSaverOptions::hot_startup_method_samples_,
type_parser.Parse(suffix));
}
if (android::base::StartsWith(option, "min-methods-to-save:")) {
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 307a42cbba..a1269dcaf9 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -110,7 +110,6 @@ art_cc_defaults {
"optimizing/code_generator_vector_arm.cc",
"optimizing/code_generator_arm_vixl.cc",
"optimizing/code_generator_vector_arm_vixl.cc",
- "optimizing/dex_cache_array_fixups_arm.cc",
"optimizing/instruction_simplifier_arm.cc",
"optimizing/instruction_simplifier_shared.cc",
"optimizing/intrinsics_arm.cc",
@@ -145,7 +144,6 @@ art_cc_defaults {
"linker/mips/relative_patcher_mips.cc",
"optimizing/code_generator_mips.cc",
"optimizing/code_generator_vector_mips.cc",
- "optimizing/dex_cache_array_fixups_mips.cc",
"optimizing/intrinsics_mips.cc",
"optimizing/pc_relative_fixups_mips.cc",
"utils/mips/assembler_mips.cc",
@@ -342,6 +340,7 @@ art_cc_test {
"image_test.cc",
"image_write_read_test.cc",
"jni/jni_compiler_test.cc",
+ "linker/method_bss_mapping_encoder_test.cc",
"linker/multi_oat_relative_patcher_test.cc",
"linker/output_stream_test.cc",
"oat_test.cc",
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 39edd1eb02..3683695a1b 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -19,6 +19,7 @@
#include "arch/instruction_set_features.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/callee_save_type.h"
#include "base/enums.h"
#include "class_linker.h"
#include "compiled_method.h"
@@ -33,7 +34,7 @@
#include "mirror/object-inl.h"
#include "oat_quick_method_header.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils.h"
namespace art {
@@ -166,8 +167,8 @@ void CommonCompilerTest::SetUp() {
instruction_set_features_ = InstructionSetFeatures::FromCppDefines();
runtime_->SetInstructionSet(instruction_set);
- for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
+ for (uint32_t i = 0; i < static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
+ CalleeSaveType type = CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 0ca23a5c50..761e9e19a8 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -120,13 +120,13 @@ class LinkerPatch {
// patch_type_ as an uintN_t and do explicit static_cast<>s.
enum class Type : uint8_t {
kMethodRelative, // NOTE: Actual patching is instruction_set-dependent.
+ kMethodBssEntry, // NOTE: Actual patching is instruction_set-dependent.
kCall,
kCallRelative, // NOTE: Actual patching is instruction_set-dependent.
kTypeRelative, // NOTE: Actual patching is instruction_set-dependent.
kTypeBssEntry, // NOTE: Actual patching is instruction_set-dependent.
kStringRelative, // NOTE: Actual patching is instruction_set-dependent.
kStringBssEntry, // NOTE: Actual patching is instruction_set-dependent.
- kDexCacheArray, // NOTE: Actual patching is instruction_set-dependent.
kBakerReadBarrierBranch, // NOTE: Actual patching is instruction_set-dependent.
};
@@ -140,6 +140,16 @@ class LinkerPatch {
return patch;
}
+ static LinkerPatch MethodBssEntryPatch(size_t literal_offset,
+ const DexFile* target_dex_file,
+ uint32_t pc_insn_offset,
+ uint32_t target_method_idx) {
+ LinkerPatch patch(literal_offset, Type::kMethodBssEntry, target_dex_file);
+ patch.method_idx_ = target_method_idx;
+ patch.pc_insn_offset_ = pc_insn_offset;
+ return patch;
+ }
+
static LinkerPatch CodePatch(size_t literal_offset,
const DexFile* target_dex_file,
uint32_t target_method_idx) {
@@ -196,16 +206,6 @@ class LinkerPatch {
return patch;
}
- static LinkerPatch DexCacheArrayPatch(size_t literal_offset,
- const DexFile* target_dex_file,
- uint32_t pc_insn_offset,
- uint32_t element_offset) {
- LinkerPatch patch(literal_offset, Type::kDexCacheArray, target_dex_file);
- patch.pc_insn_offset_ = pc_insn_offset;
- patch.element_offset_ = element_offset;
- return patch;
- }
-
static LinkerPatch BakerReadBarrierBranchPatch(size_t literal_offset,
uint32_t custom_value1 = 0u,
uint32_t custom_value2 = 0u) {
@@ -229,12 +229,12 @@ class LinkerPatch {
bool IsPcRelative() const {
switch (GetType()) {
case Type::kMethodRelative:
+ case Type::kMethodBssEntry:
case Type::kCallRelative:
case Type::kTypeRelative:
case Type::kTypeBssEntry:
case Type::kStringRelative:
case Type::kStringBssEntry:
- case Type::kDexCacheArray:
case Type::kBakerReadBarrierBranch:
return true;
default:
@@ -244,6 +244,7 @@ class LinkerPatch {
MethodReference TargetMethod() const {
DCHECK(patch_type_ == Type::kMethodRelative ||
+ patch_type_ == Type::kMethodBssEntry ||
patch_type_ == Type::kCall ||
patch_type_ == Type::kCallRelative);
return MethodReference(target_dex_file_, method_idx_);
@@ -273,23 +274,13 @@ class LinkerPatch {
return dex::StringIndex(string_idx_);
}
- const DexFile* TargetDexCacheDexFile() const {
- DCHECK(patch_type_ == Type::kDexCacheArray);
- return target_dex_file_;
- }
-
- size_t TargetDexCacheElementOffset() const {
- DCHECK(patch_type_ == Type::kDexCacheArray);
- return element_offset_;
- }
-
uint32_t PcInsnOffset() const {
DCHECK(patch_type_ == Type::kMethodRelative ||
+ patch_type_ == Type::kMethodBssEntry ||
patch_type_ == Type::kTypeRelative ||
patch_type_ == Type::kTypeBssEntry ||
patch_type_ == Type::kStringRelative ||
- patch_type_ == Type::kStringBssEntry ||
- patch_type_ == Type::kDexCacheArray);
+ patch_type_ == Type::kStringBssEntry);
return pc_insn_offset_;
}
@@ -324,12 +315,10 @@ class LinkerPatch {
uint32_t method_idx_; // Method index for Call/Method patches.
uint32_t type_idx_; // Type index for Type patches.
uint32_t string_idx_; // String index for String patches.
- uint32_t element_offset_; // Element offset in the dex cache arrays.
uint32_t baker_custom_value1_;
static_assert(sizeof(method_idx_) == sizeof(cmp1_), "needed by relational operators");
static_assert(sizeof(type_idx_) == sizeof(cmp1_), "needed by relational operators");
static_assert(sizeof(string_idx_) == sizeof(cmp1_), "needed by relational operators");
- static_assert(sizeof(element_offset_) == sizeof(cmp1_), "needed by relational operators");
static_assert(sizeof(baker_custom_value1_) == sizeof(cmp1_), "needed by relational operators");
};
union {
diff --git a/compiler/compiled_method_test.cc b/compiler/compiled_method_test.cc
index 72b2282ade..f4a72cf2cc 100644
--- a/compiler/compiled_method_test.cc
+++ b/compiler/compiled_method_test.cc
@@ -58,6 +58,14 @@ TEST(CompiledMethod, LinkerPatchOperators) {
LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3001u, 1000u),
LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3000u, 1001u),
LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3001u, 1001u),
+ LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3000u, 1000u),
+ LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3001u, 1000u),
+ LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3000u, 1001u),
+ LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3001u, 1001u),
+ LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3000u, 1000u),
+ LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3001u, 1000u),
+ LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3000u, 1001u),
+ LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3001u, 1001u),
LinkerPatch::CodePatch(16u, dex_file1, 1000u),
LinkerPatch::CodePatch(16u, dex_file1, 1001u),
LinkerPatch::CodePatch(16u, dex_file2, 1000u),
@@ -98,14 +106,6 @@ TEST(CompiledMethod, LinkerPatchOperators) {
LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3001u, 1000u),
LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3000u, 1001u),
LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3001u, 1001u),
- LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3000u, 2000u),
- LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3001u, 2000u),
- LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3000u, 2001u),
- LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3001u, 2001u),
- LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3000u, 2000u),
- LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3001u, 2000u),
- LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3000u, 2001u),
- LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3001u, 2001u),
LinkerPatch::BakerReadBarrierBranchPatch(16u, 0u, 0u),
LinkerPatch::BakerReadBarrierBranchPatch(16u, 0u, 1u),
LinkerPatch::BakerReadBarrierBranchPatch(16u, 1u, 0u),
@@ -119,6 +119,14 @@ TEST(CompiledMethod, LinkerPatchOperators) {
LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3001u, 1000u),
LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3000u, 1001u),
LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3001u, 1001u),
+ LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3000u, 1000u),
+ LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3001u, 1000u),
+ LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3000u, 1001u),
+ LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3001u, 1001u),
+ LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3000u, 1000u),
+ LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3001u, 1000u),
+ LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3000u, 1001u),
+ LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3001u, 1001u),
LinkerPatch::CodePatch(32u, dex_file1, 1000u),
LinkerPatch::CodePatch(32u, dex_file1, 1001u),
LinkerPatch::CodePatch(32u, dex_file2, 1000u),
@@ -159,14 +167,6 @@ TEST(CompiledMethod, LinkerPatchOperators) {
LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3001u, 1000u),
LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3000u, 1001u),
LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3001u, 1001u),
- LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3000u, 2000u),
- LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3001u, 2000u),
- LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3000u, 2001u),
- LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3001u, 2001u),
- LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3000u, 2000u),
- LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3001u, 2000u),
- LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3000u, 2001u),
- LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3001u, 2001u),
LinkerPatch::BakerReadBarrierBranchPatch(32u, 0u, 0u),
LinkerPatch::BakerReadBarrierBranchPatch(32u, 0u, 1u),
LinkerPatch::BakerReadBarrierBranchPatch(32u, 1u, 0u),
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 1573062033..2db99cda3e 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -28,7 +28,7 @@
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "mirror/dex_cache.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace optimizer {
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index e691a67dc0..257229101c 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -433,8 +433,11 @@ bool InlineMethodAnalyser::AnalyseMethodCode(ArtMethod* method, InlineMethod* re
// Native or abstract.
return false;
}
- return AnalyseMethodCode(
- code_item, method->ToMethodReference(), method->IsStatic(), method, result);
+ return AnalyseMethodCode(code_item,
+ MethodReference(method->GetDexFile(), method->GetDexMethodIndex()),
+ method->IsStatic(),
+ method,
+ result);
}
bool InlineMethodAnalyser::AnalyseMethodCode(const DexFile::CodeItem* code_item,
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 0338cfde8c..04ceca0513 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -17,12 +17,13 @@
#include "verification_results.h"
#include "base/logging.h"
-#include "base/stl_util.h"
#include "base/mutex-inl.h"
+#include "base/stl_util.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
+#include "runtime.h"
#include "thread.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils/atomic_method_ref_map-inl.h"
#include "verified_method.h"
#include "verifier/method_verifier-inl.h"
@@ -109,12 +110,12 @@ void VerificationResults::CreateVerifiedMethodFor(MethodReference ref) {
// This method should only be called for classes verified at compile time,
// which have no verifier error, nor has methods that we know will throw
// at runtime.
- atomic_verified_methods_.Insert(
- ref,
- /*expected*/ nullptr,
- new VerifiedMethod(/* encountered_error_types */ 0, /* has_runtime_throw */ false));
- // We don't check the result of `Insert` as we could insert twice for the same
- // MethodReference in the presence of duplicate methods.
+ std::unique_ptr<VerifiedMethod> verified_method = std::make_unique<VerifiedMethod>(
+ /* encountered_error_types */ 0, /* has_runtime_throw */ false);
+ if (atomic_verified_methods_.Insert(ref, /*expected*/ nullptr, verified_method.get()) ==
+ AtomicMap::InsertResult::kInsertResultSuccess) {
+ verified_method.release();
+ }
}
void VerificationResults::AddRejectedClass(ClassReference ref) {
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index e6a47ba60f..528b0a215b 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -21,7 +21,7 @@
#include "base/logging.h"
#include "compiled_method.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils.h"
#include "utils/dedupe_set-inl.h"
#include "utils/swap_space.h"
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index fc5f847354..91b58e1590 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1000,7 +1000,9 @@ bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_r
if (profile_compilation_info_ == nullptr) {
return false;
}
- bool result = profile_compilation_info_->ContainsMethod(method_ref);
+ // Compile only hot methods; it is the profile saver's job to decide which startup methods to
+ // mark as hot.
+ bool result = profile_compilation_info_->ContainsHotMethod(method_ref);
if (kDebugProfileGuidedCompilation) {
LOG(INFO) << "[ProfileGuidedCompilation] "
@@ -2270,6 +2272,8 @@ class InitializeClassVisitor : public CompilationVisitor {
const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
ScopedObjectAccessUnchecked soa(Thread::Current());
StackHandleScope<3> hs(soa.Self());
+ const bool is_boot_image = manager_->GetCompiler()->GetCompilerOptions().IsBootImage();
+ const bool is_app_image = manager_->GetCompiler()->GetCompilerOptions().IsAppImage();
mirror::Class::Status old_status = klass->GetStatus();;
// Only try to initialize classes that were successfully verified.
@@ -2291,23 +2295,28 @@ class InitializeClassVisitor : public CompilationVisitor {
ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
// Attempt to initialize allowing initialization of parent classes but still not static
// fields.
- bool is_superclass_initialized = InitializeDependencies(klass, class_loader, soa.Self());
- if (is_superclass_initialized) {
- manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
- }
+ manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
old_status = klass->GetStatus();
- // If superclass cannot be initialized, no need to proceed.
+ // If the class was not initialized, we can proceed to see if we can initialize static
+ // fields.
if (!klass->IsInitialized() &&
- is_superclass_initialized &&
+ (is_app_image || is_boot_image) &&
manager_->GetCompiler()->IsImageClass(descriptor)) {
bool can_init_static_fields = false;
- if (manager_->GetCompiler()->GetCompilerOptions().IsBootImage()) {
+ if (is_boot_image) {
// We need to initialize static fields, we only do this for image classes that aren't
- // marked with the $NoPreloadHolder (which implies this should not be initialized early).
+ // marked with the $NoPreloadHolder (which implies this should not be initialized
+ // early).
can_init_static_fields = !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
} else {
- can_init_static_fields = manager_->GetCompiler()->GetCompilerOptions().IsAppImage() &&
+ CHECK(is_app_image);
+ // The boot image case doesn't need to recursively initialize the dependencies with
+ // special logic since the class linker already does this.
+ bool is_superclass_initialized =
+ InitializeDependencies(klass, class_loader, soa.Self());
+ can_init_static_fields =
!soa.Self()->IsExceptionPending() &&
+ is_superclass_initialized &&
NoClinitInDependency(klass, soa.Self(), &class_loader);
// TODO The checking for clinit can be removed since it's already
// checked when init superclass. Currently keep it because it contains
@@ -2350,6 +2359,10 @@ class InitializeClassVisitor : public CompilationVisitor {
soa.Self()->ClearException();
transaction.Rollback();
CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
+ } else if (is_boot_image) {
+ // For boot image, we want to put the updated status in the oat class since we can't
+ // reject the image anyways.
+ old_status = klass->GetStatus();
}
}
@@ -2359,6 +2372,8 @@ class InitializeClassVisitor : public CompilationVisitor {
// above as we will allocate strings, so must be allowed to suspend.
if (&klass->GetDexFile() == manager_->GetDexFile()) {
InternStrings(klass, class_loader);
+ } else {
+ DCHECK(!is_boot_image) << "Boot image must have equal dex files";
}
}
}
@@ -2425,65 +2440,89 @@ class InitializeClassVisitor : public CompilationVisitor {
}
}
- bool ResolveTypesOfMethods(Thread* self, ArtMethod* m)
+ bool NoPotentialInternStrings(Handle<mirror::Class> klass,
+ Handle<mirror::ClassLoader>* class_loader)
REQUIRES_SHARED(Locks::mutator_lock_) {
- auto rtn_type = m->GetReturnType(true);
- if (rtn_type == nullptr) {
- self->ClearException();
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> h_dex_cache = hs.NewHandle(klass->GetDexCache());
+ const DexFile* dex_file = h_dex_cache->GetDexFile();
+ const DexFile::ClassDef* class_def = klass->GetClassDef();
+ annotations::RuntimeEncodedStaticFieldValueIterator value_it(*dex_file,
+ &h_dex_cache,
+ class_loader,
+ manager_->GetClassLinker(),
+ *class_def);
+
+ const auto jString = annotations::RuntimeEncodedStaticFieldValueIterator::kString;
+ for ( ; value_it.HasNext(); value_it.Next()) {
+ if (value_it.GetValueType() == jString) {
+ // We don't want to cache the static encoded strings, which are potential interns.
return false;
}
- const DexFile::TypeList* types = m->GetParameterTypeList();
- if (types != nullptr) {
- for (uint32_t i = 0; i < types->Size(); ++i) {
- dex::TypeIndex param_type_idx = types->GetTypeItem(i).type_idx_;
- auto param_type = m->GetClassFromTypeIndex(param_type_idx, true);
- if (param_type == nullptr) {
- self->ClearException();
- return false;
- }
+ }
+
+ return true;
+ }
+
+ bool ResolveTypesOfMethods(Thread* self, ArtMethod* m)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto rtn_type = m->GetReturnType(true); // return value is discarded because resolve will be done internally.
+ if (rtn_type == nullptr) {
+ self->ClearException();
+ return false;
+ }
+ const DexFile::TypeList* types = m->GetParameterTypeList();
+ if (types != nullptr) {
+ for (uint32_t i = 0; i < types->Size(); ++i) {
+ dex::TypeIndex param_type_idx = types->GetTypeItem(i).type_idx_;
+ auto param_type = m->GetClassFromTypeIndex(param_type_idx, true);
+ if (param_type == nullptr) {
+ self->ClearException();
+ return false;
}
}
- return true;
+ }
+ return true;
}
// Pre resolve types mentioned in all method signatures before start a transaction
// since ResolveType doesn't work in transaction mode.
bool PreResolveTypes(Thread* self, const Handle<mirror::Class>& klass)
REQUIRES_SHARED(Locks::mutator_lock_) {
- PointerSize pointer_size = manager_->GetClassLinker()->GetImagePointerSize();
- for (ArtMethod& m : klass->GetMethods(pointer_size)) {
- if (!ResolveTypesOfMethods(self, &m)) {
+ PointerSize pointer_size = manager_->GetClassLinker()->GetImagePointerSize();
+ for (ArtMethod& m : klass->GetMethods(pointer_size)) {
+ if (!ResolveTypesOfMethods(self, &m)) {
+ return false;
+ }
+ }
+ if (klass->IsInterface()) {
+ return true;
+ } else if (klass->HasSuperClass()) {
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::Class> super_klass(hs.NewHandle<mirror::Class>(klass->GetSuperClass()));
+ for (int i = super_klass->GetVTableLength() - 1; i >= 0; --i) {
+ ArtMethod* m = klass->GetVTableEntry(i, pointer_size);
+ ArtMethod* super_m = super_klass->GetVTableEntry(i, pointer_size);
+ if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
return false;
}
}
- if (klass->IsInterface()) {
- return true;
- } else if (klass->HasSuperClass()) {
- StackHandleScope<1> hs(self);
- MutableHandle<mirror::Class> super_klass(hs.NewHandle<mirror::Class>(klass->GetSuperClass()));
- for (int i = super_klass->GetVTableLength() - 1; i >= 0; --i) {
- ArtMethod* m = klass->GetVTableEntry(i, pointer_size);
- ArtMethod* super_m = super_klass->GetVTableEntry(i, pointer_size);
- if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
- return false;
- }
- }
- for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
- super_klass.Assign(klass->GetIfTable()->GetInterface(i));
- if (klass->GetClassLoader() != super_klass->GetClassLoader()) {
- uint32_t num_methods = super_klass->NumVirtualMethods();
- for (uint32_t j = 0; j < num_methods; ++j) {
- ArtMethod* m = klass->GetIfTable()->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>(
- j, pointer_size);
- ArtMethod* super_m = super_klass->GetVirtualMethod(j, pointer_size);
- if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
- return false;
- }
+ for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+ super_klass.Assign(klass->GetIfTable()->GetInterface(i));
+ if (klass->GetClassLoader() != super_klass->GetClassLoader()) {
+ uint32_t num_methods = super_klass->NumVirtualMethods();
+ for (uint32_t j = 0; j < num_methods; ++j) {
+ ArtMethod* m = klass->GetIfTable()->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>(
+ j, pointer_size);
+ ArtMethod* super_m = super_klass->GetVirtualMethod(j, pointer_size);
+ if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
+ return false;
}
}
}
}
- return true;
+ }
+ return true;
}
// Initialize the klass's dependencies recursively before initializing itself.
@@ -2538,8 +2577,9 @@ class InitializeClassVisitor : public CompilationVisitor {
ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
StackHandleScope<1> hs(self);
Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
- if (!NoClinitInDependency(handle_scope_super, self, class_loader))
+ if (!NoClinitInDependency(handle_scope_super, self, class_loader)) {
return false;
+ }
}
uint32_t num_if = klass->NumDirectInterfaces();
@@ -2548,11 +2588,12 @@ class InitializeClassVisitor : public CompilationVisitor {
interface = mirror::Class::GetDirectInterface(self, klass.Get(), i);
StackHandleScope<1> hs(self);
Handle<mirror::Class> handle_interface(hs.NewHandle(interface));
- if (!NoClinitInDependency(handle_interface, self, class_loader))
+ if (!NoClinitInDependency(handle_interface, self, class_loader)) {
return false;
+ }
}
- return true;
+ return NoPotentialInternStrings(klass, class_loader);
}
const ParallelCompilationManager* const manager_;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 26ea39f205..4b979d8125 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -239,8 +239,14 @@ class CompilerDriverProfileTest : public CompilerDriverTest {
ProfileCompilationInfo info;
for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
- profile_info_.AddMethodIndex(dex_file->GetLocation(), dex_file->GetLocationChecksum(), 1);
- profile_info_.AddMethodIndex(dex_file->GetLocation(), dex_file->GetLocationChecksum(), 2);
+ profile_info_.AddMethodIndex(dex_file->GetLocation(),
+ dex_file->GetLocationChecksum(),
+ 1,
+ dex_file->NumMethodIds());
+ profile_info_.AddMethodIndex(dex_file->GetLocation(),
+ dex_file->GetLocationChecksum(),
+ 2,
+ dex_file->NumMethodIds());
}
return &profile_info_;
}
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 7c02384ff2..2ef9fa1ccb 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -670,6 +670,7 @@ class ElfBuilder FINAL {
Elf_Word rodata_size,
Elf_Word text_size,
Elf_Word bss_size,
+ Elf_Word bss_methods_offset,
Elf_Word bss_roots_offset) {
std::string soname(elf_file_path);
size_t directory_separator_pos = soname.rfind('/');
@@ -715,9 +716,18 @@ class ElfBuilder FINAL {
Elf_Word bss_index = rodata_index + 1u + (text_size != 0 ? 1u : 0u);
Elf_Word oatbss = dynstr_.Add("oatbss");
dynsym_.Add(oatbss, bss_index, bss_address, bss_roots_offset, STB_GLOBAL, STT_OBJECT);
+ DCHECK_LE(bss_methods_offset, bss_roots_offset);
+ DCHECK_LE(bss_roots_offset, bss_size);
+ // Add a symbol marking the start of the methods part of the .bss, if not empty.
+ if (bss_methods_offset != bss_roots_offset) {
+ Elf_Word bss_methods_address = bss_address + bss_methods_offset;
+ Elf_Word bss_methods_size = bss_roots_offset - bss_methods_offset;
+      Elf_Word oatbssmethods = dynstr_.Add("oatbssmethods");
+      dynsym_.Add(
+          oatbssmethods, bss_index, bss_methods_address, bss_methods_size, STB_GLOBAL, STT_OBJECT);
+ }
// Add a symbol marking the start of the GC roots part of the .bss, if not empty.
if (bss_roots_offset != bss_size) {
- DCHECK_LT(bss_roots_offset, bss_size);
Elf_Word bss_roots_address = bss_address + bss_roots_offset;
Elf_Word bss_roots_size = bss_size - bss_roots_offset;
Elf_Word oatbssroots = dynstr_.Add("oatbssroots");
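For reference, a minimal standalone sketch of the symbol ranges the hunk above emits, using made-up .bss numbers (address 0x200000, size 0x3000, methods part at 0x1000, GC roots at 0x2000; none of these come from a real oat file): "oatbss" spans everything before the GC roots (dex cache arrays plus ArtMethod* entries), "oatbssmethods" only the ArtMethod* entries, and "oatbssroots" only the GC roots.

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed example values, not taken from any real oat file.
  const uint32_t bss_address = 0x200000u;
  const uint32_t bss_size = 0x3000u;
  const uint32_t bss_methods_offset = 0x1000u;  // start of ArtMethod* entries
  const uint32_t bss_roots_offset = 0x2000u;    // start of GC root entries

  // "oatbss" spans everything before the GC roots part.
  std::printf("oatbss        [0x%x, 0x%x)\n", bss_address, bss_address + bss_roots_offset);
  // "oatbssmethods" is emitted only if the methods part is non-empty.
  if (bss_methods_offset != bss_roots_offset) {
    std::printf("oatbssmethods [0x%x, 0x%x)\n",
                bss_address + bss_methods_offset, bss_address + bss_roots_offset);
  }
  // "oatbssroots" is emitted only if the GC roots part is non-empty.
  if (bss_roots_offset != bss_size) {
    std::printf("oatbssroots   [0x%x, 0x%x)\n",
                bss_address + bss_roots_offset, bss_address + bss_size);
  }
  return 0;
}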
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index 7baae527ff..a8a5bc32b7 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -55,6 +55,7 @@ class ElfWriter {
virtual void PrepareDynamicSection(size_t rodata_size,
size_t text_size,
size_t bss_size,
+ size_t bss_methods_offset,
size_t bss_roots_offset) = 0;
virtual void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
virtual OutputStream* StartRoData() = 0;
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 28c35e96b4..5d6dd2e1d7 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -22,7 +22,6 @@
#include "base/casts.h"
#include "base/logging.h"
-#include "base/stl_util.h"
#include "compiled_method.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
@@ -34,7 +33,7 @@
#include "leb128.h"
#include "linker/buffered_output_stream.h"
#include "linker/file_output_stream.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_pool.h"
#include "utils.h"
@@ -80,7 +79,7 @@ class DebugInfoTask : public Task {
const InstructionSetFeatures* instruction_set_features_;
size_t rodata_section_size_;
size_t text_section_size_;
- const ArrayRef<const debug::MethodDebugInfo>& method_infos_;
+ const ArrayRef<const debug::MethodDebugInfo> method_infos_;
std::vector<uint8_t> result_;
};
@@ -97,6 +96,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
void PrepareDynamicSection(size_t rodata_size,
size_t text_size,
size_t bss_size,
+ size_t bss_methods_offset,
size_t bss_roots_offset) OVERRIDE;
void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
OutputStream* StartRoData() OVERRIDE;
@@ -136,15 +136,15 @@ std::unique_ptr<ElfWriter> CreateElfWriterQuick(InstructionSet instruction_set,
const CompilerOptions* compiler_options,
File* elf_file) {
if (Is64BitInstructionSet(instruction_set)) {
- return MakeUnique<ElfWriterQuick<ElfTypes64>>(instruction_set,
- features,
- compiler_options,
- elf_file);
+ return std::make_unique<ElfWriterQuick<ElfTypes64>>(instruction_set,
+ features,
+ compiler_options,
+ elf_file);
} else {
- return MakeUnique<ElfWriterQuick<ElfTypes32>>(instruction_set,
- features,
- compiler_options,
- elf_file);
+ return std::make_unique<ElfWriterQuick<ElfTypes32>>(instruction_set,
+ features,
+ compiler_options,
+ elf_file);
}
}
@@ -160,7 +160,8 @@ ElfWriterQuick<ElfTypes>::ElfWriterQuick(InstructionSet instruction_set,
rodata_size_(0u),
text_size_(0u),
bss_size_(0u),
- output_stream_(MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(elf_file))),
+ output_stream_(
+ std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(elf_file))),
builder_(new ElfBuilder<ElfTypes>(instruction_set, features, output_stream_.get())) {}
template <typename ElfTypes>
@@ -178,6 +179,7 @@ template <typename ElfTypes>
void ElfWriterQuick<ElfTypes>::PrepareDynamicSection(size_t rodata_size,
size_t text_size,
size_t bss_size,
+ size_t bss_methods_offset,
size_t bss_roots_offset) {
DCHECK_EQ(rodata_size_, 0u);
rodata_size_ = rodata_size;
@@ -189,6 +191,7 @@ void ElfWriterQuick<ElfTypes>::PrepareDynamicSection(size_t rodata_size,
rodata_size_,
text_size_,
bss_size_,
+ bss_methods_offset,
bss_roots_offset);
}
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index dc880b089e..b4777df0df 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -17,6 +17,7 @@
#include <memory>
#include "base/arena_allocator.h"
+#include "base/callee_save_type.h"
#include "base/enums.h"
#include "class_linker.h"
#include "common_runtime_test.h"
@@ -170,7 +171,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
Runtime* r = Runtime::Current();
r->SetInstructionSet(kRuntimeISA);
ArtMethod* save_method = r->CreateCalleeSaveMethod();
- r->SetCalleeSaveMethod(save_method, Runtime::kSaveAllCalleeSaves);
+ r->SetCalleeSaveMethod(save_method, CalleeSaveType::kSaveAllCalleeSaves);
QuickMethodFrameInfo frame_info = r->GetRuntimeMethodFrameInfo(save_method);
ASSERT_EQ(kStackAlignment, 16U);
diff --git a/compiler/image_test.h b/compiler/image_test.h
index 2f15ff4815..3d89757d51 100644
--- a/compiler/image_test.h
+++ b/compiler/image_test.h
@@ -290,9 +290,9 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
if (kIsVdexEnabled) {
for (size_t i = 0, size = vdex_files.size(); i != size; ++i) {
- std::unique_ptr<BufferedOutputStream> vdex_out(
- MakeUnique<BufferedOutputStream>(
- MakeUnique<FileOutputStream>(vdex_files[i].GetFile())));
+ std::unique_ptr<BufferedOutputStream> vdex_out =
+ std::make_unique<BufferedOutputStream>(
+ std::make_unique<FileOutputStream>(vdex_files[i].GetFile()));
oat_writers[i]->WriteVerifierDeps(vdex_out.get(), nullptr);
oat_writers[i]->WriteChecksumsAndVdexHeader(vdex_out.get());
}
@@ -311,6 +311,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
elf_writer->PrepareDynamicSection(rodata_size,
text_size,
oat_writer->GetBssSize(),
+ oat_writer->GetBssMethodsOffset(),
oat_writer->GetBssRootsOffset());
writer->UpdateOatFileLayout(i,
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 4d6db4745f..a8fdecaa4a 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -27,6 +27,8 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/unix_file/fd_file.h"
#include "class_linker-inl.h"
@@ -44,10 +46,11 @@
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
+#include "gc/verification.h"
#include "globals.h"
+#include "handle_scope-inl.h"
#include "image.h"
#include "imt_conflict_table.h"
-#include "intern_table.h"
#include "jni_internal.h"
#include "linear_alloc.h"
#include "lock_word.h"
@@ -68,7 +71,6 @@
#include "oat_file_manager.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "handle_scope-inl.h"
#include "utils/dex_cache_arrays_layout-inl.h"
using ::art::mirror::Class;
@@ -1093,8 +1095,8 @@ void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
if (!image_writer->KeepClass(klass)) {
image_writer->DumpImageClasses();
std::string temp;
- CHECK(image_writer->KeepClass(klass)) << klass->GetDescriptor(&temp)
- << " " << klass->PrettyDescriptor();
+ CHECK(image_writer->KeepClass(klass))
+ << Runtime::Current()->GetHeap()->GetVerification()->FirstPathFromRootSet(klass);
}
}
}
@@ -1572,13 +1574,13 @@ void ImageWriter::CalculateNewObjectOffsets() {
image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod();
image_methods_[ImageHeader::kImtUnimplementedMethod] = runtime->GetImtUnimplementedMethod();
image_methods_[ImageHeader::kSaveAllCalleeSavesMethod] =
- runtime->GetCalleeSaveMethod(Runtime::kSaveAllCalleeSaves);
+ runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves);
image_methods_[ImageHeader::kSaveRefsOnlyMethod] =
- runtime->GetCalleeSaveMethod(Runtime::kSaveRefsOnly);
+ runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly);
image_methods_[ImageHeader::kSaveRefsAndArgsMethod] =
- runtime->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs);
+ runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs);
image_methods_[ImageHeader::kSaveEverythingMethod] =
- runtime->GetCalleeSaveMethod(Runtime::kSaveEverything);
+ runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything);
// Visit image methods first to have the main runtime methods in the first image.
for (auto* m : image_methods_) {
CHECK(m != nullptr);
@@ -2482,8 +2484,8 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_);
} else {
bool found_one = false;
- for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) {
- auto idx = static_cast<Runtime::CalleeSaveType>(i);
+ for (size_t i = 0; i < static_cast<size_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
+ auto idx = static_cast<CalleeSaveType>(i);
if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
found_one = true;
break;
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index a12d849f02..5e2db7d8f7 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -33,8 +33,10 @@
#include "base/enums.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
+#include "class_table.h"
#include "driver/compiler_driver.h"
#include "image.h"
+#include "intern_table.h"
#include "lock_word.h"
#include "mem_map.h"
#include "mirror/dex_cache.h"
@@ -60,7 +62,6 @@ class ClassLoader;
} // namespace mirror
class ClassLoaderVisitor;
-class ClassTable;
class ImtConflictTable;
static constexpr int kInvalidFd = -1;
@@ -106,19 +107,6 @@ class ImageWriter FINAL {
ArtMethod* GetImageMethodAddress(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
- template <typename PtrType>
- PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
- const REQUIRES_SHARED(Locks::mutator_lock_) {
- auto oat_it = dex_file_oat_index_map_.find(dex_file);
- DCHECK(oat_it != dex_file_oat_index_map_.end());
- const ImageInfo& image_info = GetImageInfo(oat_it->second);
- auto it = image_info.dex_cache_array_starts_.find(dex_file);
- DCHECK(it != image_info.dex_cache_array_starts_.end());
- return reinterpret_cast<PtrType>(
- image_info.image_begin_ + image_info.bin_slot_offsets_[kBinDexCacheArray] +
- it->second + offset);
- }
-
size_t GetOatFileOffset(size_t oat_index) const {
return GetImageInfo(oat_index).oat_offset_;
}
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index fed1f48d65..66135414f7 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -189,18 +189,12 @@ JitCompiler::~JitCompiler() {
bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
DCHECK(!method->IsProxyMethod());
+ DCHECK(method->GetDeclaringClass()->IsResolved());
+
TimingLogger logger("JIT compiler timing logger", true, VLOG_IS_ON(jit));
- StackHandleScope<2> hs(self);
self->AssertNoPendingException();
Runtime* runtime = Runtime::Current();
- // Ensure the class is initialized.
- Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
- if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
- VLOG(jit) << "JIT failed to initialize " << method->PrettyMethod();
- return false;
- }
-
// Do the compilation.
bool success = false;
{
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index c1ac230d43..18ff1c9bb6 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -16,6 +16,7 @@
#include "linker/arm/relative_patcher_arm_base.h"
+#include "base/stl_util.h"
#include "compiled_method.h"
#include "linker/output_stream.h"
#include "oat.h"
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 117684a66b..bc21607c5b 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -59,11 +59,11 @@ inline bool IsAdrpPatch(const LinkerPatch& patch) {
case LinkerPatch::Type::kBakerReadBarrierBranch:
return false;
case LinkerPatch::Type::kMethodRelative:
+ case LinkerPatch::Type::kMethodBssEntry:
case LinkerPatch::Type::kTypeRelative:
case LinkerPatch::Type::kTypeBssEntry:
case LinkerPatch::Type::kStringRelative:
case LinkerPatch::Type::kStringBssEntry:
- case LinkerPatch::Type::kDexCacheArray:
return patch.LiteralOffset() == patch.PcInsnOffset();
}
}
@@ -251,20 +251,20 @@ void Arm64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
// ADD immediate, 64-bit with imm12 == 0 (unset).
if (!kEmitCompilerReadBarrier) {
DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
- patch.GetType() == LinkerPatch::Type::kStringRelative ||
- patch.GetType() == LinkerPatch::Type::kTypeRelative) << patch.GetType();
+ patch.GetType() == LinkerPatch::Type::kTypeRelative ||
+ patch.GetType() == LinkerPatch::Type::kStringRelative) << patch.GetType();
} else {
// With the read barrier (non-Baker) enabled, it could be kStringBssEntry or kTypeBssEntry.
DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
- patch.GetType() == LinkerPatch::Type::kStringRelative ||
patch.GetType() == LinkerPatch::Type::kTypeRelative ||
- patch.GetType() == LinkerPatch::Type::kStringBssEntry ||
- patch.GetType() == LinkerPatch::Type::kTypeBssEntry) << patch.GetType();
+ patch.GetType() == LinkerPatch::Type::kStringRelative ||
+ patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
+ patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
}
shift = 0u; // No shift for ADD.
} else {
// LDR/STR 32-bit or 64-bit with imm12 == 0 (unset).
- DCHECK(patch.GetType() == LinkerPatch::Type::kDexCacheArray ||
+ DCHECK(patch.GetType() == LinkerPatch::Type::kMethodBssEntry ||
patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
DCHECK_EQ(insn & 0xbfbffc00, 0xb9000000) << std::hex << insn;
diff --git a/compiler/linker/method_bss_mapping_encoder.h b/compiler/linker/method_bss_mapping_encoder.h
new file mode 100644
index 0000000000..b2922ec6d2
--- /dev/null
+++ b/compiler/linker/method_bss_mapping_encoder.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
+#define ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
+
+#include "base/enums.h"
+#include "base/logging.h"
+#include "dex_file.h"
+#include "method_bss_mapping.h"
+
+namespace art {
+namespace linker {
+
+// Helper class for encoding compressed MethodBssMapping.
+class MethodBssMappingEncoder {
+ public:
+ explicit MethodBssMappingEncoder(PointerSize pointer_size)
+ : pointer_size_(static_cast<size_t>(pointer_size)) {
+ entry_.method_index = DexFile::kDexNoIndex16;
+ entry_.index_mask = 0u;
+ entry_.bss_offset = static_cast<uint32_t>(-1);
+ }
+
+ // Try to merge the next method_index -> bss_offset mapping into the current entry.
+ // Return true on success, false on failure.
+ bool TryMerge(uint32_t method_index, uint32_t bss_offset) {
+ DCHECK_NE(method_index, entry_.method_index);
+ if (entry_.bss_offset + pointer_size_ != bss_offset) {
+ return false;
+ }
+ uint32_t diff = method_index - entry_.method_index;
+ if (diff > 16u) {
+ return false;
+ }
+ if ((entry_.index_mask & ~(static_cast<uint32_t>(-1) << diff)) != 0u) {
+ return false;
+ }
+ entry_.method_index = method_index;
+ // Insert the bit indicating the method index we've just overwritten
+ // and shift bits indicating method indexes before that.
+ entry_.index_mask = dchecked_integral_cast<uint16_t>(
+ (static_cast<uint32_t>(entry_.index_mask) | 0x10000u) >> diff);
+ entry_.bss_offset = bss_offset;
+ return true;
+ }
+
+ void Reset(uint32_t method_index, uint32_t bss_offset) {
+ entry_.method_index = method_index;
+ entry_.index_mask = 0u;
+ entry_.bss_offset = bss_offset;
+ }
+
+ MethodBssMappingEntry GetEntry() {
+ return entry_;
+ }
+
+ private:
+ size_t pointer_size_;
+ MethodBssMappingEntry entry_;
+};
+
+} // namespace linker
+} // namespace art
+
+#endif // ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
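A self-contained sketch of the merging rule implemented by TryMerge above. This is a simplified re-implementation for illustration only, not the ART types; the 8-byte slot size and the input indexes (1, 5, 17, 100) are assumed. Method indexes at most 16 apart whose .bss slots are laid out back to back collapse into a single entry, while a distant index starts a new one.

#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

struct Entry {
  uint32_t method_index;
  uint16_t index_mask;
  uint32_t bss_offset;
};

int main() {
  const uint32_t kPointerSize = 8u;  // assumed 64-bit target
  // (method index, .bss offset) pairs in .bss layout order.
  const std::vector<std::pair<uint32_t, uint32_t>> refs = {
      {1u, 0u}, {5u, 8u}, {17u, 16u}, {100u, 24u}};

  std::vector<Entry> entries;
  for (const auto& ref : refs) {
    const uint32_t index = ref.first;
    const uint32_t offset = ref.second;
    bool merged = false;
    if (!entries.empty()) {
      Entry& e = entries.back();
      const uint32_t diff = index - e.method_index;
      // Same checks as TryMerge: contiguous slot, index within 16, no mask bit lost.
      if (e.bss_offset + kPointerSize == offset &&
          diff <= 16u &&
          (e.index_mask & ~(~0u << diff)) == 0u) {
        // Remember the old method index as a mask bit and take over the new one.
        e.index_mask = static_cast<uint16_t>((e.index_mask | 0x10000u) >> diff);
        e.method_index = index;
        e.bss_offset = offset;
        merged = true;
      }
    }
    if (!merged) {
      entries.push_back(Entry{index, 0u, offset});  // corresponds to Reset()
    }
  }
  for (const Entry& e : entries) {
    std::printf("method_index=%u index_mask=0x%04x bss_offset=%u\n",
                e.method_index, static_cast<unsigned>(e.index_mask), e.bss_offset);
  }
  // Prints {17, 0x0011, 16} for indexes 1/5/17 and a separate {100, 0x0000, 24}.
  return 0;
}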
diff --git a/compiler/linker/method_bss_mapping_encoder_test.cc b/compiler/linker/method_bss_mapping_encoder_test.cc
new file mode 100644
index 0000000000..1240389bef
--- /dev/null
+++ b/compiler/linker/method_bss_mapping_encoder_test.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "method_bss_mapping_encoder.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+namespace linker {
+
+TEST(MethodBssMappingEncoder, TryMerge) {
+ for (PointerSize pointer_size : {PointerSize::k32, PointerSize::k64}) {
+ size_t raw_pointer_size = static_cast<size_t>(pointer_size);
+ MethodBssMappingEncoder encoder(pointer_size);
+ encoder.Reset(1u, 0u);
+ ASSERT_FALSE(encoder.TryMerge(5u, raw_pointer_size + 1)); // Wrong bss_offset difference.
+ ASSERT_FALSE(encoder.TryMerge(18u, raw_pointer_size)); // Method index out of range.
+ ASSERT_TRUE(encoder.TryMerge(5u, raw_pointer_size));
+ ASSERT_TRUE(encoder.GetEntry().CoversIndex(1u));
+ ASSERT_TRUE(encoder.GetEntry().CoversIndex(5u));
+ ASSERT_FALSE(encoder.GetEntry().CoversIndex(17u));
+ ASSERT_FALSE(encoder.TryMerge(17u, 2 * raw_pointer_size + 1)); // Wrong bss_offset difference.
+ ASSERT_FALSE(encoder.TryMerge(18u, 2 * raw_pointer_size)); // Method index out of range.
+ ASSERT_TRUE(encoder.TryMerge(17u, 2 * raw_pointer_size));
+ ASSERT_TRUE(encoder.GetEntry().CoversIndex(1u));
+ ASSERT_TRUE(encoder.GetEntry().CoversIndex(5u));
+ ASSERT_TRUE(encoder.GetEntry().CoversIndex(17u));
+ ASSERT_EQ(0u, encoder.GetEntry().GetBssOffset(1u, raw_pointer_size));
+ ASSERT_EQ(raw_pointer_size, encoder.GetEntry().GetBssOffset(5u, raw_pointer_size));
+ ASSERT_EQ(2 * raw_pointer_size, encoder.GetEntry().GetBssOffset(17u, raw_pointer_size));
+ ASSERT_EQ(0x0011u, encoder.GetEntry().index_mask);
+ ASSERT_FALSE(encoder.TryMerge(18u, 2 * raw_pointer_size)); // Method index out of range.
+ }
+}
+
+} // namespace linker
+} // namespace art
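The expectations above also pin down the decoding direction. The sketch below is inferred purely from these asserts and is not the MethodBssMapping implementation: bit i of the 16-bit mask stands for method index (method_index - 16 + i), and each covered index above a given one occupies one pointer-sized slot below bss_offset, which belongs to method_index itself. The 8-byte pointer size is assumed.

#include <bitset>
#include <cstdint>
#include <cstdio>

struct Entry {
  uint32_t method_index;
  uint16_t index_mask;
  uint32_t bss_offset;
};

bool CoversIndex(const Entry& e, uint32_t index) {
  if (index == e.method_index) {
    return true;
  }
  // Bit i represents method index (e.method_index - 16 + i).
  return index < e.method_index && index + 16u >= e.method_index &&
         ((e.index_mask >> (index + 16u - e.method_index)) & 1u) != 0u;
}

uint32_t GetBssOffset(const Entry& e, uint32_t index, uint32_t pointer_size) {
  if (index == e.method_index) {
    return e.bss_offset;
  }
  // Every covered index above `index` occupies one slot below e.bss_offset.
  const uint32_t bit = index + 16u - e.method_index;
  const uint32_t higher = static_cast<uint32_t>(e.index_mask) >> (bit + 1u);
  const uint32_t slots_above = std::bitset<16>(higher).count() + 1u;  // +1 for method_index
  return e.bss_offset - slots_above * pointer_size;
}

int main() {
  const uint32_t kPointerSize = 8u;  // assumed 64-bit target
  const Entry e{17u, 0x0011u, 2u * kPointerSize};  // the entry built by the test above
  const uint32_t indexes[] = {1u, 5u, 9u, 17u};
  for (uint32_t index : indexes) {
    if (CoversIndex(e, index)) {
      std::printf("index %u -> bss offset %u\n", index, GetBssOffset(e, index, kPointerSize));
    } else {
      std::printf("index %u -> not covered\n", index);
    }
  }
  // Prints offsets 0, 8 and 16 for indexes 1, 5 and 17; index 9 is not covered.
  return 0;
}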
diff --git a/compiler/linker/mips/relative_patcher_mips.cc b/compiler/linker/mips/relative_patcher_mips.cc
index 8da530f7cc..d99d237a23 100644
--- a/compiler/linker/mips/relative_patcher_mips.cc
+++ b/compiler/linker/mips/relative_patcher_mips.cc
@@ -50,7 +50,6 @@ void MipsRelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
uint32_t anchor_literal_offset = patch.PcInsnOffset();
uint32_t literal_offset = patch.LiteralOffset();
uint32_t literal_low_offset;
- bool dex_cache_array = (patch.GetType() == LinkerPatch::Type::kDexCacheArray);
// Perform basic sanity checks and initialize `literal_low_offset` to point
// to the instruction containing the 16 least significant bits of the
@@ -72,16 +71,8 @@ void MipsRelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
DCHECK_GE(code->size(), 16u);
DCHECK_LE(literal_offset, code->size() - 12u);
DCHECK_GE(literal_offset, 4u);
- // The NAL instruction may not precede immediately as the PC+0 value may
- // come from HMipsComputeBaseMethodAddress.
- if (dex_cache_array) {
- DCHECK_EQ(literal_offset + 4u, anchor_literal_offset);
- // NAL
- DCHECK_EQ((*code)[literal_offset - 4], 0x00);
- DCHECK_EQ((*code)[literal_offset - 3], 0x00);
- DCHECK_EQ((*code)[literal_offset - 2], 0x10);
- DCHECK_EQ((*code)[literal_offset - 1], 0x04);
- }
+      // The NAL instruction does not immediately precede the anchor, as the
+      // PC+0 value comes from HMipsComputeBaseMethodAddress.
// LUI reg, offset_high
DCHECK_EQ((*code)[literal_offset + 0], 0x34);
DCHECK_EQ((*code)[literal_offset + 1], 0x12);
@@ -90,10 +81,6 @@ void MipsRelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
// ADDU reg, reg, reg2
DCHECK_EQ((*code)[literal_offset + 4], 0x21);
DCHECK_EQ(((*code)[literal_offset + 5] & 0x07), 0x00);
- if (dex_cache_array) {
- // reg2 is either RA or from HMipsComputeBaseMethodAddress.
- DCHECK_EQ(((*code)[literal_offset + 6] & 0x1F), 0x1F);
- }
DCHECK_EQ(((*code)[literal_offset + 7] & 0xFC), 0x00);
// instr reg(s), offset_low
DCHECK_EQ((*code)[literal_offset + 8], 0x78);
@@ -104,9 +91,6 @@ void MipsRelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
// Apply patch.
uint32_t anchor_offset = patch_offset - literal_offset + anchor_literal_offset;
uint32_t diff = target_offset - anchor_offset;
- if (dex_cache_array && !is_r6) {
- diff += kDexCacheArrayLwOffset;
- }
diff += (diff & 0x8000) << 1; // Account for sign extension in "instr reg(s), offset_low".
// LUI reg, offset_high / AUIPC reg, offset_high
diff --git a/compiler/linker/mips/relative_patcher_mips.h b/compiler/linker/mips/relative_patcher_mips.h
index 852a345aa6..0b74bd33a4 100644
--- a/compiler/linker/mips/relative_patcher_mips.h
+++ b/compiler/linker/mips/relative_patcher_mips.h
@@ -46,9 +46,6 @@ class MipsRelativePatcher FINAL : public RelativePatcher {
uint32_t patch_offset) OVERRIDE;
private:
- // We'll maximize the range of a single load instruction for dex cache array accesses
- // by aligning offset -32768 with the offset of the first used element.
- static constexpr uint32_t kDexCacheArrayLwOffset = 0x8000;
bool is_r6;
DISALLOW_COPY_AND_ASSIGN(MipsRelativePatcher);
diff --git a/compiler/linker/mips/relative_patcher_mips_test.cc b/compiler/linker/mips/relative_patcher_mips_test.cc
index 961b31266f..49af7c614b 100644
--- a/compiler/linker/mips/relative_patcher_mips_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips_test.cc
@@ -61,7 +61,6 @@ void MipsRelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPa
ASSERT_TRUE(result.first);
uint32_t diff = target_offset - (result.second + kAnchorOffset);
- CHECK_NE(patches[0].GetType(), LinkerPatch::Type::kDexCacheArray);
diff += (diff & 0x8000) << 1; // Account for sign extension in addiu.
const uint8_t expected_code[] = {
diff --git a/compiler/linker/output_stream_test.cc b/compiler/linker/output_stream_test.cc
index 84c76f2c6c..09fef29d48 100644
--- a/compiler/linker/output_stream_test.cc
+++ b/compiler/linker/output_stream_test.cc
@@ -19,7 +19,6 @@
#include "base/unix_file/fd_file.h"
#include "base/logging.h"
-#include "base/stl_util.h"
#include "buffered_output_stream.h"
#include "common_runtime_test.h"
@@ -79,7 +78,7 @@ TEST_F(OutputStreamTest, File) {
TEST_F(OutputStreamTest, Buffered) {
ScratchFile tmp;
{
- BufferedOutputStream buffered_output_stream(MakeUnique<FileOutputStream>(tmp.GetFile()));
+ BufferedOutputStream buffered_output_stream(std::make_unique<FileOutputStream>(tmp.GetFile()));
SetOutputStream(buffered_output_stream);
GenerateTestOutput();
}
@@ -125,7 +124,7 @@ TEST_F(OutputStreamTest, BufferedFlush) {
bool flush_called;
};
- std::unique_ptr<CheckingOutputStream> cos = MakeUnique<CheckingOutputStream>();
+ std::unique_ptr<CheckingOutputStream> cos = std::make_unique<CheckingOutputStream>();
CheckingOutputStream* checking_output_stream = cos.get();
BufferedOutputStream buffered(std::move(cos));
ASSERT_FALSE(checking_output_stream->flush_called);
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 1578c0cd3e..55d0bd95d7 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -19,6 +19,7 @@
#include "arch/instruction_set_features.h"
#include "art_method-inl.h"
#include "base/enums.h"
+#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_compiler_test.h"
@@ -220,11 +221,12 @@ class OatTest : public CommonCompilerTest {
elf_writer->PrepareDynamicSection(rodata_size,
text_size,
oat_writer.GetBssSize(),
+ oat_writer.GetBssMethodsOffset(),
oat_writer.GetBssRootsOffset());
if (kIsVdexEnabled) {
- std::unique_ptr<BufferedOutputStream> vdex_out(
- MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(vdex_file)));
+ std::unique_ptr<BufferedOutputStream> vdex_out =
+ std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(vdex_file));
if (!oat_writer.WriteVerifierDeps(vdex_out.get(), nullptr)) {
return false;
}
@@ -483,7 +485,7 @@ TEST_F(OatTest, WriteRead) {
TEST_F(OatTest, OatHeaderSizeCheck) {
// If this test is failing and you have to update these constants,
// it is time to update OatHeader::kOatVersion
- EXPECT_EQ(72U, sizeof(OatHeader));
+ EXPECT_EQ(76U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
EXPECT_EQ(161 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index fed2d34cdb..59daf5a09e 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -22,7 +22,7 @@
#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method-inl.h"
#include "base/allocator.h"
-#include "base/bit_vector.h"
+#include "base/bit_vector-inl.h"
#include "base/enums.h"
#include "base/file_magic.h"
#include "base/stl_util.h"
@@ -41,6 +41,7 @@
#include "image_writer.h"
#include "linker/buffered_output_stream.h"
#include "linker/file_output_stream.h"
+#include "linker/method_bss_mapping_encoder.h"
#include "linker/multi_oat_relative_patcher.h"
#include "linker/output_stream.h"
#include "mirror/array.h"
@@ -230,12 +231,14 @@ class OatWriter::OatDexFile {
return dex_file_location_data_;
}
- void ReserveClassOffsets(OatWriter* oat_writer);
-
size_t SizeOf() const;
bool Write(OatWriter* oat_writer, OutputStream* out) const;
bool WriteClassOffsets(OatWriter* oat_writer, OutputStream* out);
+ size_t GetClassOffsetsRawSize() const {
+ return class_offsets_.size() * sizeof(class_offsets_[0]);
+ }
+
// The source of the dex file.
DexFileSource source_;
@@ -256,15 +259,12 @@ class OatWriter::OatDexFile {
uint32_t dex_file_offset_;
uint32_t class_offsets_offset_;
uint32_t lookup_table_offset_;
+ uint32_t method_bss_mapping_offset_;
// Data to write to a separate section.
dchecked_vector<uint32_t> class_offsets_;
private:
- size_t GetClassOffsetsRawSize() const {
- return class_offsets_.size() * sizeof(class_offsets_[0]);
- }
-
DISALLOW_COPY_AND_ASSIGN(OatDexFile);
};
@@ -294,7 +294,10 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCo
oat_size_(0u),
bss_start_(0u),
bss_size_(0u),
+ bss_methods_offset_(0u),
bss_roots_offset_(0u),
+ bss_method_entry_references_(),
+ bss_method_entries_(),
bss_type_entries_(),
bss_string_entries_(),
oat_data_offset_(0u),
@@ -331,6 +334,7 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCo
size_oat_dex_file_offset_(0),
size_oat_dex_file_class_offsets_offset_(0),
size_oat_dex_file_lookup_table_offset_(0),
+ size_oat_dex_file_method_bss_mapping_offset_(0),
size_oat_lookup_table_alignment_(0),
size_oat_lookup_table_(0),
size_oat_class_offsets_alignment_(0),
@@ -339,6 +343,7 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCo
size_oat_class_status_(0),
size_oat_class_method_bitmaps_(0),
size_oat_class_method_offsets_(0),
+ size_method_bss_mappings_(0u),
relative_patcher_(nullptr),
absolute_patch_locations_(),
profile_compilation_info_(info) {
@@ -502,17 +507,16 @@ bool OatWriter::WriteAndOpenDexFiles(
// Reserve space for Vdex header and checksums.
vdex_size_ = sizeof(VdexFile::Header) + oat_dex_files_.size() * sizeof(VdexFile::VdexChecksum);
}
- size_t oat_data_offset = InitOatHeader(instruction_set,
- instruction_set_features,
- dchecked_integral_cast<uint32_t>(oat_dex_files_.size()),
- key_value_store);
- oat_size_ = InitOatDexFiles(oat_data_offset);
+ oat_size_ = InitOatHeader(instruction_set,
+ instruction_set_features,
+ dchecked_integral_cast<uint32_t>(oat_dex_files_.size()),
+ key_value_store);
ChecksumUpdatingOutputStream checksum_updating_rodata(oat_rodata, oat_header_.get());
if (kIsVdexEnabled) {
- std::unique_ptr<BufferedOutputStream> vdex_out(
- MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(vdex_file)));
+ std::unique_ptr<BufferedOutputStream> vdex_out =
+ std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(vdex_file));
// Write DEX files into VDEX, mmap and open them.
if (!WriteDexFiles(vdex_out.get(), vdex_file, update_input_vdex) ||
!OpenDexFiles(vdex_file, verify, &dex_files_map, &dex_files)) {
@@ -539,16 +543,6 @@ bool OatWriter::WriteAndOpenDexFiles(
return false;
}
- // Reserve space for class offsets in OAT and update class_offsets_offset_.
- for (OatDexFile& oat_dex_file : oat_dex_files_) {
- oat_dex_file.ReserveClassOffsets(this);
- }
-
- // Write OatDexFiles into OAT. Needs to be done last, once offsets are collected.
- if (!WriteOatDexFiles(&checksum_updating_rodata)) {
- return false;
- }
-
*opened_dex_files_map = std::move(dex_files_map);
*opened_dex_files = std::move(dex_files);
write_state_ = WriteState::kPrepareLayout;
@@ -567,16 +561,34 @@ void OatWriter::PrepareLayout(linker::MultiOatRelativePatcher* relative_patcher)
InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
CHECK_EQ(instruction_set, oat_header_->GetInstructionSet());
+ {
+ TimingLogger::ScopedTiming split("InitBssLayout", timings_);
+ InitBssLayout(instruction_set);
+ }
+
uint32_t offset = oat_size_;
{
+ TimingLogger::ScopedTiming split("InitClassOffsets", timings_);
+ offset = InitClassOffsets(offset);
+ }
+ {
TimingLogger::ScopedTiming split("InitOatClasses", timings_);
offset = InitOatClasses(offset);
}
{
+ TimingLogger::ScopedTiming split("InitMethodBssMappings", timings_);
+ offset = InitMethodBssMappings(offset);
+ }
+ {
TimingLogger::ScopedTiming split("InitOatMaps", timings_);
offset = InitOatMaps(offset);
}
{
+ TimingLogger::ScopedTiming split("InitOatDexFiles", timings_);
+ oat_header_->SetOatDexFilesOffset(offset);
+ offset = InitOatDexFiles(offset);
+ }
+ {
TimingLogger::ScopedTiming split("InitOatCode", timings_);
offset = InitOatCode(offset);
}
@@ -585,11 +597,7 @@ void OatWriter::PrepareLayout(linker::MultiOatRelativePatcher* relative_patcher)
offset = InitOatCodeDexFiles(offset);
}
oat_size_ = offset;
-
- {
- TimingLogger::ScopedTiming split("InitBssLayout", timings_);
- InitBssLayout(instruction_set);
- }
+ bss_start_ = (bss_size_ != 0u) ? RoundUp(oat_size_, kPageSize) : 0u;
CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
if (compiling_boot_image_) {
@@ -606,11 +614,10 @@ OatWriter::~OatWriter() {
class OatWriter::DexMethodVisitor {
public:
DexMethodVisitor(OatWriter* writer, size_t offset)
- : writer_(writer),
- offset_(offset),
- dex_file_(nullptr),
- class_def_index_(DexFile::kDexNoIndex) {
- }
+ : writer_(writer),
+ offset_(offset),
+ dex_file_(nullptr),
+ class_def_index_(DexFile::kDexNoIndex) {}
virtual bool StartClass(const DexFile* dex_file, size_t class_def_index) {
DCHECK(dex_file_ == nullptr);
@@ -650,19 +657,18 @@ class OatWriter::DexMethodVisitor {
class OatWriter::OatDexMethodVisitor : public DexMethodVisitor {
public:
OatDexMethodVisitor(OatWriter* writer, size_t offset)
- : DexMethodVisitor(writer, offset),
- oat_class_index_(0u),
- method_offsets_index_(0u) {
- }
+ : DexMethodVisitor(writer, offset),
+ oat_class_index_(0u),
+ method_offsets_index_(0u) {}
- bool StartClass(const DexFile* dex_file, size_t class_def_index) {
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE {
DexMethodVisitor::StartClass(dex_file, class_def_index);
DCHECK_LT(oat_class_index_, writer_->oat_classes_.size());
method_offsets_index_ = 0u;
return true;
}
- bool EndClass() {
+ bool EndClass() OVERRIDE {
++oat_class_index_;
return DexMethodVisitor::EndClass();
}
@@ -672,21 +678,61 @@ class OatWriter::OatDexMethodVisitor : public DexMethodVisitor {
size_t method_offsets_index_;
};
+class OatWriter::InitBssLayoutMethodVisitor : public DexMethodVisitor {
+ public:
+ explicit InitBssLayoutMethodVisitor(OatWriter* writer)
+ : DexMethodVisitor(writer, /* offset */ 0u) {}
+
+ bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
+ const ClassDataItemIterator& it) OVERRIDE {
+ // Look for patches with .bss references and prepare maps with placeholders for their offsets.
+ CompiledMethod* compiled_method = writer_->compiler_driver_->GetCompiledMethod(
+ MethodReference(dex_file_, it.GetMemberIndex()));
+ if (compiled_method != nullptr) {
+ for (const LinkerPatch& patch : compiled_method->GetPatches()) {
+ if (patch.GetType() == LinkerPatch::Type::kMethodBssEntry) {
+ MethodReference target_method = patch.TargetMethod();
+ auto refs_it = writer_->bss_method_entry_references_.find(target_method.dex_file);
+ if (refs_it == writer_->bss_method_entry_references_.end()) {
+ refs_it = writer_->bss_method_entry_references_.Put(
+ target_method.dex_file,
+ BitVector(target_method.dex_file->NumMethodIds(),
+ /* expandable */ false,
+ Allocator::GetMallocAllocator()));
+ refs_it->second.ClearAllBits();
+ }
+ refs_it->second.SetBit(target_method.dex_method_index);
+ writer_->bss_method_entries_.Overwrite(target_method, /* placeholder */ 0u);
+ } else if (patch.GetType() == LinkerPatch::Type::kTypeBssEntry) {
+ TypeReference ref(patch.TargetTypeDexFile(), patch.TargetTypeIndex());
+ writer_->bss_type_entries_.Overwrite(ref, /* placeholder */ 0u);
+ } else if (patch.GetType() == LinkerPatch::Type::kStringBssEntry) {
+ StringReference ref(patch.TargetStringDexFile(), patch.TargetStringIndex());
+ writer_->bss_string_entries_.Overwrite(ref, /* placeholder */ 0u);
+ }
+ }
+ }
+ return true;
+ }
+};
+
class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
public:
InitOatClassesMethodVisitor(OatWriter* writer, size_t offset)
- : DexMethodVisitor(writer, offset),
- compiled_methods_(),
- num_non_null_compiled_methods_(0u) {
+ : DexMethodVisitor(writer, offset),
+ compiled_methods_(),
+ num_non_null_compiled_methods_(0u) {
size_t num_classes = 0u;
for (const OatDexFile& oat_dex_file : writer_->oat_dex_files_) {
num_classes += oat_dex_file.class_offsets_.size();
}
writer_->oat_classes_.reserve(num_classes);
compiled_methods_.reserve(256u);
+ // If there are any classes, the class offsets allocation aligns the offset.
+ DCHECK(num_classes == 0u || IsAligned<4u>(offset));
}
- bool StartClass(const DexFile* dex_file, size_t class_def_index) {
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE {
DexMethodVisitor::StartClass(dex_file, class_def_index);
compiled_methods_.clear();
num_non_null_compiled_methods_ = 0u;
@@ -694,7 +740,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
}
bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
- const ClassDataItemIterator& it) {
+ const ClassDataItemIterator& it) OVERRIDE {
// Fill in the compiled_methods_ array for methods that have a
// CompiledMethod. We track the number of non-null entries in
// num_non_null_compiled_methods_ since we only want to allocate
@@ -704,12 +750,12 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
writer_->compiler_driver_->GetCompiledMethod(MethodReference(dex_file_, method_idx));
compiled_methods_.push_back(compiled_method);
if (compiled_method != nullptr) {
- ++num_non_null_compiled_methods_;
+ ++num_non_null_compiled_methods_;
}
return true;
}
- bool EndClass() {
+ bool EndClass() OVERRIDE {
ClassReference class_ref(dex_file_, class_def_index_);
mirror::Class::Status status;
bool found = writer_->compiler_driver_->GetCompiledClass(class_ref, &status);
@@ -740,14 +786,14 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
public:
InitCodeMethodVisitor(OatWriter* writer, size_t offset, size_t quickening_info_offset)
- : OatDexMethodVisitor(writer, offset),
- debuggable_(writer->GetCompilerDriver()->GetCompilerOptions().GetDebuggable()),
- current_quickening_info_offset_(quickening_info_offset) {
+ : OatDexMethodVisitor(writer, offset),
+ debuggable_(writer->GetCompilerDriver()->GetCompilerOptions().GetDebuggable()),
+ current_quickening_info_offset_(quickening_info_offset) {
writer_->absolute_patch_locations_.reserve(
writer_->compiler_driver_->GetNonRelativeLinkerPatchCount());
}
- bool EndClass() {
+ bool EndClass() OVERRIDE {
OatDexMethodVisitor::EndClass();
if (oat_class_index_ == writer_->oat_classes_.size()) {
offset_ = writer_->relative_patcher_->ReserveSpaceEnd(offset_);
@@ -755,7 +801,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
return true;
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -858,14 +904,6 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
if (!patch.IsPcRelative()) {
writer_->absolute_patch_locations_.push_back(base_loc + patch.LiteralOffset());
}
- if (patch.GetType() == LinkerPatch::Type::kTypeBssEntry) {
- TypeReference ref(patch.TargetTypeDexFile(), patch.TargetTypeIndex());
- writer_->bss_type_entries_.Overwrite(ref, /* placeholder */ 0u);
- }
- if (patch.GetType() == LinkerPatch::Type::kStringBssEntry) {
- StringReference ref(patch.TargetStringDexFile(), patch.TargetStringIndex());
- writer_->bss_string_entries_.Overwrite(ref, /* placeholder */ 0u);
- }
}
}
}
@@ -950,11 +988,10 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
public:
InitMapMethodVisitor(OatWriter* writer, size_t offset)
- : OatDexMethodVisitor(writer, offset) {
- }
+ : OatDexMethodVisitor(writer, offset) {}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -997,7 +1034,7 @@ class OatWriter::InitMethodInfoVisitor : public OatDexMethodVisitor {
InitMethodInfoVisitor(OatWriter* writer, size_t offset) : OatDexMethodVisitor(writer, offset) {}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1035,18 +1072,17 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
InitImageMethodVisitor(OatWriter* writer,
size_t offset,
const std::vector<const DexFile*>* dex_files)
- : OatDexMethodVisitor(writer, offset),
- pointer_size_(GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet())),
- dex_files_(dex_files),
- class_linker_(Runtime::Current()->GetClassLinker()) {
- }
+ : OatDexMethodVisitor(writer, offset),
+ pointer_size_(GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet())),
+ dex_files_(dex_files),
+ class_linker_(Runtime::Current()->GetClassLinker()) {}
// Handle copied methods here. Copy pointer to quick code from
// an origin method to a copied method only if they are
// in the same oat file. If the origin and the copied methods are
// in different oat files don't touch the copied method.
// References to other oat files are not supported yet.
- bool StartClass(const DexFile* dex_file, size_t class_def_index)
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_) {
OatDexMethodVisitor::StartClass(dex_file, class_def_index);
// Skip classes that are not in the image.
@@ -1085,7 +1121,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
return true;
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_) {
// Skip methods that are not in the image.
if (!IsImageClass()) {
@@ -1131,8 +1167,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
// Should already have been resolved by the compiler, just peek into the dex cache.
// It may not be resolved if the class failed to verify, in this case, don't set the
// entrypoint. This is not fatal since the dex cache will contain a resolution method.
- method = dex_cache->GetResolvedMethod(it.GetMemberIndex(),
- class_linker_->GetImagePointerSize());
+ method = dex_cache->GetResolvedMethod(it.GetMemberIndex(), pointer_size_);
}
if (method != nullptr &&
compiled_method != nullptr &&
@@ -1171,7 +1206,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
}
}
- protected:
+ private:
const PointerSize pointer_size_;
const std::vector<const DexFile*>* dex_files_;
ClassLinker* const class_linker_;
@@ -1182,14 +1217,15 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
public:
WriteCodeMethodVisitor(OatWriter* writer, OutputStream* out, const size_t file_offset,
size_t relative_offset) SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- : OatDexMethodVisitor(writer, relative_offset),
- class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
- out_(out),
- file_offset_(file_offset),
- soa_(Thread::Current()),
- no_thread_suspension_("OatWriter patching"),
- class_linker_(Runtime::Current()->GetClassLinker()),
- dex_cache_(nullptr) {
+ : OatDexMethodVisitor(writer, relative_offset),
+ pointer_size_(GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet())),
+ class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
+ out_(out),
+ file_offset_(file_offset),
+ soa_(Thread::Current()),
+ no_thread_suspension_("OatWriter patching"),
+ class_linker_(Runtime::Current()->GetClassLinker()),
+ dex_cache_(nullptr) {
patched_code_.reserve(16 * KB);
if (writer_->HasBootImage()) {
// If we're creating the image, the address space must be ready so that we can apply patches.
@@ -1200,7 +1236,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
~WriteCodeMethodVisitor() UNLOCK_FUNCTION(Locks::mutator_lock_) {
}
- bool StartClass(const DexFile* dex_file, size_t class_def_index)
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_) {
OatDexMethodVisitor::StartClass(dex_file, class_def_index);
if (dex_cache_ == nullptr || dex_cache_->GetDexFile() != dex_file) {
@@ -1210,7 +1246,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
return true;
}
- bool EndClass() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool EndClass() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
bool result = OatDexMethodVisitor::EndClass();
if (oat_class_index_ == writer_->oat_classes_.size()) {
DCHECK(result); // OatDexMethodVisitor::EndClass() never fails.
@@ -1223,7 +1259,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
return result;
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1275,6 +1311,15 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
for (const LinkerPatch& patch : compiled_method->GetPatches()) {
uint32_t literal_offset = patch.LiteralOffset();
switch (patch.GetType()) {
+ case LinkerPatch::Type::kMethodBssEntry: {
+ uint32_t target_offset =
+ writer_->bss_start_ + writer_->bss_method_entries_.Get(patch.TargetMethod());
+ writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
+ patch,
+ offset_ + literal_offset,
+ target_offset);
+ break;
+ }
case LinkerPatch::Type::kCallRelative: {
// NOTE: Relative calls across oat files are not supported.
uint32_t target_offset = GetTargetOffset(patch);
@@ -1284,14 +1329,6 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
target_offset);
break;
}
- case LinkerPatch::Type::kDexCacheArray: {
- uint32_t target_offset = GetDexCacheOffset(patch);
- writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
- patch,
- offset_ + literal_offset,
- target_offset);
- break;
- }
case LinkerPatch::Type::kStringRelative: {
uint32_t target_offset = GetTargetObjectOffset(GetTargetString(patch));
writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
@@ -1302,7 +1339,8 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
case LinkerPatch::Type::kStringBssEntry: {
StringReference ref(patch.TargetStringDexFile(), patch.TargetStringIndex());
- uint32_t target_offset = writer_->bss_string_entries_.Get(ref);
+ uint32_t target_offset =
+ writer_->bss_start_ + writer_->bss_string_entries_.Get(ref);
writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
patch,
offset_ + literal_offset,
@@ -1319,7 +1357,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
case LinkerPatch::Type::kTypeBssEntry: {
TypeReference ref(patch.TargetTypeDexFile(), patch.TargetTypeIndex());
- uint32_t target_offset = writer_->bss_type_entries_.Get(ref);
+ uint32_t target_offset = writer_->bss_start_ + writer_->bss_type_entries_.Get(ref);
writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
patch,
offset_ + literal_offset,
@@ -1368,6 +1406,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
private:
+ const PointerSize pointer_size_;
ObjPtr<mirror::ClassLoader> class_loader_;
OutputStream* const out_;
const size_t file_offset_;
@@ -1388,8 +1427,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
ObjPtr<mirror::DexCache> dex_cache =
(dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(
Thread::Current(), *ref.dex_file);
- ArtMethod* method = dex_cache->GetResolvedMethod(
- ref.dex_method_index, class_linker_->GetImagePointerSize());
+ ArtMethod* method = dex_cache->GetResolvedMethod(ref.dex_method_index, pointer_size_);
CHECK(method != nullptr);
return method;
}
@@ -1401,9 +1439,8 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
if (UNLIKELY(target_offset == 0)) {
ArtMethod* target = GetTargetMethod(patch);
DCHECK(target != nullptr);
- PointerSize size =
- GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet());
- const void* oat_code_offset = target->GetEntryPointFromQuickCompiledCodePtrSize(size);
+ const void* oat_code_offset =
+ target->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
if (oat_code_offset != 0) {
DCHECK(!writer_->HasBootImage());
DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(oat_code_offset));
@@ -1447,19 +1484,6 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
return string;
}
- uint32_t GetDexCacheOffset(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (writer_->HasBootImage()) {
- uintptr_t element = writer_->image_writer_->GetDexCacheArrayElementImageAddress<uintptr_t>(
- patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
- size_t oat_index = writer_->image_writer_->GetOatIndexForDexCache(dex_cache_);
- uintptr_t oat_data = writer_->image_writer_->GetOatDataBegin(oat_index);
- return element - oat_data;
- } else {
- size_t start = writer_->dex_cache_arrays_offsets_.Get(patch.TargetDexCacheDexFile());
- return start + patch.TargetDexCacheElementOffset();
- }
- }
-
uint32_t GetTargetMethodOffset(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(writer_->HasBootImage());
method = writer_->image_writer_->GetImageMethodAddress(method);
@@ -1525,12 +1549,11 @@ class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor {
OutputStream* out,
const size_t file_offset,
size_t relative_offset)
- : OatDexMethodVisitor(writer, relative_offset),
- out_(out),
- file_offset_(file_offset) {
- }
+ : OatDexMethodVisitor(writer, relative_offset),
+ out_(out),
+ file_offset_(file_offset) {}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) OVERRIDE {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1589,11 +1612,11 @@ class OatWriter::WriteMethodInfoVisitor : public OatDexMethodVisitor {
OutputStream* out,
const size_t file_offset,
size_t relative_offset)
- : OatDexMethodVisitor(writer, relative_offset),
- out_(out),
- file_offset_(file_offset) {}
+ : OatDexMethodVisitor(writer, relative_offset),
+ out_(out),
+ file_offset_(file_offset) {}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) OVERRIDE {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1698,12 +1721,17 @@ size_t OatWriter::InitOatHeader(InstructionSet instruction_set,
return oat_header_->GetHeaderSize();
}
-size_t OatWriter::InitOatDexFiles(size_t offset) {
- TimingLogger::ScopedTiming split("InitOatDexFiles", timings_);
- // Initialize offsets of dex files.
+size_t OatWriter::InitClassOffsets(size_t offset) {
+ // Reserve space for class offsets in OAT and update class_offsets_offset_.
for (OatDexFile& oat_dex_file : oat_dex_files_) {
- oat_dex_file.offset_ = offset;
- offset += oat_dex_file.SizeOf();
+ DCHECK_EQ(oat_dex_file.class_offsets_offset_, 0u);
+ if (!oat_dex_file.class_offsets_.empty()) {
+ // Class offsets are required to be 4 byte aligned.
+ offset = RoundUp(offset, 4u);
+ oat_dex_file.class_offsets_offset_ = offset;
+ offset += oat_dex_file.GetClassOffsetsRawSize();
+ DCHECK_ALIGNED(offset, 4u);
+ }
}
return offset;
}
@@ -1748,6 +1776,50 @@ size_t OatWriter::InitOatMaps(size_t offset) {
return offset;
}
+size_t OatWriter::InitMethodBssMappings(size_t offset) {
+ size_t number_of_dex_files = 0u;
+ for (size_t i = 0, size = dex_files_->size(); i != size; ++i) {
+ const DexFile* dex_file = (*dex_files_)[i];
+ auto it = bss_method_entry_references_.find(dex_file);
+ if (it != bss_method_entry_references_.end()) {
+ const BitVector& method_indexes = it->second;
+ ++number_of_dex_files;
+ // If there are any classes, the class offsets allocation aligns the offset
+ // and we cannot have method bss mappings without class offsets.
+ static_assert(alignof(MethodBssMapping) == 4u, "MethodBssMapping alignment check.");
+ DCHECK_ALIGNED(offset, 4u);
+ oat_dex_files_[i].method_bss_mapping_offset_ = offset;
+
+ linker::MethodBssMappingEncoder encoder(
+ GetInstructionSetPointerSize(oat_header_->GetInstructionSet()));
+ size_t number_of_entries = 0u;
+ bool first_index = true;
+ for (uint32_t method_index : method_indexes.Indexes()) {
+ uint32_t bss_offset = bss_method_entries_.Get(MethodReference(dex_file, method_index));
+ if (first_index || !encoder.TryMerge(method_index, bss_offset)) {
+ encoder.Reset(method_index, bss_offset);
+ ++number_of_entries;
+ first_index = false;
+ }
+ }
+ DCHECK_NE(number_of_entries, 0u);
+ offset += MethodBssMapping::ComputeSize(number_of_entries);
+ }
+ }
+ // Check that all dex files targeted by method bss entries are in `*dex_files_`.
+ CHECK_EQ(number_of_dex_files, bss_method_entry_references_.size());
+ return offset;
+}
+
+size_t OatWriter::InitOatDexFiles(size_t offset) {
+ // Initialize offsets of oat dex files.
+ for (OatDexFile& oat_dex_file : oat_dex_files_) {
+ oat_dex_file.offset_ = offset;
+ offset += oat_dex_file.SizeOf();
+ }
+ return offset;
+}
+
size_t OatWriter::InitOatCode(size_t offset) {
// calculate the offsets within OatHeader to executable code
size_t old_offset = offset;
@@ -1806,38 +1878,51 @@ size_t OatWriter::InitOatCodeDexFiles(size_t offset) {
}
void OatWriter::InitBssLayout(InstructionSet instruction_set) {
+ {
+ InitBssLayoutMethodVisitor visitor(this);
+ bool success = VisitDexMethods(&visitor);
+ DCHECK(success);
+ }
+
+ DCHECK_EQ(bss_size_, 0u);
if (HasBootImage()) {
DCHECK(bss_string_entries_.empty());
- if (bss_type_entries_.empty()) {
+ if (bss_method_entries_.empty() && bss_type_entries_.empty()) {
// Nothing to put to the .bss section.
return;
}
}
// Allocate space for app dex cache arrays in the .bss section.
- bss_start_ = RoundUp(oat_size_, kPageSize);
- bss_size_ = 0u;
+ PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
if (!HasBootImage()) {
- PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
for (const DexFile* dex_file : *dex_files_) {
- dex_cache_arrays_offsets_.Put(dex_file, bss_start_ + bss_size_);
DexCacheArraysLayout layout(pointer_size, dex_file);
bss_size_ += layout.Size();
}
}
+ bss_methods_offset_ = bss_size_;
+
+ // Prepare offsets for .bss ArtMethod entries.
+ for (auto& entry : bss_method_entries_) {
+ DCHECK_EQ(entry.second, 0u);
+ entry.second = bss_size_;
+ bss_size_ += static_cast<size_t>(pointer_size);
+ }
+
bss_roots_offset_ = bss_size_;
// Prepare offsets for .bss Class entries.
for (auto& entry : bss_type_entries_) {
DCHECK_EQ(entry.second, 0u);
- entry.second = bss_start_ + bss_size_;
+ entry.second = bss_size_;
bss_size_ += sizeof(GcRoot<mirror::Class>);
}
// Prepare offsets for .bss String entries.
for (auto& entry : bss_string_entries_) {
DCHECK_EQ(entry.second, 0u);
- entry.second = bss_start_ + bss_size_;
+ entry.second = bss_size_;
bss_size_ += sizeof(GcRoot<mirror::String>);
}
}
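Putting the pieces together, the layout this function now produces is: dex cache arrays (only when not compiling a boot image), then one pointer-sized slot per .bss ArtMethod entry, then the GC roots for Class and String entries, with all recorded offsets relative to the start of .bss (the patcher adds bss_start_ later, as in the kMethodBssEntry case of WriteCodeMethodVisitor above). A small standalone sketch with assumed counts on a 64-bit target:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kPointerSize = 8u;  // assumed 64-bit target
  const uint32_t kGcRootSize = 4u;   // a GcRoot<> holds one compressed (32-bit) reference
  uint32_t bss_size = 0x1000u;       // assumed dex cache arrays size (app compilation only)

  const uint32_t bss_methods_offset = bss_size;
  bss_size += 3u * kPointerSize;     // three ArtMethod* entries (assumed count)

  const uint32_t bss_roots_offset = bss_size;
  bss_size += 2u * kGcRootSize;      // two Class entries (assumed count)
  bss_size += 4u * kGcRootSize;      // four String entries (assumed count)

  std::printf("methods: [0x%x, 0x%x)  roots: [0x%x, 0x%x)\n",
              bss_methods_offset, bss_roots_offset, bss_roots_offset, bss_size);
  // A kMethodBssEntry patch for the first method slot resolves to bss_start_ + 0x1000.
  return 0;
}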
@@ -1845,30 +1930,45 @@ void OatWriter::InitBssLayout(InstructionSet instruction_set) {
bool OatWriter::WriteRodata(OutputStream* out) {
CHECK(write_state_ == WriteState::kWriteRoData);
+ size_t file_offset = oat_data_offset_;
+ off_t current_offset = out->Seek(0, kSeekCurrent);
+ if (current_offset == static_cast<off_t>(-1)) {
+ PLOG(ERROR) << "Failed to retrieve current position in " << out->GetLocation();
+ return false;
+ }
+ DCHECK_GE(static_cast<size_t>(current_offset), file_offset + oat_header_->GetHeaderSize());
+ size_t relative_offset = current_offset - file_offset;
+
// Wrap out to update checksum with each write.
ChecksumUpdatingOutputStream checksum_updating_out(out, oat_header_.get());
out = &checksum_updating_out;
- if (!WriteClassOffsets(out)) {
- LOG(ERROR) << "Failed to write class offsets to " << out->GetLocation();
+ relative_offset = WriteClassOffsets(out, file_offset, relative_offset);
+ if (relative_offset == 0) {
+ PLOG(ERROR) << "Failed to write class offsets to " << out->GetLocation();
return false;
}
- if (!WriteClasses(out)) {
- LOG(ERROR) << "Failed to write classes to " << out->GetLocation();
+ relative_offset = WriteClasses(out, file_offset, relative_offset);
+ if (relative_offset == 0) {
+ PLOG(ERROR) << "Failed to write classes to " << out->GetLocation();
return false;
}
- off_t tables_end_offset = out->Seek(0, kSeekCurrent);
- if (tables_end_offset == static_cast<off_t>(-1)) {
- LOG(ERROR) << "Failed to get oat code position in " << out->GetLocation();
+ relative_offset = WriteMethodBssMappings(out, file_offset, relative_offset);
+ if (relative_offset == 0) {
+ PLOG(ERROR) << "Failed to write method bss mappings to " << out->GetLocation();
return false;
}
- size_t file_offset = oat_data_offset_;
- size_t relative_offset = static_cast<size_t>(tables_end_offset) - file_offset;
+
relative_offset = WriteMaps(out, file_offset, relative_offset);
if (relative_offset == 0) {
- LOG(ERROR) << "Failed to write oat code to " << out->GetLocation();
+ PLOG(ERROR) << "Failed to write oat code to " << out->GetLocation();
+ return false;
+ }
+
+ relative_offset = WriteOatDexFiles(out, file_offset, relative_offset);
+ if (relative_offset == 0) {
+ PLOG(ERROR) << "Failed to write oat dex information to " << out->GetLocation();
return false;
}
@@ -1891,12 +1991,12 @@ bool OatWriter::WriteRodata(OutputStream* out) {
class OatWriter::WriteQuickeningInfoMethodVisitor : public DexMethodVisitor {
public:
WriteQuickeningInfoMethodVisitor(OatWriter* writer, OutputStream* out, uint32_t offset)
- : DexMethodVisitor(writer, offset),
- out_(out),
- written_bytes_(0u) {}
+ : DexMethodVisitor(writer, offset),
+ out_(out),
+ written_bytes_(0u) {}
bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
- const ClassDataItemIterator& it) {
+ const ClassDataItemIterator& it) OVERRIDE {
if (it.GetMethodCodeItem() == nullptr) {
// No CodeItem. Native or abstract method.
return true;
@@ -2092,6 +2192,7 @@ bool OatWriter::WriteCode(OutputStream* out) {
DO_STAT(size_oat_dex_file_offset_);
DO_STAT(size_oat_dex_file_class_offsets_offset_);
DO_STAT(size_oat_dex_file_lookup_table_offset_);
+ DO_STAT(size_oat_dex_file_method_bss_mapping_offset_);
DO_STAT(size_oat_lookup_table_alignment_);
DO_STAT(size_oat_lookup_table_);
DO_STAT(size_oat_class_offsets_alignment_);
@@ -2100,6 +2201,7 @@ bool OatWriter::WriteCode(OutputStream* out) {
DO_STAT(size_oat_class_status_);
DO_STAT(size_oat_class_method_bitmaps_);
DO_STAT(size_oat_class_method_offsets_);
+ DO_STAT(size_method_bss_mappings_);
#undef DO_STAT
VLOG(compiler) << "size_total=" << PrettySize(size_total) << " (" << size_total << "B)";
@@ -2172,35 +2274,41 @@ bool OatWriter::WriteHeader(OutputStream* out,
return true;
}
-bool OatWriter::WriteClassOffsets(OutputStream* out) {
+size_t OatWriter::WriteClassOffsets(OutputStream* out, size_t file_offset, size_t relative_offset) {
for (OatDexFile& oat_dex_file : oat_dex_files_) {
if (oat_dex_file.class_offsets_offset_ != 0u) {
- uint32_t expected_offset = oat_data_offset_ + oat_dex_file.class_offsets_offset_;
- off_t actual_offset = out->Seek(expected_offset, kSeekSet);
- if (static_cast<uint32_t>(actual_offset) != expected_offset) {
- PLOG(ERROR) << "Failed to seek to oat class offsets section. Actual: " << actual_offset
- << " Expected: " << expected_offset << " File: " << oat_dex_file.GetLocation();
- return false;
+ // Class offsets are required to be 4 byte aligned.
+ if (UNLIKELY(!IsAligned<4u>(relative_offset))) {
+ size_t padding_size = RoundUp(relative_offset, 4u) - relative_offset;
+ if (!WriteUpTo16BytesAlignment(out, padding_size, &size_oat_class_offsets_alignment_)) {
+ return 0u;
+ }
+ relative_offset += padding_size;
}
+ DCHECK_OFFSET();
if (!oat_dex_file.WriteClassOffsets(this, out)) {
- return false;
+ return 0u;
}
+ relative_offset += oat_dex_file.GetClassOffsetsRawSize();
}
}
- return true;
+ return relative_offset;
}
-bool OatWriter::WriteClasses(OutputStream* out) {
+size_t OatWriter::WriteClasses(OutputStream* out, size_t file_offset, size_t relative_offset) {
for (OatClass& oat_class : oat_classes_) {
+ // If there are any classes, the class offsets allocation aligns the offset.
+ DCHECK_ALIGNED(relative_offset, 4u);
+ DCHECK_OFFSET();
if (!oat_class.Write(this, out, oat_data_offset_)) {
- PLOG(ERROR) << "Failed to write oat methods information to " << out->GetLocation();
- return false;
+ return 0u;
}
+ relative_offset += oat_class.SizeOf();
}
- return true;
+ return relative_offset;
}
-size_t OatWriter::WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset) {
+size_t OatWriter::WriteMaps(OutputStream* out, size_t file_offset, size_t relative_offset) {
{
size_t vmap_tables_offset = relative_offset;
WriteMapMethodVisitor visitor(this, out, file_offset, relative_offset);
@@ -2223,7 +2331,87 @@ size_t OatWriter::WriteMaps(OutputStream* out, const size_t file_offset, size_t
return relative_offset;
}
-size_t OatWriter::WriteCode(OutputStream* out, const size_t file_offset, size_t relative_offset) {
+size_t OatWriter::WriteMethodBssMappings(OutputStream* out,
+ size_t file_offset,
+ size_t relative_offset) {
+ TimingLogger::ScopedTiming split("WriteMethodBssMappings", timings_);
+
+ for (size_t i = 0, size = dex_files_->size(); i != size; ++i) {
+ const DexFile* dex_file = (*dex_files_)[i];
+ OatDexFile* oat_dex_file = &oat_dex_files_[i];
+ auto it = bss_method_entry_references_.find(dex_file);
+ if (it != bss_method_entry_references_.end()) {
+ const BitVector& method_indexes = it->second;
+ // If there are any classes, the class offsets allocation aligns the offset
+ // and we cannot have method bss mappings without class offsets.
+ static_assert(alignof(MethodBssMapping) == sizeof(uint32_t),
+ "MethodBssMapping alignment check.");
+ DCHECK_ALIGNED(relative_offset, sizeof(uint32_t));
+
+ linker::MethodBssMappingEncoder encoder(
+ GetInstructionSetPointerSize(oat_header_->GetInstructionSet()));
+ // Allocate a sufficiently large MethodBssMapping.
+ size_t number_of_method_indexes = method_indexes.NumSetBits();
+ DCHECK_NE(number_of_method_indexes, 0u);
+ size_t max_mappings_size = MethodBssMapping::ComputeSize(number_of_method_indexes);
+ DCHECK_ALIGNED(max_mappings_size, sizeof(uint32_t));
+ std::unique_ptr<uint32_t[]> storage(new uint32_t[max_mappings_size / sizeof(uint32_t)]);
+ MethodBssMapping* mappings = new(storage.get()) MethodBssMapping(number_of_method_indexes);
+ mappings->ClearPadding();
+ // Encode the MethodBssMapping.
+ auto init_it = mappings->begin();
+ bool first_index = true;
+ for (uint32_t method_index : method_indexes.Indexes()) {
+ size_t bss_offset = bss_method_entries_.Get(MethodReference(dex_file, method_index));
+ if (first_index) {
+ first_index = false;
+ encoder.Reset(method_index, bss_offset);
+ } else if (!encoder.TryMerge(method_index, bss_offset)) {
+ *init_it = encoder.GetEntry();
+ ++init_it;
+ encoder.Reset(method_index, bss_offset);
+ }
+ }
+ // Store the last entry and shrink the mapping to the actual size.
+ *init_it = encoder.GetEntry();
+ ++init_it;
+ DCHECK(init_it <= mappings->end());
+ mappings->SetSize(std::distance(mappings->begin(), init_it));
+ size_t mappings_size = MethodBssMapping::ComputeSize(mappings->size());
+
+ DCHECK_EQ(relative_offset, oat_dex_file->method_bss_mapping_offset_);
+ DCHECK_OFFSET();
+ if (!out->WriteFully(storage.get(), mappings_size)) {
+ return 0u;
+ }
+ size_method_bss_mappings_ += mappings_size;
+ relative_offset += mappings_size;
+ } else {
+ DCHECK_EQ(0u, oat_dex_file->method_bss_mapping_offset_);
+ }
+ }
+ return relative_offset;
+}
+
+size_t OatWriter::WriteOatDexFiles(OutputStream* out, size_t file_offset, size_t relative_offset) {
+ TimingLogger::ScopedTiming split("WriteOatDexFiles", timings_);
+
+ for (size_t i = 0, size = oat_dex_files_.size(); i != size; ++i) {
+ OatDexFile* oat_dex_file = &oat_dex_files_[i];
+ DCHECK_EQ(relative_offset, oat_dex_file->offset_);
+ DCHECK_OFFSET();
+
+ // Write OatDexFile.
+ if (!oat_dex_file->Write(this, out)) {
+ return 0u;
+ }
+ relative_offset += oat_dex_file->SizeOf();
+ }
+
+ return relative_offset;
+}
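
    All of the Write*() stages above follow the same convention: on success they return the new
    relative offset, and they return 0u on failure so the caller in WriteRodata() needs only one
    check per stage (a successful offset is never zero, since writing starts after the OatHeader).
    A minimal sketch of that convention with hypothetical stage names:

    #include <cstddef>

    // Hypothetical stages standing in for WriteClassOffsets(), WriteClasses(), etc.
    size_t WriteStageA(size_t relative_offset) { return relative_offset + 16u; }
    size_t WriteStageB(size_t relative_offset) { return relative_offset + 32u; }

    bool WriteAllStages(size_t relative_offset) {
      relative_offset = WriteStageA(relative_offset);
      if (relative_offset == 0u) {
        return false;  // Stage reported failure; abort like WriteRodata() does.
      }
      relative_offset = WriteStageB(relative_offset);
      if (relative_offset == 0u) {
        return false;
      }
      return true;
    }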
+
+size_t OatWriter::WriteCode(OutputStream* out, size_t file_offset, size_t relative_offset) {
if (compiler_driver_->GetCompilerOptions().IsBootImage()) {
InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
@@ -2253,7 +2441,7 @@ size_t OatWriter::WriteCode(OutputStream* out, const size_t file_offset, size_t
}
size_t OatWriter::WriteCodeDexFiles(OutputStream* out,
- const size_t file_offset,
+ size_t file_offset,
size_t relative_offset) {
#define VISIT(VisitorType) \
do { \
@@ -2667,50 +2855,6 @@ bool OatWriter::WriteDexFile(OutputStream* out,
return true;
}
-bool OatWriter::WriteOatDexFiles(OutputStream* rodata) {
- TimingLogger::ScopedTiming split("WriteOatDexFiles", timings_);
-
- off_t initial_offset = rodata->Seek(0, kSeekCurrent);
- if (initial_offset == static_cast<off_t>(-1)) {
- LOG(ERROR) << "Failed to get current position in " << rodata->GetLocation();
- return false;
- }
-
- // Seek to the start of OatDexFiles, i.e. to the end of the OatHeader. If there are
- // no OatDexFiles, no data is actually written to .rodata before WriteHeader() and
- // this Seek() ensures that we reserve the space for OatHeader in .rodata.
- DCHECK(oat_dex_files_.empty() || oat_dex_files_[0u].offset_ == oat_header_->GetHeaderSize());
- uint32_t expected_offset = oat_data_offset_ + oat_header_->GetHeaderSize();
- off_t actual_offset = rodata->Seek(expected_offset, kSeekSet);
- if (static_cast<uint32_t>(actual_offset) != expected_offset) {
- PLOG(ERROR) << "Failed to seek to OatDexFile table section. Actual: " << actual_offset
- << " Expected: " << expected_offset << " File: " << rodata->GetLocation();
- return false;
- }
-
- for (size_t i = 0, size = oat_dex_files_.size(); i != size; ++i) {
- OatDexFile* oat_dex_file = &oat_dex_files_[i];
-
- DCHECK_EQ(oat_data_offset_ + oat_dex_file->offset_,
- static_cast<size_t>(rodata->Seek(0, kSeekCurrent)));
-
- // Write OatDexFile.
- if (!oat_dex_file->Write(this, rodata)) {
- PLOG(ERROR) << "Failed to write oat dex information to " << rodata->GetLocation();
- return false;
- }
- }
-
- // Seek back to the initial position.
- if (rodata->Seek(initial_offset, kSeekSet) != initial_offset) {
- PLOG(ERROR) << "Failed to seek to initial position. Actual: " << actual_offset
- << " Expected: " << initial_offset << " File: " << rodata->GetLocation();
- return false;
- }
-
- return true;
-}
-
bool OatWriter::OpenDexFiles(
File* file,
bool verify,
@@ -2929,14 +3073,18 @@ bool OatWriter::WriteChecksumsAndVdexHeader(OutputStream* vdex_out) {
}
bool OatWriter::WriteCodeAlignment(OutputStream* out, uint32_t aligned_code_delta) {
+ return WriteUpTo16BytesAlignment(out, aligned_code_delta, &size_code_alignment_);
+}
+
+bool OatWriter::WriteUpTo16BytesAlignment(OutputStream* out, uint32_t size, uint32_t* stat) {
static const uint8_t kPadding[] = {
0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u
};
- DCHECK_LE(aligned_code_delta, sizeof(kPadding));
- if (UNLIKELY(!out->WriteFully(kPadding, aligned_code_delta))) {
+ DCHECK_LE(size, sizeof(kPadding));
+ if (UNLIKELY(!out->WriteFully(kPadding, size))) {
return false;
}
- size_code_alignment_ += aligned_code_delta;
+ *stat += size;
return true;
}
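
    WriteUpTo16BytesAlignment() generalizes the old code-alignment helper: it writes up to 16
    zero bytes and charges them to whichever statistic the caller passes. A rough standalone
    sketch of the same idea, using std::ostream instead of ART's OutputStream (an assumption
    made only for this example):

    #include <cstddef>
    #include <cstdint>
    #include <ostream>

    // Pad `*offset` up to a 4-byte boundary with zero bytes and account the
    // padding in `*stat`, mirroring the WriteClassOffsets() call site above.
    bool WritePaddingTo4ByteBoundary(std::ostream& out, size_t* offset, uint32_t* stat) {
      static const uint8_t kPadding[4] = {0u, 0u, 0u, 0u};
      size_t aligned_offset = (*offset + 3u) & ~static_cast<size_t>(3u);  // RoundUp(*offset, 4u).
      size_t padding_size = aligned_offset - *offset;
      if (!out.write(reinterpret_cast<const char*>(kPadding), padding_size)) {
        return false;
      }
      *stat += static_cast<uint32_t>(padding_size);
      *offset += padding_size;
      return true;
    }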
@@ -2965,6 +3113,7 @@ OatWriter::OatDexFile::OatDexFile(const char* dex_file_location,
dex_file_offset_(0u),
class_offsets_offset_(0u),
lookup_table_offset_(0u),
+ method_bss_mapping_offset_(0u),
class_offsets_() {
}
@@ -2974,19 +3123,8 @@ size_t OatWriter::OatDexFile::SizeOf() const {
+ sizeof(dex_file_location_checksum_)
+ sizeof(dex_file_offset_)
+ sizeof(class_offsets_offset_)
- + sizeof(lookup_table_offset_);
-}
-
-void OatWriter::OatDexFile::ReserveClassOffsets(OatWriter* oat_writer) {
- DCHECK_EQ(class_offsets_offset_, 0u);
- if (!class_offsets_.empty()) {
- // Class offsets are required to be 4 byte aligned.
- size_t initial_offset = oat_writer->oat_size_;
- size_t offset = RoundUp(initial_offset, 4);
- oat_writer->size_oat_class_offsets_alignment_ += offset - initial_offset;
- class_offsets_offset_ = offset;
- oat_writer->oat_size_ = offset + GetClassOffsetsRawSize();
- }
+ + sizeof(lookup_table_offset_)
+ + sizeof(method_bss_mapping_offset_);
}
bool OatWriter::OatDexFile::Write(OatWriter* oat_writer, OutputStream* out) const {
@@ -3029,6 +3167,12 @@ bool OatWriter::OatDexFile::Write(OatWriter* oat_writer, OutputStream* out) cons
}
oat_writer->size_oat_dex_file_lookup_table_offset_ += sizeof(lookup_table_offset_);
+ if (!out->WriteFully(&method_bss_mapping_offset_, sizeof(method_bss_mapping_offset_))) {
+ PLOG(ERROR) << "Failed to write method bss mapping offset to " << out->GetLocation();
+ return false;
+ }
+ oat_writer->size_oat_dex_file_method_bss_mapping_offset_ += sizeof(method_bss_mapping_offset_);
+
return true;
}
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 66b70ade2e..9217701bc5 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -60,11 +60,6 @@ namespace verifier {
// OatHeader variable length with count of D OatDexFiles
//
-// OatDexFile[0] one variable sized OatDexFile with offsets to Dex and OatClasses
-// OatDexFile[1]
-// ...
-// OatDexFile[D]
-//
// TypeLookupTable[0] one descriptor to class def index hash table for each OatDexFile.
// TypeLookupTable[1]
// ...
@@ -80,20 +75,25 @@ namespace verifier {
// ...
// OatClass[C]
//
-// GcMap one variable sized blob with GC map.
-// GcMap GC maps are deduplicated.
+// MethodBssMapping one variable sized MethodBssMapping for each dex file, optional.
+// MethodBssMapping
// ...
-// GcMap
+// MethodBssMapping
//
-// VmapTable one variable sized VmapTable blob (quick compiler only).
+// VmapTable one variable sized VmapTable blob (CodeInfo or QuickeningInfo).
// VmapTable VmapTables are deduplicated.
// ...
// VmapTable
//
-// MappingTable one variable sized blob with MappingTable (quick compiler only).
-// MappingTable MappingTables are deduplicated.
+// MethodInfo one variable sized blob with MethodInfo.
+// MethodInfo MethodInfos are deduplicated.
+// ...
+// MethodInfo
+//
+// OatDexFile[0] one variable sized OatDexFile with offsets to Dex and OatClasses
+// OatDexFile[1]
// ...
-// MappingTable
+// OatDexFile[D]
//
// padding if necessary so that the following code will be page aligned
//
@@ -217,6 +217,10 @@ class OatWriter {
return bss_size_;
}
+ size_t GetBssMethodsOffset() const {
+ return bss_methods_offset_;
+ }
+
size_t GetBssRootsOffset() const {
return bss_roots_offset_;
}
@@ -251,6 +255,7 @@ class OatWriter {
// to actually write it.
class DexMethodVisitor;
class OatDexMethodVisitor;
+ class InitBssLayoutMethodVisitor;
class InitOatClassesMethodVisitor;
class InitCodeMethodVisitor;
class InitMapMethodVisitor;
@@ -295,26 +300,30 @@ class OatWriter {
const InstructionSetFeatures* instruction_set_features,
uint32_t num_dex_files,
SafeMap<std::string, std::string>* key_value_store);
- size_t InitOatDexFiles(size_t offset);
+ size_t InitClassOffsets(size_t offset);
size_t InitOatClasses(size_t offset);
size_t InitOatMaps(size_t offset);
+ size_t InitMethodBssMappings(size_t offset);
+ size_t InitOatDexFiles(size_t offset);
size_t InitOatCode(size_t offset);
size_t InitOatCodeDexFiles(size_t offset);
void InitBssLayout(InstructionSet instruction_set);
- bool WriteClassOffsets(OutputStream* out);
- bool WriteClasses(OutputStream* out);
- size_t WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset);
- size_t WriteCode(OutputStream* out, const size_t file_offset, size_t relative_offset);
- size_t WriteCodeDexFiles(OutputStream* out, const size_t file_offset, size_t relative_offset);
+ size_t WriteClassOffsets(OutputStream* out, size_t file_offset, size_t relative_offset);
+ size_t WriteClasses(OutputStream* out, size_t file_offset, size_t relative_offset);
+ size_t WriteMaps(OutputStream* out, size_t file_offset, size_t relative_offset);
+ size_t WriteMethodBssMappings(OutputStream* out, size_t file_offset, size_t relative_offset);
+ size_t WriteOatDexFiles(OutputStream* out, size_t file_offset, size_t relative_offset);
+ size_t WriteCode(OutputStream* out, size_t file_offset, size_t relative_offset);
+ size_t WriteCodeDexFiles(OutputStream* out, size_t file_offset, size_t relative_offset);
bool RecordOatDataOffset(OutputStream* out);
bool ReadDexFileHeader(File* oat_file, OatDexFile* oat_dex_file);
bool ValidateDexFileHeader(const uint8_t* raw_header, const char* location);
- bool WriteOatDexFiles(OutputStream* oat_rodata);
bool WriteTypeLookupTables(OutputStream* oat_rodata,
const std::vector<std::unique_ptr<const DexFile>>& opened_dex_files);
bool WriteCodeAlignment(OutputStream* out, uint32_t aligned_code_delta);
+ bool WriteUpTo16BytesAlignment(OutputStream* out, uint32_t size, uint32_t* stat);
void SetMultiOatRelativePatcherAdjustment();
void CloseSources();
@@ -368,9 +377,20 @@ class OatWriter {
// The size of the required .bss section holding the DexCache data and GC roots.
size_t bss_size_;
+ // The offset of the methods in .bss section.
+ size_t bss_methods_offset_;
+
// The offset of the GC roots in .bss section.
size_t bss_roots_offset_;
+ // Map for recording references to ArtMethod entries in .bss.
+ SafeMap<const DexFile*, BitVector> bss_method_entry_references_;
+
+ // Map for allocating ArtMethod entries in .bss. Indexed by MethodReference for the target
+ // method in the dex file with the "method reference value comparator" for deduplication.
+ // The value is the target offset for patching, starting at `bss_start_ + bss_methods_offset_`.
+ SafeMap<MethodReference, size_t, MethodReferenceValueComparator> bss_method_entries_;
+
// Map for allocating Class entries in .bss. Indexed by TypeReference for the source
// type in the dex file with the "type value comparator" for deduplication. The value
// is the target offset for patching, starting at `bss_start_ + bss_roots_offset_`.
@@ -381,10 +401,6 @@ class OatWriter {
// is the target offset for patching, starting at `bss_start_ + bss_roots_offset_`.
SafeMap<StringReference, size_t, StringReferenceValueComparator> bss_string_entries_;
- // Offsets of the dex cache arrays for each app dex file. For the
- // boot image, this information is provided by the ImageWriter.
- SafeMap<const DexFile*, size_t> dex_cache_arrays_offsets_; // DexFiles not owned.
-
// Offset of the oat data from the start of the mmapped region of the elf file.
size_t oat_data_offset_;
@@ -434,6 +450,7 @@ class OatWriter {
uint32_t size_oat_dex_file_offset_;
uint32_t size_oat_dex_file_class_offsets_offset_;
uint32_t size_oat_dex_file_lookup_table_offset_;
+ uint32_t size_oat_dex_file_method_bss_mapping_offset_;
uint32_t size_oat_lookup_table_alignment_;
uint32_t size_oat_lookup_table_;
uint32_t size_oat_class_offsets_alignment_;
@@ -442,6 +459,7 @@ class OatWriter {
uint32_t size_oat_class_status_;
uint32_t size_oat_class_method_bitmaps_;
uint32_t size_oat_class_method_offsets_;
+ uint32_t size_method_bss_mappings_;
// The helper for processing relative patches is external so that we can patch across oat files.
linker::MultiOatRelativePatcher* relative_patcher_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 65f3c72e99..93234f9630 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -41,6 +41,8 @@
#include "code_generator_mips64.h"
#endif
+#include "base/bit_utils.h"
+#include "base/bit_utils_iterator.h"
#include "bytecode_utils.h"
#include "class_linker.h"
#include "compiled_method.h"
@@ -58,7 +60,7 @@
#include "parallel_move_resolver.h"
#include "ssa_liveness_analysis.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils/assembler.h"
namespace art {
@@ -337,7 +339,7 @@ void CodeGenerator::CreateCommonInvokeLocationSummary(
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
locations->SetInAt(call->GetSpecialInputIndex(), visitor->GetMethodLocation());
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod:
+ case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall:
locations->AddTemp(visitor->GetMethodLocation());
locations->SetInAt(call->GetSpecialInputIndex(), Location::RequiresRegister());
break;
@@ -350,6 +352,34 @@ void CodeGenerator::CreateCommonInvokeLocationSummary(
}
}
+void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
+ MoveConstant(temp, invoke->GetDexMethodIndex());
+
+ // The access check is unnecessary, but we do not want to introduce
+ // extra entrypoints for the codegens that do not support some
+ // invoke types and fall back to the runtime call.
+
+ // Initialize to anything to silence compiler warnings.
+ QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
+ switch (invoke->GetInvokeType()) {
+ case kStatic:
+ entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
+ break;
+ case kDirect:
+ entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
+ break;
+ case kSuper:
+ entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
+ break;
+ case kVirtual:
+ case kInterface:
+ LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
+ UNREACHABLE();
+ }
+
+ InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
+}
void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetDexMethodIndex());
@@ -508,7 +538,7 @@ void CodeGenerator::GenerateUnresolvedFieldAccess(
void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
Location runtime_type_index_location,
Location runtime_return_location) {
- DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
+ DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
DCHECK_EQ(cls->InputCount(), 1u);
LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetArena()) LocationSummary(
cls, LocationSummary::kCallOnMainOnly);
@@ -518,7 +548,7 @@ void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
}
void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
- DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
+ DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
LocationSummary* locations = cls->GetLocations();
MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
if (cls->NeedsAccessCheck()) {
@@ -557,6 +587,9 @@ void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
}
void CodeGenerator::AllocateLocations(HInstruction* instruction) {
+ for (HEnvironment* env = instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) {
+ env->AllocateLocations();
+ }
instruction->Accept(GetLocationBuilder());
DCHECK(CheckTypeConsistency(instruction));
LocationSummary* locations = instruction->GetLocations();
@@ -1400,20 +1433,6 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
locations->AddTemp(Location::RequiresRegister());
}
-uint32_t CodeGenerator::GetReferenceSlowFlagOffset() const {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
- DCHECK(klass->IsInitialized());
- return klass->GetSlowPathFlagOffset().Uint32Value();
-}
-
-uint32_t CodeGenerator::GetReferenceDisableFlagOffset() const {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
- DCHECK(klass->IsInitialized());
- return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
-}
-
void CodeGenerator::EmitJitRoots(uint8_t* code,
Handle<mirror::ObjectArray<mirror::Object>> roots,
const uint8_t* roots_data) {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index c2b2ebfade..7bf43f7971 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -31,6 +31,7 @@
#include "nodes.h"
#include "optimizing_compiler_stats.h"
#include "read_barrier_option.h"
+#include "stack.h"
#include "stack_map_stream.h"
#include "string_reference.h"
#include "type_reference.h"
@@ -495,6 +496,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
static void CreateCommonInvokeLocationSummary(
HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor);
+ void GenerateInvokeStaticOrDirectRuntimeCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path);
void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);
void GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke);
@@ -541,7 +544,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
case HLoadString::LoadKind::kBssEntry:
DCHECK(load->NeedsEnvironment());
return LocationSummary::kCallOnSlowPath;
- case HLoadString::LoadKind::kDexCacheViaMethod:
+ case HLoadString::LoadKind::kRuntimeCall:
DCHECK(load->NeedsEnvironment());
return LocationSummary::kCallOnMainOnly;
case HLoadString::LoadKind::kJitTableAddress:
@@ -563,18 +566,17 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
HInvokeStaticOrDirect* invoke) = 0;
// Generate a call to a static or direct method.
- virtual void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) = 0;
+ virtual void GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) = 0;
// Generate a call to a virtual method.
- virtual void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) = 0;
+ virtual void GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) = 0;
// Copy the result of a call into the given target.
virtual void MoveFromReturnRegister(Location trg, Primitive::Type type) = 0;
virtual void GenerateNop() = 0;
- uint32_t GetReferenceSlowFlagOffset() const;
- uint32_t GetReferenceDisableFlagOffset() const;
-
static QuickEntrypointEnum GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass);
protected:
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index c66bd77d6b..e4efbef394 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -19,6 +19,8 @@
#include "arch/arm/asm_support_arm.h"
#include "arch/arm/instruction_set_features_arm.h"
#include "art_method.h"
+#include "base/bit_utils.h"
+#include "base/bit_utils_iterator.h"
#include "code_generator_utils.h"
#include "common_arm.h"
#include "compiled_method.h"
@@ -47,7 +49,6 @@ static bool ExpectedPairLayout(Location location) {
return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
}
-static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = R0;
static constexpr Register kCoreAlwaysSpillRegister = R5;
@@ -2396,8 +2397,8 @@ CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -3554,18 +3555,10 @@ void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invok
IntrinsicLocationsBuilderARM intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
- if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeDexCache()) {
- invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
- }
return;
}
HandleInvoke(invoke);
-
- // For PC-relative dex cache the invoke has an extra input, the PC-relative address base.
- if (invoke->HasPcRelativeDexCache()) {
- invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
- }
}
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
@@ -3589,7 +3582,6 @@ void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirec
LocationSummary* locations = invoke->GetLocations();
codegen_->GenerateStaticOrDirectCall(
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
@@ -3613,7 +3605,6 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
@@ -7137,7 +7128,7 @@ HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kBootImageAddress:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
break;
}
return desired_class_load_kind;
@@ -7145,7 +7136,7 @@ HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
@@ -7198,7 +7189,7 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
// move.
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
@@ -7270,7 +7261,7 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -7332,7 +7323,7 @@ HLoadString::LoadKind CodeGeneratorARM::GetSupportedLoadStringKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kBootImageAddress:
- case HLoadString::LoadKind::kDexCacheViaMethod:
+ case HLoadString::LoadKind::kRuntimeCall:
break;
}
return desired_string_load_kind;
@@ -7342,7 +7333,7 @@ void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
- if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
locations->SetOut(Location::RegisterLocation(R0));
} else {
locations->SetOut(Location::RequiresRegister());
@@ -7429,7 +7420,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) NO_THREAD_S
}
// TODO: Consider re-adding the compiler code to do string dex cache lookup again.
- DCHECK(load_kind == HLoadString::LoadKind::kDexCacheViaMethod);
+ DCHECK(load_kind == HLoadString::LoadKind::kRuntimeCall);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
__ LoadImmediate(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
@@ -8946,7 +8937,8 @@ Register CodeGeneratorARM::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOr
// save one load. However, since this is just an intrinsic slow path we prefer this
// simple and more robust approach rather than trying to determine if that's the case.
SlowPathCode* slow_path = GetCurrentSlowPath();
- if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
+ DCHECK(slow_path != nullptr); // For intrinsified invokes the call is emitted on the slow path.
+ if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
__ LoadFromOffset(kLoadWord, temp, SP, stack_offset);
return temp;
@@ -8954,8 +8946,8 @@ Register CodeGeneratorARM::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOr
return location.AsRegister<Register>();
}
-Location CodeGeneratorARM::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
+void CodeGeneratorARM::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
@@ -8983,44 +8975,24 @@ Location CodeGeneratorARM::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticO
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
__ LoadImmediate(temp.AsRegister<Register>(), invoke->GetMethodAddress());
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
- HArmDexCacheArraysBase* base =
- invoke->InputAt(invoke->GetSpecialInputIndex())->AsArmDexCacheArraysBase();
- Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
- temp.AsRegister<Register>());
- int32_t offset = invoke->GetDexCacheArrayOffset() - base->GetElementOffset();
- __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), base_reg, offset);
- break;
- }
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
- Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
- Register method_reg;
- Register reg = temp.AsRegister<Register>();
- if (current_method.IsRegister()) {
- method_reg = current_method.AsRegister<Register>();
- } else {
- DCHECK(invoke->GetLocations()->Intrinsified());
- DCHECK(!current_method.IsValid());
- method_reg = reg;
- __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
- }
- // /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
- __ LoadFromOffset(kLoadWord,
- reg,
- method_reg,
- ArtMethod::DexCacheResolvedMethodsOffset(kArmPointerSize).Int32Value());
- // temp = temp[index_in_cache];
- // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
- uint32_t index_in_cache = invoke->GetDexMethodIndex();
- __ LoadFromOffset(kLoadWord, reg, reg, CodeGenerator::GetCachePointerOffset(index_in_cache));
+ case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
+ Register temp_reg = temp.AsRegister<Register>();
+ PcRelativePatchInfo* labels = NewMethodBssEntryPatch(
+ MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(temp_reg, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(temp_reg, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(temp_reg, temp_reg, ShifterOperand(PC));
+ __ LoadFromOffset(kLoadWord, temp_reg, temp_reg, /* offset */ 0);
break;
}
+ case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
+ GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
+ return; // No code pointer retrieval; the runtime performs the call directly.
+ }
}
- return callee_method;
-}
-
-void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
- Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
@@ -9035,11 +9007,13 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
__ blx(LR);
break;
}
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
DCHECK(!IsLeafMethod());
}
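
    For the kBssEntry case above, the movw/movt immediates are placeholders that the relative
    patcher later fills with the displacement from the PC observed by the `add pc` instruction
    to the method's .bss slot. A hypothetical illustration of that split (the +4 pipeline
    adjustment assumes a Thumb2 encoding; the exact adjustment used by ART's patcher is not
    taken from this diff):

    #include <cstdint>

    struct MovwMovtImmediates {
      uint16_t movw_imm;  // Low 16 bits of the displacement.
      uint16_t movt_imm;  // High 16 bits of the displacement.
    };

    // Split the displacement from the `add pc` anchor to the target .bss slot
    // into the two 16-bit immediates patched into movw/movt.
    MovwMovtImmediates SplitDisplacement(uint32_t add_pc_address, uint32_t target_bss_address) {
      uint32_t pc_value = add_pc_address + 4u;  // Assumed Thumb2 PC read (anchor + 4).
      uint32_t displacement = target_bss_address - pc_value;
      MovwMovtImmediates result;
      result.movw_imm = static_cast<uint16_t>(displacement & 0xFFFFu);
      result.movt_imm = static_cast<uint16_t>(displacement >> 16);
      return result;
    }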
-void CodeGeneratorARM::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
+void CodeGeneratorARM::GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp_location, SlowPathCode* slow_path) {
Register temp = temp_location.AsRegister<Register>();
uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
@@ -9070,6 +9044,7 @@ void CodeGeneratorARM::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
// LR();
__ blx(LR);
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeMethodPatch(
@@ -9079,6 +9054,13 @@ CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeMethodPatc
&pc_relative_method_patches_);
}
+CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewMethodBssEntryPatch(
+ MethodReference target_method) {
+ return NewPcRelativePatch(*target_method.dex_file,
+ target_method.dex_method_index,
+ &method_bss_entry_patches_);
+}
+
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeTypePatch(
const DexFile& dex_file, dex::TypeIndex type_index) {
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
@@ -9094,11 +9076,6 @@ CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeStringPatc
return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
-CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeDexCacheArrayPatch(
- const DexFile& dex_file, uint32_t element_offset) {
- return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
-}
-
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativePatch(
const DexFile& dex_file, uint32_t offset_or_index, ArenaDeque<PcRelativePatchInfo>* patches) {
patches->emplace_back(dex_file, offset_or_index);
@@ -9157,15 +9134,13 @@ inline void CodeGeneratorARM::EmitPcRelativeLinkerPatches(
void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
- /* MOVW+MOVT for each entry */ 2u * pc_relative_dex_cache_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_method_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * method_bss_entry_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
baker_read_barrier_patches_.size();
linker_patches->reserve(size);
- EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
- linker_patches);
if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(pc_relative_method_patches_,
linker_patches);
@@ -9179,6 +9154,8 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::MethodBssEntryPatch>(method_bss_entry_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
@@ -9315,23 +9292,6 @@ void InstructionCodeGeneratorARM::VisitPackedSwitch(HPackedSwitch* switch_instr)
}
}
-void LocationsBuilderARM::VisitArmDexCacheArraysBase(HArmDexCacheArraysBase* base) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(base);
- locations->SetOut(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorARM::VisitArmDexCacheArraysBase(HArmDexCacheArraysBase* base) {
- Register base_reg = base->GetLocations()->Out().AsRegister<Register>();
- CodeGeneratorARM::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeDexCacheArrayPatch(base->GetDexFile(), base->GetElementOffset());
- __ BindTrackedLabel(&labels->movw_label);
- __ movw(base_reg, /* placeholder */ 0u);
- __ BindTrackedLabel(&labels->movt_label);
- __ movt(base_reg, /* placeholder */ 0u);
- __ BindTrackedLabel(&labels->add_pc_label);
- __ add(base_reg, base_reg, ShifterOperand(PC));
-}
-
void CodeGeneratorARM::MoveFromReturnRegister(Location trg, Primitive::Type type) {
if (!trg.IsValid()) {
DCHECK_EQ(type, Primitive::kPrimVoid);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 2409a4d38d..9280e6377c 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -455,9 +455,10 @@ class CodeGeneratorARM : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke) OVERRIDE;
- Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
- void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
+ void GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ void GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
@@ -482,12 +483,11 @@ class CodeGeneratorARM : public CodeGenerator {
};
PcRelativePatchInfo* NewPcRelativeMethodPatch(MethodReference target_method);
+ PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
dex::StringIndex string_index);
- PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
- uint32_t element_offset);
// Add a new baker read barrier patch and return the label to be bound
// before the BNE instruction.
@@ -668,10 +668,10 @@ class CodeGeneratorARM : public CodeGenerator {
// Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
Uint32ToLiteralMap uint32_literals_;
- // PC-relative patch info for each HArmDexCacheArraysBase.
- ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
// PC-relative method patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_method_patches_;
+ // PC-relative method patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 7d9c61b76c..34397e66bc 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -19,6 +19,8 @@
#include "arch/arm64/asm_support_arm64.h"
#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method.h"
+#include "base/bit_utils.h"
+#include "base/bit_utils_iterator.h"
#include "code_generator_utils.h"
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -78,7 +80,6 @@ using helpers::VIXLRegCodeFromART;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
-static constexpr int kCurrentMethodStackOffset = 0;
// The compare/jump sequence will generate about (1.5 * num_entries + 3) instructions. While jump
// table version generates 7 instructions and num_entries literals. Compare/jump sequence will
// generates less code/data with a small num_entries.
@@ -1449,8 +1450,8 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -4497,8 +4498,8 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStatic
return desired_dispatch_info;
}
-Location CodeGeneratorARM64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
+void CodeGeneratorARM64::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
// Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
@@ -4527,52 +4528,33 @@ Location CodeGeneratorARM64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStati
// Load method address from literal pool.
__ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress()));
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
+ case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
// Add ADRP with its PC-relative DexCache access patch.
- const DexFile& dex_file = invoke->GetDexFileForPcRelativeDexCache();
- uint32_t element_offset = invoke->GetDexCacheArrayOffset();
- vixl::aarch64::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
+ MethodReference target_method(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex());
+ vixl::aarch64::Label* adrp_label = NewMethodBssEntryPatch(target_method);
EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
// Add LDR with its PC-relative DexCache access patch.
vixl::aarch64::Label* ldr_label =
- NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
+ NewMethodBssEntryPatch(target_method, adrp_label);
EmitLdrOffsetPlaceholder(ldr_label, XRegisterFrom(temp), XRegisterFrom(temp));
break;
}
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
- Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
- Register reg = XRegisterFrom(temp);
- Register method_reg;
- if (current_method.IsRegister()) {
- method_reg = XRegisterFrom(current_method);
- } else {
- DCHECK(invoke->GetLocations()->Intrinsified());
- DCHECK(!current_method.IsValid());
- method_reg = reg;
- __ Ldr(reg.X(), MemOperand(sp, kCurrentMethodStackOffset));
- }
-
- // /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
- __ Ldr(reg.X(),
- MemOperand(method_reg.X(),
- ArtMethod::DexCacheResolvedMethodsOffset(kArm64PointerSize).Int32Value()));
- // temp = temp[index_in_cache];
- // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
- uint32_t index_in_cache = invoke->GetDexMethodIndex();
- __ Ldr(reg.X(), MemOperand(reg.X(), GetCachePointerOffset(index_in_cache)));
- break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
+ GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
+ return; // No code pointer retrieval; the runtime performs the call directly.
}
}
- return callee_method;
-}
-
-void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
- // All registers are assumed to be correctly set up.
- Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
- __ Bl(&frame_entry_label_);
+ {
+ // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
+ ExactAssemblyScope eas(GetVIXLAssembler(),
+ kInstructionSize,
+ CodeBufferCheckScope::kExactSize);
+ __ bl(&frame_entry_label_);
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
+ }
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// LR = callee_method->entry_point_from_quick_compiled_code_;
@@ -4580,14 +4562,13 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
XRegisterFrom(callee_method),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize).Int32Value()));
{
- // To ensure that the pc position is recorded immediately after the `blr` instruction
- // BLR must be the last instruction emitted in this function.
- // Recording the pc will occur right after returning from this function.
+ // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
ExactAssemblyScope eas(GetVIXLAssembler(),
kInstructionSize,
CodeBufferCheckScope::kExactSize);
// lr()
__ blr(lr);
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
break;
}
@@ -4595,7 +4576,8 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
DCHECK(!IsLeafMethod());
}
-void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_in) {
+void CodeGeneratorARM64::GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp_in, SlowPathCode* slow_path) {
// Use the calling convention instead of the location of the receiver, as
// intrinsics may have put the receiver in a different register. In the intrinsics
// slow path, the arguments have been moved to the right place, so here we are
@@ -4629,12 +4611,11 @@ void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location te
// lr = temp->GetEntryPoint();
__ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
{
- // To ensure that the pc position is recorded immediately after the `blr` instruction
- // BLR should be the last instruction emitted in this function.
- // Recording the pc will occur right after returning from this function.
+ // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
// lr();
__ blr(lr);
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
}
@@ -4655,6 +4636,15 @@ vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeMethodPatch(
&pc_relative_method_patches_);
}
+vixl::aarch64::Label* CodeGeneratorARM64::NewMethodBssEntryPatch(
+ MethodReference target_method,
+ vixl::aarch64::Label* adrp_label) {
+ return NewPcRelativePatch(*target_method.dex_file,
+ target_method.dex_method_index,
+ adrp_label,
+ &method_bss_entry_patches_);
+}
+
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeTypePatch(
const DexFile& dex_file,
dex::TypeIndex type_index,
@@ -4677,13 +4667,6 @@ vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeStringPatch(
NewPcRelativePatch(dex_file, string_index.index_, adrp_label, &pc_relative_string_patches_);
}
-vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeDexCacheArrayPatch(
- const DexFile& dex_file,
- uint32_t element_offset,
- vixl::aarch64::Label* adrp_label) {
- return NewPcRelativePatch(dex_file, element_offset, adrp_label, &pc_relative_dex_cache_patches_);
-}
-
vixl::aarch64::Label* CodeGeneratorARM64::NewBakerReadBarrierPatch(uint32_t custom_data) {
baker_read_barrier_patches_.emplace_back(custom_data);
return &baker_read_barrier_patches_.back().label;
@@ -4705,7 +4688,7 @@ vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativePatch(
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddressLiteral(
uint64_t address) {
- return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
+ return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address));
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral(
@@ -4768,19 +4751,13 @@ inline void CodeGeneratorARM64::EmitPcRelativeLinkerPatches(
void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
- pc_relative_dex_cache_patches_.size() +
pc_relative_method_patches_.size() +
+ method_bss_entry_patches_.size() +
pc_relative_type_patches_.size() +
type_bss_entry_patches_.size() +
pc_relative_string_patches_.size() +
baker_read_barrier_patches_.size();
linker_patches->reserve(size);
- for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
- linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.GetLocation(),
- &info.target_dex_file,
- info.pc_insn_label->GetLocation(),
- info.offset_or_index));
- }
if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(pc_relative_method_patches_,
linker_patches);
@@ -4794,6 +4771,8 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::MethodBssEntryPatch>(method_bss_entry_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
@@ -4803,9 +4782,8 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
DCHECK_EQ(size, linker_patches->size());
}
-vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value,
- Uint32ToLiteralMap* map) {
- return map->GetOrCreate(
+vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value) {
+ return uint32_literals_.GetOrCreate(
value,
[this, value]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(value); });
}
@@ -4831,7 +4809,6 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
LocationSummary* locations = invoke->GetLocations();
codegen_->GenerateStaticOrDirectCall(
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -4844,7 +4821,6 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
EmissionCheckScope guard(GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
@@ -4863,7 +4839,7 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kBootImageAddress:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
break;
}
return desired_class_load_kind;
@@ -4871,7 +4847,7 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
@@ -4916,7 +4892,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
// move.
void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
@@ -4998,7 +4974,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
read_barrier_option);
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -5054,7 +5030,7 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kBootImageAddress:
- case HLoadString::LoadKind::kDexCacheViaMethod:
+ case HLoadString::LoadKind::kRuntimeCall:
break;
}
return desired_string_load_kind;
@@ -5063,7 +5039,7 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
- if (load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod) {
+ if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(calling_convention.GetReturnLocation(load->GetType()));
} else {
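
Side note for readers: the arm64 hunks above keep extending one recurring pattern — each patch kind lives in its own ArenaDeque, EmitLinkerPatches reserves the summed size up front, emits each kind in turn, and a final DCHECK compares the reserved size with what was actually emitted. Below is a minimal standalone sketch of that pattern; the types are hypothetical stand-ins, not the real PcRelativePatchInfo/LinkerPatch classes.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <deque>
    #include <vector>

    // Hypothetical stand-ins for the patch bookkeeping types.
    enum class PatchKind { kMethodRelative, kMethodBssEntry };

    struct PatchInfo {
      uint32_t label_offset;  // Code offset of the instruction to patch.
      uint32_t target_index;  // Dex index the patch refers to.
    };

    struct LinkerPatch {
      PatchKind kind;
      uint32_t code_offset;
      uint32_t target_index;
    };

    // Reserve-then-emit: the size is computed from the per-kind deques and
    // checked at the end, so a deque added on one side but not the other
    // fails fast.
    void EmitAllPatches(const std::deque<PatchInfo>& method_patches,
                        const std::deque<PatchInfo>& method_bss_entry_patches,
                        std::vector<LinkerPatch>* linker_patches) {
      assert(linker_patches->empty());
      size_t size = method_patches.size() + method_bss_entry_patches.size();
      linker_patches->reserve(size);
      for (const PatchInfo& info : method_patches) {
        linker_patches->push_back(
            {PatchKind::kMethodRelative, info.label_offset, info.target_index});
      }
      for (const PatchInfo& info : method_bss_entry_patches) {
        linker_patches->push_back(
            {PatchKind::kMethodBssEntry, info.label_offset, info.target_index});
      }
      assert(size == linker_patches->size());
    }
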
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7a4b3d4805..d9c49d19bb 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -540,9 +540,10 @@ class CodeGeneratorARM64 : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke) OVERRIDE;
- Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
- void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
+ void GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ void GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE {
@@ -556,6 +557,13 @@ class CodeGeneratorARM64 : public CodeGenerator {
vixl::aarch64::Label* NewPcRelativeMethodPatch(MethodReference target_method,
vixl::aarch64::Label* adrp_label = nullptr);
+ // Add a new .bss entry method patch for an instruction and return
+ // the label to be bound before the instruction. The instruction will be
+ // either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label`
+ // pointing to the associated ADRP patch label).
+ vixl::aarch64::Label* NewMethodBssEntryPatch(MethodReference target_method,
+ vixl::aarch64::Label* adrp_label = nullptr);
+
// Add a new PC-relative type patch for an instruction and return the label
// to be bound before the instruction. The instruction will be either the
// ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
@@ -580,15 +588,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
dex::StringIndex string_index,
vixl::aarch64::Label* adrp_label = nullptr);
- // Add a new PC-relative dex cache array patch for an instruction and return
- // the label to be bound before the instruction. The instruction will be
- // either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label`
- // pointing to the associated ADRP patch label).
- vixl::aarch64::Label* NewPcRelativeDexCacheArrayPatch(
- const DexFile& dex_file,
- uint32_t element_offset,
- vixl::aarch64::Label* adrp_label = nullptr);
-
// Add a new baker read barrier patch and return the label to be bound
// before the CBNZ instruction.
vixl::aarch64::Label* NewBakerReadBarrierPatch(uint32_t custom_data);
@@ -740,8 +739,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
vixl::aarch64::Literal<uint32_t>*,
TypeReferenceValueComparator>;
- vixl::aarch64::Literal<uint32_t>* DeduplicateUint32Literal(uint32_t value,
- Uint32ToLiteralMap* map);
+ vixl::aarch64::Literal<uint32_t>* DeduplicateUint32Literal(uint32_t value);
vixl::aarch64::Literal<uint64_t>* DeduplicateUint64Literal(uint64_t value);
// The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays
@@ -792,10 +790,10 @@ class CodeGeneratorARM64 : public CodeGenerator {
Uint32ToLiteralMap uint32_literals_;
// Deduplication map for 64-bit literals, used for non-patchable method address or method code.
Uint64ToLiteralMap uint64_literals_;
- // PC-relative DexCache access info.
- ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
// PC-relative method patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_method_patches_;
+ // PC-relative method patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
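
The NewMethodBssEntryPatch comment above describes an ADRP/LDR pair that shares one target: the ADRP materializes the 4 KiB page of the .bss slot relative to the current PC's page, and the LDR supplies the low 12 bits as its immediate offset — which is why two labels (and two patches) are needed per entry. The addressing arithmetic can be sketched as follows; this illustrates the AArch64 page/offset split, not ART's encoder.

    #include <cassert>
    #include <cstdint>

    struct AdrpLdrSplit {
      int64_t adrp_page_delta;  // Signed page displacement encoded in the ADRP.
      uint32_t ldr_offset;      // Low 12 bits of the target, encoded in the LDR.
    };

    AdrpLdrSplit SplitTarget(uint64_t pc, uint64_t target) {
      int64_t page_delta = static_cast<int64_t>((target & ~UINT64_C(0xFFF)) -
                                                (pc & ~UINT64_C(0xFFF)));
      return {page_delta >> 12, static_cast<uint32_t>(target & 0xFFF)};
    }

    uint64_t Resolve(uint64_t pc, const AdrpLdrSplit& split) {
      // ADRP: base = page(pc) + (delta << 12); LDR: base + low-12-bit offset.
      // (The real LDR immediate is additionally scaled by the access size;
      // .bss slots are pointer-aligned, so that detail is elided here.)
      uint64_t page = (pc & ~UINT64_C(0xFFF)) +
                      (static_cast<uint64_t>(split.adrp_page_delta) << 12);
      return page + split.ldr_offset;
    }

    // For any pc/target within ADRP's +/-4 GiB range:
    //   assert(Resolve(pc, SplitTarget(pc, target)) == target);
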
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 1f8e1efd5e..c6bd871bc5 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -19,6 +19,8 @@
#include "arch/arm/asm_support_arm.h"
#include "arch/arm/instruction_set_features_arm.h"
#include "art_method.h"
+#include "base/bit_utils.h"
+#include "base/bit_utils_iterator.h"
#include "code_generator_utils.h"
#include "common_arm.h"
#include "compiled_method.h"
@@ -76,7 +78,6 @@ static bool ExpectedPairLayout(Location location) {
// Use a local definition to prevent copying mistakes.
static constexpr size_t kArmWordSize = static_cast<size_t>(kArmPointerSize);
static constexpr size_t kArmBitsPerWord = kArmWordSize * kBitsPerByte;
-static constexpr int kCurrentMethodStackOffset = 0;
static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
// Reference load (except object array loads) is using LDR Rt, [Rn, #offset] which can handle
@@ -2500,8 +2501,8 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -3643,18 +3644,10 @@ void LocationsBuilderARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* i
IntrinsicLocationsBuilderARMVIXL intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
- if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeDexCache()) {
- invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
- }
return;
}
HandleInvoke(invoke);
-
- // For PC-relative dex cache the invoke has an extra input, the PC-relative address base.
- if (invoke->HasPcRelativeDexCache()) {
- invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
- }
}
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARMVIXL* codegen) {
@@ -3678,7 +3671,6 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrD
LocationSummary* locations = invoke->GetLocations();
codegen_->GenerateStaticOrDirectCall(
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderARMVIXL::HandleInvoke(HInvoke* invoke) {
@@ -3701,7 +3693,6 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke)
}
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
DCHECK(!codegen_->IsLeafMethod());
}
@@ -7252,7 +7243,7 @@ HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kBootImageAddress:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
break;
}
return desired_class_load_kind;
@@ -7260,7 +7251,7 @@ HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConventionARMVIXL calling_convention;
CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
@@ -7313,7 +7304,7 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
// move.
void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
@@ -7375,7 +7366,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -7444,7 +7435,7 @@ HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kBootImageAddress:
- case HLoadString::LoadKind::kDexCacheViaMethod:
+ case HLoadString::LoadKind::kRuntimeCall:
break;
}
return desired_string_load_kind;
@@ -7454,7 +7445,7 @@ void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
- if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
locations->SetOut(LocationFrom(r0));
} else {
locations->SetOut(Location::RequiresRegister());
@@ -7532,7 +7523,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
}
// TODO: Re-add the compiler code to do string dex cache lookup again.
- DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kDexCacheViaMethod);
+ DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kRuntimeCall);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
__ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
@@ -9119,8 +9110,8 @@ vixl32::Register CodeGeneratorARMVIXL::GetInvokeStaticOrDirectExtraParameter(
return RegisterFrom(location);
}
-Location CodeGeneratorARMVIXL::GenerateCalleeMethodStaticOrDirectCall(
- HInvokeStaticOrDirect* invoke, Location temp) {
+void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
@@ -9143,50 +9134,30 @@ Location CodeGeneratorARMVIXL::GenerateCalleeMethodStaticOrDirectCall(
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
__ Mov(RegisterFrom(temp), Operand::From(invoke->GetMethodAddress()));
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
- HArmDexCacheArraysBase* base =
- invoke->InputAt(invoke->GetSpecialInputIndex())->AsArmDexCacheArraysBase();
- vixl32::Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke, RegisterFrom(temp));
- int32_t offset = invoke->GetDexCacheArrayOffset() - base->GetElementOffset();
- GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(temp), base_reg, offset);
+ case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
+ PcRelativePatchInfo* labels = NewMethodBssEntryPatch(
+ MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
+ vixl32::Register temp_reg = RegisterFrom(temp);
+ EmitMovwMovtPlaceholder(labels, temp_reg);
+ GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, temp_reg, /* offset */ 0);
break;
}
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
- Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
- vixl32::Register method_reg;
- vixl32::Register reg = RegisterFrom(temp);
- if (current_method.IsRegister()) {
- method_reg = RegisterFrom(current_method);
- } else {
- DCHECK(invoke->GetLocations()->Intrinsified());
- DCHECK(!current_method.IsValid());
- method_reg = reg;
- GetAssembler()->LoadFromOffset(kLoadWord, reg, sp, kCurrentMethodStackOffset);
- }
- // /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
- GetAssembler()->LoadFromOffset(
- kLoadWord,
- reg,
- method_reg,
- ArtMethod::DexCacheResolvedMethodsOffset(kArmPointerSize).Int32Value());
- // temp = temp[index_in_cache];
- // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
- uint32_t index_in_cache = invoke->GetDexMethodIndex();
- GetAssembler()->LoadFromOffset(
- kLoadWord, reg, reg, CodeGenerator::GetCachePointerOffset(index_in_cache));
- break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
+ GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
+ return; // No code pointer retrieval; the runtime performs the call directly.
}
}
- return callee_method;
-}
-
-void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
- Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
- __ Bl(GetFrameEntryLabel());
+ {
+ // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k32BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ __ bl(GetFrameEntryLabel());
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
+ }
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// LR = callee_method->entry_point_from_quick_compiled_code_
@@ -9196,12 +9167,14 @@ void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* inv
RegisterFrom(callee_method),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
{
+ // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
// blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
ExactAssemblyScope aas(GetVIXLAssembler(),
vixl32::k16BitT32InstructionSizeInBytes,
CodeBufferCheckScope::kExactSize);
// LR()
__ blx(lr);
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
break;
}
@@ -9209,7 +9182,8 @@ void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* inv
DCHECK(!IsLeafMethod());
}
-void CodeGeneratorARMVIXL::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
+void CodeGeneratorARMVIXL::GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp_location, SlowPathCode* slow_path) {
vixl32::Register temp = RegisterFrom(temp_location);
uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
@@ -9245,15 +9219,16 @@ void CodeGeneratorARMVIXL::GenerateVirtualCall(HInvokeVirtual* invoke, Location
GetAssembler()->LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, entry_point);
- // LR();
- // This `blx` *must* be the *last* instruction generated by this stub, so that calls to
- // `RecordPcInfo()` immediately following record the correct pc. Use a scope to help guarantee
- // that.
- // blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
- ExactAssemblyScope aas(GetVIXLAssembler(),
- vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
- __ blx(lr);
+ {
+ // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
+ // blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+ // LR();
+ __ blx(lr);
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
+ }
}
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeMethodPatch(
@@ -9263,6 +9238,13 @@ CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeMe
&pc_relative_method_patches_);
}
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewMethodBssEntryPatch(
+ MethodReference target_method) {
+ return NewPcRelativePatch(*target_method.dex_file,
+ target_method.dex_method_index,
+ &method_bss_entry_patches_);
+}
+
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeTypePatch(
const DexFile& dex_file, dex::TypeIndex type_index) {
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
@@ -9278,11 +9260,6 @@ CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeSt
return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
-CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeDexCacheArrayPatch(
- const DexFile& dex_file, uint32_t element_offset) {
- return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
-}
-
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativePatch(
const DexFile& dex_file, uint32_t offset_or_index, ArenaDeque<PcRelativePatchInfo>* patches) {
patches->emplace_back(dex_file, offset_or_index);
@@ -9346,15 +9323,13 @@ inline void CodeGeneratorARMVIXL::EmitPcRelativeLinkerPatches(
void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
- /* MOVW+MOVT for each entry */ 2u * pc_relative_dex_cache_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_method_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * method_bss_entry_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
baker_read_barrier_patches_.size();
linker_patches->reserve(size);
- EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
- linker_patches);
if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(pc_relative_method_patches_,
linker_patches);
@@ -9368,6 +9343,8 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::MethodBssEntryPatch>(method_bss_entry_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
@@ -9517,17 +9494,6 @@ void InstructionCodeGeneratorARMVIXL::VisitPackedSwitch(HPackedSwitch* switch_in
}
}
}
-void LocationsBuilderARMVIXL::VisitArmDexCacheArraysBase(HArmDexCacheArraysBase* base) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(base);
- locations->SetOut(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitArmDexCacheArraysBase(HArmDexCacheArraysBase* base) {
- vixl32::Register base_reg = OutputRegister(base);
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeDexCacheArrayPatch(base->GetDexFile(), base->GetElementOffset());
- codegen_->EmitMovwMovtPlaceholder(labels, base_reg);
-}
// Copy the result of a call into the given target.
void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, Primitive::Type type) {
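
On 32-bit ARM the same .bss entry is reached through EmitMovwMovtPlaceholder: a MOVW/MOVT pair materializes a 32-bit PC-relative offset, so each entry costs two instructions and two linker patches — which is where the `2u *` factors in the size computation above come from. A standalone sketch of the MOVW/MOVT split (not the real assembler calls):

    #include <cassert>
    #include <cstdint>

    struct MovwMovt {
      uint16_t movw_imm;  // MOVW writes the low 16 bits and zero-extends.
      uint16_t movt_imm;  // MOVT overwrites the high 16 bits, keeping the low half.
    };

    MovwMovt Split32(uint32_t value) {
      return {static_cast<uint16_t>(value & 0xFFFFu),
              static_cast<uint16_t>(value >> 16)};
    }

    uint32_t Materialize(const MovwMovt& halves) {
      uint32_t reg = halves.movw_imm;                             // movw r, #lo
      reg = (reg & 0xFFFFu) | (uint32_t{halves.movt_imm} << 16);  // movt r, #hi
      return reg;  // Equal to the original 32-bit value for any input.
    }
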
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index ef809510ad..805a3f4366 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -538,9 +538,10 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke) OVERRIDE;
- Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
- void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
+ void GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ void GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
@@ -565,12 +566,11 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
};
PcRelativePatchInfo* NewPcRelativeMethodPatch(MethodReference target_method);
+ PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
dex::StringIndex string_index);
- PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
- uint32_t element_offset);
// Add a new baker read barrier patch and return the label to be bound
// before the BNE instruction.
@@ -765,10 +765,10 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
// Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
Uint32ToLiteralMap uint32_literals_;
- // PC-relative patch info for each HArmDexCacheArraysBase.
- ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
// PC-relative method patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_method_patches_;
+ // PC-relative method patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 1978534112..b39d412ac2 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -40,10 +40,6 @@ namespace mips {
static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = A0;
-// We'll maximize the range of a single load instruction for dex cache array accesses
-// by aligning offset -32768 with the offset of the first used element.
-static constexpr uint32_t kDexCacheArrayLwOffset = 0x8000;
-
Location MipsReturnLocation(Primitive::Type return_type) {
switch (return_type) {
case Primitive::kPrimBoolean:
@@ -1060,8 +1056,8 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -1602,14 +1598,12 @@ inline void CodeGeneratorMIPS::EmitPcRelativeLinkerPatches(
void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
- pc_relative_dex_cache_patches_.size() +
pc_relative_method_patches_.size() +
+ method_bss_entry_patches_.size() +
pc_relative_type_patches_.size() +
type_bss_entry_patches_.size() +
pc_relative_string_patches_.size();
linker_patches->reserve(size);
- EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
- linker_patches);
if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(pc_relative_method_patches_,
linker_patches);
@@ -1623,6 +1617,8 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::MethodBssEntryPatch>(method_bss_entry_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
DCHECK_EQ(size, linker_patches->size());
@@ -1635,6 +1631,13 @@ CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeMethodPa
&pc_relative_method_patches_);
}
+CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewMethodBssEntryPatch(
+ MethodReference target_method) {
+ return NewPcRelativePatch(*target_method.dex_file,
+ target_method.dex_method_index,
+ &method_bss_entry_patches_);
+}
+
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeTypePatch(
const DexFile& dex_file, dex::TypeIndex type_index) {
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
@@ -1650,11 +1653,6 @@ CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeStringPa
return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
-CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeDexCacheArrayPatch(
- const DexFile& dex_file, uint32_t element_offset) {
- return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
-}
-
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativePatch(
const DexFile& dex_file, uint32_t offset_or_index, ArenaDeque<PcRelativePatchInfo>* patches) {
patches->emplace_back(dex_file, offset_or_index);
@@ -7000,7 +6998,7 @@ HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
// We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
// is incompatible with it.
- // TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
+ // TODO: Create as many HMipsComputeBaseMethodAddress instructions as needed for methods
// with irreducible loops.
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
bool is_r6 = GetInstructionSetFeatures().IsR6();
@@ -7016,12 +7014,12 @@ HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
DCHECK(Runtime::Current()->UseJitCompilation());
fallback_load = false;
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
+ case HLoadString::LoadKind::kRuntimeCall:
fallback_load = false;
break;
}
if (fallback_load) {
- desired_string_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
+ desired_string_load_kind = HLoadString::LoadKind::kRuntimeCall;
}
return desired_string_load_kind;
}
@@ -7030,6 +7028,8 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
// We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
// is incompatible with it.
+ // TODO: Create as many HMipsComputeBaseMethodAddress instructions as needed for methods
+ // with irreducible loops.
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
bool is_r6 = GetInstructionSetFeatures().IsR6();
bool fallback_load = has_irreducible_loops && !is_r6;
@@ -7050,12 +7050,12 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
DCHECK(Runtime::Current()->UseJitCompilation());
fallback_load = false;
break;
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
fallback_load = false;
break;
}
if (fallback_load) {
- desired_class_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ desired_class_load_kind = HLoadClass::LoadKind::kRuntimeCall;
}
return desired_class_load_kind;
}
@@ -7093,25 +7093,28 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticO
HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
// We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
// is incompatible with it.
+ // TODO: Create as many HMipsComputeBaseMethodAddress instructions as needed for methods
+ // with irreducible loops.
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
bool is_r6 = GetInstructionSetFeatures().IsR6();
bool fallback_load = has_irreducible_loops && !is_r6;
switch (dispatch_info.method_load_kind) {
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+ case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry:
break;
default:
fallback_load = false;
break;
}
if (fallback_load) {
- dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
+ dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall;
dispatch_info.method_load_data = 0;
}
return dispatch_info;
}
-void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
// All registers are assumed to be correctly set up per the calling convention.
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
@@ -7148,51 +7151,20 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
__ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- if (is_r6) {
- uint32_t offset = invoke->GetDexCacheArrayOffset();
- CodeGeneratorMIPS::PcRelativePatchInfo* info =
- NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset);
- bool reordering = __ SetReorder(false);
- EmitPcRelativeAddressPlaceholderHigh(info, TMP, ZERO);
- __ Lw(temp.AsRegister<Register>(), TMP, /* placeholder */ 0x5678);
- __ SetReorder(reordering);
- } else {
- HMipsDexCacheArraysBase* base =
- invoke->InputAt(invoke->GetSpecialInputIndex())->AsMipsDexCacheArraysBase();
- int32_t offset =
- invoke->GetDexCacheArrayOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
- __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), base_reg, offset);
- }
- break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
- Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
- Register reg = temp.AsRegister<Register>();
- Register method_reg;
- if (current_method.IsRegister()) {
- method_reg = current_method.AsRegister<Register>();
- } else {
- // TODO: use the appropriate DCHECK() here if possible.
- // DCHECK(invoke->GetLocations()->Intrinsified());
- DCHECK(!current_method.IsValid());
- method_reg = reg;
- __ Lw(reg, SP, kCurrentMethodStackOffset);
- }
-
- // temp = temp->dex_cache_resolved_methods_;
- __ LoadFromOffset(kLoadWord,
- reg,
- method_reg,
- ArtMethod::DexCacheResolvedMethodsOffset(kMipsPointerSize).Int32Value());
- // temp = temp[index_in_cache];
- // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
- uint32_t index_in_cache = invoke->GetDexMethodIndex();
- __ LoadFromOffset(kLoadWord,
- reg,
- reg,
- CodeGenerator::GetCachePointerOffset(index_in_cache));
+ case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
+ PcRelativePatchInfo* info = NewMethodBssEntryPatch(
+ MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
+ Register temp_reg = temp.AsRegister<Register>();
+ bool reordering = __ SetReorder(false);
+ EmitPcRelativeAddressPlaceholderHigh(info, TMP, base_reg);
+ __ Lw(temp_reg, TMP, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
break;
}
+ case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
+ GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
+ return; // No code pointer retrieval; the runtime performs the call directly.
+ }
}
switch (code_ptr_location) {
@@ -7211,6 +7183,8 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke
__ NopIfNoReordering();
break;
}
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
+
DCHECK(!IsLeafMethod());
}
@@ -7228,10 +7202,10 @@ void InstructionCodeGeneratorMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDire
locations->HasTemps()
? locations->GetTemp(0)
: Location::NoLocation());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
-void CodeGeneratorMIPS::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
+void CodeGeneratorMIPS::GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp_location, SlowPathCode* slow_path) {
// Use the calling convention instead of the location of the receiver, as
// intrinsics may have put the receiver in a different register. In the intrinsics
// slow path, the arguments have been moved to the right place, so here we are
@@ -7263,6 +7237,7 @@ void CodeGeneratorMIPS::GenerateVirtualCall(HInvokeVirtual* invoke, Location tem
// T9();
__ Jalr(T9);
__ NopIfNoReordering();
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -7272,12 +7247,11 @@ void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(cls, loc, loc);
@@ -7331,7 +7305,7 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
// move.
void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
@@ -7350,7 +7324,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
break;
case HLoadClass::LoadKind::kReferrersClass:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
base_or_current_method_reg = locations->InAt(0).AsRegister<Register>();
break;
default:
@@ -7428,7 +7402,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
__ SetReorder(reordering);
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -7488,13 +7462,13 @@ void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
}
FALLTHROUGH_INTENDED;
// We need an extra register for PC-relative dex cache accesses.
- case HLoadString::LoadKind::kDexCacheViaMethod:
+ case HLoadString::LoadKind::kRuntimeCall:
locations->SetInAt(0, Location::RequiresRegister());
break;
default:
break;
}
- if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
} else {
@@ -7610,7 +7584,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
}
// TODO: Re-add the compiler code to do string dex cache lookup again.
- DCHECK(load_kind == HLoadString::LoadKind::kDexCacheViaMethod);
+ DCHECK(load_kind == HLoadString::LoadKind::kRuntimeCall);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
__ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
@@ -8732,29 +8706,11 @@ void InstructionCodeGeneratorMIPS::VisitMipsComputeBaseMethodAddress(
__ Nal();
// Grab the return address off RA.
__ Move(reg, RA);
- // TODO: Can we share this code with that of VisitMipsDexCacheArraysBase()?
// Remember this offset (the obtained PC value) for later use with constant area.
__ BindPcRelBaseLabel();
}
-void LocationsBuilderMIPS::VisitMipsDexCacheArraysBase(HMipsDexCacheArraysBase* base) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(base);
- locations->SetOut(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS::VisitMipsDexCacheArraysBase(HMipsDexCacheArraysBase* base) {
- Register reg = base->GetLocations()->Out().AsRegister<Register>();
- CodeGeneratorMIPS::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeDexCacheArrayPatch(base->GetDexFile(), base->GetElementOffset());
- CHECK(!codegen_->GetInstructionSetFeatures().IsR6());
- bool reordering = __ SetReorder(false);
- // TODO: Reuse MipsComputeBaseMethodAddress on R2 instead of passing ZERO to force emitting NAL.
- codegen_->EmitPcRelativeAddressPlaceholderHigh(info, reg, ZERO);
- __ Addiu(reg, reg, /* placeholder */ 0x5678);
- __ SetReorder(reordering);
-}
-
void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
// The trampoline uses the same calling convention as dex calling conventions,
// except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
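
The MIPS kBssEntry path above emits the high half via EmitPcRelativeAddressPlaceholderHigh and a load whose `0x5678` placeholder is the low half; both immediates are rewritten later when the final address is known. Because the low 16 bits act as a sign-extended immediate, the high half must absorb a possible carry. A sketch of that split (illustrative, not ART's patcher):

    #include <cassert>
    #include <cstdint>

    struct HiLo {
      uint16_t hi;
      uint16_t lo;
    };

    HiLo Split(uint32_t addr) {
      // The +0x8000 rounds the high half up when the low half will be
      // sign-extended to a negative value.
      return {static_cast<uint16_t>((addr + 0x8000u) >> 16),
              static_cast<uint16_t>(addr & 0xFFFFu)};
    }

    uint32_t Combine(const HiLo& parts) {
      // lui reg, hi        -> reg = hi << 16
      // lw  out, lo(reg)   -> effective address = reg + sign_extend(lo)
      int32_t lo_signed = static_cast<int16_t>(parts.lo);
      return (uint32_t{parts.hi} << 16) + static_cast<uint32_t>(lo_signed);
    }

    // Combine(Split(addr)) == addr for every 32-bit addr.
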
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 736b5070d9..e72e838dd9 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -552,8 +552,10 @@ class CodeGeneratorMIPS : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke) OVERRIDE;
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
- void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
+ void GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ void GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE {
@@ -583,12 +585,11 @@ class CodeGeneratorMIPS : public CodeGenerator {
};
PcRelativePatchInfo* NewPcRelativeMethodPatch(MethodReference target_method);
+ PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
dex::StringIndex string_index);
- PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
- uint32_t element_offset);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, Register out, Register base);
@@ -643,10 +644,10 @@ class CodeGeneratorMIPS : public CodeGenerator {
// Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
Uint32ToLiteralMap uint32_literals_;
- // PC-relative patch info for each HMipsDexCacheArraysBase.
- ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
// PC-relative method patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_method_patches_;
+ // PC-relative method patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 096139191e..e4f1cbd600 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -951,14 +951,14 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ assembler_(graph->GetArena(), &isa_features),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -1440,14 +1440,12 @@ inline void CodeGeneratorMIPS64::EmitPcRelativeLinkerPatches(
void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
- pc_relative_dex_cache_patches_.size() +
pc_relative_method_patches_.size() +
+ method_bss_entry_patches_.size() +
pc_relative_type_patches_.size() +
type_bss_entry_patches_.size() +
pc_relative_string_patches_.size();
linker_patches->reserve(size);
- EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
- linker_patches);
if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(pc_relative_method_patches_,
linker_patches);
@@ -1461,6 +1459,8 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::MethodBssEntryPatch>(method_bss_entry_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
DCHECK_EQ(size, linker_patches->size());
@@ -1473,6 +1473,13 @@ CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeMeth
&pc_relative_method_patches_);
}
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewMethodBssEntryPatch(
+ MethodReference target_method) {
+ return NewPcRelativePatch(*target_method.dex_file,
+ target_method.dex_method_index,
+ &method_bss_entry_patches_);
+}
+
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeTypePatch(
const DexFile& dex_file, dex::TypeIndex type_index) {
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
@@ -1488,11 +1495,6 @@ CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeStri
return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeDexCacheArrayPatch(
- const DexFile& dex_file, uint32_t element_offset) {
- return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
-}
-
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativePatch(
const DexFile& dex_file, uint32_t offset_or_index, ArenaDeque<PcRelativePatchInfo>* patches) {
patches->emplace_back(dex_file, offset_or_index);
@@ -4873,11 +4875,11 @@ HLoadString::LoadKind CodeGeneratorMIPS64::GetSupportedLoadStringKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kBootImageAddress:
- case HLoadString::LoadKind::kDexCacheViaMethod:
+ case HLoadString::LoadKind::kRuntimeCall:
break;
}
if (fallback_load) {
- desired_string_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
+ desired_string_load_kind = HLoadString::LoadKind::kRuntimeCall;
}
return desired_string_load_kind;
}
@@ -4899,11 +4901,11 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kBootImageAddress:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
break;
}
if (fallback_load) {
- desired_class_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ desired_class_load_kind = HLoadClass::LoadKind::kRuntimeCall;
}
return desired_class_load_kind;
}
@@ -4915,7 +4917,8 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStati
return desired_dispatch_info;
}
-void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
// All registers are assumed to be correctly set up per the calling convention.
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
@@ -4948,41 +4951,16 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
kLoadDoubleword,
DeduplicateUint64Literal(invoke->GetMethodAddress()));
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
- uint32_t offset = invoke->GetDexCacheArrayOffset();
- CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset);
+ case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
+ PcRelativePatchInfo* info = NewMethodBssEntryPatch(
+ MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
EmitPcRelativeAddressPlaceholderHigh(info, AT);
__ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
break;
}
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
- Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
- GpuRegister reg = temp.AsRegister<GpuRegister>();
- GpuRegister method_reg;
- if (current_method.IsRegister()) {
- method_reg = current_method.AsRegister<GpuRegister>();
- } else {
- // TODO: use the appropriate DCHECK() here if possible.
- // DCHECK(invoke->GetLocations()->Intrinsified());
- DCHECK(!current_method.IsValid());
- method_reg = reg;
- __ Ld(reg, SP, kCurrentMethodStackOffset);
- }
-
- // temp = temp->dex_cache_resolved_methods_;
- __ LoadFromOffset(kLoadDoubleword,
- reg,
- method_reg,
- ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
- // temp = temp[index_in_cache];
- // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
- uint32_t index_in_cache = invoke->GetDexMethodIndex();
- __ LoadFromOffset(kLoadDoubleword,
- reg,
- reg,
- CodeGenerator::GetCachePointerOffset(index_in_cache));
- break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
+ GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
+ return; // No code pointer retrieval; the runtime performs the call directly.
}
}
@@ -5002,6 +4980,8 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
__ Nop();
break;
}
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
+
DCHECK(!IsLeafMethod());
}
@@ -5019,10 +4999,10 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDi
locations->HasTemps()
? locations->GetTemp(0)
: Location::NoLocation());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
-void CodeGeneratorMIPS64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
+void CodeGeneratorMIPS64::GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp_location, SlowPathCode* slow_path) {
// Use the calling convention instead of the location of the receiver, as
// intrinsics may have put the receiver in a different register. In the intrinsics
// slow path, the arguments have been moved to the right place, so here we are
@@ -5054,6 +5034,7 @@ void CodeGeneratorMIPS64::GenerateVirtualCall(HInvokeVirtual* invoke, Location t
// T9();
__ Jalr(T9);
__ Nop();
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -5063,12 +5044,11 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke)
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(cls, loc, loc);
@@ -5105,7 +5085,7 @@ void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
// move.
void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
@@ -5116,7 +5096,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
GpuRegister out = out_loc.AsRegister<GpuRegister>();
GpuRegister current_method_reg = ZERO;
if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ load_kind == HLoadClass::LoadKind::kRuntimeCall) {
current_method_reg = locations->InAt(0).AsRegister<GpuRegister>();
}
@@ -5170,7 +5150,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
cls->GetClass()));
GenerateGcRootFieldLoad(cls, out_loc, out, 0, read_barrier_option);
break;
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -5219,7 +5199,7 @@ void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
HLoadString::LoadKind load_kind = load->GetLoadKind();
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
- if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
} else {
@@ -5293,7 +5273,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
}
// TODO: Re-add the compiler code to do string dex cache lookup again.
- DCHECK(load_kind == HLoadString::LoadKind::kDexCacheViaMethod);
+ DCHECK(load_kind == HLoadString::LoadKind::kRuntimeCall);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
__ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
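
Across all the backends in this change, RecordPcInfo moves out of the Visit* methods and into the call generators, right next to the call instruction, and now threads through the optional slow path. The general motivation for keeping the recording adjacent to the call is that a runtime with precise stack maps looks frames up by their return PC, so the map entry must correspond to the exact PC following the call. A toy model of that lookup (not ART's stack maps):

    #include <cstdint>
    #include <map>

    struct StackMapInfo {
      uint32_t dex_pc;  // Dex PC associated with the native return address.
    };

    class PcMap {
     public:
      // Called immediately after emitting the call instruction.
      void Record(uintptr_t native_pc_after_call, uint32_t dex_pc) {
        map_[native_pc_after_call] = StackMapInfo{dex_pc};
      }

      // Called while walking the stack with a frame's return PC.
      const StackMapInfo* Find(uintptr_t return_pc) const {
        auto it = map_.find(return_pc);
        return it == map_.end() ? nullptr : &it->second;
      }

     private:
      std::map<uintptr_t, StackMapInfo> map_;
    };
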
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 8405040386..6260c73614 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -314,6 +314,9 @@ class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
uint32_t num_entries,
HBasicBlock* switch_block,
HBasicBlock* default_block);
+ int32_t VecAddress(LocationSummary* locations,
+ size_t size,
+ /* out */ GpuRegister* adjusted_base);
Mips64Assembler* const assembler_;
CodeGeneratorMIPS64* const codegen_;
@@ -518,8 +521,10 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke) OVERRIDE;
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
- void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
+ void GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ void GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE {
@@ -546,12 +551,11 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
};
PcRelativePatchInfo* NewPcRelativeMethodPatch(MethodReference target_method);
+ PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
dex::StringIndex string_index);
- PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
- uint32_t element_offset);
PcRelativePatchInfo* NewPcRelativeCallPatch(const DexFile& dex_file,
uint32_t method_index);
Literal* DeduplicateBootImageAddressLiteral(uint64_t address);
@@ -604,10 +608,10 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
// Deduplication map for 64-bit literals, used for non-patchable method address or method code
// address.
Uint64ToLiteralMap uint64_literals_;
- // PC-relative patch info.
- ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
// PC-relative method patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_method_patches_;
+ // PC-relative method patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
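
The VecAddress helper declared above returns an immediate offset together with an adjusted base register for SIMD memory accesses. The general shape of such a helper is sketched below, under the assumption that the vector load/store offset field is limited; the bit width is a parameter here, not a claim about the MSA encoding.

    #include <cstdint>

    struct VecAddressParts {
      bool needs_adjusted_base;  // If true, fold `adjustment` into a scratch base first.
      int64_t adjustment;        // Amount added to the base when an adjusted base is needed.
      int32_t offset;            // Immediate left for the vector load/store itself.
    };

    bool FitsInSignedBits(int64_t value, int bits) {
      int64_t limit = int64_t{1} << (bits - 1);
      return value >= -limit && value < limit;
    }

    VecAddressParts ComputeVecAddress(int64_t total_offset, int offset_bits) {
      if (FitsInSignedBits(total_offset, offset_bits)) {
        return {false, 0, static_cast<int32_t>(total_offset)};
      }
      // Offset too large for the instruction: fold it into an adjusted base.
      return {true, total_offset, 0};
    }
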
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index a41adca02c..f422b9fc8b 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -22,6 +22,8 @@ using namespace vixl::aarch64; // NOLINT(build/namespaces)
namespace art {
namespace arm64 {
+using helpers::ARM64EncodableConstantOrRegister;
+using helpers::Arm64CanEncodeConstantAsImmediate;
using helpers::DRegisterFrom;
using helpers::VRegisterFrom;
using helpers::HeapOperand;
@@ -34,6 +36,7 @@ using helpers::WRegisterFrom;
void LocationsBuilderARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ HInstruction* input = instruction->InputAt(0);
switch (instruction->GetPackedType()) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
@@ -41,13 +44,19 @@ void LocationsBuilderARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruc
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimLong:
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, ARM64EncodableConstantOrRegister(input, instruction));
locations->SetOut(Location::RequiresFpuRegister());
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ if (input->IsConstant() &&
+ Arm64CanEncodeConstantAsImmediate(input->AsConstant(), instruction)) {
+ locations->SetInAt(0, Location::ConstantLocation(input->AsConstant()));
+ locations->SetOut(Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ }
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
@@ -57,33 +66,58 @@ void LocationsBuilderARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruc
void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
LocationSummary* locations = instruction->GetLocations();
+ Location src_loc = locations->InAt(0);
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
DCHECK_EQ(16u, instruction->GetVectorLength());
- __ Dup(dst.V16B(), InputRegisterAt(instruction, 0));
+ if (src_loc.IsConstant()) {
+ __ Movi(dst.V16B(), Int64ConstantFrom(src_loc));
+ } else {
+ __ Dup(dst.V16B(), InputRegisterAt(instruction, 0));
+ }
break;
case Primitive::kPrimChar:
case Primitive::kPrimShort:
DCHECK_EQ(8u, instruction->GetVectorLength());
- __ Dup(dst.V8H(), InputRegisterAt(instruction, 0));
+ if (src_loc.IsConstant()) {
+ __ Movi(dst.V8H(), Int64ConstantFrom(src_loc));
+ } else {
+ __ Dup(dst.V8H(), InputRegisterAt(instruction, 0));
+ }
break;
case Primitive::kPrimInt:
DCHECK_EQ(4u, instruction->GetVectorLength());
- __ Dup(dst.V4S(), InputRegisterAt(instruction, 0));
+ if (src_loc.IsConstant()) {
+ __ Movi(dst.V4S(), Int64ConstantFrom(src_loc));
+ } else {
+ __ Dup(dst.V4S(), InputRegisterAt(instruction, 0));
+ }
break;
case Primitive::kPrimLong:
DCHECK_EQ(2u, instruction->GetVectorLength());
- __ Dup(dst.V2D(), XRegisterFrom(locations->InAt(0)));
+ if (src_loc.IsConstant()) {
+ __ Movi(dst.V2D(), Int64ConstantFrom(src_loc));
+ } else {
+ __ Dup(dst.V2D(), XRegisterFrom(src_loc));
+ }
break;
case Primitive::kPrimFloat:
DCHECK_EQ(4u, instruction->GetVectorLength());
- __ Dup(dst.V4S(), VRegisterFrom(locations->InAt(0)).V4S(), 0);
+ if (src_loc.IsConstant()) {
+ __ Fmov(dst.V4S(), src_loc.GetConstant()->AsFloatConstant()->GetValue());
+ } else {
+ __ Dup(dst.V4S(), VRegisterFrom(src_loc).V4S(), 0);
+ }
break;
case Primitive::kPrimDouble:
DCHECK_EQ(2u, instruction->GetVectorLength());
- __ Dup(dst.V2D(), VRegisterFrom(locations->InAt(0)).V2D(), 0);
+ if (src_loc.IsConstant()) {
+ __ Fmov(dst.V2D(), src_loc.GetConstant()->AsDoubleConstant()->GetValue());
+ } else {
+ __ Dup(dst.V2D(), VRegisterFrom(src_loc).V2D(), 0);
+ }
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
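For reference, HVecReplicateScalar broadcasts one scalar into every lane of the destination; the change above lets an encodable constant be emitted directly as a vector immediate (Movi/Fmov) instead of first materializing it in a core or FP register and duplicating it. A minimal scalar model of the node's semantics (an illustrative sketch, not ART code):

#include <array>
#include <cstddef>

// Broadcast a single scalar into all kLanes lanes, which is what Dup/Movi produce in hardware.
template <typename T, size_t kLanes>
std::array<T, kLanes> ReplicateScalar(T value) {
  std::array<T, kLanes> lanes;
  lanes.fill(value);
  return lanes;
}

// Example: ReplicateScalar<int, 4>(7) models Dup(dst.V4S(), w) with w holding 7.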
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index 50b95c17cb..0395db1df9 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -15,6 +15,7 @@
*/
#include "code_generator_mips64.h"
+#include "mirror/array-inl.h"
namespace art {
namespace mips64 {
@@ -22,12 +23,72 @@ namespace mips64 {
// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<Mips64Assembler*>(GetAssembler())-> // NOLINT
+VectorRegister VectorRegisterFrom(Location location) {
+ DCHECK(location.IsFpuRegister());
+ return static_cast<VectorRegister>(location.AsFpuRegister<FpuRegister>());
+}
+
void LocationsBuilderMIPS64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void InstructionCodeGeneratorMIPS64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ FillB(dst, locations->InAt(0).AsRegister<GpuRegister>());
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ FillH(dst, locations->InAt(0).AsRegister<GpuRegister>());
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ FillW(dst, locations->InAt(0).AsRegister<GpuRegister>());
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ FillD(dst, locations->InAt(0).AsRegister<GpuRegister>());
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ ReplicateFPToVectorRegister(dst,
+ locations->InAt(0).AsFpuRegister<FpuRegister>(),
+ /* is_double */ false);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ ReplicateFPToVectorRegister(dst,
+ locations->InAt(0).AsFpuRegister<FpuRegister>(),
+ /* is_double */ true);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecSetScalars(HVecSetScalars* instruction) {
@@ -51,13 +112,23 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in
LocationSummary* locations = new (arena) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case Primitive::kPrimBoolean:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(),
+ instruction->IsVecNot() ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
+ break;
case Primitive::kPrimByte:
case Primitive::kPrimChar:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- DCHECK(locations);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(),
+ (instruction->IsVecNeg() || instruction->IsVecAbs())
+ ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
@@ -70,7 +141,18 @@ void LocationsBuilderMIPS64::VisitVecCnv(HVecCnv* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecCnv(HVecCnv* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ Primitive::Type from = instruction->GetInputType();
+ Primitive::Type to = instruction->GetResultType();
+ if (from == Primitive::kPrimInt && to == Primitive::kPrimFloat) {
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Ffint_sW(dst, src);
+ } else {
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecNeg(HVecNeg* instruction) {
@@ -78,7 +160,45 @@ void LocationsBuilderMIPS64::VisitVecNeg(HVecNeg* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecNeg(HVecNeg* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ FillB(dst, ZERO);
+ __ SubvB(dst, dst, src);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ FillH(dst, ZERO);
+ __ SubvH(dst, dst, src);
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ FillW(dst, ZERO);
+ __ SubvW(dst, dst, src);
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ FillD(dst, ZERO);
+ __ SubvD(dst, dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ FillW(dst, ZERO);
+ __ FsubW(dst, dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ FillD(dst, ZERO);
+ __ FsubD(dst, dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecAbs(HVecAbs* instruction) {
@@ -86,7 +206,47 @@ void LocationsBuilderMIPS64::VisitVecAbs(HVecAbs* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ FillB(dst, ZERO); // all zeroes
+ __ Add_aB(dst, dst, src); // dst = abs(0) + abs(src)
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ FillH(dst, ZERO); // all zeroes
+ __ Add_aH(dst, dst, src); // dst = abs(0) + abs(src)
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ FillW(dst, ZERO); // all zeroes
+ __ Add_aW(dst, dst, src); // dst = abs(0) + abs(src)
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ FillD(dst, ZERO); // all zeroes
+ __ Add_aD(dst, dst, src); // dst = abs(0) + abs(src)
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ LdiW(dst, -1); // all ones
+ __ SrliW(dst, dst, 1);
+ __ AndV(dst, dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ LdiD(dst, -1); // all ones
+ __ SrliD(dst, dst, 1);
+ __ AndV(dst, dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
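The float and double cases above compute absolute value by building an all-ones register, shifting it right by one to clear the sign-bit position, and AND-ing with the source. The same bit trick in scalar form, shown only to illustrate the LdiW/SrliW/AndV sequence:

#include <cstdint>
#include <cstring>

// ~0u >> 1 is 0x7FFFFFFF, the mask produced by LdiW(dst, -1) followed by SrliW(dst, dst, 1).
float AbsViaSignMask(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= ~0u >> 1;  // clear the IEEE-754 sign bit
  std::memcpy(&x, &bits, sizeof(x));
  return x;
}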
void LocationsBuilderMIPS64::VisitVecNot(HVecNot* instruction) {
@@ -94,7 +254,30 @@ void LocationsBuilderMIPS64::VisitVecNot(HVecNot* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecNot(HVecNot* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean: // special case boolean-not
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ LdiB(dst, 1);
+ __ XorV(dst, dst, src);
+ break;
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 16u);
+ __ NorV(dst, src, src); // lanes do not matter
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
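As the special case above notes, packed booleans are stored as 0/1 bytes, so a boolean NOT is an XOR with 1, while every other packed type uses a full bitwise complement (NorV of the source with itself). A scalar sketch of the two paths:

#include <cstdint>

uint8_t BooleanNot(uint8_t b) { return static_cast<uint8_t>(b ^ 1u); }  // models LdiB(dst, 1); XorV(dst, dst, src)
uint8_t BitwiseNot(uint8_t v) { return static_cast<uint8_t>(~v); }      // models NorV(dst, src, src)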
// Helper to set up locations for vector binary operations.
@@ -106,9 +289,12 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation*
case Primitive::kPrimChar:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- DCHECK(locations);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
@@ -121,7 +307,40 @@ void LocationsBuilderMIPS64::VisitVecAdd(HVecAdd* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecAdd(HVecAdd* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ AddvB(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ AddvH(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ AddvW(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ AddvD(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ FaddW(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ FaddD(dst, lhs, rhs);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -129,7 +348,40 @@ void LocationsBuilderMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ instruction->IsRounded()
+ ? __ Aver_uB(dst, lhs, rhs)
+ : __ Ave_uB(dst, lhs, rhs);
+ } else {
+ instruction->IsRounded()
+ ? __ Aver_sB(dst, lhs, rhs)
+ : __ Ave_sB(dst, lhs, rhs);
+ }
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ instruction->IsRounded()
+ ? __ Aver_uH(dst, lhs, rhs)
+ : __ Ave_uH(dst, lhs, rhs);
+ } else {
+ instruction->IsRounded()
+ ? __ Aver_sH(dst, lhs, rhs)
+ : __ Ave_sH(dst, lhs, rhs);
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
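The Ave/Aver instructions used above implement a halving add: (a + b) >> 1, with the rounded variants adding one before the shift, all without overflowing the lane width. A scalar model, assuming unsigned 8-bit lanes:

#include <cstdint>

// Widen to 16 bits so the intermediate sum cannot overflow; rounded == true matches Aver_uB,
// rounded == false matches Ave_uB.
uint8_t HalvingAddU8(uint8_t a, uint8_t b, bool rounded) {
  uint16_t sum = static_cast<uint16_t>(a) + static_cast<uint16_t>(b) + (rounded ? 1u : 0u);
  return static_cast<uint8_t>(sum >> 1);
}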
void LocationsBuilderMIPS64::VisitVecSub(HVecSub* instruction) {
@@ -137,7 +389,40 @@ void LocationsBuilderMIPS64::VisitVecSub(HVecSub* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecSub(HVecSub* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ SubvB(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ SubvH(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ SubvW(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ SubvD(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ FsubW(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ FsubD(dst, lhs, rhs);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecMul(HVecMul* instruction) {
@@ -145,7 +430,40 @@ void LocationsBuilderMIPS64::VisitVecMul(HVecMul* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecMul(HVecMul* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ MulvB(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ MulvH(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ MulvW(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ MulvD(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ FmulW(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ FmulD(dst, lhs, rhs);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecDiv(HVecDiv* instruction) {
@@ -153,7 +471,23 @@ void LocationsBuilderMIPS64::VisitVecDiv(HVecDiv* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecDiv(HVecDiv* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ FdivW(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ FdivD(dst, lhs, rhs);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecMin(HVecMin* instruction) {
@@ -161,7 +495,60 @@ void LocationsBuilderMIPS64::VisitVecMin(HVecMin* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Min_uB(dst, lhs, rhs);
+ } else {
+ __ Min_sB(dst, lhs, rhs);
+ }
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Min_uH(dst, lhs, rhs);
+ } else {
+ __ Min_sH(dst, lhs, rhs);
+ }
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Min_uW(dst, lhs, rhs);
+ } else {
+ __ Min_sW(dst, lhs, rhs);
+ }
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Min_uD(dst, lhs, rhs);
+ } else {
+ __ Min_sD(dst, lhs, rhs);
+ }
+ break;
+ // When one of the arguments is NaN, fmin.df returns the other argument, but Java expects a NaN value.
+ // TODO: Fix min(x, NaN) cases for float and double.
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ FminW(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ FminD(dst, lhs, rhs);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
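The TODO above points at the gap between MSA's fmin.df and Java semantics: Math.min returns NaN when either operand is NaN and treats -0.0 as smaller than +0.0. A scalar reference for the float case (illustrative only; the code generated above still emits FminW):

#include <cmath>
#include <limits>

float JavaFloatMin(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<float>::quiet_NaN();  // fmin.w would return the non-NaN operand
  }
  if (a == b) {
    return std::signbit(a) ? a : b;  // prefer -0.0f over +0.0f
  }
  return (a < b) ? a : b;
}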
void LocationsBuilderMIPS64::VisitVecMax(HVecMax* instruction) {
@@ -169,7 +556,60 @@ void LocationsBuilderMIPS64::VisitVecMax(HVecMax* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Max_uB(dst, lhs, rhs);
+ } else {
+ __ Max_sB(dst, lhs, rhs);
+ }
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Max_uH(dst, lhs, rhs);
+ } else {
+ __ Max_sH(dst, lhs, rhs);
+ }
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Max_uW(dst, lhs, rhs);
+ } else {
+ __ Max_sW(dst, lhs, rhs);
+ }
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Max_uD(dst, lhs, rhs);
+ } else {
+ __ Max_sD(dst, lhs, rhs);
+ }
+ break;
+ // When one of the arguments is NaN, fmax.df returns the other argument, but Java expects a NaN value.
+ // TODO: Fix max(x, NaN) cases for float and double.
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ FmaxW(dst, lhs, rhs);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ FmaxD(dst, lhs, rhs);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecAnd(HVecAnd* instruction) {
@@ -177,7 +617,27 @@ void LocationsBuilderMIPS64::VisitVecAnd(HVecAnd* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecAnd(HVecAnd* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 16u);
+ __ AndV(dst, lhs, rhs); // lanes do not matter
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
@@ -193,7 +653,27 @@ void LocationsBuilderMIPS64::VisitVecOr(HVecOr* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecOr(HVecOr* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 16u);
+ __ OrV(dst, lhs, rhs); // lanes do not matter
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecXor(HVecXor* instruction) {
@@ -201,7 +681,27 @@ void LocationsBuilderMIPS64::VisitVecXor(HVecXor* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecXor(HVecXor* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 16u);
+ __ XorV(dst, lhs, rhs); // lanes do not matter
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
// Helper to set up locations for vector shift operations.
@@ -213,7 +713,9 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation*
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimLong:
- DCHECK(locations);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
@@ -226,7 +728,32 @@ void LocationsBuilderMIPS64::VisitVecShl(HVecShl* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecShl(HVecShl* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ SlliB(dst, lhs, value);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ SlliH(dst, lhs, value);
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ SlliW(dst, lhs, value);
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ SlliD(dst, lhs, value);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecShr(HVecShr* instruction) {
@@ -234,7 +761,32 @@ void LocationsBuilderMIPS64::VisitVecShr(HVecShr* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecShr(HVecShr* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ SraiB(dst, lhs, value);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ SraiH(dst, lhs, value);
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ SraiW(dst, lhs, value);
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ SraiD(dst, lhs, value);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecUShr(HVecUShr* instruction) {
@@ -242,7 +794,32 @@ void LocationsBuilderMIPS64::VisitVecUShr(HVecUShr* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecUShr(HVecUShr* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ SrliB(dst, lhs, value);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ SrliH(dst, lhs, value);
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ SrliW(dst, lhs, value);
+ break;
+ case Primitive::kPrimLong:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ SrliD(dst, lhs, value);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instr) {
@@ -253,20 +830,143 @@ void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccu
LOG(FATAL) << "No SIMD for " << instr->GetId();
}
+// Helper to set up locations for vector memory operations.
+static void CreateVecMemLocations(ArenaAllocator* arena,
+ HVecMemoryOperation* instruction,
+ bool is_load) {
+ LocationSummary* locations = new (arena) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (is_load) {
+ locations->SetOut(Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(2, Location::RequiresFpuRegister());
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
+}
+
+// Helper to prepare the register and offset for vector memory operations. Returns the offset and sets
+// the output parameter adjusted_base to the original base or to the reserved temporary register (AT).
+int32_t InstructionCodeGeneratorMIPS64::VecAddress(LocationSummary* locations,
+ size_t size,
+ /* out */ GpuRegister* adjusted_base) {
+ GpuRegister base = locations->InAt(0).AsRegister<GpuRegister>();
+ Location index = locations->InAt(1);
+ int scale = TIMES_1;
+ switch (size) {
+ case 2: scale = TIMES_2; break;
+ case 4: scale = TIMES_4; break;
+ case 8: scale = TIMES_8; break;
+ default: break;
+ }
+ int32_t offset = mirror::Array::DataOffset(size).Int32Value();
+
+ if (index.IsConstant()) {
+ offset += index.GetConstant()->AsIntConstant()->GetValue() << scale;
+ __ AdjustBaseOffsetAndElementSizeShift(base, offset, scale);
+ *adjusted_base = base;
+ } else {
+ GpuRegister index_reg = index.AsRegister<GpuRegister>();
+ if (scale != TIMES_1) {
+ __ Dlsa(AT, index_reg, base, scale);
+ } else {
+ __ Daddu(AT, base, index_reg);
+ }
+ *adjusted_base = AT;
+ }
+ return offset;
+}
+
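In short, VecAddress computes the address of the first accessed element as base + Array::DataOffset(size) + index * size, returning it as a (register, immediate offset) pair: a constant index is folded into the offset, while a variable index is scaled and added into the scratch register AT. A simplified model of the arithmetic (names here are illustrative, not ART APIs):

#include <cstddef>
#include <cstdint>

// Effective address of element `index` in a primitive array whose payload starts `data_offset`
// bytes past the object header (mirror::Array::DataOffset(element_size) in ART).
uintptr_t VectorElementAddress(uintptr_t array_base,
                               size_t index,
                               size_t element_size,
                               size_t data_offset) {
  return array_base + data_offset + index * element_size;
}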
void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ true);
}
void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ size_t size = Primitive::ComponentSize(instruction->GetPackedType());
+ VectorRegister reg = VectorRegisterFrom(locations->Out());
+ GpuRegister base;
+ int32_t offset = VecAddress(locations, size, &base);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ LdB(reg, base, offset);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ // Loading 8 bytes (needed when dealing with compressed strings in StringCharAt) from an
+ // unaligned memory address may cause a trap to the kernel if the CPU doesn't directly support
+ // unaligned loads and stores.
+ // TODO: Implement support for StringCharAt.
+ DCHECK(!instruction->IsStringCharAt());
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ LdH(reg, base, offset);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ LdW(reg, base, offset);
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ LdD(reg, base, offset);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ false);
}
void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ size_t size = Primitive::ComponentSize(instruction->GetPackedType());
+ VectorRegister reg = VectorRegisterFrom(locations->InAt(2));
+ GpuRegister base;
+ int32_t offset = VecAddress(locations, size, &base);
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ StB(reg, base, offset);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ StH(reg, base, offset);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ StW(reg, base, offset);
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ StD(reg, base, offset);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
#undef __
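Putting VecAddress and the Ld*/St* cases together, a vector load or store simply moves vector_length consecutive array elements between memory and a 128-bit MSA register. A plain-C++ reference for the load direction (a sketch, not the generated code):

#include <cstddef>
#include <cstring>

// Copy `lane_count` elements of `element_size` bytes from `src` into the lane buffer; for the
// cases above lane_count * element_size is always 16 bytes, the width of one MSA register.
void LoadVector(void* lanes, const void* src, size_t lane_count, size_t element_size) {
  std::memcpy(lanes, src, lane_count * element_size);
}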
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 317ca71136..83a261d334 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1032,8 +1032,8 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
move_resolver_(graph->GetArena(), this),
assembler_(graph->GetArena()),
isa_features_(isa_features),
- pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -2204,7 +2204,6 @@ void InstructionCodeGeneratorX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirec
LocationSummary* locations = invoke->GetLocations();
codegen_->GenerateStaticOrDirectCall(
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -2228,7 +2227,6 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
@@ -4521,18 +4519,17 @@ Register CodeGeneratorX86::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOr
// save one load. However, since this is just an intrinsic slow path we prefer this
// simple and more robust approach rather than trying to determine if that's the case.
SlowPathCode* slow_path = GetCurrentSlowPath();
- if (slow_path != nullptr) {
- if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
- int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
- __ movl(temp, Address(ESP, stack_offset));
- return temp;
- }
+ DCHECK(slow_path != nullptr); // For intrinsified invokes the call is emitted on the slow path.
+ if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
+ int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
+ __ movl(temp, Address(ESP, stack_offset));
+ return temp;
}
return location.AsRegister<Register>();
}
-Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
+void CodeGeneratorX86::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
@@ -4556,45 +4553,21 @@ Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticO
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
__ movl(temp.AsRegister<Register>(), Immediate(invoke->GetMethodAddress()));
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
+ case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
temp.AsRegister<Register>());
__ movl(temp.AsRegister<Register>(), Address(base_reg, kDummy32BitOffset));
// Bind a new fixup label at the end of the "movl" insn.
- uint32_t offset = invoke->GetDexCacheArrayOffset();
- __ Bind(NewPcRelativeDexCacheArrayPatch(
+ __ Bind(NewMethodBssEntryPatch(
invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress(),
- invoke->GetDexFileForPcRelativeDexCache(),
- offset));
+ MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex())));
break;
}
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
- Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
- Register method_reg;
- Register reg = temp.AsRegister<Register>();
- if (current_method.IsRegister()) {
- method_reg = current_method.AsRegister<Register>();
- } else {
- DCHECK(invoke->GetLocations()->Intrinsified());
- DCHECK(!current_method.IsValid());
- method_reg = reg;
- __ movl(reg, Address(ESP, kCurrentMethodStackOffset));
- }
- // /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
- __ movl(reg, Address(method_reg,
- ArtMethod::DexCacheResolvedMethodsOffset(kX86PointerSize).Int32Value()));
- // temp = temp[index_in_cache];
- // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
- uint32_t index_in_cache = invoke->GetDexMethodIndex();
- __ movl(reg, Address(reg, CodeGenerator::GetCachePointerOffset(index_in_cache)));
- break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
+ GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
+ return; // No code pointer retrieval; the runtime performs the call directly.
}
}
- return callee_method;
-}
-
-void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
- Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
@@ -4607,11 +4580,13 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
kX86PointerSize).Int32Value()));
break;
}
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
DCHECK(!IsLeafMethod());
}
-void CodeGeneratorX86::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_in) {
+void CodeGeneratorX86::GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp_in, SlowPathCode* slow_path) {
Register temp = temp_in.AsRegister<Register>();
uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kX86PointerSize).Uint32Value();
@@ -4639,6 +4614,7 @@ void CodeGeneratorX86::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp
// call temp->GetEntryPoint();
__ call(Address(
temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize).Int32Value()));
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
void CodeGeneratorX86::RecordBootMethodPatch(HInvokeStaticOrDirect* invoke) {
@@ -4651,6 +4627,16 @@ void CodeGeneratorX86::RecordBootMethodPatch(HInvokeStaticOrDirect* invoke) {
__ Bind(&boot_image_method_patches_.back().label);
}
+Label* CodeGeneratorX86::NewMethodBssEntryPatch(
+ HX86ComputeBaseMethodAddress* method_address,
+ MethodReference target_method) {
+ // Add the patch entry and bind its label at the end of the instruction.
+ method_bss_entry_patches_.emplace_back(method_address,
+ *target_method.dex_file,
+ target_method.dex_method_index);
+ return &method_bss_entry_patches_.back().label;
+}
+
void CodeGeneratorX86::RecordBootTypePatch(HLoadClass* load_class) {
HX86ComputeBaseMethodAddress* address = load_class->InputAt(0)->AsX86ComputeBaseMethodAddress();
boot_image_type_patches_.emplace_back(address,
@@ -4685,15 +4671,6 @@ Label* CodeGeneratorX86::NewStringBssEntryPatch(HLoadString* load_string) {
return &string_patches_.back().label;
}
-Label* CodeGeneratorX86::NewPcRelativeDexCacheArrayPatch(
- HX86ComputeBaseMethodAddress* method_address,
- const DexFile& dex_file,
- uint32_t element_offset) {
- // Add the patch entry and bind its label at the end of the instruction.
- pc_relative_dex_cache_patches_.emplace_back(method_address, dex_file, element_offset);
- return &pc_relative_dex_cache_patches_.back().label;
-}
-
// The label points to the end of the "movl" or another instruction but the literal offset
// for method patch needs to point to the embedded constant which occupies the last 4 bytes.
constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
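The 4-byte adjustment exists because the Label is bound at the end of the instruction, while the linker patch must target the 32-bit literal occupying the instruction's last four bytes. A toy model of applying such a patch (hypothetical helper, not the real LinkerPatch machinery):

#include <cstdint>
#include <cstring>
#include <vector>

// `label_position` is the byte offset just past the emitted movl; the embedded 32-bit literal
// sits in the four bytes immediately before it.
void PatchLiteral(std::vector<uint8_t>* code, uint32_t label_position, int32_t value) {
  std::memcpy(code->data() + (label_position - 4u), &value, sizeof(value));
}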
@@ -4712,14 +4689,12 @@ inline void CodeGeneratorX86::EmitPcRelativeLinkerPatches(
void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
- pc_relative_dex_cache_patches_.size() +
boot_image_method_patches_.size() +
+ method_bss_entry_patches_.size() +
boot_image_type_patches_.size() +
type_bss_entry_patches_.size() +
string_patches_.size();
linker_patches->reserve(size);
- EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
- linker_patches);
if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(boot_image_method_patches_,
linker_patches);
@@ -4731,6 +4706,8 @@ void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::MethodBssEntryPatch>(method_bss_entry_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
DCHECK_EQ(size, linker_patches->size());
@@ -6066,7 +6043,7 @@ HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kBootImageAddress:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
break;
}
return desired_class_load_kind;
@@ -6074,7 +6051,7 @@ HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
@@ -6128,7 +6105,7 @@ Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
// move.
void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
@@ -6188,7 +6165,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
case HLoadClass::LoadKind::kInvalid:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -6251,7 +6228,7 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kBootImageAddress:
- case HLoadString::LoadKind::kDexCacheViaMethod:
+ case HLoadString::LoadKind::kRuntimeCall:
break;
}
return desired_string_load_kind;
@@ -6265,7 +6242,7 @@ void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
load_kind == HLoadString::LoadKind::kBssEntry) {
locations->SetInAt(0, Location::RequiresRegister());
}
- if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
locations->SetOut(Location::RegisterLocation(EAX));
} else {
locations->SetOut(Location::RequiresRegister());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 21c527e8b0..f48753b614 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -408,19 +408,19 @@ class CodeGeneratorX86 : public CodeGenerator {
HInvokeStaticOrDirect* invoke) OVERRIDE;
// Generate a call to a static or direct method.
- Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
+ void GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
// Generate a call to a virtual method.
- void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
+ void GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
void RecordBootMethodPatch(HInvokeStaticOrDirect* invoke);
+ Label* NewMethodBssEntryPatch(HX86ComputeBaseMethodAddress* method_address,
+ MethodReference target_method);
void RecordBootTypePatch(HLoadClass* load_class);
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
void RecordBootStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
- Label* NewPcRelativeDexCacheArrayPatch(HX86ComputeBaseMethodAddress* method_address,
- const DexFile& dex_file,
- uint32_t element_offset);
Label* NewJitRootStringPatch(const DexFile& dex_file,
dex::StringIndex dex_index,
Handle<mirror::String> handle);
@@ -632,10 +632,10 @@ class CodeGeneratorX86 : public CodeGenerator {
X86Assembler assembler_;
const X86InstructionSetFeatures& isa_features_;
- // PC-relative DexCache access info.
- ArenaDeque<X86PcRelativePatchInfo> pc_relative_dex_cache_patches_;
// PC-relative method patch info for kBootImageLinkTimePcRelative.
ArenaDeque<X86PcRelativePatchInfo> boot_image_method_patches_;
+ // PC-relative method patch info for kBssEntry.
+ ArenaDeque<X86PcRelativePatchInfo> method_bss_entry_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<X86PcRelativePatchInfo> boot_image_type_patches_;
// Type patch locations for kBssEntry.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 6b5e4d602d..7331a9e98e 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -977,9 +977,10 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStati
return desired_dispatch_info;
}
-Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
+void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
// All registers are assumed to be correctly set up.
+
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
@@ -1001,44 +1002,19 @@ Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStat
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
Load64BitValue(temp.AsRegister<CpuRegister>(), invoke->GetMethodAddress());
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
+ case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
__ movq(temp.AsRegister<CpuRegister>(),
Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
// Bind a new fixup label at the end of the "movl" insn.
- uint32_t offset = invoke->GetDexCacheArrayOffset();
- __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset));
+ __ Bind(NewMethodBssEntryPatch(
+ MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex())));
break;
}
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
- Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
- Register method_reg;
- CpuRegister reg = temp.AsRegister<CpuRegister>();
- if (current_method.IsRegister()) {
- method_reg = current_method.AsRegister<Register>();
- } else {
- DCHECK(invoke->GetLocations()->Intrinsified());
- DCHECK(!current_method.IsValid());
- method_reg = reg.AsRegister();
- __ movq(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
- }
- // /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
- __ movq(reg,
- Address(CpuRegister(method_reg),
- ArtMethod::DexCacheResolvedMethodsOffset(kX86_64PointerSize).SizeValue()));
- // temp = temp[index_in_cache];
- // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
- uint32_t index_in_cache = invoke->GetDexMethodIndex();
- __ movq(reg, Address(reg, CodeGenerator::GetCachePointerOffset(index_in_cache)));
- break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
+ GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
+ return; // No code pointer retrieval; the runtime performs the call directly.
}
}
- return callee_method;
-}
-
-void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
- // All registers are assumed to be correctly set up.
- Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
@@ -1051,11 +1027,13 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
kX86_64PointerSize).SizeValue()));
break;
}
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
DCHECK(!IsLeafMethod());
}
-void CodeGeneratorX86_64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_in) {
+void CodeGeneratorX86_64::GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp_in, SlowPathCode* slow_path) {
CpuRegister temp = temp_in.AsRegister<CpuRegister>();
size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kX86_64PointerSize).SizeValue();
@@ -1084,6 +1062,7 @@ void CodeGeneratorX86_64::GenerateVirtualCall(HInvokeVirtual* invoke, Location t
// call temp->GetEntryPoint();
__ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64PointerSize).SizeValue()));
+ RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
void CodeGeneratorX86_64::RecordBootMethodPatch(HInvokeStaticOrDirect* invoke) {
@@ -1092,6 +1071,12 @@ void CodeGeneratorX86_64::RecordBootMethodPatch(HInvokeStaticOrDirect* invoke) {
__ Bind(&boot_image_method_patches_.back().label);
}
+Label* CodeGeneratorX86_64::NewMethodBssEntryPatch(MethodReference target_method) {
+ // Add a patch entry and return the label.
+ method_bss_entry_patches_.emplace_back(*target_method.dex_file, target_method.dex_method_index);
+ return &method_bss_entry_patches_.back().label;
+}
+
void CodeGeneratorX86_64::RecordBootTypePatch(HLoadClass* load_class) {
boot_image_type_patches_.emplace_back(load_class->GetDexFile(),
load_class->GetTypeIndex().index_);
@@ -1115,13 +1100,6 @@ Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
return &string_patches_.back().label;
}
-Label* CodeGeneratorX86_64::NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
- uint32_t element_offset) {
- // Add a patch entry and return the label.
- pc_relative_dex_cache_patches_.emplace_back(dex_file, element_offset);
- return &pc_relative_dex_cache_patches_.back().label;
-}
-
// The label points to the end of the "movl" or another instruction but the literal offset
// for method patch needs to point to the embedded constant which occupies the last 4 bytes.
constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
@@ -1140,14 +1118,12 @@ inline void CodeGeneratorX86_64::EmitPcRelativeLinkerPatches(
void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
- pc_relative_dex_cache_patches_.size() +
boot_image_method_patches_.size() +
+ method_bss_entry_patches_.size() +
boot_image_type_patches_.size() +
type_bss_entry_patches_.size() +
string_patches_.size();
linker_patches->reserve(size);
- EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
- linker_patches);
if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(boot_image_method_patches_,
linker_patches);
@@ -1159,6 +1135,8 @@ void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::MethodBssEntryPatch>(method_bss_entry_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
DCHECK_EQ(size, linker_patches->size());
@@ -1247,8 +1225,8 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
assembler_(graph->GetArena()),
isa_features_(isa_features),
constant_area_start_(0),
- pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -2393,7 +2371,6 @@ void InstructionCodeGeneratorX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDi
LocationSummary* locations = invoke->GetLocations();
codegen_->GenerateStaticOrDirectCall(
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
@@ -2417,7 +2394,6 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
@@ -5483,7 +5459,7 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kBootImageAddress:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
+ case HLoadClass::LoadKind::kRuntimeCall:
break;
}
return desired_class_load_kind;
@@ -5491,7 +5467,7 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
// Custom calling convention: RAX serves as both input and output.
CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
@@ -5542,7 +5518,7 @@ Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
// move.
void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
@@ -5653,7 +5629,7 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind(
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kBootImageAddress:
- case HLoadString::LoadKind::kDexCacheViaMethod:
+ case HLoadString::LoadKind::kRuntimeCall:
break;
}
return desired_string_load_kind;
@@ -5662,7 +5638,7 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind(
void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
- if (load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod) {
+ if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) {
locations->SetOut(Location::RegisterLocation(RAX));
} else {
locations->SetOut(Location::RequiresRegister());
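
Taken together, the x86-64 hunks above replace the old per-dex-file dex-cache-array loads with a PC-relative load from the method's .bss slot: NewMethodBssEntryPatch records a label at the load site, and EmitLinkerPatches later turns each recorded label into a LinkerPatch::MethodBssEntryPatch, with the literal offset being the label position minus the 4-byte adjustment noted above. The following is only a minimal sketch of how a call site would pair the load with the patch; the assembler helper and register types are hypothetical placeholders, not the actual ART codegen API.

    // Hypothetical sketch: emit `movq temp, [rip + 0]` and remember where the 32-bit
    // displacement sits so the linker can rewrite it to point at the .bss slot that
    // the runtime fills with the resolved ArtMethod* on first use.
    void EmitMethodBssEntryLoadSketch(CodeGeneratorX86_64* codegen,
                                      SketchAssembler* assembler,   // placeholder type
                                      MethodReference target_method,
                                      SketchRegister temp) {        // placeholder type
      assembler->EmitRipRelativeLoad(temp, /* displacement= */ 0);  // placeholder call
      // The label is bound at the end of the instruction; the embedded constant is its
      // last 4 bytes, matching kLabelPositionToLiteralOffsetAdjustment above.
      assembler->Bind(codegen->NewMethodBssEntryPatch(target_method));
    }
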
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 3039e0519c..33c64290d4 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -404,16 +404,17 @@ class CodeGeneratorX86_64 : public CodeGenerator {
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke) OVERRIDE;
- Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
- void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
+ void GenerateStaticOrDirectCall(
+ HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ void GenerateVirtualCall(
+ HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
void RecordBootMethodPatch(HInvokeStaticOrDirect* invoke);
+ Label* NewMethodBssEntryPatch(MethodReference target_method);
void RecordBootTypePatch(HLoadClass* load_class);
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
void RecordBootStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
- Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
Label* NewJitRootStringPatch(const DexFile& dex_file,
dex::StringIndex dex_index,
Handle<mirror::String> handle);
@@ -602,10 +603,10 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Used for fixups to the constant area.
int constant_area_start_;
- // PC-relative DexCache access info.
- ArenaDeque<PatchInfo<Label>> pc_relative_dex_cache_patches_;
// PC-relative method patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PatchInfo<Label>> boot_image_method_patches_;
+ // PC-relative method patch info for kBssEntry.
+ ArenaDeque<PatchInfo<Label>> method_bss_entry_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PatchInfo<Label>> boot_image_type_patches_;
// Type patch locations for kBssEntry.
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 721f74eeee..e73fd7ddc8 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -234,9 +234,20 @@ inline vixl::aarch64::Operand OperandFromMemOperand(
}
}
-inline bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
- DCHECK(constant->IsIntConstant() || constant->IsLongConstant() || constant->IsNullConstant())
- << constant->DebugName();
+inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
+ int64_t value = CodeGenerator::GetInt64ValueOf(constant);
+
+ // TODO: Improve this when IsSIMDConstantEncodable method is implemented in VIXL.
+ if (instr->IsVecReplicateScalar()) {
+ if (constant->IsLongConstant()) {
+ return false;
+ } else if (constant->IsFloatConstant()) {
+ return vixl::aarch64::Assembler::IsImmFP32(constant->AsFloatConstant()->GetValue());
+ } else if (constant->IsDoubleConstant()) {
+ return vixl::aarch64::Assembler::IsImmFP64(constant->AsDoubleConstant()->GetValue());
+ }
+ return IsUint<8>(value);
+ }
// For single uses we let VIXL handle the constant generation since it will
// use registers that are not managed by the register allocator (wip0, wip1).
@@ -249,8 +260,6 @@ inline bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* inst
return true;
}
- int64_t value = CodeGenerator::GetInt64ValueOf(constant);
-
if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
// Uses logical operations.
return vixl::aarch64::Assembler::IsImmLogical(value, vixl::aarch64::kXRegSize);
@@ -276,7 +285,7 @@ inline bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* inst
inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
HInstruction* instr) {
if (constant->IsConstant()
- && CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
+ && Arm64CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
return Location::ConstantLocation(constant->AsConstant());
}
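
The renamed Arm64CanEncodeConstantAsImmediate now also answers the question for replicate-scalar vector operations: 64-bit integer constants are never encodable, float/double constants are accepted only if VIXL's IsImmFP32/IsImmFP64 say they fit the AArch64 8-bit FP immediate encoding, and the remaining integral types must fit in an unsigned 8-bit value. A self-contained illustration of that last check (FitsInUint8 is a stand-in for ART's IsUint<8>, whose semantics it mirrors):

    #include <cstdint>
    #include <iostream>

    // Stand-in for ART's IsUint<8>(): true iff the value fits an unsigned 8-bit
    // immediate, which is what an integral replicate-scalar immediate needs here.
    constexpr bool FitsInUint8(int64_t value) {
      return value >= 0 && value <= 0xff;
    }

    int main() {
      std::cout << FitsInUint8(200) << "\n";  // 1: can stay a constant operand
      std::cout << FitsInUint8(300) << "\n";  // 0: must be materialized in a register
      std::cout << FitsInUint8(-1)  << "\n";  // 0: negative values are rejected here
      return 0;
    }
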
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.cc b/compiler/optimizing/dex_cache_array_fixups_arm.cc
deleted file mode 100644
index 0c832a5c35..0000000000
--- a/compiler/optimizing/dex_cache_array_fixups_arm.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dex_cache_array_fixups_arm.h"
-
-#include "base/arena_containers.h"
-#ifdef ART_USE_OLD_ARM_BACKEND
-#include "code_generator_arm.h"
-#include "intrinsics_arm.h"
-#else
-#include "code_generator_arm_vixl.h"
-#include "intrinsics_arm_vixl.h"
-#endif
-#include "utils/dex_cache_arrays_layout-inl.h"
-
-namespace art {
-namespace arm {
-#ifdef ART_USE_OLD_ARM_BACKEND
-typedef CodeGeneratorARM CodeGeneratorARMType;
-typedef IntrinsicLocationsBuilderARM IntrinsicLocationsBuilderARMType;
-#else
-typedef CodeGeneratorARMVIXL CodeGeneratorARMType;
-typedef IntrinsicLocationsBuilderARMVIXL IntrinsicLocationsBuilderARMType;
-#endif
-
-/**
- * Finds instructions that need the dex cache arrays base as an input.
- */
-class DexCacheArrayFixupsVisitor : public HGraphVisitor {
- public:
- DexCacheArrayFixupsVisitor(HGraph* graph, CodeGenerator* codegen)
- : HGraphVisitor(graph),
- codegen_(down_cast<CodeGeneratorARMType*>(codegen)),
- dex_cache_array_bases_(std::less<const DexFile*>(),
- // Attribute memory use to code generator.
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {}
-
- void MoveBasesIfNeeded() {
- for (const auto& entry : dex_cache_array_bases_) {
- // Bring the base closer to the first use (previously, it was in the
- // entry block) and relieve some pressure on the register allocator
- // while avoiding recalculation of the base in a loop.
- HArmDexCacheArraysBase* base = entry.second;
- base->MoveBeforeFirstUserAndOutOfLoops();
- }
- }
-
- private:
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
- // If this is an invoke with PC-relative access to the dex cache methods array,
- // we need to add the dex cache arrays base as the special input.
- if (invoke->HasPcRelativeDexCache() &&
- !IsCallFreeIntrinsic<IntrinsicLocationsBuilderARMType>(invoke, codegen_)) {
- HArmDexCacheArraysBase* base =
- GetOrCreateDexCacheArrayBase(invoke, invoke->GetDexFileForPcRelativeDexCache());
- // Update the element offset in base.
- DexCacheArraysLayout layout(kArmPointerSize, &invoke->GetDexFileForPcRelativeDexCache());
- base->UpdateElementOffset(layout.MethodOffset(invoke->GetDexMethodIndex()));
- // Add the special argument base to the method.
- DCHECK(!invoke->HasCurrentMethodInput());
- invoke->AddSpecialInput(base);
- }
- }
-
- HArmDexCacheArraysBase* GetOrCreateDexCacheArrayBase(HInstruction* cursor,
- const DexFile& dex_file) {
- if (GetGraph()->HasIrreducibleLoops()) {
- HArmDexCacheArraysBase* base = new (GetGraph()->GetArena()) HArmDexCacheArraysBase(dex_file);
- cursor->GetBlock()->InsertInstructionBefore(base, cursor);
- return base;
- } else {
- // Ensure we only initialize the pointer once for each dex file.
- auto lb = dex_cache_array_bases_.lower_bound(&dex_file);
- if (lb != dex_cache_array_bases_.end() &&
- !dex_cache_array_bases_.key_comp()(&dex_file, lb->first)) {
- return lb->second;
- }
-
- // Insert the base at the start of the entry block, move it to a better
- // position later in MoveBaseIfNeeded().
- HArmDexCacheArraysBase* base = new (GetGraph()->GetArena()) HArmDexCacheArraysBase(dex_file);
- HBasicBlock* entry_block = GetGraph()->GetEntryBlock();
- entry_block->InsertInstructionBefore(base, entry_block->GetFirstInstruction());
- dex_cache_array_bases_.PutBefore(lb, &dex_file, base);
- return base;
- }
- }
-
- CodeGeneratorARMType* codegen_;
-
- using DexCacheArraysBaseMap =
- ArenaSafeMap<const DexFile*, HArmDexCacheArraysBase*, std::less<const DexFile*>>;
- DexCacheArraysBaseMap dex_cache_array_bases_;
-};
-
-void DexCacheArrayFixups::Run() {
- DexCacheArrayFixupsVisitor visitor(graph_, codegen_);
- visitor.VisitInsertionOrder();
- visitor.MoveBasesIfNeeded();
-}
-
-} // namespace arm
-} // namespace art
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.h b/compiler/optimizing/dex_cache_array_fixups_arm.h
deleted file mode 100644
index 9d67a319b9..0000000000
--- a/compiler/optimizing/dex_cache_array_fixups_arm.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_DEX_CACHE_ARRAY_FIXUPS_ARM_H_
-#define ART_COMPILER_OPTIMIZING_DEX_CACHE_ARRAY_FIXUPS_ARM_H_
-
-#include "nodes.h"
-#include "optimization.h"
-
-namespace art {
-
-class CodeGenerator;
-
-namespace arm {
-
-class DexCacheArrayFixups : public HOptimization {
- public:
- DexCacheArrayFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, kDexCacheArrayFixupsArmPassName, stats),
- codegen_(codegen) {}
-
- static constexpr const char* kDexCacheArrayFixupsArmPassName = "dex_cache_array_fixups_arm";
-
- void Run() OVERRIDE;
-
- private:
- CodeGenerator* codegen_;
-};
-
-} // namespace arm
-} // namespace art
-
-#endif // ART_COMPILER_OPTIMIZING_DEX_CACHE_ARRAY_FIXUPS_ARM_H_
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.cc b/compiler/optimizing/dex_cache_array_fixups_mips.cc
deleted file mode 100644
index 7734f9197d..0000000000
--- a/compiler/optimizing/dex_cache_array_fixups_mips.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "code_generator_mips.h"
-#include "dex_cache_array_fixups_mips.h"
-
-#include "base/arena_containers.h"
-#include "intrinsics_mips.h"
-#include "utils/dex_cache_arrays_layout-inl.h"
-
-namespace art {
-namespace mips {
-
-/**
- * Finds instructions that need the dex cache arrays base as an input.
- */
-class DexCacheArrayFixupsVisitor : public HGraphVisitor {
- public:
- explicit DexCacheArrayFixupsVisitor(HGraph* graph, CodeGenerator* codegen)
- : HGraphVisitor(graph),
- codegen_(down_cast<CodeGeneratorMIPS*>(codegen)),
- dex_cache_array_bases_(std::less<const DexFile*>(),
- // Attribute memory use to code generator.
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {}
-
- void MoveBasesIfNeeded() {
- for (const auto& entry : dex_cache_array_bases_) {
- // Bring the base closer to the first use (previously, it was in the
- // entry block) and relieve some pressure on the register allocator
- // while avoiding recalculation of the base in a loop.
- HMipsDexCacheArraysBase* base = entry.second;
- base->MoveBeforeFirstUserAndOutOfLoops();
- }
- // Computing the dex cache base for PC-relative accesses will clobber RA with
- // the NAL instruction on R2. Take a note of this before generating the method
- // entry.
- if (!dex_cache_array_bases_.empty()) {
- codegen_->ClobberRA();
- }
- }
-
- private:
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
- // If this is an invoke with PC-relative access to the dex cache methods array,
- // we need to add the dex cache arrays base as the special input.
- if (invoke->HasPcRelativeDexCache() &&
- !IsCallFreeIntrinsic<IntrinsicLocationsBuilderMIPS>(invoke, codegen_)) {
- // Initialize base for target method dex file if needed.
- HMipsDexCacheArraysBase* base =
- GetOrCreateDexCacheArrayBase(invoke->GetDexFileForPcRelativeDexCache());
- // Update the element offset in base.
- DexCacheArraysLayout layout(kMipsPointerSize, &invoke->GetDexFileForPcRelativeDexCache());
- base->UpdateElementOffset(layout.MethodOffset(invoke->GetDexMethodIndex()));
- // Add the special argument base to the method.
- DCHECK(!invoke->HasCurrentMethodInput());
- invoke->AddSpecialInput(base);
- }
- }
-
- HMipsDexCacheArraysBase* GetOrCreateDexCacheArrayBase(const DexFile& dex_file) {
- return dex_cache_array_bases_.GetOrCreate(
- &dex_file,
- [this, &dex_file]() {
- HMipsDexCacheArraysBase* base =
- new (GetGraph()->GetArena()) HMipsDexCacheArraysBase(dex_file);
- HBasicBlock* entry_block = GetGraph()->GetEntryBlock();
- // Insert the base at the start of the entry block, move it to a better
- // position later in MoveBaseIfNeeded().
- entry_block->InsertInstructionBefore(base, entry_block->GetFirstInstruction());
- return base;
- });
- }
-
- CodeGeneratorMIPS* codegen_;
-
- using DexCacheArraysBaseMap =
- ArenaSafeMap<const DexFile*, HMipsDexCacheArraysBase*, std::less<const DexFile*>>;
- DexCacheArraysBaseMap dex_cache_array_bases_;
-};
-
-void DexCacheArrayFixups::Run() {
- CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen_);
- if (mips_codegen->GetInstructionSetFeatures().IsR6()) {
- // Do nothing for R6 because it has PC-relative addressing.
- return;
- }
- if (graph_->HasIrreducibleLoops()) {
- // Do not run this optimization, as irreducible loops do not work with an instruction
- // that can be live-in at the irreducible loop header.
- return;
- }
- DexCacheArrayFixupsVisitor visitor(graph_, codegen_);
- visitor.VisitInsertionOrder();
- visitor.MoveBasesIfNeeded();
-}
-
-} // namespace mips
-} // namespace art
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.h b/compiler/optimizing/dex_cache_array_fixups_mips.h
deleted file mode 100644
index 861a199d6c..0000000000
--- a/compiler/optimizing/dex_cache_array_fixups_mips.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_DEX_CACHE_ARRAY_FIXUPS_MIPS_H_
-#define ART_COMPILER_OPTIMIZING_DEX_CACHE_ARRAY_FIXUPS_MIPS_H_
-
-#include "nodes.h"
-#include "optimization.h"
-
-namespace art {
-
-class CodeGenerator;
-
-namespace mips {
-
-class DexCacheArrayFixups : public HOptimization {
- public:
- DexCacheArrayFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, kDexCacheArrayFixupsMipsPassName, stats),
- codegen_(codegen) {}
-
- static constexpr const char* kDexCacheArrayFixupsMipsPassName = "dex_cache_array_fixups_mips";
-
- void Run() OVERRIDE;
-
- private:
- CodeGenerator* codegen_;
-};
-
-} // namespace mips
-} // namespace art
-
-#endif // ART_COMPILER_OPTIMIZING_DEX_CACHE_ARRAY_FIXUPS_MIPS_H_
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 02816cf7ce..7dcf2440b2 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -34,6 +34,7 @@
#include "register_allocator_linear_scan.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
+#include "utils/intrusive_forward_list.h"
namespace art {
@@ -66,6 +67,13 @@ class StringList {
current->Dump(NewEntryStream());
}
}
+ // Construct StringList from a list of elements. The value type must provide method `Dump`.
+ template <typename Container>
+ explicit StringList(const Container& list, Format format = kArrayBrackets) : StringList(format) {
+ for (const typename Container::value_type& current : list) {
+ current.Dump(NewEntryStream());
+ }
+ }
std::ostream& NewEntryStream() {
if (is_empty_) {
@@ -584,8 +592,8 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
LiveInterval* interval = instruction->GetLiveInterval();
StartAttributeStream("ranges")
<< StringList(interval->GetFirstRange(), StringList::kSetBrackets);
- StartAttributeStream("uses") << StringList(interval->GetFirstUse());
- StartAttributeStream("env_uses") << StringList(interval->GetFirstEnvironmentUse());
+ StartAttributeStream("uses") << StringList(interval->GetUses());
+ StartAttributeStream("env_uses") << StringList(interval->GetEnvironmentUses());
StartAttributeStream("is_fixed") << interval->IsFixed();
StartAttributeStream("is_split") << interval->IsSplit();
StartAttributeStream("is_low") << interval->IsLowInterval();
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 88473f02e5..84b20f65e3 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -695,8 +695,8 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveOp(HLoopInform
/*fetch*/ nullptr,
type_);
default:
- CHECK(false) << op;
- break;
+ LOG(FATAL) << op;
+ UNREACHABLE();
}
}
}
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 7c833cf70c..c0ec58f824 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -1132,11 +1132,27 @@ bool InductionVarRange::GenerateLastValuePeriodic(HInductionVarAnalysis::Inducti
/*out*/bool* needs_taken_test) const {
DCHECK(info != nullptr);
DCHECK_EQ(info->induction_class, HInductionVarAnalysis::kPeriodic);
- // Count period.
+ // Count period and detect all-invariants.
int64_t period = 1;
- for (HInductionVarAnalysis::InductionInfo* p = info;
- p->induction_class == HInductionVarAnalysis::kPeriodic;
- p = p->op_b, ++period) {}
+ bool all_invariants = true;
+ HInductionVarAnalysis::InductionInfo* p = info;
+ for (; p->induction_class == HInductionVarAnalysis::kPeriodic; p = p->op_b, ++period) {
+ DCHECK_EQ(p->op_a->induction_class, HInductionVarAnalysis::kInvariant);
+ if (p->op_a->operation != HInductionVarAnalysis::kFetch) {
+ all_invariants = false;
+ }
+ }
+ DCHECK_EQ(p->induction_class, HInductionVarAnalysis::kInvariant);
+ if (p->operation != HInductionVarAnalysis::kFetch) {
+ all_invariants = false;
+ }
+ // Don't rely on FP arithmetic to be precise, unless the full period
+ // consists of pre-computed expressions only.
+ if (info->type == Primitive::kPrimFloat || info->type == Primitive::kPrimDouble) {
+ if (!all_invariants) {
+ return false;
+ }
+ }
// Handle any periodic(x, periodic(.., y)) for known maximum index value m.
int64_t m = 0;
if (IsConstant(trip->op_a, kExact, &m) && m >= 1) {
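
The rewritten loop counts the period and, for float/double inductions, additionally refuses last-value generation unless every element of the period is a plain kFetch invariant: the closed form has to re-evaluate the periodic elements outside the loop, and for floating point that is only guaranteed to reproduce the in-loop values when the elements are pre-computed. A self-contained integer example of the kind of closed form being generated (period 2, flip-flopping between x0 and k - x0):

    // In-loop form:             for (int i = 0; i < n; ++i) { x = k - x; }
    // Closed (last-value) form: the result depends only on the parity of n.
    int LastValueOfFlipFlop(int x0, int k, int n) {
      return (n % 2 == 0) ? x0 : k - x0;
    }
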
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 9be6a512f5..142c95780e 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -56,7 +56,7 @@ static constexpr size_t kMaximumNumberOfInstructionsForSmallMethod = 3;
// Limit the number of dex registers that we accumulate while inlining
// to avoid creating large amount of nested environments.
-static constexpr size_t kMaximumNumberOfCumulatedDexRegisters = 64;
+static constexpr size_t kMaximumNumberOfCumulatedDexRegisters = 32;
// Limit recursive call inlining, which do not benefit from too
// much inlining compared to code locality.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index df9e7164ed..a73b1246d8 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -888,7 +888,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
}
HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
- HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall,
HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
0u
};
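
The builder now starts every static or direct invoke with the conservative kRuntimeCall dispatch; a later sharpening step may upgrade it, for example to the new kBssEntry kind for methods outside the boot image (per the nodes.h comment later in this change). A hedged sketch of what such an upgraded DispatchInfo would look like, reusing the aggregate shape above — whether kBssEntry is actually chosen depends on compiler options and is decided elsewhere:

    HInvokeStaticOrDirect::DispatchInfo sharpened = {
        HInvokeStaticOrDirect::MethodLoadKind::kBssEntry,
        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
        0u  // method_load_data is not used by kBssEntry.
    };
    // invoke->SetDispatchInfo(sharpened);  // setter name assumed, not verified here.
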
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index b1d2727e39..b664d41013 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -25,7 +25,7 @@
#include "mirror/dex_cache-inl.h"
#include "nodes.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils.h"
namespace art {
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 9803c9a0e9..ae5f8d1760 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -28,7 +28,7 @@
#include "mirror/reference.h"
#include "mirror/string.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils/arm/assembler_arm.h"
namespace art {
@@ -2624,58 +2624,6 @@ void IntrinsicCodeGeneratorARM::VisitDoubleIsInfinite(HInvoke* invoke) {
codegen_->GenerateConditionWithZero(kCondEQ, out, out);
}
-void IntrinsicLocationsBuilderARM::VisitReferenceGetReferent(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier) {
- // Do not intrinsify this call with the read barrier configuration.
- return;
- }
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
- locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorARM::VisitReferenceGetReferent(HInvoke* invoke) {
- DCHECK(!kEmitCompilerReadBarrier);
- ArmAssembler* const assembler = GetAssembler();
- LocationSummary* locations = invoke->GetLocations();
-
- Register obj = locations->InAt(0).AsRegister<Register>();
- Register out = locations->Out().AsRegister<Register>();
-
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
- codegen_->AddSlowPath(slow_path);
-
- // Load ArtMethod first.
- HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
- DCHECK(invoke_direct != nullptr);
- Register temp = codegen_->GenerateCalleeMethodStaticOrDirectCall(
- invoke_direct, locations->GetTemp(0)).AsRegister<Register>();
-
- // Now get declaring class.
- __ ldr(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
-
- uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
- uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
- DCHECK_NE(slow_path_flag_offset, 0u);
- DCHECK_NE(disable_flag_offset, 0u);
- DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
-
- // Check static flags that prevent using intrinsic.
- __ ldr(IP, Address(temp, disable_flag_offset));
- __ ldr(temp, Address(temp, slow_path_flag_offset));
- __ orr(IP, IP, ShifterOperand(temp));
- __ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
-
- // Fast path.
- __ ldr(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ MaybeUnpoisonHeapReference(out);
- __ Bind(slow_path->GetExitLabel());
-}
-
void IntrinsicLocationsBuilderARM::VisitIntegerValueOf(HInvoke* invoke) {
InvokeRuntimeCallingConvention calling_convention;
IntrinsicVisitor::ComputeIntegerValueOfLocations(
@@ -2782,6 +2730,7 @@ UNIMPLEMENTED_INTRINSIC(ARM, MathRoundDouble) // Could be done by changing rou
UNIMPLEMENTED_INTRINSIC(ARM, MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARM, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARM, SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(ARM, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARM, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index b511c5a18d..37d79814be 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -28,7 +28,7 @@
#include "mirror/reference.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils/arm64/assembler_arm64.h"
using namespace vixl::aarch64; // NOLINT(build/namespaces)
@@ -124,12 +124,12 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
// are no pools emitted.
vixl::EmissionCheckScope guard(codegen->GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes);
if (invoke_->IsInvokeStaticOrDirect()) {
- codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
- LocationFrom(kArtMethodRegister));
+ codegen->GenerateStaticOrDirectCall(
+ invoke_->AsInvokeStaticOrDirect(), LocationFrom(kArtMethodRegister), this);
} else {
- codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), LocationFrom(kArtMethodRegister));
+ codegen->GenerateVirtualCall(
+ invoke_->AsInvokeVirtual(), LocationFrom(kArtMethodRegister), this);
}
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
}
// Copy the result back to the expected output.
@@ -2897,69 +2897,6 @@ void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
}
-void IntrinsicLocationsBuilderARM64::VisitReferenceGetReferent(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier) {
- // Do not intrinsify this call with the read barrier configuration.
- return;
- }
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
- locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorARM64::VisitReferenceGetReferent(HInvoke* invoke) {
- DCHECK(!kEmitCompilerReadBarrier);
- MacroAssembler* masm = GetVIXLAssembler();
- LocationSummary* locations = invoke->GetLocations();
-
- Register obj = InputRegisterAt(invoke, 0);
- Register out = OutputRegister(invoke);
-
- SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
- codegen_->AddSlowPath(slow_path);
-
- // Load ArtMethod first.
- HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
- DCHECK(invoke_direct != nullptr);
- Register temp0 = XRegisterFrom(codegen_->GenerateCalleeMethodStaticOrDirectCall(
- invoke_direct, locations->GetTemp(0)));
-
- // Now get declaring class.
- __ Ldr(temp0.W(), MemOperand(temp0, ArtMethod::DeclaringClassOffset().Int32Value()));
-
- uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
- uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
- DCHECK_NE(slow_path_flag_offset, 0u);
- DCHECK_NE(disable_flag_offset, 0u);
- DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
-
- // Check static flags that prevent using intrinsic.
- if (slow_path_flag_offset == disable_flag_offset + 1) {
- // Load two adjacent flags in one 64-bit load.
- __ Ldr(temp0, MemOperand(temp0, disable_flag_offset));
- } else {
- UseScratchRegisterScope temps(masm);
- Register temp1 = temps.AcquireW();
- __ Ldr(temp1.W(), MemOperand(temp0, disable_flag_offset));
- __ Ldr(temp0.W(), MemOperand(temp0, slow_path_flag_offset));
- __ Orr(temp0, temp1, temp0);
- }
- __ Cbnz(temp0, slow_path->GetEntryLabel());
-
- {
- // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
- vixl::EmissionCheckScope guard(codegen_->GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
- // Fast path.
- __ Ldr(out, HeapOperand(obj, mirror::Reference::ReferentOffset().Int32Value()));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- }
- codegen_->GetAssembler()->MaybeUnpoisonHeapReference(out);
- __ Bind(slow_path->GetExitLabel());
-}
-
void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
InvokeRuntimeCallingConvention calling_convention;
IntrinsicVisitor::ComputeIntegerValueOfLocations(
@@ -3055,6 +2992,7 @@ void IntrinsicCodeGeneratorARM64::VisitThreadInterrupted(HInvoke* invoke) {
__ Bind(&done);
}
+UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 1a33b0ee01..3c9b613803 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -26,7 +26,7 @@
#include "mirror/reference.h"
#include "mirror/string.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "aarch32/constants-aarch32.h"
@@ -97,11 +97,10 @@ class IntrinsicSlowPathARMVIXL : public SlowPathCodeARMVIXL {
Location method_loc = MoveArguments(codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
- codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), method_loc);
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), method_loc, this);
} else {
- codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), method_loc);
+ codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), method_loc, this);
}
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
@@ -3000,60 +2999,6 @@ void IntrinsicCodeGeneratorARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
codegen_->GenerateConditionWithZero(kCondEQ, out, out);
}
-void IntrinsicLocationsBuilderARMVIXL::VisitReferenceGetReferent(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier) {
- // Do not intrinsify this call with the read barrier configuration.
- return;
- }
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
- locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitReferenceGetReferent(HInvoke* invoke) {
- DCHECK(!kEmitCompilerReadBarrier);
- ArmVIXLAssembler* assembler = GetAssembler();
- LocationSummary* locations = invoke->GetLocations();
-
- vixl32::Register obj = InputRegisterAt(invoke, 0);
- vixl32::Register out = OutputRegister(invoke);
-
- SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
- codegen_->AddSlowPath(slow_path);
-
- // Load ArtMethod first.
- HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
- DCHECK(invoke_direct != nullptr);
- vixl32::Register temp0 = RegisterFrom(codegen_->GenerateCalleeMethodStaticOrDirectCall(
- invoke_direct, locations->GetTemp(0)));
-
- // Now get declaring class.
- __ Ldr(temp0, MemOperand(temp0, ArtMethod::DeclaringClassOffset().Int32Value()));
-
- uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
- uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
- DCHECK_NE(slow_path_flag_offset, 0u);
- DCHECK_NE(disable_flag_offset, 0u);
- DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
-
- // Check static flags that prevent using intrinsic.
- UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
- vixl32::Register temp1 = temps.Acquire();
- __ Ldr(temp1, MemOperand(temp0, disable_flag_offset));
- __ Ldr(temp0, MemOperand(temp0, slow_path_flag_offset));
- __ Orr(temp0, temp1, temp0);
- __ CompareAndBranchIfNonZero(temp0, slow_path->GetEntryLabel());
-
- // Fast path.
- __ Ldr(out, MemOperand(obj, mirror::Reference::ReferentOffset().Int32Value()));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- assembler->MaybeUnpoisonHeapReference(out);
- __ Bind(slow_path->GetExitLabel());
-}
-
void IntrinsicLocationsBuilderARMVIXL::VisitMathCeil(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
CreateFPToFPLocations(arena_, invoke);
@@ -3178,6 +3123,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, IntegerLowestOneBit)
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 4731da1ea9..4cea6dfdfb 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -23,6 +23,7 @@
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "utils/mips/assembler_mips.h"
#include "utils/mips/constants_mips.h"
@@ -111,12 +112,12 @@ class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS {
MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
- codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
- Location::RegisterLocation(A0));
+ codegen->GenerateStaticOrDirectCall(
+ invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(A0), this);
} else {
- codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), Location::RegisterLocation(A0));
+ codegen->GenerateVirtualCall(
+ invoke_->AsInvokeVirtual(), Location::RegisterLocation(A0), this);
}
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 00afbcd8f2..d785567e0f 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -23,6 +23,7 @@
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/mips64/constants_mips64.h"
@@ -100,12 +101,12 @@ class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
- codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
- Location::RegisterLocation(A0));
+ codegen->GenerateStaticOrDirectCall(
+ invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(A0), this);
} else {
- codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), Location::RegisterLocation(A0));
+ codegen->GenerateVirtualCall(
+ invoke_->AsInvokeVirtual(), Location::RegisterLocation(A0), this);
}
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h
index c1f9ae6425..8c69d9b643 100644
--- a/compiler/optimizing/intrinsics_utils.h
+++ b/compiler/optimizing/intrinsics_utils.h
@@ -56,11 +56,10 @@ class IntrinsicSlowPath : public SlowPathCode {
Location method_loc = MoveArguments(codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
- codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), method_loc);
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), method_loc, this);
} else {
- codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), method_loc);
+ codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), method_loc, this);
}
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
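
Across all the intrinsic slow paths above, the standalone RecordPcInfo call disappears because GenerateStaticOrDirectCall/GenerateVirtualCall now take the slow path as an argument and record the safepoint themselves, tied to the call instruction they actually emit. A rough sketch of the callee side of that contract — per-architecture implementations differ, so this shows only the shape, not ART code:

    void CodeGeneratorSketch::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
                                                         Location temp,
                                                         SlowPathCode* slow_path) {
      // 1. Load the target ArtMethod* into `temp` per invoke->GetMethodLoadKind()
      //    (recursive, direct address, boot-image PC-relative, .bss entry, ...).
      // 2. Emit the call through the chosen code pointer location.
      // 3. Record the safepoint here, once, with the slow path the caller passed in:
      //    RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
    }
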
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 57adcc3c2f..6b4851d541 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -31,7 +31,7 @@
#include "mirror/reference.h"
#include "mirror/string.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils/x86/assembler_x86.h"
#include "utils/x86/constants_x86.h"
@@ -796,7 +796,6 @@ static void InvokeOutOfLineIntrinsic(CodeGeneratorX86* codegen, HInvoke* invoke)
DCHECK(invoke->IsInvokeStaticOrDirect());
codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(),
Location::RegisterLocation(EAX));
- codegen->RecordPcInfo(invoke, invoke->GetDexPc());
// Copy the result back to the expected output.
Location out = invoke->GetLocations()->Out();
@@ -2819,65 +2818,6 @@ void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke)
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-void IntrinsicLocationsBuilderX86::VisitReferenceGetReferent(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier) {
- // Do not intrinsify this call with the read barrier configuration.
- return;
- }
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
- locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorX86::VisitReferenceGetReferent(HInvoke* invoke) {
- DCHECK(!kEmitCompilerReadBarrier);
- LocationSummary* locations = invoke->GetLocations();
- X86Assembler* assembler = GetAssembler();
-
- Register obj = locations->InAt(0).AsRegister<Register>();
- Register out = locations->Out().AsRegister<Register>();
-
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
- codegen_->AddSlowPath(slow_path);
-
- // Load ArtMethod first.
- HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
- DCHECK(invoke_direct != nullptr);
- Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
- invoke_direct, locations->GetTemp(0));
- DCHECK(temp_loc.Equals(locations->GetTemp(0)));
- Register temp = temp_loc.AsRegister<Register>();
-
- // Now get declaring class.
- __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
-
- uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
- uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
- DCHECK_NE(slow_path_flag_offset, 0u);
- DCHECK_NE(disable_flag_offset, 0u);
- DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
-
- // Check static flags preventing us for using intrinsic.
- if (slow_path_flag_offset == disable_flag_offset + 1) {
- __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- } else {
- __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
-
- // Fast path.
- __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ MaybeUnpoisonHeapReference(out);
- __ Bind(slow_path->GetExitLabel());
-}
-
static bool IsSameInput(HInstruction* instruction, size_t input0, size_t input1) {
return instruction->InputAt(input0) == instruction->InputAt(input1);
}
@@ -3429,6 +3369,7 @@ void IntrinsicCodeGeneratorX86::VisitThreadInterrupted(HInvoke* invoke) {
UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 773383ef1b..ef98b7be30 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -31,7 +31,7 @@
#include "mirror/reference.h"
#include "mirror/string.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils/x86_64/assembler_x86_64.h"
#include "utils/x86_64/constants_x86_64.h"
@@ -567,7 +567,6 @@ static void InvokeOutOfLineIntrinsic(CodeGeneratorX86_64* codegen, HInvoke* invo
DCHECK(invoke->IsInvokeStaticOrDirect());
codegen->GenerateStaticOrDirectCall(
invoke->AsInvokeStaticOrDirect(), Location::RegisterLocation(RDI));
- codegen->RecordPcInfo(invoke, invoke->GetDexPc());
// Copy the result back to the expected output.
Location out = invoke->GetLocations()->Out();
@@ -2959,65 +2958,6 @@ void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invok
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-void IntrinsicLocationsBuilderX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier) {
- // Do not intrinsify this call with the read barrier configuration.
- return;
- }
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
- locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
- DCHECK(!kEmitCompilerReadBarrier);
- LocationSummary* locations = invoke->GetLocations();
- X86_64Assembler* assembler = GetAssembler();
-
- CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
- codegen_->AddSlowPath(slow_path);
-
- // Load ArtMethod first.
- HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
- DCHECK(invoke_direct != nullptr);
- Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
- invoke_direct, locations->GetTemp(0));
- DCHECK(temp_loc.Equals(locations->GetTemp(0)));
- CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
-
- // Now get declaring class.
- __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
-
- uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
- uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
- DCHECK_NE(slow_path_flag_offset, 0u);
- DCHECK_NE(disable_flag_offset, 0u);
- DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
-
- // Check static flags preventing us for using intrinsic.
- if (slow_path_flag_offset == disable_flag_offset + 1) {
- __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- } else {
- __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
-
- // Fast path.
- __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ MaybeUnpoisonHeapReference(out);
- __ Bind(slow_path->GetExitLabel());
-}
-
void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
InvokeRuntimeCallingConvention calling_convention;
IntrinsicVisitor::ComputeIntegerValueOfLocations(
@@ -3106,6 +3046,7 @@ void IntrinsicCodeGeneratorX86_64::VisitThreadInterrupted(HInvoke* invoke) {
__ Bind(&done);
}
+UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 94787c99b2..9c8a632d40 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -499,6 +499,7 @@ void HLoopOptimization::OptimizeInnerLoop(LoopNode* node) {
body = it.Current();
}
}
+ CHECK(body != nullptr);
// Ensure there is only a single exit point.
if (header->GetSuccessors().size() != 2) {
return;
@@ -811,6 +812,11 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
}
return true;
} else if (instruction->IsArrayGet()) {
+ // Deal with vector restrictions.
+ if (instruction->AsArrayGet()->IsStringCharAt() &&
+ HasVectorRestrictions(restrictions, kNoStringCharAt)) {
+ return false;
+ }
// Accept a right-hand-side array base[index] for
// (1) exact matching vector type,
// (2) loop-invariant base,
@@ -1072,9 +1078,36 @@ bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restric
}
return false;
case kMips:
- case kMips64:
// TODO: implement MIPS SIMD.
return false;
+ case kMips64:
+ if (features->AsMips64InstructionSetFeatures()->HasMsa()) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ *restrictions |= kNoDiv;
+ return TrySetVectorLength(16);
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ *restrictions |= kNoDiv | kNoStringCharAt;
+ return TrySetVectorLength(8);
+ case Primitive::kPrimInt:
+ *restrictions |= kNoDiv;
+ return TrySetVectorLength(4);
+ case Primitive::kPrimLong:
+ *restrictions |= kNoDiv;
+ return TrySetVectorLength(2);
+ case Primitive::kPrimFloat:
+ *restrictions |= kNoMinMax; // min/max(x, NaN)
+ return TrySetVectorLength(4);
+ case Primitive::kPrimDouble:
+ *restrictions |= kNoMinMax; // min/max(x, NaN)
+ return TrySetVectorLength(2);
+ default:
+ break;
+ } // switch type
+ }
+ return false;
default:
return false;
} // switch instruction set
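
The MIPS64 lengths above follow directly from MSA's 128-bit vector registers: the number of lanes is 16 bytes divided by the element size, and the kNoDiv / kNoStringCharAt / kNoMinMax bits record operations the vectorizer must not attempt for that type. A self-contained check of the lane counts:

    #include <cstddef>

    constexpr size_t kMsaRegisterSizeInBytes = 16;  // MSA vector registers are 128-bit.

    constexpr size_t MsaLanes(size_t element_size_in_bytes) {
      return kMsaRegisterSizeInBytes / element_size_in_bytes;
    }

    static_assert(MsaLanes(1) == 16, "boolean/byte lanes");
    static_assert(MsaLanes(2) == 8,  "char/short lanes");
    static_assert(MsaLanes(4) == 4,  "int/float lanes");
    static_assert(MsaLanes(8) == 2,  "long/double lanes");
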
@@ -1270,9 +1303,10 @@ void HLoopOptimization::GenerateVecOp(HInstruction* org,
// corresponding new scalar instructions in the loop. The instruction will get an
// environment while being inserted from the instruction map in original program order.
DCHECK(vector_mode_ == kSequential);
+ size_t num_args = invoke->GetNumberOfArguments();
HInvokeStaticOrDirect* new_invoke = new (global_allocator_) HInvokeStaticOrDirect(
global_allocator_,
- invoke->GetNumberOfArguments(),
+ num_args,
invoke->GetType(),
invoke->GetDexPc(),
invoke->GetDexMethodIndex(),
@@ -1282,8 +1316,14 @@ void HLoopOptimization::GenerateVecOp(HInstruction* org,
invoke->GetTargetMethod(),
invoke->GetClinitCheckRequirement());
HInputsRef inputs = invoke->GetInputs();
- for (size_t index = 0; index < inputs.size(); ++index) {
- new_invoke->SetArgumentAt(index, vector_map_->Get(inputs[index]));
+ size_t num_inputs = inputs.size();
+ DCHECK_LE(num_args, num_inputs);
+ DCHECK_EQ(num_inputs, new_invoke->GetInputs().size()); // both invokes agree
+ for (size_t index = 0; index < num_inputs; ++index) {
+ HInstruction* new_input = index < num_args
+ ? vector_map_->Get(inputs[index])
+ : inputs[index]; // beyond arguments: just pass through
+ new_invoke->SetArgumentAt(index, new_input);
}
new_invoke->SetIntrinsic(invoke->GetIntrinsic(),
kNeedsEnvironmentOrCache,
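
The cloning fix above accounts for an HInvokeStaticOrDirect carrying more inputs than its declared arguments (such as an explicit clinit check or a special method-load input appended at the end): only the first GetNumberOfArguments() inputs get their vectorized replacements from vector_map_, and anything beyond that is passed through unchanged. A self-contained illustration of that copy rule, with plain ints standing in for HInstruction pointers:

    #include <cstddef>
    #include <vector>

    // Inputs [0, num_args) are replaced via `map`; inputs [num_args, size) are kept as-is.
    std::vector<int> CloneInvokeInputs(const std::vector<int>& inputs,
                                       size_t num_args,
                                       int (*map)(int)) {
      std::vector<int> result;
      result.reserve(inputs.size());
      for (size_t i = 0; i != inputs.size(); ++i) {
        result.push_back(i < num_args ? map(inputs[i]) : inputs[i]);
      }
      return result;
    }
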
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 35298d4076..75a42f3297 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -72,6 +72,7 @@ class HLoopOptimization : public HOptimization {
kNoUnroundedHAdd = 64, // no unrounded halving add
kNoAbs = 128, // no absolute value
kNoMinMax = 256, // no min/max
+ kNoStringCharAt = 512, // no StringCharAt
};
/*
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 689991010e..d0047c54f2 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2623,7 +2623,7 @@ const DexFile& HInvokeStaticOrDirect::GetDexFileForPcRelativeDexCache() const {
}
bool HInvokeStaticOrDirect::NeedsDexCacheOfDeclaringClass() const {
- if (GetMethodLoadKind() != MethodLoadKind::kDexCacheViaMethod) {
+ if (GetMethodLoadKind() != MethodLoadKind::kRuntimeCall) {
return false;
}
if (!IsIntrinsic()) {
@@ -2643,10 +2643,10 @@ std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind
return os << "BootImageLinkTimePcRelative";
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
return os << "DirectAddress";
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- return os << "DexCachePcRelative";
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod:
- return os << "DexCacheViaMethod";
+ case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry:
+ return os << "BssEntry";
+ case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall:
+ return os << "RuntimeCall";
default:
LOG(FATAL) << "Unknown MethodLoadKind: " << static_cast<int>(rhs);
UNREACHABLE();
@@ -2690,7 +2690,7 @@ bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
void HLoadClass::SetLoadKind(LoadKind load_kind) {
SetPackedField<LoadKindField>(load_kind);
- if (load_kind != LoadKind::kDexCacheViaMethod &&
+ if (load_kind != LoadKind::kRuntimeCall &&
load_kind != LoadKind::kReferrersClass) {
RemoveAsUserOfInput(0u);
SetRawInputAt(0u, nullptr);
@@ -2714,8 +2714,8 @@ std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs) {
return os << "BssEntry";
case HLoadClass::LoadKind::kJitTableAddress:
return os << "JitTableAddress";
- case HLoadClass::LoadKind::kDexCacheViaMethod:
- return os << "DexCacheViaMethod";
+ case HLoadClass::LoadKind::kRuntimeCall:
+ return os << "RuntimeCall";
default:
LOG(FATAL) << "Unknown HLoadClass::LoadKind: " << static_cast<int>(rhs);
UNREACHABLE();
@@ -2743,10 +2743,10 @@ bool HLoadString::InstructionDataEquals(const HInstruction* other) const {
void HLoadString::SetLoadKind(LoadKind load_kind) {
// Once sharpened, the load kind should not be changed again.
- DCHECK_EQ(GetLoadKind(), LoadKind::kDexCacheViaMethod);
+ DCHECK_EQ(GetLoadKind(), LoadKind::kRuntimeCall);
SetPackedField<LoadKindField>(load_kind);
- if (load_kind != LoadKind::kDexCacheViaMethod) {
+ if (load_kind != LoadKind::kRuntimeCall) {
RemoveAsUserOfInput(0u);
SetRawInputAt(0u, nullptr);
}
@@ -2766,8 +2766,8 @@ std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs) {
return os << "BssEntry";
case HLoadString::LoadKind::kJitTableAddress:
return os << "JitTableAddress";
- case HLoadString::LoadKind::kDexCacheViaMethod:
- return os << "DexCacheViaMethod";
+ case HLoadString::LoadKind::kRuntimeCall:
+ return os << "RuntimeCall";
default:
LOG(FATAL) << "Unknown HLoadString::LoadKind: " << static_cast<int>(rhs);
UNREACHABLE();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 4d96fbe24c..ffa16dd787 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1410,12 +1410,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(IntermediateAddressIndex, Instruction)
#endif
-#ifndef ART_ENABLE_CODEGEN_arm
#define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)
-#else
-#define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M) \
- M(ArmDexCacheArraysBase, Instruction)
-#endif
#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
@@ -1424,7 +1419,6 @@ class HLoopInformationOutwardIterator : public ValueObject {
#else
#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) \
M(MipsComputeBaseMethodAddress, Instruction) \
- M(MipsDexCacheArraysBase, Instruction) \
M(MipsPackedSwitch, Instruction)
#endif
@@ -1485,7 +1479,8 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
H##type* As##type() { return this; }
template <typename T>
-class HUseListNode : public ArenaObject<kArenaAllocUseListNode> {
+class HUseListNode : public ArenaObject<kArenaAllocUseListNode>,
+ public IntrusiveForwardListNode<HUseListNode<T>> {
public:
// Get the instruction which has this use as one of the inputs.
T GetUser() const { return user_; }
@@ -1494,10 +1489,6 @@ class HUseListNode : public ArenaObject<kArenaAllocUseListNode> {
// Set the position of the input record that this use corresponds to.
void SetIndex(size_t index) { index_ = index; }
- // Hook for the IntrusiveForwardList<>.
- // TODO: Hide this better.
- IntrusiveForwardListHook hook;
-
private:
HUseListNode(T user, size_t index)
: user_(user), index_(index) {}
@@ -1790,7 +1781,7 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
uint32_t dex_pc,
HInstruction* holder)
: vregs_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentVRegs)),
- locations_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentLocations)),
+ locations_(arena->Adapter(kArenaAllocEnvironmentLocations)),
parent_(nullptr),
method_(method),
dex_pc_(dex_pc),
@@ -1804,6 +1795,11 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
to_copy.GetDexPc(),
holder) {}
+ void AllocateLocations() {
+ DCHECK(locations_.empty());
+ locations_.resize(vregs_.size());
+ }
+
void SetAndCopyParentChain(ArenaAllocator* allocator, HEnvironment* parent) {
if (parent_ != nullptr) {
parent_->SetAndCopyParentChain(allocator, parent);
@@ -4161,17 +4157,13 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
// Used for app->boot calls with non-relocatable image and for JIT-compiled calls.
kDirectAddress,
- // Load from resolved methods array in the dex cache using a PC-relative load.
- // Used when we need to use the dex cache, for example for invoke-static that
- // may cause class initialization (the entry may point to a resolution method),
- // and we know that we can access the dex cache arrays using a PC-relative load.
- kDexCachePcRelative,
-
- // Use ArtMethod* from the resolved methods of the compiled method's own ArtMethod*.
- // Used for JIT when we need to use the dex cache. This is also the last-resort-kind
- // used when other kinds are unavailable (say, dex cache arrays are not PC-relative)
- // or unimplemented or impractical (i.e. slow) on a particular architecture.
- kDexCacheViaMethod,
+ // Load from an entry in the .bss section using a PC-relative load.
+    // Used for methods outside boot image when .bss is accessible with a PC-relative load.
+ kBssEntry,
+
+ // Make a runtime call to resolve and call the method. This is the last-resort-kind
+ // used when other kinds are unimplemented on a particular architecture.
+ kRuntimeCall,
};
// Determines the location of the code pointer.
@@ -4192,7 +4184,6 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
// - thread entrypoint offset for kStringInit method if this is a string init invoke.
// Note that there are multiple string init methods, each having its own offset.
// - the method address for kDirectAddress
- // - the dex cache arrays offset for kDexCachePcRel.
uint64_t method_load_data;
};
@@ -4293,12 +4284,9 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
bool NeedsDexCacheOfDeclaringClass() const OVERRIDE;
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kDirectAddress; }
- bool HasPcRelativeDexCache() const {
- return GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative;
- }
bool HasPcRelativeMethodLoadKind() const {
return GetMethodLoadKind() == MethodLoadKind::kBootImageLinkTimePcRelative ||
- GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative;
+ GetMethodLoadKind() == MethodLoadKind::kBssEntry;
}
bool HasCurrentMethodInput() const {
// This function can be called only after the invoke has been fully initialized by the builder.
@@ -4322,11 +4310,6 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
return dispatch_info_.method_load_data;
}
- uint32_t GetDexCacheArrayOffset() const {
- DCHECK(HasPcRelativeDexCache());
- return dispatch_info_.method_load_data;
- }
-
const DexFile& GetDexFileForPcRelativeDexCache() const;
ClinitCheckRequirement GetClinitCheckRequirement() const {
@@ -4371,7 +4354,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
// Does this method load kind need the current method as an input?
static bool NeedsCurrentMethodInput(MethodLoadKind kind) {
- return kind == MethodLoadKind::kRecursive || kind == MethodLoadKind::kDexCacheViaMethod;
+ return kind == MethodLoadKind::kRecursive || kind == MethodLoadKind::kRuntimeCall;
}
DECLARE_INSTRUCTION(InvokeStaticOrDirect);
@@ -5687,12 +5670,11 @@ class HLoadClass FINAL : public HInstruction {
// Load from the root table associated with the JIT compiled method.
kJitTableAddress,
- // Load from resolved types array accessed through the class loaded from
- // the compiled method's own ArtMethod*. This is the default access type when
- // all other types are unavailable.
- kDexCacheViaMethod,
+ // Load using a simple runtime call. This is the fall-back load kind when
+ // the codegen is unable to use another appropriate kind.
+ kRuntimeCall,
- kLast = kDexCacheViaMethod
+ kLast = kRuntimeCall
};
HLoadClass(HCurrentMethod* current_method,
@@ -5713,7 +5695,7 @@ class HLoadClass FINAL : public HInstruction {
DCHECK(!is_referrers_class || !needs_access_check);
SetPackedField<LoadKindField>(
- is_referrers_class ? LoadKind::kReferrersClass : LoadKind::kDexCacheViaMethod);
+ is_referrers_class ? LoadKind::kReferrersClass : LoadKind::kRuntimeCall);
SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
SetPackedFlag<kFlagIsInBootImage>(false);
SetPackedFlag<kFlagGenerateClInitCheck>(false);
@@ -5747,7 +5729,7 @@ class HLoadClass FINAL : public HInstruction {
bool CanCallRuntime() const {
return NeedsAccessCheck() ||
MustGenerateClinitCheck() ||
- GetLoadKind() == LoadKind::kDexCacheViaMethod ||
+ GetLoadKind() == LoadKind::kRuntimeCall ||
GetLoadKind() == LoadKind::kBssEntry;
}
@@ -5757,7 +5739,7 @@ class HLoadClass FINAL : public HInstruction {
// If the class is in the boot image, the lookup in the runtime call cannot throw.
// This keeps CanThrow() consistent between non-PIC (using kBootImageAddress) and
// PIC and subsequently avoids a DCE behavior dependency on the PIC option.
- ((GetLoadKind() == LoadKind::kDexCacheViaMethod ||
+ ((GetLoadKind() == LoadKind::kRuntimeCall ||
GetLoadKind() == LoadKind::kBssEntry) &&
!IsInBootImage());
}
@@ -5776,7 +5758,7 @@ class HLoadClass FINAL : public HInstruction {
const DexFile& GetDexFile() const { return dex_file_; }
bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
- return GetLoadKind() == LoadKind::kDexCacheViaMethod;
+ return GetLoadKind() == LoadKind::kRuntimeCall;
}
static SideEffects SideEffectsForArchRuntimeCalls() {
@@ -5827,12 +5809,12 @@ class HLoadClass FINAL : public HInstruction {
return load_kind == LoadKind::kReferrersClass ||
load_kind == LoadKind::kBootImageLinkTimePcRelative ||
load_kind == LoadKind::kBssEntry ||
- load_kind == LoadKind::kDexCacheViaMethod;
+ load_kind == LoadKind::kRuntimeCall;
}
void SetLoadKindInternal(LoadKind load_kind);
- // The special input is the HCurrentMethod for kDexCacheViaMethod or kReferrersClass.
+ // The special input is the HCurrentMethod for kRuntimeCall or kReferrersClass.
// For other load kinds it's empty or possibly some architecture-specific instruction
// for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
@@ -5841,7 +5823,7 @@ class HLoadClass FINAL : public HInstruction {
// - The compiling method's dex file if the class is defined there too.
// - The compiling method's dex file if the class is referenced there.
// - The dex file where the class is defined. When the load kind can only be
- // kBssEntry or kDexCacheViaMethod, we cannot emit code for this `HLoadClass`.
+ // kBssEntry or kRuntimeCall, we cannot emit code for this `HLoadClass`.
const dex::TypeIndex type_index_;
const DexFile& dex_file_;
@@ -5884,12 +5866,11 @@ class HLoadString FINAL : public HInstruction {
// Load from the root table associated with the JIT compiled method.
kJitTableAddress,
- // Load from resolved strings array accessed through the class loaded from
- // the compiled method's own ArtMethod*. This is the default access type when
- // all other types are unavailable.
- kDexCacheViaMethod,
+ // Load using a simple runtime call. This is the fall-back load kind when
+ // the codegen is unable to use another appropriate kind.
+ kRuntimeCall,
- kLast = kDexCacheViaMethod,
+ kLast = kRuntimeCall,
};
HLoadString(HCurrentMethod* current_method,
@@ -5900,7 +5881,7 @@ class HLoadString FINAL : public HInstruction {
special_input_(HUserRecord<HInstruction*>(current_method)),
string_index_(string_index),
dex_file_(dex_file) {
- SetPackedField<LoadKindField>(LoadKind::kDexCacheViaMethod);
+ SetPackedField<LoadKindField>(LoadKind::kRuntimeCall);
}
void SetLoadKind(LoadKind load_kind);
@@ -5944,7 +5925,7 @@ class HLoadString FINAL : public HInstruction {
}
bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
- return GetLoadKind() == LoadKind::kDexCacheViaMethod;
+ return GetLoadKind() == LoadKind::kRuntimeCall;
}
bool CanBeNull() const OVERRIDE { return false; }
@@ -5978,7 +5959,7 @@ class HLoadString FINAL : public HInstruction {
void SetLoadKindInternal(LoadKind load_kind);
- // The special input is the HCurrentMethod for kDexCacheViaMethod.
+ // The special input is the HCurrentMethod for kRuntimeCall.
// For other load kinds it's empty or possibly some architecture-specific instruction
// for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
@@ -6878,9 +6859,6 @@ class HParallelMove FINAL : public HTemplateInstruction<0> {
#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
#include "nodes_shared.h"
#endif
-#ifdef ART_ENABLE_CODEGEN_arm
-#include "nodes_arm.h"
-#endif
#ifdef ART_ENABLE_CODEGEN_mips
#include "nodes_mips.h"
#endif
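
With the explicit hook member gone, HUseListNode (and, later in this change, UsePosition and EnvUsePosition) derives from IntrusiveForwardListNode<>, the usual pattern for intrusive singly linked lists: the element itself carries the next link, so the list never allocates per-node storage of its own. A self-contained sketch of the idea, simplified and not the ART container:

    #include <cassert>

    // Minimal CRTP-style node base: the element itself stores the `next` link.
    template <typename T>
    struct IntrusiveNode {
      T* next = nullptr;
    };

    template <typename T>
    class IntrusiveList {
     public:
      void push_front(T& node) {
        node.next = head_;
        head_ = &node;
      }
      T* front() const { return head_; }
     private:
      T* head_ = nullptr;
    };

    struct Use : IntrusiveNode<Use> {
      explicit Use(int position) : position(position) {}
      int position;
    };

    int main() {
      Use u1(8), u2(7), u3(6);
      IntrusiveList<Use> uses;
      uses.push_front(u1);
      uses.push_front(u2);
      uses.push_front(u3);  // List is now 6 -> 7 -> 8; the list allocated nothing.
      assert(uses.front()->position == 6);
      assert(uses.front()->next->position == 7);
      return 0;
    }
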
diff --git a/compiler/optimizing/nodes_arm.h b/compiler/optimizing/nodes_arm.h
deleted file mode 100644
index d9f9740e73..0000000000
--- a/compiler/optimizing/nodes_arm.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_NODES_ARM_H_
-#define ART_COMPILER_OPTIMIZING_NODES_ARM_H_
-
-namespace art {
-
-class HArmDexCacheArraysBase FINAL : public HExpression<0> {
- public:
- explicit HArmDexCacheArraysBase(const DexFile& dex_file)
- : HExpression(Primitive::kPrimInt, SideEffects::None(), kNoDexPc),
- dex_file_(&dex_file),
- element_offset_(static_cast<size_t>(-1)) { }
-
- bool CanBeMoved() const OVERRIDE { return true; }
-
- void UpdateElementOffset(size_t element_offset) {
- // Use the lowest offset from the requested elements so that all offsets from
- // this base are non-negative because our assemblers emit negative-offset loads
- // as a sequence of two or more instructions. (However, positive offsets beyond
- // 4KiB also require two or more instructions, so this simple heuristic could
- // be improved for cases where there is a dense cluster of elements far from
- // the lowest offset. This is expected to be rare enough though, so we choose
- // not to spend compile time on elaborate calculations.)
- element_offset_ = std::min(element_offset_, element_offset);
- }
-
- const DexFile& GetDexFile() const {
- return *dex_file_;
- }
-
- size_t GetElementOffset() const {
- return element_offset_;
- }
-
- DECLARE_INSTRUCTION(ArmDexCacheArraysBase);
-
- private:
- const DexFile* dex_file_;
- size_t element_offset_;
-
- DISALLOW_COPY_AND_ASSIGN(HArmDexCacheArraysBase);
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_OPTIMIZING_NODES_ARM_H_
diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h
index 36431c1fb9..8e439d9621 100644
--- a/compiler/optimizing/nodes_mips.h
+++ b/compiler/optimizing/nodes_mips.h
@@ -34,38 +34,6 @@ class HMipsComputeBaseMethodAddress : public HExpression<0> {
DISALLOW_COPY_AND_ASSIGN(HMipsComputeBaseMethodAddress);
};
-class HMipsDexCacheArraysBase : public HExpression<0> {
- public:
- explicit HMipsDexCacheArraysBase(const DexFile& dex_file)
- : HExpression(Primitive::kPrimInt, SideEffects::None(), kNoDexPc),
- dex_file_(&dex_file),
- element_offset_(static_cast<size_t>(-1)) { }
-
- bool CanBeMoved() const OVERRIDE { return true; }
-
- void UpdateElementOffset(size_t element_offset) {
- // We'll maximize the range of a single load instruction for dex cache array accesses
- // by aligning offset -32768 with the offset of the first used element.
- element_offset_ = std::min(element_offset_, element_offset);
- }
-
- const DexFile& GetDexFile() const {
- return *dex_file_;
- }
-
- size_t GetElementOffset() const {
- return element_offset_;
- }
-
- DECLARE_INSTRUCTION(MipsDexCacheArraysBase);
-
- private:
- const DexFile* dex_file_;
- size_t element_offset_;
-
- DISALLOW_COPY_AND_ASSIGN(HMipsDexCacheArraysBase);
-};
-
// Mips version of HPackedSwitch that holds a pointer to the base method address.
class HMipsPackedSwitch FINAL : public HTemplateInstruction<2> {
public:
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index e5ab00bce3..890ba674b5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -24,16 +24,11 @@
#include "android-base/strings.h"
-#ifdef ART_ENABLE_CODEGEN_arm
-#include "dex_cache_array_fixups_arm.h"
-#endif
-
#ifdef ART_ENABLE_CODEGEN_arm64
#include "instruction_simplifier_arm64.h"
#endif
#ifdef ART_ENABLE_CODEGEN_mips
-#include "dex_cache_array_fixups_mips.h"
#include "pc_relative_fixups_mips.h"
#endif
@@ -522,8 +517,6 @@ static HOptimization* BuildOptimization(
} else if (opt_name == CodeSinking::kCodeSinkingPassName) {
return new (arena) CodeSinking(graph, stats);
#ifdef ART_ENABLE_CODEGEN_arm
- } else if (opt_name == arm::DexCacheArrayFixups::kDexCacheArrayFixupsArmPassName) {
- return new (arena) arm::DexCacheArrayFixups(graph, codegen, stats);
} else if (opt_name == arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName) {
return new (arena) arm::InstructionSimplifierArm(graph, stats);
#endif
@@ -532,8 +525,6 @@ static HOptimization* BuildOptimization(
return new (arena) arm64::InstructionSimplifierArm64(graph, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- } else if (opt_name == mips::DexCacheArrayFixups::kDexCacheArrayFixupsMipsPassName) {
- return new (arena) mips::DexCacheArrayFixups(graph, codegen, stats);
} else if (opt_name == mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName) {
return new (arena) mips::PcRelativeFixups(graph, codegen, stats);
#endif
@@ -641,8 +632,6 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
#if defined(ART_ENABLE_CODEGEN_arm)
case kThumb2:
case kArm: {
- arm::DexCacheArrayFixups* fixups =
- new (arena) arm::DexCacheArrayFixups(graph, codegen, stats);
arm::InstructionSimplifierArm* simplifier =
new (arena) arm::InstructionSimplifierArm(graph, stats);
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
@@ -653,7 +642,6 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
simplifier,
side_effects,
gvn,
- fixups,
scheduling,
};
RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
@@ -682,11 +670,8 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
case kMips: {
mips::PcRelativeFixups* pc_relative_fixups =
new (arena) mips::PcRelativeFixups(graph, codegen, stats);
- mips::DexCacheArrayFixups* dex_cache_array_fixups =
- new (arena) mips::DexCacheArrayFixups(graph, codegen, stats);
HOptimization* mips_optimizations[] = {
pc_relative_fixups,
- dex_cache_array_fixups
};
RunOptimizations(mips_optimizations, arraysize(mips_optimizations), pass_observer);
break;
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index bce54bf49a..21b645279e 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -59,10 +59,9 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
- // If this is an invoke with PC-relative pointer to a method,
+ // If this is an invoke with PC-relative load kind,
// we need to add the base as the special input.
- if (invoke->GetMethodLoadKind() ==
- HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative &&
+ if (invoke->HasPcRelativeMethodLoadKind() &&
!IsCallFreeIntrinsic<IntrinsicLocationsBuilderMIPS>(invoke, codegen_)) {
InitializePCRelativeBasePointer();
// Add the special argument base to the method.
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index c6a0b6a0d2..ce3a4966aa 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -308,8 +308,10 @@ void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) {
}
InsertMoveAfter(interval->GetDefinedBy(), interval->ToLocation(), loc);
}
- UsePosition* use = current->GetFirstUse();
- EnvUsePosition* env_use = current->GetFirstEnvironmentUse();
+ UsePositionList::const_iterator use_it = current->GetUses().begin();
+ const UsePositionList::const_iterator use_end = current->GetUses().end();
+ EnvUsePositionList::const_iterator env_use_it = current->GetEnvironmentUses().begin();
+ const EnvUsePositionList::const_iterator env_use_end = current->GetEnvironmentUses().end();
// Walk over all siblings, updating locations of use positions, and
// connecting them when they are adjacent.
@@ -321,43 +323,47 @@ void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) {
LiveRange* range = current->GetFirstRange();
while (range != nullptr) {
- while (use != nullptr && use->GetPosition() < range->GetStart()) {
- DCHECK(use->IsSynthesized());
- use = use->GetNext();
- }
- while (use != nullptr && use->GetPosition() <= range->GetEnd()) {
- DCHECK(current->CoversSlow(use->GetPosition()) || (use->GetPosition() == range->GetEnd()));
- if (!use->IsSynthesized()) {
- LocationSummary* locations = use->GetUser()->GetLocations();
- Location expected_location = locations->InAt(use->GetInputIndex());
+ // Process uses in the closed interval [range->GetStart(), range->GetEnd()].
+ // FindMatchingUseRange() expects a half-open interval, so pass `range->GetEnd() + 1u`.
+ size_t range_begin = range->GetStart();
+ size_t range_end = range->GetEnd() + 1u;
+ auto matching_use_range =
+ FindMatchingUseRange(use_it, use_end, range_begin, range_end);
+ DCHECK(std::all_of(use_it,
+ matching_use_range.begin(),
+ [](const UsePosition& pos) { return pos.IsSynthesized(); }));
+ for (const UsePosition& use : matching_use_range) {
+ DCHECK(current->CoversSlow(use.GetPosition()) || (use.GetPosition() == range->GetEnd()));
+ if (!use.IsSynthesized()) {
+ LocationSummary* locations = use.GetUser()->GetLocations();
+ Location expected_location = locations->InAt(use.GetInputIndex());
// The expected (actual) location may be invalid in case the input is unused. Currently
// this only happens for intrinsics.
if (expected_location.IsValid()) {
if (expected_location.IsUnallocated()) {
- locations->SetInAt(use->GetInputIndex(), source);
+ locations->SetInAt(use.GetInputIndex(), source);
} else if (!expected_location.IsConstant()) {
- AddInputMoveFor(interval->GetDefinedBy(), use->GetUser(), source, expected_location);
+ AddInputMoveFor(
+ interval->GetDefinedBy(), use.GetUser(), source, expected_location);
}
} else {
- DCHECK(use->GetUser()->IsInvoke());
- DCHECK(use->GetUser()->AsInvoke()->GetIntrinsic() != Intrinsics::kNone);
+ DCHECK(use.GetUser()->IsInvoke());
+ DCHECK(use.GetUser()->AsInvoke()->GetIntrinsic() != Intrinsics::kNone);
}
}
- use = use->GetNext();
}
+ use_it = matching_use_range.end();
// Walk over the environment uses, and update their locations.
- while (env_use != nullptr && env_use->GetPosition() < range->GetStart()) {
- env_use = env_use->GetNext();
- }
-
- while (env_use != nullptr && env_use->GetPosition() <= range->GetEnd()) {
- DCHECK(current->CoversSlow(env_use->GetPosition())
- || (env_use->GetPosition() == range->GetEnd()));
- HEnvironment* environment = env_use->GetEnvironment();
- environment->SetLocationAt(env_use->GetInputIndex(), source);
- env_use = env_use->GetNext();
+ auto matching_env_use_range =
+ FindMatchingUseRange(env_use_it, env_use_end, range_begin, range_end);
+ for (const EnvUsePosition& env_use : matching_env_use_range) {
+ DCHECK(current->CoversSlow(env_use.GetPosition())
+ || (env_use.GetPosition() == range->GetEnd()));
+ HEnvironment* environment = env_use.GetEnvironment();
+ environment->SetLocationAt(env_use.GetInputIndex(), source);
}
+ env_use_it = matching_env_use_range.end();
range = range->GetNext();
}
@@ -395,13 +401,8 @@ void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) {
current = next_sibling;
} while (current != nullptr);
- if (kIsDebugBuild) {
- // Following uses can only be synthesized uses.
- while (use != nullptr) {
- DCHECK(use->IsSynthesized());
- use = use->GetNext();
- }
- }
+ // Following uses can only be synthesized uses.
+ DCHECK(std::all_of(use_it, use_end, [](const UsePosition& pos) { return pos.IsSynthesized(); }));
}
static bool IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(
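
The loops above now go through FindMatchingUseRange, which (see the ssa_liveness_analysis.h hunks later in this change) selects the uses whose positions fall in a half-open [begin, end) interval of a sorted sequence; that is why the closed live range [GetStart(), GetEnd()] is passed as [GetStart(), GetEnd() + 1u). A small standalone demo of the same lookup over plain standard containers, assuming only that the positions are sorted:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <iterator>
    #include <utility>
    #include <vector>

    struct Use { size_t position; };

    // Return iterators delimiting the uses with position in [begin_pos, end_pos),
    // assuming the input is sorted by position, mirroring the half-open contract.
    template <typename It>
    std::pair<It, It> FindMatchingUseRange(It first, It last, size_t begin_pos, size_t end_pos) {
      It begin = std::find_if(first, last, [=](const Use& u) { return u.position >= begin_pos; });
      It end = std::find_if(begin, last, [=](const Use& u) { return u.position >= end_pos; });
      return {begin, end};
    }

    int main() {
      std::vector<Use> uses = {{2}, {5}, {8}, {11}};  // Sorted by position.
      // The closed live range [5, 8] is queried as the half-open interval [5, 8 + 1).
      auto range = FindMatchingUseRange(uses.begin(), uses.end(), 5, 8 + 1);
      assert(range.first->position == 5);
      assert(std::distance(range.first, range.second) == 2);  // Covers positions 5 and 8.
      return 0;
    }
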
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index 300f4c6239..5e22772844 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -20,7 +20,7 @@
#include "linear_order.h"
#include "register_allocation_resolver.h"
#include "ssa_liveness_analysis.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
@@ -178,18 +178,17 @@ static float ComputeSpillWeight(LiveInterval* interval, const SsaLivenessAnalysi
use_weight += CostForMoveAt(interval->GetStart() + 1, liveness);
}
- UsePosition* use = interval->GetFirstUse();
- while (use != nullptr && use->GetPosition() <= interval->GetStart()) {
- // Skip uses before the start of this live interval.
- use = use->GetNext();
- }
-
- while (use != nullptr && use->GetPosition() <= interval->GetEnd()) {
- if (use->GetUser() != nullptr && use->RequiresRegister()) {
+ // Process uses in the range (interval->GetStart(), interval->GetEnd()], i.e.
+ // [interval->GetStart() + 1, interval->GetEnd() + 1)
+ auto matching_use_range = FindMatchingUseRange(interval->GetUses().begin(),
+ interval->GetUses().end(),
+ interval->GetStart() + 1u,
+ interval->GetEnd() + 1u);
+ for (const UsePosition& use : matching_use_range) {
+ if (use.GetUser() != nullptr && use.RequiresRegister()) {
// Cost for spilling at a register use point.
- use_weight += CostForMoveAt(use->GetUser()->GetLifetimePosition() - 1, liveness);
+ use_weight += CostForMoveAt(use.GetUser()->GetLifetimePosition() - 1, liveness);
}
- use = use->GetNext();
}
// We divide by the length of the interval because we want to prioritize
@@ -989,16 +988,16 @@ void RegisterAllocatorGraphColor::SplitAtRegisterUses(LiveInterval* interval) {
interval = TrySplit(interval, interval->GetStart() + 1);
}
- UsePosition* use = interval->GetFirstUse();
- while (use != nullptr && use->GetPosition() < interval->GetStart()) {
- use = use->GetNext();
- }
-
+ // Process uses in the range [interval->GetStart(), interval->GetEnd()], i.e.
+ // [interval->GetStart(), interval->GetEnd() + 1)
+ auto matching_use_range = FindMatchingUseRange(interval->GetUses().begin(),
+ interval->GetUses().end(),
+ interval->GetStart(),
+ interval->GetEnd() + 1u);
// Split around register uses.
- size_t end = interval->GetEnd();
- while (use != nullptr && use->GetPosition() <= end) {
- if (use->RequiresRegister()) {
- size_t position = use->GetPosition();
+ for (const UsePosition& use : matching_use_range) {
+ if (use.RequiresRegister()) {
+ size_t position = use.GetPosition();
interval = TrySplit(interval, position - 1);
if (liveness_.GetInstructionFromPosition(position / 2)->IsControlFlow()) {
// If we are at the very end of a basic block, we cannot split right
@@ -1008,7 +1007,6 @@ void RegisterAllocatorGraphColor::SplitAtRegisterUses(LiveInterval* interval) {
interval = TrySplit(interval, position);
}
}
- use = use->GetNext();
}
}
@@ -1398,18 +1396,20 @@ void ColoringIteration::FindCoalesceOpportunities() {
}
// Try to prevent moves into fixed input locations.
- UsePosition* use = interval->GetFirstUse();
- for (; use != nullptr && use->GetPosition() <= interval->GetStart(); use = use->GetNext()) {
- // Skip past uses before the start of this interval.
- }
- for (; use != nullptr && use->GetPosition() <= interval->GetEnd(); use = use->GetNext()) {
- HInstruction* user = use->GetUser();
+ // Process uses in the range (interval->GetStart(), interval->GetEnd()], i.e.
+ // [interval->GetStart() + 1, interval->GetEnd() + 1)
+ auto matching_use_range = FindMatchingUseRange(interval->GetUses().begin(),
+ interval->GetUses().end(),
+ interval->GetStart() + 1u,
+ interval->GetEnd() + 1u);
+ for (const UsePosition& use : matching_use_range) {
+ HInstruction* user = use.GetUser();
if (user == nullptr) {
// User may be null for certain intervals, such as temp intervals.
continue;
}
LocationSummary* locations = user->GetLocations();
- Location input = locations->InAt(use->GetInputIndex());
+ Location input = locations->InAt(use.GetInputIndex());
if (input.IsRegister() || input.IsFpuRegister()) {
// TODO: Could try to handle pair interval too, but coalescing with fixed pair nodes
// is currently not supported.
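
ComputeSpillWeight and the coalescing pass above both ask for the uses in (GetStart(), GetEnd()], i.e. [GetStart() + 1, GetEnd() + 1), so the use at the definition position is excluded and only in-range register uses contribute to the cost. A toy version of that accumulation, with an illustrative cost model rather than ART's CostForMoveAt:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Use {
      size_t position;
      bool requires_register;
    };

    // Toy cost model: one unit charged just before every use that needs a register,
    // normalized by the interval length so long intervals look cheaper to spill.
    float ComputeSpillWeight(size_t start, size_t end, const std::vector<Use>& sorted_uses) {
      float use_weight = 0.0f;
      for (const Use& use : sorted_uses) {
        if (use.position <= start) continue;  // (start, end]: the definition itself is skipped.
        if (use.position > end) break;        // Uses are sorted, so nothing further matches.
        if (use.requires_register) {
          use_weight += 1.0f;
        }
      }
      return use_weight / static_cast<float>(end - start);
    }

    int main() {
      std::vector<Use> uses = {{4, true}, {10, false}, {12, true}, {30, true}};
      float weight = ComputeSpillWeight(4, 20, uses);  // Only positions 10 and 12 are in range.
      assert(weight == 1.0f / 16.0f);                  // One register use over a length-16 interval.
      return 0;
    }
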
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 667afb1ec3..24a2ab24d8 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -912,9 +912,9 @@ TEST_F(RegisterAllocatorTest, SpillInactive) {
// Create an interval with lifetime holes.
static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}};
LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), &allocator, -1, one);
- first->first_use_ = new(&allocator) UsePosition(user, false, 8, first->first_use_);
- first->first_use_ = new(&allocator) UsePosition(user, false, 7, first->first_use_);
- first->first_use_ = new(&allocator) UsePosition(user, false, 6, first->first_use_);
+ first->uses_.push_front(*new(&allocator) UsePosition(user, false, 8));
+ first->uses_.push_front(*new(&allocator) UsePosition(user, false, 7));
+ first->uses_.push_front(*new(&allocator) UsePosition(user, false, 6));
locations = new (&allocator) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
@@ -934,9 +934,9 @@ TEST_F(RegisterAllocatorTest, SpillInactive) {
// before lifetime position 6 yet.
static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}};
LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), &allocator, -1, three);
- third->first_use_ = new(&allocator) UsePosition(user, false, 8, third->first_use_);
- third->first_use_ = new(&allocator) UsePosition(user, false, 4, third->first_use_);
- third->first_use_ = new(&allocator) UsePosition(user, false, 3, third->first_use_);
+ third->uses_.push_front(*new(&allocator) UsePosition(user, false, 8));
+ third->uses_.push_front(*new(&allocator) UsePosition(user, false, 4));
+ third->uses_.push_front(*new(&allocator) UsePosition(user, false, 3));
locations = new (&allocator) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
third = third->SplitAt(3);
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index 1a89567991..e78cd78aa2 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -288,6 +288,11 @@ void SchedulingLatencyVisitorARM::VisitIntermediateAddress(HIntermediateAddress*
last_visited_latency_ = kArmIntegerOpLatency;
}
+void SchedulingLatencyVisitorARM::VisitIntermediateAddressIndex(
+ HIntermediateAddressIndex* ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "IntermediateAddressIndex is not implemented for ARM";
+}
+
void SchedulingLatencyVisitorARM::VisitMultiplyAccumulate(HMultiplyAccumulate* ATTRIBUTE_UNUSED) {
last_visited_latency_ = kArmMulIntegerLatency;
}
@@ -813,10 +818,5 @@ void SchedulingLatencyVisitorARM::VisitTypeConversion(HTypeConversion* instr) {
}
}
-void SchedulingLatencyVisitorARM::VisitArmDexCacheArraysBase(art::HArmDexCacheArraysBase*) {
- last_visited_internal_latency_ = kArmIntegerOpLatency;
- last_visited_latency_ = kArmIntegerOpLatency;
-}
-
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index 8d5e4f375b..897e97da49 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -17,7 +17,11 @@
#ifndef ART_COMPILER_OPTIMIZING_SCHEDULER_ARM_H_
#define ART_COMPILER_OPTIMIZING_SCHEDULER_ARM_H_
+#ifdef ART_USE_OLD_ARM_BACKEND
+#include "code_generator_arm.h"
+#else
#include "code_generator_arm_vixl.h"
+#endif
#include "scheduler.h"
namespace art {
@@ -99,6 +103,7 @@ class SchedulingLatencyVisitorARM : public SchedulingLatencyVisitor {
M(BitwiseNegatedRight, unused) \
M(MultiplyAccumulate, unused) \
M(IntermediateAddress, unused) \
+ M(IntermediateAddressIndex, unused) \
M(DataProcWithShifterOp, unused)
#define DECLARE_VISIT_INSTRUCTION(type, unused) \
diff --git a/compiler/optimizing/scheduler_arm64.cc b/compiler/optimizing/scheduler_arm64.cc
index 558dcc4cbc..83b487fb5b 100644
--- a/compiler/optimizing/scheduler_arm64.cc
+++ b/compiler/optimizing/scheduler_arm64.cc
@@ -16,6 +16,7 @@
#include "scheduler_arm64.h"
#include "code_generator_utils.h"
+#include "mirror/array-inl.h"
namespace art {
namespace arm64 {
@@ -43,6 +44,13 @@ void SchedulingLatencyVisitorARM64::VisitIntermediateAddress(
last_visited_latency_ = kArm64IntegerOpLatency + 2;
}
+void SchedulingLatencyVisitorARM64::VisitIntermediateAddressIndex(
+ HIntermediateAddressIndex* instr ATTRIBUTE_UNUSED) {
+ // Although the code generated is a simple `add` instruction, we found through empirical results
+ // that spacing it from its use in memory accesses was beneficial.
+ last_visited_latency_ = kArm64DataProcWithShifterOpLatency + 2;
+}
+
void SchedulingLatencyVisitorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* ATTRIBUTE_UNUSED) {
last_visited_latency_ = kArm64MulIntegerLatency;
}
@@ -192,5 +200,148 @@ void SchedulingLatencyVisitorARM64::VisitTypeConversion(HTypeConversion* instr)
}
}
+void SchedulingLatencyVisitorARM64::HandleSimpleArithmeticSIMD(HVecOperation *instr) {
+ if (Primitive::IsFloatingPointType(instr->GetPackedType())) {
+ last_visited_latency_ = kArm64SIMDFloatingPointOpLatency;
+ } else {
+ last_visited_latency_ = kArm64SIMDIntegerOpLatency;
+ }
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecReplicateScalar(
+ HVecReplicateScalar* instr ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64SIMDReplicateOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecSetScalars(HVecSetScalars* instr) {
+ LOG(FATAL) << "Unsupported SIMD instruction " << instr->GetId();
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecSumReduce(HVecSumReduce* instr) {
+ LOG(FATAL) << "Unsupported SIMD instruction " << instr->GetId();
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecCnv(HVecCnv* instr ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64SIMDTypeConversionInt2FPLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecNeg(HVecNeg* instr) {
+ HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecAbs(HVecAbs* instr) {
+ HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecNot(HVecNot* instr) {
+ if (instr->GetPackedType() == Primitive::kPrimBoolean) {
+ last_visited_internal_latency_ = kArm64SIMDIntegerOpLatency;
+ }
+ last_visited_latency_ = kArm64SIMDIntegerOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecAdd(HVecAdd* instr) {
+ HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instr) {
+ HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecSub(HVecSub* instr) {
+ HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecMul(HVecMul* instr) {
+ if (Primitive::IsFloatingPointType(instr->GetPackedType())) {
+ last_visited_latency_ = kArm64SIMDMulFloatingPointLatency;
+ } else {
+ last_visited_latency_ = kArm64SIMDMulIntegerLatency;
+ }
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecDiv(HVecDiv* instr) {
+ if (instr->GetPackedType() == Primitive::kPrimFloat) {
+ last_visited_latency_ = kArm64SIMDDivFloatLatency;
+ } else {
+ DCHECK(instr->GetPackedType() == Primitive::kPrimDouble);
+ last_visited_latency_ = kArm64SIMDDivDoubleLatency;
+ }
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecMin(HVecMin* instr) {
+ HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecMax(HVecMax* instr) {
+ HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecAnd(HVecAnd* instr ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64SIMDIntegerOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecAndNot(HVecAndNot* instr) {
+ LOG(FATAL) << "Unsupported SIMD instruction " << instr->GetId();
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecOr(HVecOr* instr ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64SIMDIntegerOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecXor(HVecXor* instr ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64SIMDIntegerOpLatency;
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecShl(HVecShl* instr) {
+ HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecShr(HVecShr* instr) {
+ HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecUShr(HVecUShr* instr) {
+ HandleSimpleArithmeticSIMD(instr);
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecMultiplyAccumulate(
+ HVecMultiplyAccumulate* instr ATTRIBUTE_UNUSED) {
+ last_visited_latency_ = kArm64SIMDMulIntegerLatency;
+}
+
+void SchedulingLatencyVisitorARM64::HandleVecAddress(
+ HVecMemoryOperation* instruction,
+ size_t size ATTRIBUTE_UNUSED) {
+ HInstruction* index = instruction->InputAt(1);
+ if (!index->IsConstant()) {
+ last_visited_internal_latency_ += kArm64DataProcWithShifterOpLatency;
+ }
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecLoad(HVecLoad* instr) {
+ last_visited_internal_latency_ = 0;
+ size_t size = Primitive::ComponentSize(instr->GetPackedType());
+
+ if (instr->GetPackedType() == Primitive::kPrimChar
+ && mirror::kUseStringCompression
+ && instr->IsStringCharAt()) {
+ // Set latencies for the uncompressed case.
+ last_visited_internal_latency_ += kArm64MemoryLoadLatency + kArm64BranchLatency;
+ HandleVecAddress(instr, size);
+ last_visited_latency_ = kArm64SIMDMemoryLoadLatency;
+ } else {
+ HandleVecAddress(instr, size);
+ last_visited_latency_ = kArm64SIMDMemoryLoadLatency;
+ }
+}
+
+void SchedulingLatencyVisitorARM64::VisitVecStore(HVecStore* instr) {
+ last_visited_internal_latency_ = 0;
+ size_t size = Primitive::ComponentSize(instr->GetPackedType());
+ HandleVecAddress(instr, size);
+ last_visited_latency_ = kArm64SIMDMemoryStoreLatency;
+}
+
} // namespace arm64
} // namespace art
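
Each new SIMD visitor reports two numbers: an internal latency for work issued before the result (the compressed-string check in VecLoad for a char CharAt, or the address add when the index is not a constant) and the latency of the result itself. A condensed standalone sketch of how the two combine for a vector load; the SIMD load value matches the header below, while the scalar values here are placeholders, not the real constants:

    #include <cassert>
    #include <cstdint>

    static constexpr uint32_t kSimdMemoryLoadLatency = 10;  // kArm64SIMDMemoryLoadLatency below.
    static constexpr uint32_t kScalarLoadLatency = 5;       // Placeholder.
    static constexpr uint32_t kBranchLatency = 2;           // Placeholder.
    static constexpr uint32_t kAddressComputeLatency = 3;   // Placeholder.

    struct VecLoadInfo {
      bool index_is_constant;   // A non-constant index implies an address add.
      bool compressed_char_at;  // String.charAt on a possibly compressed string.
    };

    // Mirrors the shape of VisitVecLoad/HandleVecAddress: internal latency is work
    // issued before the load, the return value is the latency of the load result.
    uint32_t VecLoadLatency(const VecLoadInfo& info, uint32_t* internal_latency) {
      *internal_latency = 0;
      if (info.compressed_char_at) {
        // Compression check: load the count field and branch on the flag.
        *internal_latency += kScalarLoadLatency + kBranchLatency;
      }
      if (!info.index_is_constant) {
        *internal_latency += kAddressComputeLatency;
      }
      return kSimdMemoryLoadLatency;
    }

    int main() {
      uint32_t internal = 0;
      uint32_t latency = VecLoadLatency({/*index_is_constant=*/false,
                                         /*compressed_char_at=*/true}, &internal);
      assert(latency == kSimdMemoryLoadLatency);
      assert(internal == kScalarLoadLatency + kBranchLatency + kAddressComputeLatency);
      return 0;
    }
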
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 7a33720655..63d5b7d6b6 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -42,6 +42,18 @@ static constexpr uint32_t kArm64LoadStringInternalLatency = 7;
static constexpr uint32_t kArm64MulFloatingPointLatency = 6;
static constexpr uint32_t kArm64MulIntegerLatency = 6;
static constexpr uint32_t kArm64TypeConversionFloatingPointIntegerLatency = 5;
+static constexpr uint32_t kArm64BranchLatency = kArm64IntegerOpLatency;
+
+static constexpr uint32_t kArm64SIMDFloatingPointOpLatency = 10;
+static constexpr uint32_t kArm64SIMDIntegerOpLatency = 6;
+static constexpr uint32_t kArm64SIMDMemoryLoadLatency = 10;
+static constexpr uint32_t kArm64SIMDMemoryStoreLatency = 6;
+static constexpr uint32_t kArm64SIMDMulFloatingPointLatency = 12;
+static constexpr uint32_t kArm64SIMDMulIntegerLatency = 12;
+static constexpr uint32_t kArm64SIMDReplicateOpLatency = 16;
+static constexpr uint32_t kArm64SIMDDivDoubleLatency = 60;
+static constexpr uint32_t kArm64SIMDDivFloatLatency = 30;
+static constexpr uint32_t kArm64SIMDTypeConversionInt2FPLatency = 10;
class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
public:
@@ -52,29 +64,54 @@ class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
// We add a second unused parameter to be able to use this macro like the others
// defined in `nodes.h`.
-#define FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(M) \
- M(ArrayGet , unused) \
- M(ArrayLength , unused) \
- M(ArraySet , unused) \
- M(BinaryOperation , unused) \
- M(BoundsCheck , unused) \
- M(Div , unused) \
- M(InstanceFieldGet , unused) \
- M(InstanceOf , unused) \
- M(Invoke , unused) \
- M(LoadString , unused) \
- M(Mul , unused) \
- M(NewArray , unused) \
- M(NewInstance , unused) \
- M(Rem , unused) \
- M(StaticFieldGet , unused) \
- M(SuspendCheck , unused) \
- M(TypeConversion , unused)
+#define FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(M) \
+ M(ArrayGet , unused) \
+ M(ArrayLength , unused) \
+ M(ArraySet , unused) \
+ M(BinaryOperation , unused) \
+ M(BoundsCheck , unused) \
+ M(Div , unused) \
+ M(InstanceFieldGet , unused) \
+ M(InstanceOf , unused) \
+ M(Invoke , unused) \
+ M(LoadString , unused) \
+ M(Mul , unused) \
+ M(NewArray , unused) \
+ M(NewInstance , unused) \
+ M(Rem , unused) \
+ M(StaticFieldGet , unused) \
+ M(SuspendCheck , unused) \
+ M(TypeConversion , unused) \
+ M(VecReplicateScalar , unused) \
+ M(VecSetScalars , unused) \
+ M(VecSumReduce , unused) \
+ M(VecCnv , unused) \
+ M(VecNeg , unused) \
+ M(VecAbs , unused) \
+ M(VecNot , unused) \
+ M(VecAdd , unused) \
+ M(VecHalvingAdd , unused) \
+ M(VecSub , unused) \
+ M(VecMul , unused) \
+ M(VecDiv , unused) \
+ M(VecMin , unused) \
+ M(VecMax , unused) \
+ M(VecAnd , unused) \
+ M(VecAndNot , unused) \
+ M(VecOr , unused) \
+ M(VecXor , unused) \
+ M(VecShl , unused) \
+ M(VecShr , unused) \
+ M(VecUShr , unused) \
+ M(VecMultiplyAccumulate, unused) \
+ M(VecLoad , unused) \
+ M(VecStore , unused)
#define FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(M) \
M(BitwiseNegatedRight, unused) \
M(MultiplyAccumulate, unused) \
M(IntermediateAddress, unused) \
+ M(IntermediateAddressIndex, unused) \
M(DataProcWithShifterOp, unused)
#define DECLARE_VISIT_INSTRUCTION(type, unused) \
@@ -85,6 +122,10 @@ class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ void HandleSimpleArithmeticSIMD(HVecOperation *instr);
+ void HandleVecAddress(HVecMemoryOperation* instruction, size_t size);
};
class HSchedulerARM64 : public HScheduler {
@@ -101,6 +142,8 @@ class HSchedulerARM64 : public HScheduler {
return true;
FOR_EACH_CONCRETE_INSTRUCTION_ARM64(CASE_INSTRUCTION_KIND)
return true;
+ FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(CASE_INSTRUCTION_KIND)
+ return true;
default:
return HScheduler::IsSchedulable(instruction);
}
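
The FOR_EACH_SCHEDULED_* lists extended above are X-macros: a list macro applies a caller-supplied macro M to every instruction name, so adding VecLoad or IntermediateAddressIndex to the list is all that is needed to declare its visitor and to let IsSchedulable accept it. A self-contained sketch of the technique with generic names (the ART macros also take an unused second parameter):

    #include <iostream>

    // The list of "instructions" is defined once...
    #define FOR_EACH_SCHEDULED_INSTRUCTION(M) \
      M(VecAdd)                               \
      M(VecMul)                               \
      M(VecLoad)

    // ...and expanded wherever a per-instruction declaration is needed.
    #define DECLARE_VISIT(type) void Visit##type();

    class LatencyVisitor {
     public:
      FOR_EACH_SCHEDULED_INSTRUCTION(DECLARE_VISIT)
    };
    #undef DECLARE_VISIT

    void LatencyVisitor::VisitVecAdd() { std::cout << "VecAdd\n"; }
    void LatencyVisitor::VisitVecMul() { std::cout << "VecMul\n"; }
    void LatencyVisitor::VisitVecLoad() { std::cout << "VecLoad\n"; }

    int main() {
      LatencyVisitor visitor;
      visitor.VisitVecLoad();  // Prints "VecLoad".
      return 0;
    }
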
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 7b8104b8ca..8bd568befd 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -128,15 +128,8 @@ void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative;
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
} else {
- // Use PC-relative access to the dex cache arrays.
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
- // Note: we use the invoke's graph instead of the codegen graph, which are
- // different when inlining (the codegen graph is the most outer graph). The
- // invoke's dex method index is relative to the dex file where the invoke's graph
- // was built from.
- DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen->GetInstructionSet()),
- &invoke->GetBlock()->GetGraph()->GetDexFile());
- method_load_data = layout.MethodOffset(invoke->GetDexMethodIndex());
+ // Use PC-relative access to the .bss methods arrays.
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBssEntry;
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
}
@@ -159,7 +152,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(HLoadClass* load_class,
CompilerDriver* compiler_driver,
const DexCompilationUnit& dex_compilation_unit) {
Handle<mirror::Class> klass = load_class->GetClass();
- DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
+ DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kRuntimeCall ||
load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
<< load_class->GetLoadKind();
DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening.";
@@ -185,7 +178,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(HLoadClass* load_class,
DCHECK(!runtime->UseJitCompilation());
if (!compiler_driver->GetSupportBootImageFixup()) {
// compiler_driver_test. Do not sharpen.
- desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
} else if ((klass != nullptr) &&
compiler_driver->IsImageClass(dex_file.StringByTypeIdx(type_index))) {
is_in_boot_image = true;
@@ -210,7 +203,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(HLoadClass* load_class,
// this `HLoadClass` hasn't been executed in the interpreter.
// Fallback to the dex cache.
// TODO(ngeoffray): Generate HDeoptimize instead.
- desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
}
} else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
// AOT app compilation. Check if the class is in the boot image.
@@ -229,7 +222,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(HLoadClass* load_class,
}
if (!IsSameDexFile(load_class->GetDexFile(), *dex_compilation_unit.GetDexFile())) {
- if ((load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) ||
+ if ((load_kind == HLoadClass::LoadKind::kRuntimeCall) ||
(load_kind == HLoadClass::LoadKind::kBssEntry)) {
// We actually cannot reference this class, we're forced to bail.
// We cannot reference this class with Bss, as the entrypoint will lookup the class
@@ -241,7 +234,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(HLoadClass* load_class,
}
void HSharpening::ProcessLoadString(HLoadString* load_string) {
- DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kDexCacheViaMethod);
+ DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kRuntimeCall);
const DexFile& dex_file = load_string->GetDexFile();
dex::StringIndex string_index = load_string->GetStringIndex();
@@ -268,7 +261,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
desired_load_kind = HLoadString::LoadKind::kBootImageLinkTimePcRelative;
} else {
// compiler_driver_test. Do not sharpen.
- desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
+ desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
}
} else if (runtime->UseJitCompilation()) {
DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
@@ -280,7 +273,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
}
} else {
- desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
+ desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
}
} else {
// AOT app compilation. Try to lookup the string without allocating if not found.
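
With the dex-cache-array layout gone, invoke sharpening resolves to just three outcomes: boot-image methods keep the link-time PC-relative kind, other AOT-compiled methods get a .bss entry, and kRuntimeCall remains only as the codegen fallback. A condensed sketch of that decision; the two booleans are stand-ins, and the real code also handles the string-init, recursive, JIT and direct-address cases first:

    #include <cassert>

    enum class MethodLoadKind { kBootImageLinkTimePcRelative, kBssEntry, kRuntimeCall };

    // `is_in_boot_image` and `codegen_supports_pc_relative` stand in for the checks
    // made against the CompilerDriver and the code generator.
    MethodLoadKind ChooseMethodLoadKind(bool is_in_boot_image, bool codegen_supports_pc_relative) {
      if (!codegen_supports_pc_relative) {
        return MethodLoadKind::kRuntimeCall;  // Last resort on unsupported configurations.
      }
      return is_in_boot_image ? MethodLoadKind::kBootImageLinkTimePcRelative
                              : MethodLoadKind::kBssEntry;
    }

    int main() {
      assert(ChooseMethodLoadKind(/*is_in_boot_image=*/false,
                                  /*codegen_supports_pc_relative=*/true) ==
             MethodLoadKind::kBssEntry);
      return 0;
    }
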
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index b538a89a06..7b7495bf3b 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -356,14 +356,16 @@ int LiveInterval::FindFirstRegisterHint(size_t* free_until,
}
}
- UsePosition* use = first_use_;
size_t start = GetStart();
size_t end = GetEnd();
- while (use != nullptr && use->GetPosition() <= end) {
- size_t use_position = use->GetPosition();
- if (use_position >= start && !use->IsSynthesized()) {
- HInstruction* user = use->GetUser();
- size_t input_index = use->GetInputIndex();
+ for (const UsePosition& use : GetUses()) {
+ size_t use_position = use.GetPosition();
+ if (use_position > end) {
+ break;
+ }
+ if (use_position >= start && !use.IsSynthesized()) {
+ HInstruction* user = use.GetUser();
+ size_t input_index = use.GetInputIndex();
if (user->IsPhi()) {
// If the phi has a register, try to use the same.
Location phi_location = user->GetLiveInterval()->ToLocation();
@@ -395,7 +397,7 @@ int LiveInterval::FindFirstRegisterHint(size_t* free_until,
} else {
// If the instruction is expected in a register, try to use it.
LocationSummary* locations = user->GetLocations();
- Location expected = locations->InAt(use->GetInputIndex());
+ Location expected = locations->InAt(use.GetInputIndex());
// We use the user's lifetime position - 1 (and not `use_position`) because the
// register is blocked at the beginning of the user.
size_t position = user->GetLifetimePosition() - 1;
@@ -408,7 +410,6 @@ int LiveInterval::FindFirstRegisterHint(size_t* free_until,
}
}
}
- use = use->GetNext();
}
return kNoRegister;
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index e9dffc1fac..a6681575a2 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -19,7 +19,9 @@
#include <iostream>
+#include "base/iteration_range.h"
#include "nodes.h"
+#include "utils/intrusive_forward_list.h"
namespace art {
@@ -102,28 +104,23 @@ class LiveRange FINAL : public ArenaObject<kArenaAllocSsaLiveness> {
/**
* A use position represents a live interval use at a given position.
*/
-class UsePosition : public ArenaObject<kArenaAllocSsaLiveness> {
+class UsePosition : public ArenaObject<kArenaAllocSsaLiveness>,
+ public IntrusiveForwardListNode<UsePosition> {
public:
- UsePosition(HInstruction* user, size_t input_index, size_t position, UsePosition* next)
+ UsePosition(HInstruction* user, size_t input_index, size_t position)
: user_(user),
input_index_(input_index),
- position_(position),
- next_(next) {
- DCHECK(next_ == nullptr || next->GetPosition() >= GetPosition());
+ position_(position) {
}
explicit UsePosition(size_t position)
: user_(nullptr),
input_index_(kNoInput),
- position_(dchecked_integral_cast<uint32_t>(position)),
- next_(nullptr) {
+ position_(dchecked_integral_cast<uint32_t>(position)) {
}
size_t GetPosition() const { return position_; }
- UsePosition* GetNext() const { return next_; }
- void SetNext(UsePosition* next) { next_ = next; }
-
HInstruction* GetUser() const { return user_; }
bool IsSynthesized() const { return user_ == nullptr; }
@@ -138,10 +135,8 @@ class UsePosition : public ArenaObject<kArenaAllocSsaLiveness> {
return user_->GetBlock()->GetLoopInformation();
}
- UsePosition* Dup(ArenaAllocator* allocator) const {
- return new (allocator) UsePosition(
- user_, input_index_, position_,
- next_ == nullptr ? nullptr : next_->Dup(allocator));
+ UsePosition* Clone(ArenaAllocator* allocator) const {
+ return new (allocator) UsePosition(user_, input_index_, position_);
}
bool RequiresRegister() const {
@@ -156,33 +151,28 @@ class UsePosition : public ArenaObject<kArenaAllocSsaLiveness> {
HInstruction* const user_;
const size_t input_index_;
const size_t position_;
- UsePosition* next_;
DISALLOW_COPY_AND_ASSIGN(UsePosition);
};
+using UsePositionList = IntrusiveForwardList<UsePosition>;
/**
* An environment use position represents a live interval for environment use at a given position.
*/
-class EnvUsePosition : public ArenaObject<kArenaAllocSsaLiveness> {
+class EnvUsePosition : public ArenaObject<kArenaAllocSsaLiveness>,
+ public IntrusiveForwardListNode<EnvUsePosition> {
public:
EnvUsePosition(HEnvironment* environment,
size_t input_index,
- size_t position,
- EnvUsePosition* next)
+ size_t position)
: environment_(environment),
input_index_(input_index),
- position_(position),
- next_(next) {
+ position_(position) {
DCHECK(environment != nullptr);
- DCHECK(next_ == nullptr || next->GetPosition() >= GetPosition());
}
size_t GetPosition() const { return position_; }
- EnvUsePosition* GetNext() const { return next_; }
- void SetNext(EnvUsePosition* next) { next_ = next; }
-
HEnvironment* GetEnvironment() const { return environment_; }
size_t GetInputIndex() const { return input_index_; }
@@ -190,20 +180,47 @@ class EnvUsePosition : public ArenaObject<kArenaAllocSsaLiveness> {
stream << position_;
}
- EnvUsePosition* Dup(ArenaAllocator* allocator) const {
- return new (allocator) EnvUsePosition(
- environment_, input_index_, position_,
- next_ == nullptr ? nullptr : next_->Dup(allocator));
+ EnvUsePosition* Clone(ArenaAllocator* allocator) const {
+ return new (allocator) EnvUsePosition(environment_, input_index_, position_);
}
private:
HEnvironment* const environment_;
const size_t input_index_;
const size_t position_;
- EnvUsePosition* next_;
DISALLOW_COPY_AND_ASSIGN(EnvUsePosition);
};
+using EnvUsePositionList = IntrusiveForwardList<EnvUsePosition>;
+
+template <typename Iterator>
+inline Iterator FindUseAtOrAfterPosition(Iterator first, Iterator last, size_t position) {
+ using value_type = const typename Iterator::value_type;
+ static_assert(std::is_same<value_type, const UsePosition>::value ||
+ std::is_same<value_type, const EnvUsePosition>::value,
+ "Expecting value type UsePosition or EnvUsePosition.");
+ Iterator ret = std::find_if(
+ first, last, [position](const value_type& use) { return use.GetPosition() >= position; });
+ // Check that the processed range is sorted. Do not check the rest of the range to avoid
+ // increasing the complexity of callers from O(n) to O(n^2).
+ DCHECK(std::is_sorted(
+ first,
+ ret,
+ [](const value_type& lhs, const value_type& rhs) {
+ return lhs.GetPosition() < rhs.GetPosition();
+ }));
+ return ret;
+}
+
+template <typename Iterator>
+inline IterationRange<Iterator> FindMatchingUseRange(Iterator first,
+ Iterator last,
+ size_t position_begin,
+ size_t position_end) {
+ Iterator begin = FindUseAtOrAfterPosition(first, last, position_begin);
+ Iterator end = FindUseAtOrAfterPosition(begin, last, position_end);
+ return MakeIterationRange(begin, end);
+}
class SafepointPosition : public ArenaObject<kArenaAllocSsaLiveness> {
public:
@@ -265,11 +282,11 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
void AddTempUse(HInstruction* instruction, size_t temp_index) {
DCHECK(IsTemp());
- DCHECK(first_use_ == nullptr) << "A temporary can only have one user";
- DCHECK(first_env_use_ == nullptr) << "A temporary cannot have environment user";
+ DCHECK(GetUses().empty()) << "A temporary can only have one user";
+ DCHECK(GetEnvironmentUses().empty()) << "A temporary cannot have environment user";
size_t position = instruction->GetLifetimePosition();
- first_use_ = new (allocator_) UsePosition(
- instruction, temp_index, position, first_use_);
+ UsePosition* new_use = new (allocator_) UsePosition(instruction, temp_index, position);
+ uses_.push_front(*new_use);
AddRange(position, position + 1);
}
@@ -306,32 +323,36 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
AddBackEdgeUses(*instruction->GetBlock());
}
- if ((first_use_ != nullptr)
- && (first_use_->GetUser() == actual_user)
- && (first_use_->GetPosition() < position)) {
+ if ((!uses_.empty()) &&
+ (uses_.front().GetUser() == actual_user) &&
+ (uses_.front().GetPosition() < position)) {
// The user uses the instruction multiple times, and one use dies before the other.
// We update the use list so that the latter is first.
DCHECK(!is_environment);
- UsePosition* cursor = first_use_;
- while ((cursor->GetNext() != nullptr) && (cursor->GetNext()->GetPosition() < position)) {
- cursor = cursor->GetNext();
- }
- DCHECK(first_use_->GetPosition() + 1 == position);
- UsePosition* new_use = new (allocator_) UsePosition(
- instruction, input_index, position, cursor->GetNext());
- cursor->SetNext(new_use);
- if (first_range_->GetEnd() == first_use_->GetPosition()) {
+ DCHECK(uses_.front().GetPosition() + 1 == position);
+ UsePositionList::iterator next_pos = uses_.begin();
+ UsePositionList::iterator insert_pos;
+ do {
+ insert_pos = next_pos;
+ ++next_pos;
+ } while (next_pos != uses_.end() && next_pos->GetPosition() < position);
+ UsePosition* new_use = new (allocator_) UsePosition(instruction, input_index, position);
+ uses_.insert_after(insert_pos, *new_use);
+ if (first_range_->GetEnd() == uses_.front().GetPosition()) {
first_range_->end_ = position;
}
return;
}
if (is_environment) {
- first_env_use_ = new (allocator_) EnvUsePosition(
- environment, input_index, position, first_env_use_);
+ DCHECK(env_uses_.empty() || position <= env_uses_.front().GetPosition());
+ EnvUsePosition* new_env_use =
+ new (allocator_) EnvUsePosition(environment, input_index, position);
+ env_uses_.push_front(*new_env_use);
} else {
- first_use_ = new (allocator_) UsePosition(
- instruction, input_index, position, first_use_);
+ DCHECK(uses_.empty() || position <= uses_.front().GetPosition());
+ UsePosition* new_use = new (allocator_) UsePosition(instruction, input_index, position);
+ uses_.push_front(*new_use);
}
if (is_environment && !keep_alive) {
@@ -369,8 +390,9 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
if (block->IsInLoop()) {
AddBackEdgeUses(*block);
}
- first_use_ = new (allocator_) UsePosition(
- instruction, input_index, block->GetLifetimeEnd(), first_use_);
+ UsePosition* new_use =
+ new (allocator_) UsePosition(instruction, input_index, block->GetLifetimeEnd());
+ uses_.push_front(*new_use);
}
ALWAYS_INLINE void AddRange(size_t start, size_t end) {
@@ -430,7 +452,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
first_range_->start_ = from;
} else {
// Instruction without uses.
- DCHECK(first_use_ == nullptr);
+ DCHECK(uses_.empty());
DCHECK(from == defined_by_->GetLifetimePosition());
first_range_ = last_range_ = range_search_start_ =
new (allocator_) LiveRange(from, from + 2, nullptr);
@@ -528,16 +550,17 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
return position;
}
- UsePosition* use = first_use_;
size_t end = GetEnd();
- while (use != nullptr && use->GetPosition() <= end) {
- size_t use_position = use->GetPosition();
+ for (const UsePosition& use : GetUses()) {
+ size_t use_position = use.GetPosition();
+ if (use_position > end) {
+ break;
+ }
if (use_position > position) {
- if (use->RequiresRegister()) {
+ if (use.RequiresRegister()) {
return use_position;
}
}
- use = use->GetNext();
}
return kNoLifetime;
}
@@ -564,24 +587,25 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
return position;
}
- UsePosition* use = first_use_;
size_t end = GetEnd();
- while (use != nullptr && use->GetPosition() <= end) {
- size_t use_position = use->GetPosition();
+ for (const UsePosition& use : GetUses()) {
+ size_t use_position = use.GetPosition();
+ if (use_position > end) {
+ break;
+ }
if (use_position > position) {
return use_position;
}
- use = use->GetNext();
}
return kNoLifetime;
}
- UsePosition* GetFirstUse() const {
- return first_use_;
+ const UsePositionList& GetUses() const {
+ return parent_->uses_;
}
- EnvUsePosition* GetFirstEnvironmentUse() const {
- return first_env_use_;
+ const EnvUsePositionList& GetEnvironmentUses() const {
+ return parent_->env_uses_;
}
Primitive::Type GetType() const {
@@ -645,8 +669,6 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
next_sibling_ = new_interval;
new_interval->parent_ = parent_;
- new_interval->first_use_ = first_use_;
- new_interval->first_env_use_ = first_env_use_;
LiveRange* current = first_range_;
LiveRange* previous = nullptr;
// Iterate over the ranges, and either find a range that covers this position, or
@@ -718,20 +740,14 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
current = current->GetNext();
}
stream << "}, uses: { ";
- const UsePosition* use = first_use_;
- if (use != nullptr) {
- do {
- use->Dump(stream);
- stream << " ";
- } while ((use = use->GetNext()) != nullptr);
+ for (const UsePosition& use : GetUses()) {
+ use.Dump(stream);
+ stream << " ";
}
stream << "}, { ";
- const EnvUsePosition* env_use = first_env_use_;
- if (env_use != nullptr) {
- do {
- env_use->Dump(stream);
- stream << " ";
- } while ((env_use = env_use->GetNext()) != nullptr);
+ for (const EnvUsePosition& env_use : GetEnvironmentUses()) {
+ env_use.Dump(stream);
+ stream << " ";
}
stream << "}";
stream << " is_fixed: " << is_fixed_ << ", is_split: " << IsSplit();
@@ -833,12 +849,16 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
high_or_low_interval_->last_range_ = high_or_low_interval_->first_range_->GetLastRange();
high_or_low_interval_->range_search_start_ = high_or_low_interval_->first_range_;
}
- if (first_use_ != nullptr) {
- high_or_low_interval_->first_use_ = first_use_->Dup(allocator_);
+ auto pos = high_or_low_interval_->uses_.before_begin();
+ for (const UsePosition& use : uses_) {
+ UsePosition* new_use = use.Clone(allocator_);
+ pos = high_or_low_interval_->uses_.insert_after(pos, *new_use);
}
- if (first_env_use_ != nullptr) {
- high_or_low_interval_->first_env_use_ = first_env_use_->Dup(allocator_);
+ auto env_pos = high_or_low_interval_->env_uses_.before_begin();
+ for (const EnvUsePosition& env_use : env_uses_) {
+ EnvUsePosition* new_env_use = env_use.Clone(allocator_);
+ env_pos = high_or_low_interval_->env_uses_.insert_after(env_pos, *new_env_use);
}
}
@@ -962,8 +982,8 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
range_search_start_(nullptr),
first_safepoint_(nullptr),
last_safepoint_(nullptr),
- first_use_(nullptr),
- first_env_use_(nullptr),
+ uses_(),
+ env_uses_(),
type_(type),
next_sibling_(nullptr),
parent_(this),
@@ -1005,14 +1025,12 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
}
bool HasSynthesizeUseAt(size_t position) const {
- UsePosition* use = first_use_;
- while (use != nullptr) {
- size_t use_position = use->GetPosition();
- if ((use_position == position) && use->IsSynthesized()) {
+ for (const UsePosition& use : GetUses()) {
+ size_t use_position = use.GetPosition();
+ if ((use_position == position) && use.IsSynthesized()) {
return true;
}
if (use_position > position) break;
- use = use->GetNext();
}
return false;
}
@@ -1028,11 +1046,11 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
// Add synthesized uses at the back edge of loops to help the register allocator.
// Note that this method is called in decreasing liveness order, to facilitate adding
- // uses at the head of the `first_use_` linked list. Because below
+ // uses at the head of the `uses_` list. Because below
// we iterate from inner-most to outer-most, which is in increasing liveness order,
- // we need to take extra care of how the `first_use_` linked list is being updated.
- UsePosition* first_in_new_list = nullptr;
- UsePosition* last_in_new_list = nullptr;
+ // we need to add subsequent entries after the last inserted entry.
+ const UsePositionList::iterator old_begin = uses_.begin();
+ UsePositionList::iterator insert_pos = uses_.before_begin();
for (HLoopInformationOutwardIterator it(block_at_use);
!it.Done();
it.Advance()) {
@@ -1042,37 +1060,25 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
break;
}
- // We're only adding a synthesized use at the last back edge. Adding syntehsized uses on
+ // We're only adding a synthesized use at the last back edge. Adding synthesized uses on
// all back edges is not necessary: anything used in the loop will have its use at the
// last back edge. If we want branches in a loop to have better register allocation than
// another branch, then it is the linear order we should change.
size_t back_edge_use_position = current->GetLifetimeEnd();
- if ((first_use_ != nullptr) && (first_use_->GetPosition() <= back_edge_use_position)) {
+ if ((old_begin != uses_.end()) && (old_begin->GetPosition() <= back_edge_use_position)) {
// There was a use already seen in this loop. Therefore the previous call to `AddUse`
// already inserted the backedge use. We can stop going outward.
DCHECK(HasSynthesizeUseAt(back_edge_use_position));
break;
}
- DCHECK(last_in_new_list == nullptr ||
- back_edge_use_position > last_in_new_list->GetPosition());
+ DCHECK(insert_pos != uses_.before_begin()
+ ? back_edge_use_position > insert_pos->GetPosition()
+ : current == block_at_use.GetLoopInformation())
+ << std::distance(uses_.before_begin(), insert_pos);
UsePosition* new_use = new (allocator_) UsePosition(back_edge_use_position);
-
- if (last_in_new_list != nullptr) {
- // Going outward. The latest created use needs to point to the new use.
- last_in_new_list->SetNext(new_use);
- } else {
- // This is the inner-most loop.
- DCHECK_EQ(current, block_at_use.GetLoopInformation());
- first_in_new_list = new_use;
- }
- last_in_new_list = new_use;
- }
- // Link the newly created linked list with `first_use_`.
- if (last_in_new_list != nullptr) {
- last_in_new_list->SetNext(first_use_);
- first_use_ = first_in_new_list;
+ insert_pos = uses_.insert_after(insert_pos, *new_use);
}
}
@@ -1091,9 +1097,9 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
SafepointPosition* first_safepoint_;
SafepointPosition* last_safepoint_;
- // Uses of this interval. Note that this linked list is shared amongst siblings.
- UsePosition* first_use_;
- EnvUsePosition* first_env_use_;
+ // Uses of this interval. Only the parent interval keeps these lists.
+ UsePositionList uses_;
+ EnvUsePositionList env_uses_;
// The instruction type this interval corresponds to.
const Primitive::Type type_;
@@ -1202,14 +1208,14 @@ class SsaLivenessAnalysis : public ValueObject {
// A temporary shares the same lifetime start as the instruction that requires it.
DCHECK(temp->IsTemp());
HInstruction* user = GetInstructionFromPosition(temp->GetStart() / 2);
- DCHECK_EQ(user, temp->GetFirstUse()->GetUser());
+ DCHECK_EQ(user, temp->GetUses().front().GetUser());
return user;
}
size_t GetTempIndex(LiveInterval* temp) const {
// We use the input index to store the index of the temporary in the user's temporary list.
DCHECK(temp->IsTemp());
- return temp->GetFirstUse()->GetInputIndex();
+ return temp->GetUses().front().GetInputIndex();
}
size_t GetMaxLifetimePosition() const {
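The LiveInterval hunks above replace the hand-maintained `first_use_`/`first_env_use_` singly linked lists with intrusive forward lists (`uses_`/`env_uses_`) owned by the parent interval: callers now iterate with range-based for over GetUses(), and ordered construction keeps an insert position, as in AddBackEdgeUses(). The sketch below is not ART code; it uses std::forward_list as a stand-in for art::IntrusiveForwardList to show the before_begin()/insert_after() idiom the patch relies on.

// Minimal sketch, assuming std::forward_list as a stand-in for the intrusive list.
#include <cstddef>
#include <forward_list>
#include <iostream>

struct Use {
  size_t position;  // Lifetime position, kept in increasing order.
};

int main() {
  std::forward_list<Use> uses;
  // before_begin() allows insertion at the head of an empty list, mirroring
  // `UsePositionList::iterator insert_pos = uses_.before_begin();`.
  auto insert_pos = uses.before_begin();
  for (size_t position : {4u, 10u, 14u}) {
    // insert_after() returns an iterator to the new element, so repeated
    // insertions append in increasing position order.
    insert_pos = uses.insert_after(insert_pos, Use{position});
  }
  for (const Use& use : uses) {
    std::cout << use.position << " ";  // Prints: 4 10 14
  }
  std::cout << "\n";
  return 0;
}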
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 0f24e81be2..bb23a29064 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -25,7 +25,7 @@
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/logging.h"
-#include "base/stl_util.h"
+#include "base/stl_util_identity.h"
#include "base/value_object.h"
#include "constants_arm.h"
#include "utils/arm/assembler_arm_shared.h"
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index 6afc3ddecb..eb3f870432 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -18,6 +18,8 @@
#include <type_traits>
#include "assembler_arm_vixl.h"
+#include "base/bit_utils.h"
+#include "base/bit_utils_iterator.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "thread.h"
diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc
index 4c0979e0b7..b390508ed4 100644
--- a/compiler/utils/dedupe_set_test.cc
+++ b/compiler/utils/dedupe_set_test.cc
@@ -23,7 +23,7 @@
#include "base/array_ref.h"
#include "dedupe_set-inl.h"
#include "gtest/gtest.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/compiler/utils/intrusive_forward_list.h b/compiler/utils/intrusive_forward_list.h
index b5fc2f2456..5a358ac2c4 100644
--- a/compiler/utils/intrusive_forward_list.h
+++ b/compiler/utils/intrusive_forward_list.h
@@ -23,6 +23,7 @@
#include <memory>
#include <type_traits>
+#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -42,10 +43,19 @@ struct IntrusiveForwardListHook {
mutable const IntrusiveForwardListHook* next_hook;
};
+template <typename Derived, typename Tag = void>
+struct IntrusiveForwardListNode : public IntrusiveForwardListHook {
+};
+
template <typename T, IntrusiveForwardListHook T::* NextPtr = &T::hook>
-class IntrusiveForwardListMemberHook;
+class IntrusiveForwardListMemberHookTraits;
+
+template <typename T, typename Tag = void>
+class IntrusiveForwardListBaseHookTraits;
-template <typename T, typename HookTraits = IntrusiveForwardListMemberHook<T>>
+template <typename T,
+ typename HookTraits =
+ IntrusiveForwardListBaseHookTraits<typename std::remove_const<T>::type>>
class IntrusiveForwardList;
template <typename T, typename HookTraits>
@@ -435,7 +445,7 @@ bool operator>=(const IntrusiveForwardList<T, HookTraits>& lhs,
}
template <typename T, IntrusiveForwardListHook T::* NextPtr>
-class IntrusiveForwardListMemberHook {
+class IntrusiveForwardListMemberHookTraits {
public:
static const IntrusiveForwardListHook* GetHook(const T* value) {
return &(value->*NextPtr);
@@ -447,6 +457,20 @@ class IntrusiveForwardListMemberHook {
}
};
+template <typename T, typename Tag>
+class IntrusiveForwardListBaseHookTraits {
+ public:
+ static const IntrusiveForwardListHook* GetHook(const T* value) {
+ // Explicit conversion to the "node" followed by implicit conversion to the "hook".
+ return static_cast<const IntrusiveForwardListNode<T, Tag>*>(value);
+ }
+
+ static T* GetValue(const IntrusiveForwardListHook* hook) {
+ return down_cast<T*>(down_cast<IntrusiveForwardListNode<T, Tag>*>(
+ const_cast<IntrusiveForwardListHook*>(hook)));
+ }
+};
+
} // namespace art
#endif // ART_COMPILER_UTILS_INTRUSIVE_FORWARD_LIST_H_
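The renamed IntrusiveForwardListMemberHookTraits keeps the old member-hook behaviour, while the new IntrusiveForwardListNode<Derived, Tag> base and IntrusiveForwardListBaseHookTraits<T, Tag> let a type carry one hook per tag and live in several lists at once: GetHook() upcasts the value to the tagged base, and GetValue() downcasts the hook back. A rough standalone analog of that cast dance (hypothetical HookBase/TaggedHook/Node names, plain static_cast instead of down_cast):

// Minimal sketch, not the ART classes: one empty tagged base per list gives an
// object one independent "next" hook per list.
#include <cassert>

struct HookBase { HookBase* next = nullptr; };

template <typename Derived, typename Tag>
struct TaggedHook : HookBase {};

struct TagA; struct TagB;

struct Node : TaggedHook<Node, TagA>, TaggedHook<Node, TagB> {
  int value = 0;
};

int main() {
  Node a, b;
  // Upcasts to distinct base subobjects -> distinct hooks for the two lists.
  HookBase* hook_a = static_cast<TaggedHook<Node, TagA>*>(&a);
  HookBase* hook_b = static_cast<TaggedHook<Node, TagB>*>(&a);
  assert(hook_a != hook_b);
  // Link `a` after `b` in the TagA list only; the TagB hook is untouched.
  static_cast<TaggedHook<Node, TagA>*>(&b)->next = hook_a;
  // Recovering the value from a hook mirrors GetValue(): cast the hook back to
  // the tagged node, then to the derived type.
  Node* recovered =
      static_cast<Node*>(static_cast<TaggedHook<Node, TagA>*>(hook_a));
  assert(recovered == &a);
  return 0;
}

The Tag parameter exists only to make the base classes distinct types, so the casts unambiguously select the hook belonging to one particular list, which is what the TwoLists test in the next file exercises.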
diff --git a/compiler/utils/intrusive_forward_list_test.cc b/compiler/utils/intrusive_forward_list_test.cc
index f2efa4dd15..939676cdc8 100644
--- a/compiler/utils/intrusive_forward_list_test.cc
+++ b/compiler/utils/intrusive_forward_list_test.cc
@@ -23,13 +23,14 @@
namespace art {
-struct IFLTestValue {
+struct IFLTestValue : public IntrusiveForwardListNode<IFLTestValue> {
// Deliberately not explicit.
- IFLTestValue(int v) : hook(), value(v) { } // NOLINT(runtime/explicit)
+ IFLTestValue(int v) : value(v) { } // NOLINT(runtime/explicit)
- IntrusiveForwardListHook hook;
int value;
};
+using IFLTestValueList = IntrusiveForwardList<IFLTestValue>;
+using ConstIFLTestValueList = IntrusiveForwardList<const IFLTestValue>;
bool operator==(const IFLTestValue& lhs, const IFLTestValue& rhs) {
return lhs.value == rhs.value;
@@ -39,6 +40,24 @@ bool operator<(const IFLTestValue& lhs, const IFLTestValue& rhs) {
return lhs.value < rhs.value;
}
+struct IFLTestValue2 {
+ // Deliberately not explicit.
+ IFLTestValue2(int v) : hook(), value(v) { } // NOLINT(runtime/explicit)
+
+ IntrusiveForwardListHook hook;
+ int value;
+};
+using IFLTestValue2List =
+ IntrusiveForwardList<IFLTestValue2, IntrusiveForwardListMemberHookTraits<IFLTestValue2>>;
+
+bool operator==(const IFLTestValue2& lhs, const IFLTestValue2& rhs) {
+ return lhs.value == rhs.value;
+}
+
+bool operator<(const IFLTestValue2& lhs, const IFLTestValue2& rhs) {
+ return lhs.value < rhs.value;
+}
+
#define ASSERT_LISTS_EQUAL(expected, value) \
do { \
ASSERT_EQ((expected).empty(), (value).empty()); \
@@ -47,16 +66,82 @@ bool operator<(const IFLTestValue& lhs, const IFLTestValue& rhs) {
ASSERT_TRUE(std::equal((expected).begin(), (expected).end(), (value).begin())); \
} while (false)
-TEST(IntrusiveForwardList, IteratorToConstIterator) {
- IntrusiveForwardList<IFLTestValue> ifl;
- IntrusiveForwardList<IFLTestValue>::iterator begin = ifl.begin();
- IntrusiveForwardList<IFLTestValue>::const_iterator cbegin = ifl.cbegin();
- IntrusiveForwardList<IFLTestValue>::const_iterator converted_begin = begin;
+class IntrusiveForwardListTest : public testing::Test {
+ public:
+ template <typename ListType>
+ void IteratorToConstIterator();
+
+ template <typename ListType>
+ void IteratorOperators();
+
+ template <typename ListType>
+ void ConstructRange();
+
+ template <typename ListType>
+ void Assign();
+
+ template <typename ListType>
+ void PushPop();
+
+ template <typename ListType>
+ void InsertAfter1();
+
+ template <typename ListType>
+ void InsertAfter2();
+
+ template <typename ListType>
+ void EraseAfter1();
+
+ template <typename ListType>
+ void EraseAfter2();
+
+ template <typename ListType>
+ void SwapClear();
+
+ template <typename ListType>
+ void SpliceAfter();
+
+ template <typename ListType>
+ void Remove();
+
+ template <typename ListType>
+ void Unique();
+
+ template <typename ListType>
+ void Merge();
+
+ template <typename ListType>
+ void Sort1();
+
+ template <typename ListType>
+ void Sort2();
+
+ template <typename ListType>
+ void Reverse();
+
+ template <typename ListType>
+ void ModifyValue();
+};
+
+template <typename ListType>
+void IntrusiveForwardListTest::IteratorToConstIterator() {
+ ListType ifl;
+ typename ListType::iterator begin = ifl.begin();
+ typename ListType::const_iterator cbegin = ifl.cbegin();
+ typename ListType::const_iterator converted_begin = begin;
ASSERT_TRUE(converted_begin == cbegin);
}
-TEST(IntrusiveForwardList, IteratorOperators) {
- IntrusiveForwardList<IFLTestValue> ifl;
+TEST_F(IntrusiveForwardListTest, IteratorToConstIterator) {
+ IteratorToConstIterator<IFLTestValueList>();
+ IteratorToConstIterator<ConstIFLTestValueList>();
+ IteratorToConstIterator<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::IteratorOperators() {
+ using ValueType = typename ListType::value_type;
+ ListType ifl;
ASSERT_TRUE(ifl.begin() == ifl.cbegin());
ASSERT_FALSE(ifl.begin() != ifl.cbegin());
ASSERT_TRUE(ifl.end() == ifl.cend());
@@ -65,37 +150,61 @@ TEST(IntrusiveForwardList, IteratorOperators) {
ASSERT_TRUE(ifl.begin() == ifl.end()); // Empty.
ASSERT_FALSE(ifl.begin() != ifl.end()); // Empty.
- IFLTestValue value(1);
+ ValueType value(1);
ifl.insert_after(ifl.cbefore_begin(), value);
ASSERT_FALSE(ifl.begin() == ifl.end()); // Not empty.
ASSERT_TRUE(ifl.begin() != ifl.end()); // Not empty.
}
-TEST(IntrusiveForwardList, ConstructRange) {
+TEST_F(IntrusiveForwardListTest, IteratorOperators) {
+ IteratorOperators<IFLTestValueList>();
+ IteratorOperators<ConstIFLTestValueList>();
+ IteratorOperators<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::ConstructRange() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref({ 1, 2, 7 });
- std::vector<IFLTestValue> storage(ref.begin(), ref.end());
- IntrusiveForwardList<IFLTestValue> ifl(storage.begin(), storage.end());
+ std::vector<ValueType> storage(ref.begin(), ref.end());
+ ListType ifl(storage.begin(), storage.end());
ASSERT_LISTS_EQUAL(ref, ifl);
}
-TEST(IntrusiveForwardList, Assign) {
+TEST_F(IntrusiveForwardListTest, ConstructRange) {
+ ConstructRange<IFLTestValueList>();
+ ConstructRange<ConstIFLTestValueList>();
+ ConstructRange<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::Assign() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref1({ 2, 8, 5 });
- std::vector<IFLTestValue> storage1(ref1.begin(), ref1.end());
- IntrusiveForwardList<IFLTestValue> ifl;
+ std::vector<ValueType> storage1(ref1.begin(), ref1.end());
+ ListType ifl;
ifl.assign(storage1.begin(), storage1.end());
ASSERT_LISTS_EQUAL(ref1, ifl);
std::forward_list<int> ref2({ 7, 1, 3 });
- std::vector<IFLTestValue> storage2(ref2.begin(), ref2.end());
+ std::vector<ValueType> storage2(ref2.begin(), ref2.end());
ifl.assign(storage2.begin(), storage2.end());
ASSERT_LISTS_EQUAL(ref2, ifl);
}
-TEST(IntrusiveForwardList, PushPop) {
- IFLTestValue value3(3);
- IFLTestValue value7(7);
+TEST_F(IntrusiveForwardListTest, Assign) {
+ Assign<IFLTestValueList>();
+ Assign<ConstIFLTestValueList>();
+ Assign<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::PushPop() {
+ using ValueType = typename ListType::value_type;
+ ValueType value3(3);
+ ValueType value7(7);
std::forward_list<int> ref;
- IntrusiveForwardList<IFLTestValue> ifl;
+ ListType ifl;
ASSERT_LISTS_EQUAL(ref, ifl);
ref.push_front(3);
ifl.push_front(value3);
@@ -114,13 +223,21 @@ TEST(IntrusiveForwardList, PushPop) {
ASSERT_LISTS_EQUAL(ref, ifl);
}
-TEST(IntrusiveForwardList, InsertAfter1) {
- IFLTestValue value4(4);
- IFLTestValue value8(8);
- IFLTestValue value5(5);
- IFLTestValue value3(3);
+TEST_F(IntrusiveForwardListTest, PushPop) {
+ PushPop<IFLTestValueList>();
+ PushPop<ConstIFLTestValueList>();
+ PushPop<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::InsertAfter1() {
+ using ValueType = typename ListType::value_type;
+ ValueType value4(4);
+ ValueType value8(8);
+ ValueType value5(5);
+ ValueType value3(3);
std::forward_list<int> ref;
- IntrusiveForwardList<IFLTestValue> ifl;
+ ListType ifl;
auto ref_it = ref.insert_after(ref.before_begin(), 4);
auto ifl_it = ifl.insert_after(ifl.before_begin(), value4);
@@ -149,23 +266,31 @@ TEST(IntrusiveForwardList, InsertAfter1) {
ASSERT_EQ(*ref_it, *ifl_it);
}
-TEST(IntrusiveForwardList, InsertAfter2) {
+TEST_F(IntrusiveForwardListTest, InsertAfter1) {
+ InsertAfter1<IFLTestValueList>();
+ InsertAfter1<ConstIFLTestValueList>();
+ InsertAfter1<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::InsertAfter2() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref;
- IntrusiveForwardList<IFLTestValue> ifl;
+ ListType ifl;
auto ref_it = ref.insert_after(ref.before_begin(), { 2, 8, 5 });
- std::vector<IFLTestValue> storage1({ { 2 }, { 8 }, { 5 } });
+ std::vector<ValueType> storage1({ { 2 }, { 8 }, { 5 } });
auto ifl_it = ifl.insert_after(ifl.before_begin(), storage1.begin(), storage1.end());
ASSERT_LISTS_EQUAL(ref, ifl);
ASSERT_EQ(*ref_it, *ifl_it);
- std::vector<IFLTestValue> storage2({ { 7 }, { 2 } });
+ std::vector<ValueType> storage2({ { 7 }, { 2 } });
ref_it = ref.insert_after(ref.begin(), { 7, 2 });
ifl_it = ifl.insert_after(ifl.begin(), storage2.begin(), storage2.end());
ASSERT_LISTS_EQUAL(ref, ifl);
ASSERT_EQ(*ref_it, *ifl_it);
- std::vector<IFLTestValue> storage3({ { 1 }, { 3 }, { 4 }, { 9 } });
+ std::vector<ValueType> storage3({ { 1 }, { 3 }, { 4 }, { 9 } });
ref_it = ref.begin();
ifl_it = ifl.begin();
std::advance(ref_it, std::distance(ref.begin(), ref.end()) - 1);
@@ -175,10 +300,18 @@ TEST(IntrusiveForwardList, InsertAfter2) {
ASSERT_LISTS_EQUAL(ref, ifl);
}
-TEST(IntrusiveForwardList, EraseAfter1) {
+TEST_F(IntrusiveForwardListTest, InsertAfter2) {
+ InsertAfter2<IFLTestValueList>();
+ InsertAfter2<ConstIFLTestValueList>();
+ InsertAfter2<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::EraseAfter1() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref({ 1, 2, 7, 4, 5 });
- std::vector<IFLTestValue> storage(ref.begin(), ref.end());
- IntrusiveForwardList<IFLTestValue> ifl(storage.begin(), storage.end());
+ std::vector<ValueType> storage(ref.begin(), ref.end());
+ ListType ifl(storage.begin(), storage.end());
ASSERT_LISTS_EQUAL(ref, ifl);
CHECK_EQ(std::distance(ref.begin(), ref.end()), 5);
@@ -230,10 +363,18 @@ TEST(IntrusiveForwardList, EraseAfter1) {
ASSERT_TRUE(ifl_it == ifl.begin());
}
-TEST(IntrusiveForwardList, EraseAfter2) {
+TEST_F(IntrusiveForwardListTest, EraseAfter1) {
+ EraseAfter1<IFLTestValueList>();
+ EraseAfter1<ConstIFLTestValueList>();
+ EraseAfter1<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::EraseAfter2() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref({ 1, 2, 7, 4, 5, 3, 2, 8, 9 });
- std::vector<IFLTestValue> storage(ref.begin(), ref.end());
- IntrusiveForwardList<IFLTestValue> ifl(storage.begin(), storage.end());
+ std::vector<ValueType> storage(ref.begin(), ref.end());
+ ListType ifl(storage.begin(), storage.end());
ASSERT_LISTS_EQUAL(ref, ifl);
CHECK_EQ(std::distance(ref.begin(), ref.end()), 9);
@@ -262,13 +403,21 @@ TEST(IntrusiveForwardList, EraseAfter2) {
CHECK_EQ(std::distance(ref.begin(), ref.end()), 0);
}
-TEST(IntrusiveForwardList, SwapClear) {
+TEST_F(IntrusiveForwardListTest, EraseAfter2) {
+ EraseAfter2<IFLTestValueList>();
+ EraseAfter2<ConstIFLTestValueList>();
+ EraseAfter2<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::SwapClear() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref1({ 1, 2, 7 });
- std::vector<IFLTestValue> storage1(ref1.begin(), ref1.end());
- IntrusiveForwardList<IFLTestValue> ifl1(storage1.begin(), storage1.end());
+ std::vector<ValueType> storage1(ref1.begin(), ref1.end());
+ ListType ifl1(storage1.begin(), storage1.end());
std::forward_list<int> ref2({ 3, 8, 6 });
- std::vector<IFLTestValue> storage2(ref2.begin(), ref2.end());
- IntrusiveForwardList<IFLTestValue> ifl2(storage2.begin(), storage2.end());
+ std::vector<ValueType> storage2(ref2.begin(), ref2.end());
+ ListType ifl2(storage2.begin(), storage2.end());
ASSERT_LISTS_EQUAL(ref1, ifl1);
ASSERT_LISTS_EQUAL(ref2, ifl2);
ref1.swap(ref2);
@@ -289,12 +438,20 @@ TEST(IntrusiveForwardList, SwapClear) {
ASSERT_LISTS_EQUAL(ref2, ifl2);
}
-TEST(IntrusiveForwardList, SpliceAfter) {
+TEST_F(IntrusiveForwardListTest, SwapClear) {
+ SwapClear<IFLTestValueList>();
+ SwapClear<ConstIFLTestValueList>();
+ SwapClear<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::SpliceAfter() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref1({ 3, 1, 2, 7, 4, 5, 4, 8, 7 });
std::forward_list<int> ref2;
- std::vector<IFLTestValue> storage(ref1.begin(), ref1.end());
- IntrusiveForwardList<IFLTestValue> ifl1(storage.begin(), storage.end());
- IntrusiveForwardList<IFLTestValue> ifl2;
+ std::vector<ValueType> storage(ref1.begin(), ref1.end());
+ ListType ifl1(storage.begin(), storage.end());
+ ListType ifl2;
ASSERT_LISTS_EQUAL(ref1, ifl1);
ASSERT_LISTS_EQUAL(ref2, ifl2);
@@ -398,10 +555,18 @@ TEST(IntrusiveForwardList, SpliceAfter) {
ASSERT_LISTS_EQUAL(check, ifl2);
}
-TEST(IntrusiveForwardList, Remove) {
+TEST_F(IntrusiveForwardListTest, SpliceAfter) {
+ SpliceAfter<IFLTestValueList>();
+ SpliceAfter<ConstIFLTestValueList>();
+ SpliceAfter<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::Remove() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref({ 3, 1, 2, 7, 4, 5, 4, 8, 7 });
- std::vector<IFLTestValue> storage(ref.begin(), ref.end());
- IntrusiveForwardList<IFLTestValue> ifl(storage.begin(), storage.end());
+ std::vector<ValueType> storage(ref.begin(), ref.end());
+ ListType ifl(storage.begin(), storage.end());
ASSERT_LISTS_EQUAL(ref, ifl);
ref.remove(1);
ifl.remove(1);
@@ -409,20 +574,28 @@ TEST(IntrusiveForwardList, Remove) {
ref.remove(4);
ifl.remove(4);
ASSERT_LISTS_EQUAL(ref, ifl);
- auto odd = [](IFLTestValue value) { return (value.value & 1) != 0; }; // NOLINT(readability/braces)
+ auto odd = [](ValueType value) { return (value.value & 1) != 0; }; // NOLINT(readability/braces)
ref.remove_if(odd);
ifl.remove_if(odd);
ASSERT_LISTS_EQUAL(ref, ifl);
- auto all = [](IFLTestValue value ATTRIBUTE_UNUSED) { return true; }; // NOLINT(readability/braces)
+ auto all = [](ValueType value ATTRIBUTE_UNUSED) { return true; }; // NOLINT(readability/braces)
ref.remove_if(all);
ifl.remove_if(all);
ASSERT_LISTS_EQUAL(ref, ifl);
}
-TEST(IntrusiveForwardList, Unique) {
+TEST_F(IntrusiveForwardListTest, Remove) {
+ Remove<IFLTestValueList>();
+ Remove<ConstIFLTestValueList>();
+ Remove<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::Unique() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref({ 3, 1, 1, 2, 3, 3, 7, 7, 4, 4, 5, 7 });
- std::vector<IFLTestValue> storage(ref.begin(), ref.end());
- IntrusiveForwardList<IFLTestValue> ifl(storage.begin(), storage.end());
+ std::vector<ValueType> storage(ref.begin(), ref.end());
+ ListType ifl(storage.begin(), storage.end());
ASSERT_LISTS_EQUAL(ref, ifl);
ref.unique();
ifl.unique();
@@ -430,7 +603,7 @@ TEST(IntrusiveForwardList, Unique) {
std::forward_list<int> check({ 3, 1, 2, 3, 7, 4, 5, 7 });
ASSERT_LISTS_EQUAL(check, ifl);
- auto bin_pred = [](IFLTestValue lhs, IFLTestValue rhs) {
+ auto bin_pred = [](const ValueType& lhs, const ValueType& rhs) {
return (lhs.value & ~1) == (rhs.value & ~1);
};
ref.unique(bin_pred);
@@ -440,13 +613,21 @@ TEST(IntrusiveForwardList, Unique) {
ASSERT_LISTS_EQUAL(check, ifl);
}
-TEST(IntrusiveForwardList, Merge) {
+TEST_F(IntrusiveForwardListTest, Unique) {
+ Unique<IFLTestValueList>();
+ Unique<ConstIFLTestValueList>();
+ Unique<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::Merge() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref1({ 1, 4, 8, 8, 12 });
- std::vector<IFLTestValue> storage1(ref1.begin(), ref1.end());
- IntrusiveForwardList<IFLTestValue> ifl1(storage1.begin(), storage1.end());
+ std::vector<ValueType> storage1(ref1.begin(), ref1.end());
+ ListType ifl1(storage1.begin(), storage1.end());
std::forward_list<int> ref2({ 3, 5, 6, 7, 9 });
- std::vector<IFLTestValue> storage2(ref2.begin(), ref2.end());
- IntrusiveForwardList<IFLTestValue> ifl2(storage2.begin(), storage2.end());
+ std::vector<ValueType> storage2(ref2.begin(), ref2.end());
+ ListType ifl2(storage2.begin(), storage2.end());
ASSERT_LISTS_EQUAL(ref1, ifl1);
ASSERT_LISTS_EQUAL(ref2, ifl2);
CHECK(std::is_sorted(ref1.begin(), ref1.end()));
@@ -460,10 +641,18 @@ TEST(IntrusiveForwardList, Merge) {
ASSERT_LISTS_EQUAL(check, ifl1);
}
-TEST(IntrusiveForwardList, Sort1) {
+TEST_F(IntrusiveForwardListTest, Merge) {
+ Merge<IFLTestValueList>();
+ Merge<ConstIFLTestValueList>();
+ Merge<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::Sort1() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref({ 2, 9, 8, 3, 7, 4, 1, 5, 3, 0 });
- std::vector<IFLTestValue> storage(ref.begin(), ref.end());
- IntrusiveForwardList<IFLTestValue> ifl(storage.begin(), storage.end());
+ std::vector<ValueType> storage(ref.begin(), ref.end());
+ ListType ifl(storage.begin(), storage.end());
ASSERT_LISTS_EQUAL(ref, ifl);
CHECK(!std::is_sorted(ref.begin(), ref.end()));
ref.sort();
@@ -473,12 +662,20 @@ TEST(IntrusiveForwardList, Sort1) {
ASSERT_LISTS_EQUAL(check, ifl);
}
-TEST(IntrusiveForwardList, Sort2) {
+TEST_F(IntrusiveForwardListTest, Sort1) {
+ Sort1<IFLTestValueList>();
+ Sort1<ConstIFLTestValueList>();
+ Sort1<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::Sort2() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref({ 2, 9, 8, 3, 7, 4, 1, 5, 3, 0 });
- std::vector<IFLTestValue> storage(ref.begin(), ref.end());
- IntrusiveForwardList<IFLTestValue> ifl(storage.begin(), storage.end());
+ std::vector<ValueType> storage(ref.begin(), ref.end());
+ ListType ifl(storage.begin(), storage.end());
ASSERT_LISTS_EQUAL(ref, ifl);
- auto cmp = [](IFLTestValue lhs, IFLTestValue rhs) {
+ auto cmp = [](const ValueType& lhs, const ValueType& rhs) {
return (lhs.value & ~1) < (rhs.value & ~1);
};
CHECK(!std::is_sorted(ref.begin(), ref.end(), cmp));
@@ -489,10 +686,18 @@ TEST(IntrusiveForwardList, Sort2) {
ASSERT_LISTS_EQUAL(check, ifl);
}
-TEST(IntrusiveForwardList, Reverse) {
+TEST_F(IntrusiveForwardListTest, Sort2) {
+ Sort2<IFLTestValueList>();
+ Sort2<ConstIFLTestValueList>();
+ Sort2<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::Reverse() {
+ using ValueType = typename ListType::value_type;
std::forward_list<int> ref({ 8, 3, 5, 4, 1, 3 });
- std::vector<IFLTestValue> storage(ref.begin(), ref.end());
- IntrusiveForwardList<IFLTestValue> ifl(storage.begin(), storage.end());
+ std::vector<ValueType> storage(ref.begin(), ref.end());
+ ListType ifl(storage.begin(), storage.end());
ASSERT_LISTS_EQUAL(ref, ifl);
CHECK(!std::is_sorted(ref.begin(), ref.end()));
ref.reverse();
@@ -502,4 +707,73 @@ TEST(IntrusiveForwardList, Reverse) {
ASSERT_LISTS_EQUAL(check, ifl);
}
+TEST_F(IntrusiveForwardListTest, Reverse) {
+ Reverse<IFLTestValueList>();
+ Reverse<ConstIFLTestValueList>();
+ Reverse<IFLTestValue2List>();
+}
+
+template <typename ListType>
+void IntrusiveForwardListTest::ModifyValue() {
+ using ValueType = typename ListType::value_type;
+ std::forward_list<int> ref({ 3, 7, 42 });
+ std::vector<ValueType> storage(ref.begin(), ref.end());
+ ListType ifl(storage.begin(), storage.end());
+ ASSERT_LISTS_EQUAL(ref, ifl);
+
+ auto add1 = [](const ValueType& value) { return value.value + 1; }; // NOLINT [readability/braces]
+ std::transform(ref.begin(), ref.end(), ref.begin(), add1);
+ std::transform(ifl.begin(), ifl.end(), ifl.begin(), add1);
+ ASSERT_LISTS_EQUAL(ref, ifl);
+}
+
+TEST_F(IntrusiveForwardListTest, ModifyValue) {
+ ModifyValue<IFLTestValueList>();
+ // Does not compile with ConstIFLTestValueList because LHS of the assignment is const.
+ // ModifyValue<ConstIFLTestValueList>();
+ static_assert(std::is_const<ConstIFLTestValueList::iterator::value_type>::value, "Const check.");
+ ModifyValue<IFLTestValue2List>();
+}
+
+struct Tag1;
+struct Tag2;
+struct TwoListsValue : public IntrusiveForwardListNode<TwoListsValue, Tag1>,
+ public IntrusiveForwardListNode<TwoListsValue, Tag2> {
+ // Deliberately not explicit.
+ TwoListsValue(int v) : value(v) { } // NOLINT(runtime/explicit)
+
+ int value;
+};
+using FirstList =
+ IntrusiveForwardList<TwoListsValue, IntrusiveForwardListBaseHookTraits<TwoListsValue, Tag1>>;
+using SecondList =
+ IntrusiveForwardList<TwoListsValue, IntrusiveForwardListBaseHookTraits<TwoListsValue, Tag2>>;
+
+bool operator==(const TwoListsValue& lhs, const TwoListsValue& rhs) {
+ return lhs.value == rhs.value;
+}
+
+TEST_F(IntrusiveForwardListTest, TwoLists) {
+ // Test that a value can be in two lists at the same time and the hooks do not interfere.
+ std::vector<TwoListsValue> storage({ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }); // storage[i] = i
+
+ std::vector<int> order1({ 3, 1, 7, 2, 8, 9, 4, 0, 6, 5 });
+ FirstList list1;
+ auto pos1 = list1.before_begin();
+ for (size_t idx : order1) {
+ pos1 = list1.insert_after(pos1, storage[idx]);
+ }
+
+ std::vector<int> order2({ 8, 5, 1, 6, 7, 2, 9, 3, 0, 4 });
+ SecondList list2;
+ auto pos2 = list2.before_begin();
+ for (size_t idx : order2) {
+ pos2 = list2.insert_after(pos2, storage[idx]);
+ }
+
+  // Using `storage[i] = i`, we can easily check that the nodes of each list are in the right order.
+ ASSERT_LISTS_EQUAL(order1, list1);
+ ASSERT_LISTS_EQUAL(order2, list2);
+}
+
} // namespace art
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index a99d02d4d0..0b05b752da 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -404,6 +404,129 @@ uint32_t MipsAssembler::EmitFI(int opcode, int fmt, FRegister ft, uint16_t imm)
return encoding;
}
+uint32_t MipsAssembler::EmitMsa3R(int operation,
+ int df,
+ VectorRegister wt,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(wt, kNoVectorRegister);
+ CHECK_NE(ws, kNoVectorRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsaOperationShift |
+ df << kDfShift |
+ static_cast<uint32_t>(wt) << kWtShift |
+ static_cast<uint32_t>(ws) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+ return encoding;
+}
+
+uint32_t MipsAssembler::EmitMsaBIT(int operation,
+ int df_m,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(ws, kNoVectorRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsaOperationShift |
+ df_m << kDfMShift |
+ static_cast<uint32_t>(ws) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+ return encoding;
+}
+
+uint32_t MipsAssembler::EmitMsaELM(int operation,
+ int df_n,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(ws, kNoVectorRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsaELMOperationShift |
+ df_n << kDfNShift |
+ static_cast<uint32_t>(ws) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+ return encoding;
+}
+
+uint32_t MipsAssembler::EmitMsaMI10(int s10,
+ Register rs,
+ VectorRegister wd,
+ int minor_opcode,
+ int df) {
+ CHECK_NE(rs, kNoRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ CHECK(IsUint<10>(s10)) << s10;
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ s10 << kS10Shift |
+ static_cast<uint32_t>(rs) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode << kS10MinorShift |
+ df;
+ Emit(encoding);
+ return encoding;
+}
+
+uint32_t MipsAssembler::EmitMsaI10(int operation,
+ int df,
+ int i10,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(wd, kNoVectorRegister);
+ CHECK(IsUint<10>(i10)) << i10;
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsaOperationShift |
+ df << kDfShift |
+ i10 << kI10Shift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+ return encoding;
+}
+
+uint32_t MipsAssembler::EmitMsa2R(int operation,
+ int df,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(ws, kNoVectorRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsa2ROperationShift |
+ df << kDf2RShift |
+ static_cast<uint32_t>(ws) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+ return encoding;
+}
+
+uint32_t MipsAssembler::EmitMsa2RF(int operation,
+ int df,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode) {
+ CHECK_NE(ws, kNoVectorRegister);
+ CHECK_NE(wd, kNoVectorRegister);
+ uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
+ operation << kMsa2RFOperationShift |
+ df << kDf2RShift |
+ static_cast<uint32_t>(ws) << kWsShift |
+ static_cast<uint32_t>(wd) << kWdShift |
+ minor_opcode;
+ Emit(encoding);
+ return encoding;
+}
+
void MipsAssembler::Addu(Register rd, Register rs, Register rt) {
DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x21), rd, rs, rt);
}
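EmitMsa3R() and its siblings above all follow the same pattern: OR the MSA major opcode, the operation and data-format fields, the vector registers, and a minor opcode into one 32-bit word, then Emit() it. The sketch below is not ART code; the field positions are taken from the MIPS MSA instruction format and are an assumption here, since the k*Shift constants are not part of this diff. It shows how, e.g., AddvW() above maps to ADDV.W.

// Minimal sketch of MSA 3R encoding, assuming the standard MSA field layout.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kMsaMajor = 0x1e;  // 011110b, instruction bits 31..26.

constexpr uint32_t EncodeMsa3R(uint32_t operation,  // bits 25..23
                               uint32_t df,         // bits 22..21 (data format)
                               uint32_t wt,         // bits 20..16
                               uint32_t ws,         // bits 15..11
                               uint32_t wd,         // bits 10..6
                               uint32_t minor) {    // bits 5..0
  return (kMsaMajor << 26) | (operation << 23) | (df << 21) |
         (wt << 16) | (ws << 11) | (wd << 6) | minor;
}

int main() {
  // ADDV.W $w2, $w1, $w0 -> operation 0x0, df 0x2 (word), minor opcode 0xe,
  // matching AddvW(wd, ws, wt) -> EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xe) below.
  // Prints 0x7840088e under the assumed field layout.
  std::printf("0x%08x\n", EncodeMsa3R(0x0, 0x2, /*wt=*/0, /*ws=*/1, /*wd=*/2, 0xe));
  return 0;
}

The BIT/ELM/MI10/I10/2R/2RF emitters differ only in which middle fields they pack (an immediate, an element index, or a single source register) around the same major/minor opcode framing.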
@@ -635,9 +758,8 @@ void MipsAssembler::Ins(Register rd, Register rt, int pos, int size) {
DsFsmInstrRrr(EmitR(0x1f, rt, rd, static_cast<Register>(pos + size - 1), pos, 0x04), rd, rd, rt);
}
-// TODO: This instruction is available in both R6 and MSA and it should be used when available.
void MipsAssembler::Lsa(Register rd, Register rs, Register rt, int saPlusOne) {
- CHECK(IsR6());
+ CHECK(IsR6() || HasMsa());
CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
int sa = saPlusOne - 1;
DsFsmInstrRrr(EmitR(0x0, rs, rt, rd, sa, 0x05), rd, rs, rt);
@@ -653,7 +775,7 @@ void MipsAssembler::ShiftAndAdd(Register dst,
if (shamt == TIMES_1) {
// Catch the special case where the shift amount is zero (0).
Addu(dst, src_base, src_idx);
- } else if (IsR6()) {
+ } else if (IsR6() || HasMsa()) {
Lsa(dst, src_idx, src_base, shamt);
} else {
Sll(tmp, src_idx, shamt);
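The change above lets Lsa() be used whenever MSA is present, not only on R6, so ShiftAndAdd() can fold the scale-and-add of an array element address into a single instruction. A minimal sketch (not ART code) of the value ShiftAndAdd() computes, assuming shamt is the log2 of the element size:

#include <cstdint>
#include <cstdio>

// dst = src_base + (src_idx << shamt): one LSA on R6/MSA cores, SLL + ADDU otherwise.
uint32_t ShiftAndAdd(uint32_t src_base, uint32_t src_idx, int shamt) {
  if (shamt == 0) {
    return src_base + src_idx;  // TIMES_1: no scaling needed, a single ADDU.
  }
  return src_base + (src_idx << shamt);
}

int main() {
  // Address of element 3 in an array of 8-byte elements based at 0x1000.
  std::printf("0x%x\n", ShiftAndAdd(0x1000, 3, /*shamt=*/3));  // Prints 0x1018.
  return 0;
}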
@@ -1709,6 +1831,1079 @@ void MipsAssembler::PopAndReturn(Register rd, Register rt) {
SetReorder(reordering);
}
+void MipsAssembler::AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1e),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1e),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1e),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1e),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x12),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Add_aB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Add_aH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Add_aW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Add_aD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Ave_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Ave_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Ave_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Ave_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Ave_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Ave_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Ave_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Ave_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Aver_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Aver_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Aver_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Aver_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Aver_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Aver_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Aver_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Aver_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x10),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Max_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Max_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Max_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Max_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Max_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x3, 0x0, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Max_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x3, 0x1, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Max_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x3, 0x2, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Max_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x3, 0x3, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Min_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x0, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Min_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Min_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Min_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Min_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x0, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Min_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Min_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Min_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0xe),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x1, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x2, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FmaxW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FmaxD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FminW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::FminD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x1b),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Ffint_sW(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa2RF(0x19e, 0x0, ws, wd, 0x1e),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::Ffint_sD(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa2RF(0x19e, 0x1, ws, wd, 0x1e),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::Ftint_sW(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa2RF(0x19c, 0x0, ws, wd, 0x1e),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::Ftint_sD(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa2RF(0x19c, 0x1, ws, wd, 0x1e),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xd),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::SlliB(VectorRegister wd, VectorRegister ws, int shamt3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(shamt3)) << shamt3;
+ DsFsmInstrFff(EmitMsaBIT(0x0, shamt3 | kMsaDfMByteMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SlliH(VectorRegister wd, VectorRegister ws, int shamt4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(shamt4)) << shamt4;
+ DsFsmInstrFff(EmitMsaBIT(0x0, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SlliW(VectorRegister wd, VectorRegister ws, int shamt5) {
+ CHECK(HasMsa());
+ CHECK(IsUint<5>(shamt5)) << shamt5;
+ DsFsmInstrFff(EmitMsaBIT(0x0, shamt5 | kMsaDfMWordMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SlliD(VectorRegister wd, VectorRegister ws, int shamt6) {
+ CHECK(HasMsa());
+ CHECK(IsUint<6>(shamt6)) << shamt6;
+ DsFsmInstrFff(EmitMsaBIT(0x0, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SraiB(VectorRegister wd, VectorRegister ws, int shamt3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(shamt3)) << shamt3;
+ DsFsmInstrFff(EmitMsaBIT(0x1, shamt3 | kMsaDfMByteMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SraiH(VectorRegister wd, VectorRegister ws, int shamt4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(shamt4)) << shamt4;
+ DsFsmInstrFff(EmitMsaBIT(0x1, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SraiW(VectorRegister wd, VectorRegister ws, int shamt5) {
+ CHECK(HasMsa());
+ CHECK(IsUint<5>(shamt5)) << shamt5;
+ DsFsmInstrFff(EmitMsaBIT(0x1, shamt5 | kMsaDfMWordMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SraiD(VectorRegister wd, VectorRegister ws, int shamt6) {
+ CHECK(HasMsa());
+ CHECK(IsUint<6>(shamt6)) << shamt6;
+ DsFsmInstrFff(EmitMsaBIT(0x1, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SrliB(VectorRegister wd, VectorRegister ws, int shamt3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(shamt3)) << shamt3;
+ DsFsmInstrFff(EmitMsaBIT(0x2, shamt3 | kMsaDfMByteMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SrliH(VectorRegister wd, VectorRegister ws, int shamt4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(shamt4)) << shamt4;
+ DsFsmInstrFff(EmitMsaBIT(0x2, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SrliW(VectorRegister wd, VectorRegister ws, int shamt5) {
+ CHECK(HasMsa());
+ CHECK(IsUint<5>(shamt5)) << shamt5;
+ DsFsmInstrFff(EmitMsaBIT(0x2, shamt5 | kMsaDfMWordMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SrliD(VectorRegister wd, VectorRegister ws, int shamt6) {
+ CHECK(HasMsa());
+ CHECK(IsUint<6>(shamt6)) << shamt6;
+ DsFsmInstrFff(EmitMsaBIT(0x2, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::MoveV(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsaBIT(0x1, 0x3e, ws, wd, 0x19),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SplatiB(VectorRegister wd, VectorRegister ws, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ DsFsmInstrFff(EmitMsaELM(0x1, n4 | kMsaDfNByteMask, ws, wd, 0x19),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SplatiH(VectorRegister wd, VectorRegister ws, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ DsFsmInstrFff(EmitMsaELM(0x1, n3 | kMsaDfNHalfwordMask, ws, wd, 0x19),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SplatiW(VectorRegister wd, VectorRegister ws, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ DsFsmInstrFff(EmitMsaELM(0x1, n2 | kMsaDfNWordMask, ws, wd, 0x19),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::SplatiD(VectorRegister wd, VectorRegister ws, int n1) {
+ CHECK(HasMsa());
+ CHECK(IsUint<1>(n1)) << n1;
+ DsFsmInstrFff(EmitMsaELM(0x1, n1 | kMsaDfNDoublewordMask, ws, wd, 0x19),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::FillB(VectorRegister wd, Register rs) {
+ CHECK(HasMsa());
+ DsFsmInstrFr(EmitMsa2R(0xc0, 0x0, static_cast<VectorRegister>(rs), wd, 0x1e),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::FillH(VectorRegister wd, Register rs) {
+ CHECK(HasMsa());
+ DsFsmInstrFr(EmitMsa2R(0xc0, 0x1, static_cast<VectorRegister>(rs), wd, 0x1e),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::FillW(VectorRegister wd, Register rs) {
+ CHECK(HasMsa());
+ DsFsmInstrFr(EmitMsa2R(0xc0, 0x2, static_cast<VectorRegister>(rs), wd, 0x1e),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::LdiB(VectorRegister wd, int imm8) {
+ CHECK(HasMsa());
+ CHECK(IsInt<8>(imm8)) << imm8;
+ DsFsmInstrFr(EmitMsaI10(0x6, 0x0, imm8 & kMsaS10Mask, wd, 0x7),
+ static_cast<FRegister>(wd),
+ ZERO);
+}
+
+void MipsAssembler::LdiH(VectorRegister wd, int imm10) {
+ CHECK(HasMsa());
+ CHECK(IsInt<10>(imm10)) << imm10;
+ DsFsmInstrFr(EmitMsaI10(0x6, 0x1, imm10 & kMsaS10Mask, wd, 0x7),
+ static_cast<FRegister>(wd),
+ ZERO);
+}
+
+void MipsAssembler::LdiW(VectorRegister wd, int imm10) {
+ CHECK(HasMsa());
+ CHECK(IsInt<10>(imm10)) << imm10;
+ DsFsmInstrFr(EmitMsaI10(0x6, 0x2, imm10 & kMsaS10Mask, wd, 0x7),
+ static_cast<FRegister>(wd),
+ ZERO);
+}
+
+void MipsAssembler::LdiD(VectorRegister wd, int imm10) {
+ CHECK(HasMsa());
+ CHECK(IsInt<10>(imm10)) << imm10;
+ DsFsmInstrFr(EmitMsaI10(0x6, 0x3, imm10 & kMsaS10Mask, wd, 0x7),
+ static_cast<FRegister>(wd),
+ ZERO);
+}
+
+void MipsAssembler::LdB(VectorRegister wd, Register rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<10>(offset)) << offset;
+ DsFsmInstrFr(EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x8, 0x0),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::LdH(VectorRegister wd, Register rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<11>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMipsHalfwordSize);
+ DsFsmInstrFr(EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x8, 0x1),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::LdW(VectorRegister wd, Register rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<12>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMipsWordSize);
+ DsFsmInstrFr(EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x8, 0x2),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::LdD(VectorRegister wd, Register rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<13>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMipsDoublewordSize);
+ DsFsmInstrFr(EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x8, 0x3),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::StB(VectorRegister wd, Register rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<10>(offset)) << offset;
+ DsFsmInstrFR(EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x9, 0x0), static_cast<FRegister>(wd), rs);
+}
+
+void MipsAssembler::StH(VectorRegister wd, Register rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<11>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMipsHalfwordSize);
+ DsFsmInstrFR(EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x9, 0x1),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::StW(VectorRegister wd, Register rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<12>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMipsWordSize);
+ DsFsmInstrFR(EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x9, 0x2),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::StD(VectorRegister wd, Register rs, int offset) {
+ CHECK(HasMsa());
+ CHECK(IsInt<13>(offset)) << offset;
+ CHECK_ALIGNED(offset, kMipsDoublewordSize);
+ DsFsmInstrFR(EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x9, 0x3),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvrH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
void MipsAssembler::LoadConst32(Register rd, int32_t value) {
if (IsUint<16>(value)) {
// Use OR with (unsigned) immediate to encode 16b unsigned int.
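
For readers unfamiliar with the MSA MI10 addressing used by the ld.df/st.df emitters above: the instruction carries a signed 10-bit offset measured in element-size units, which is why LdH/LdW/LdD check a wider byte-offset range (IsInt<11>/IsInt<12>/IsInt<13>), require natural alignment, and shift the offset right before masking with kMsaS10Mask. The standalone sketch below mirrors the arithmetic done for the word-sized case in LdW/StW; it is illustrative only, and EncodeWordOffsetField is a made-up name, not part of this patch.

    #include <cassert>
    #include <cstdint>

    // Sketch (not ART code) of the offset handling in LdW/StW: the byte offset must
    // fit in 12 bits, be word-aligned, and is scaled to word units before masking.
    uint32_t EncodeWordOffsetField(int32_t offset) {
      const uint32_t kMsaS10Mask = 0x3ff;              // from constants_mips.h
      assert(offset >= -2048 && offset < 2048);        // IsInt<12>(offset)
      assert((offset & 3) == 0);                       // CHECK_ALIGNED(offset, kMipsWordSize)
      return static_cast<uint32_t>(offset >> 2) & kMsaS10Mask;  // offset >> TIMES_4
    }
    // Example: EncodeWordOffsetField(64) == 16, EncodeWordOffsetField(-4) == 0x3ff.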
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 463daeb5d7..dd4ce6dc80 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -25,6 +25,7 @@
#include "base/arena_containers.h"
#include "base/enums.h"
#include "base/macros.h"
+#include "base/stl_util_identity.h"
#include "constants_mips.h"
#include "globals.h"
#include "managed_register_mips.h"
@@ -36,6 +37,7 @@
namespace art {
namespace mips {
+static constexpr size_t kMipsHalfwordSize = 2;
static constexpr size_t kMipsWordSize = 4;
static constexpr size_t kMipsDoublewordSize = 8;
@@ -194,6 +196,7 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0),
+ has_msa_(instruction_set_features != nullptr ? instruction_set_features->HasMsa() : false),
isa_features_(instruction_set_features) {
cfi().DelayEmittingAdvancePCs();
}
@@ -464,6 +467,149 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void Clear(Register rd);
void Not(Register rd, Register rs);
+ // MSA instructions.
+ void AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
+ void AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Add_aB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Add_aH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Add_aW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Add_aD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Ave_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Aver_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
+ void FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmaxW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmaxD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FminW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FminD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
+ void Ffint_sW(VectorRegister wd, VectorRegister ws);
+ void Ffint_sD(VectorRegister wd, VectorRegister ws);
+ void Ftint_sW(VectorRegister wd, VectorRegister ws);
+ void Ftint_sD(VectorRegister wd, VectorRegister ws);
+
+ void SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
+ // Immediate shift instructions, where shamtN denotes the shift amount (must be between 0 and 2^N-1).
+ void SlliB(VectorRegister wd, VectorRegister ws, int shamt3);
+ void SlliH(VectorRegister wd, VectorRegister ws, int shamt4);
+ void SlliW(VectorRegister wd, VectorRegister ws, int shamt5);
+ void SlliD(VectorRegister wd, VectorRegister ws, int shamt6);
+ void SraiB(VectorRegister wd, VectorRegister ws, int shamt3);
+ void SraiH(VectorRegister wd, VectorRegister ws, int shamt4);
+ void SraiW(VectorRegister wd, VectorRegister ws, int shamt5);
+ void SraiD(VectorRegister wd, VectorRegister ws, int shamt6);
+ void SrliB(VectorRegister wd, VectorRegister ws, int shamt3);
+ void SrliH(VectorRegister wd, VectorRegister ws, int shamt4);
+ void SrliW(VectorRegister wd, VectorRegister ws, int shamt5);
+ void SrliD(VectorRegister wd, VectorRegister ws, int shamt6);
+
+ void MoveV(VectorRegister wd, VectorRegister ws);
+ void SplatiB(VectorRegister wd, VectorRegister ws, int n4);
+ void SplatiH(VectorRegister wd, VectorRegister ws, int n3);
+ void SplatiW(VectorRegister wd, VectorRegister ws, int n2);
+ void SplatiD(VectorRegister wd, VectorRegister ws, int n1);
+ void FillB(VectorRegister wd, Register rs);
+ void FillH(VectorRegister wd, Register rs);
+ void FillW(VectorRegister wd, Register rs);
+
+ void LdiB(VectorRegister wd, int imm8);
+ void LdiH(VectorRegister wd, int imm10);
+ void LdiW(VectorRegister wd, int imm10);
+ void LdiD(VectorRegister wd, int imm10);
+ void LdB(VectorRegister wd, Register rs, int offset);
+ void LdH(VectorRegister wd, Register rs, int offset);
+ void LdW(VectorRegister wd, Register rs, int offset);
+ void LdD(VectorRegister wd, Register rs, int offset);
+ void StB(VectorRegister wd, Register rs, int offset);
+ void StH(VectorRegister wd, Register rs, int offset);
+ void StW(VectorRegister wd, Register rs, int offset);
+ void StD(VectorRegister wd, Register rs, int offset);
+
+ void IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvrH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
// Higher level composite instructions.
void LoadConst32(Register rd, int32_t value);
void LoadConst64(Register reg_hi, Register reg_lo, int64_t value);
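
As a rough orientation for how this new surface is meant to be driven, the hypothetical helper below (EmitBroadcastAddStore is not part of the patch) strings three of the declared emitters together: fill.w to broadcast a GPR into all four lanes, addv.w for the element-wise add, and st.w to write the 128-bit result back. It assumes a MipsAssembler constructed with MSA-capable instruction set features; otherwise the CHECK(HasMsa()) inside each emitter fires.

    #include "utils/mips/assembler_mips.h"  // assumed include path, as used elsewhere in the compiler

    // Illustrative only: broadcast a 32-bit scalar and accumulate it into $w2.
    void EmitBroadcastAddStore(art::mips::MipsAssembler* assembler,
                               art::mips::Register scalar,
                               art::mips::Register base) {
      assembler->FillW(art::mips::W0, scalar);                      // replicate scalar into all lanes of $w0
      assembler->AddvW(art::mips::W2, art::mips::W2, art::mips::W0);  // $w2 += $w0, element-wise
      assembler->StW(art::mips::W2, base, 0);                       // st.w $w2, 0(base)
    }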
@@ -1282,6 +1428,30 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
uint32_t EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm);
void EmitBcondR2(BranchCondition cond, Register rs, Register rt, uint16_t imm16);
void EmitBcondR6(BranchCondition cond, Register rs, Register rt, uint32_t imm16_21);
+ uint32_t EmitMsa3R(int operation,
+ int df,
+ VectorRegister wt,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode);
+ uint32_t EmitMsaBIT(int operation,
+ int df_m,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode);
+ uint32_t EmitMsaELM(int operation,
+ int df_n,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode);
+ uint32_t EmitMsaMI10(int s10, Register rs, VectorRegister wd, int minor_opcode, int df);
+ uint32_t EmitMsaI10(int operation, int df, int i10, VectorRegister wd, int minor_opcode);
+ uint32_t EmitMsa2R(int operation, int df, VectorRegister ws, VectorRegister wd, int minor_opcode);
+ uint32_t EmitMsa2RF(int operation,
+ int df,
+ VectorRegister ws,
+ VectorRegister wd,
+ int minor_opcode);
void Buncond(MipsLabel* label);
void Bcond(MipsLabel* label, BranchCondition condition, Register lhs, Register rhs = ZERO);
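
The encoders declared above pack fixed 5-bit register fields under the MSA major opcode; the exact shifts come from the constants added to constants_mips.h later in this patch. The sketch below reconstructs the word layout EmitMsa3R is expected to produce for the three-register formats; Msa3RWord is illustrative, and it assumes the usual MIPS opcode shift of 26, which is not restated in this hunk.

    #include <cstdint>

    // Sketch (not ART code) of the MSA 3R instruction word; register fields are 5 bits wide.
    uint32_t Msa3RWord(uint32_t operation, uint32_t df,
                       uint32_t wt, uint32_t ws, uint32_t wd,
                       uint32_t minor_opcode) {
      return (0x1eu << 26) |      // kMsaMajorOpcode
             (operation << 23) |  // kMsaOperationShift
             (df << 21) |         // kDfShift: 0=byte, 1=halfword, 2=word, 3=doubleword
             (wt << 16) |         // kWtShift
             (ws << 11) |         // kWsShift
             (wd << 6) |          // kWdShift
             minor_opcode;        // bits 5..0
    }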
@@ -1332,6 +1502,10 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
// Emits exception block.
void EmitExceptionPoll(MipsExceptionSlowPath* exception);
+ bool HasMsa() const {
+ return has_msa_;
+ }
+
bool IsR6() const {
if (isa_features_ != nullptr) {
return isa_features_->IsR6();
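
Each MSA emitter begins with CHECK(HasMsa()), so callers are expected to gate code generation on the feature bit rather than rely on the assembler to fall back. A hedged sketch of that gating, mirroring the constructor change above and the test fixture further down (CanUseMsa is a made-up helper):

    #include <memory>
    #include "arch/mips/instruction_set_features_mips.h"  // assumed include path

    // Illustrative only: decide up front whether MSA code paths may be emitted.
    bool CanUseMsa() {
      std::unique_ptr<const art::MipsInstructionSetFeatures> features =
          art::MipsInstructionSetFeatures::FromVariant("mips32r6", nullptr);
      return features != nullptr && features->HasMsa();
    }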
@@ -1386,6 +1560,8 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
uint32_t last_old_position_;
uint32_t last_branch_id_;
+ const bool has_msa_;
+
const MipsInstructionSetFeatures* isa_features_;
DISALLOW_COPY_AND_ASSIGN(MipsAssembler);
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 30667efa38..d4642607ad 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -34,9 +34,14 @@ struct MIPSCpuRegisterCompare {
class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
mips::Register,
mips::FRegister,
- uint32_t> {
+ uint32_t,
+ mips::VectorRegister> {
public:
- typedef AssemblerTest<mips::MipsAssembler, mips::Register, mips::FRegister, uint32_t> Base;
+ typedef AssemblerTest<mips::MipsAssembler,
+ mips::Register,
+ mips::FRegister,
+ uint32_t,
+ mips::VectorRegister> Base;
AssemblerMIPS32r6Test() :
instruction_set_features_(MipsInstructionSetFeatures::FromVariant("mips32r6", nullptr)) {
@@ -61,7 +66,7 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
// We use "-modd-spreg" so we can use odd-numbered single precision FPU registers.
// We put the code at address 0x1000000 (instead of 0) to avoid overlapping with the
// .MIPS.abiflags section (there doesn't seem to be a way to suppress its generation easily).
- return " -march=mips32r6 -modd-spreg -Wa,--no-warn"
+ return " -march=mips32r6 -mmsa -modd-spreg -Wa,--no-warn"
" -Wl,-Ttext=0x1000000 -Wl,-e0x1000000 -nostdlib";
}
@@ -182,6 +187,39 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
fp_registers_.push_back(new mips::FRegister(mips::F29));
fp_registers_.push_back(new mips::FRegister(mips::F30));
fp_registers_.push_back(new mips::FRegister(mips::F31));
+
+ vec_registers_.push_back(new mips::VectorRegister(mips::W0));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W1));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W2));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W3));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W4));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W5));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W6));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W7));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W8));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W9));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W10));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W11));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W12));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W13));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W14));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W15));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W16));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W17));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W18));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W19));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W20));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W21));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W22));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W23));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W24));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W25));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W26));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W27));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W28));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W29));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W30));
+ vec_registers_.push_back(new mips::VectorRegister(mips::W31));
}
}
@@ -189,6 +227,7 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
STLDeleteElements(&fp_registers_);
+ STLDeleteElements(&vec_registers_);
}
std::vector<mips::Register*> GetRegisters() OVERRIDE {
@@ -199,6 +238,10 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
return fp_registers_;
}
+ std::vector<mips::VectorRegister*> GetVectorRegisters() OVERRIDE {
+ return vec_registers_;
+ }
+
uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
return imm_value;
}
@@ -250,6 +293,7 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
std::map<mips::Register, std::string, MIPSCpuRegisterCompare> secondary_register_names_;
std::vector<mips::FRegister*> fp_registers_;
+ std::vector<mips::VectorRegister*> vec_registers_;
std::unique_ptr<const MipsInstructionSetFeatures> instruction_set_features_;
};
@@ -328,13 +372,11 @@ TEST_F(AssemblerMIPS32r6Test, Lsa) {
}
TEST_F(AssemblerMIPS32r6Test, Seleqz) {
- DriverStr(RepeatRRR(&mips::MipsAssembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"),
- "seleqz");
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"), "seleqz");
}
TEST_F(AssemblerMIPS32r6Test, Selnez) {
- DriverStr(RepeatRRR(&mips::MipsAssembler::Selnez, "selnez ${reg1}, ${reg2}, ${reg3}"),
- "selnez");
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Selnez, "selnez ${reg1}, ${reg2}, ${reg3}"), "selnez");
}
TEST_F(AssemblerMIPS32r6Test, ClzR6) {
@@ -914,6 +956,566 @@ TEST_F(AssemblerMIPS32r6Test, LongBranchReorder) {
// AssemblerMIPS32r6Test.Bltu
// AssemblerMIPS32r6Test.Bgeu
+// MSA instructions.
+
+TEST_F(AssemblerMIPS32r6Test, AndV) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::AndV, "and.v ${reg1}, ${reg2}, ${reg3}"), "and.v");
+}
+
+TEST_F(AssemblerMIPS32r6Test, OrV) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::OrV, "or.v ${reg1}, ${reg2}, ${reg3}"), "or.v");
+}
+
+TEST_F(AssemblerMIPS32r6Test, NorV) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::NorV, "nor.v ${reg1}, ${reg2}, ${reg3}"), "nor.v");
+}
+
+TEST_F(AssemblerMIPS32r6Test, XorV) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::XorV, "xor.v ${reg1}, ${reg2}, ${reg3}"), "xor.v");
+}
+
+TEST_F(AssemblerMIPS32r6Test, AddvB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::AddvB, "addv.b ${reg1}, ${reg2}, ${reg3}"), "addv.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, AddvH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::AddvH, "addv.h ${reg1}, ${reg2}, ${reg3}"), "addv.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, AddvW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::AddvW, "addv.w ${reg1}, ${reg2}, ${reg3}"), "addv.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, AddvD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::AddvD, "addv.d ${reg1}, ${reg2}, ${reg3}"), "addv.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SubvB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SubvB, "subv.b ${reg1}, ${reg2}, ${reg3}"), "subv.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SubvH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SubvH, "subv.h ${reg1}, ${reg2}, ${reg3}"), "subv.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SubvW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SubvW, "subv.w ${reg1}, ${reg2}, ${reg3}"), "subv.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SubvD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SubvD, "subv.d ${reg1}, ${reg2}, ${reg3}"), "subv.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MulvB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MulvB, "mulv.b ${reg1}, ${reg2}, ${reg3}"), "mulv.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MulvH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MulvH, "mulv.h ${reg1}, ${reg2}, ${reg3}"), "mulv.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MulvW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MulvW, "mulv.w ${reg1}, ${reg2}, ${reg3}"), "mulv.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MulvD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::MulvD, "mulv.d ${reg1}, ${reg2}, ${reg3}"), "mulv.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Div_sB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Div_sB, "div_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "div_s.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Div_sH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Div_sH, "div_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "div_s.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Div_sW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Div_sW, "div_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "div_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Div_sD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Div_sD, "div_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "div_s.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Div_uB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Div_uB, "div_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "div_u.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Div_uH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Div_uH, "div_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "div_u.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Div_uW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Div_uW, "div_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "div_u.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Div_uD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Div_uD, "div_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "div_u.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Mod_sB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_sB, "mod_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "mod_s.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Mod_sH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_sH, "mod_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "mod_s.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Mod_sW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_sW, "mod_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "mod_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Mod_sD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_sD, "mod_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "mod_s.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Mod_uB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_uB, "mod_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "mod_u.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Mod_uH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_uH, "mod_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "mod_u.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Mod_uW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_uW, "mod_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "mod_u.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Mod_uD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_uD, "mod_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "mod_u.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Add_aB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Add_aB, "add_a.b ${reg1}, ${reg2}, ${reg3}"),
+ "add_a.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Add_aH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Add_aH, "add_a.h ${reg1}, ${reg2}, ${reg3}"),
+ "add_a.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Add_aW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Add_aW, "add_a.w ${reg1}, ${reg2}, ${reg3}"),
+ "add_a.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Add_aD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Add_aD, "add_a.d ${reg1}, ${reg2}, ${reg3}"),
+ "add_a.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ave_sB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_sB, "ave_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "ave_s.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ave_sH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_sH, "ave_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "ave_s.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ave_sW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_sW, "ave_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "ave_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ave_sD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_sD, "ave_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "ave_s.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ave_uB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_uB, "ave_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "ave_u.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ave_uH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_uH, "ave_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "ave_u.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ave_uW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_uW, "ave_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "ave_u.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ave_uD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_uD, "ave_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "ave_u.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Aver_sB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_sB, "aver_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "aver_s.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Aver_sH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_sH, "aver_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "aver_s.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Aver_sW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_sW, "aver_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "aver_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Aver_sD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_sD, "aver_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "aver_s.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Aver_uB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_uB, "aver_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "aver_u.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Aver_uH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_uH, "aver_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "aver_u.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Aver_uW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_uW, "aver_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "aver_u.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Aver_uD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_uD, "aver_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "aver_u.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Max_sB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Max_sB, "max_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "max_s.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Max_sH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Max_sH, "max_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "max_s.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Max_sW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Max_sW, "max_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "max_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Max_sD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Max_sD, "max_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "max_s.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Max_uB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Max_uB, "max_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "max_u.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Max_uH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Max_uH, "max_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "max_u.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Max_uW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Max_uW, "max_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "max_u.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Max_uD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Max_uD, "max_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "max_u.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Min_sB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Min_sB, "min_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "min_s.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Min_sH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Min_sH, "min_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "min_s.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Min_sW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Min_sW, "min_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "min_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Min_sD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Min_sD, "min_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "min_s.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Min_uB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Min_uB, "min_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "min_u.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Min_uH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Min_uH, "min_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "min_u.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Min_uW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Min_uW, "min_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "min_u.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Min_uD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Min_uD, "min_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "min_u.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FaddW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FaddW, "fadd.w ${reg1}, ${reg2}, ${reg3}"), "fadd.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FaddD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FaddD, "fadd.d ${reg1}, ${reg2}, ${reg3}"), "fadd.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FsubW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FsubW, "fsub.w ${reg1}, ${reg2}, ${reg3}"), "fsub.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FsubD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FsubD, "fsub.d ${reg1}, ${reg2}, ${reg3}"), "fsub.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FmulW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FmulW, "fmul.w ${reg1}, ${reg2}, ${reg3}"), "fmul.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FmulD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FmulD, "fmul.d ${reg1}, ${reg2}, ${reg3}"), "fmul.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FdivW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FdivW, "fdiv.w ${reg1}, ${reg2}, ${reg3}"), "fdiv.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FdivD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FdivD, "fdiv.d ${reg1}, ${reg2}, ${reg3}"), "fdiv.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FmaxW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FmaxW, "fmax.w ${reg1}, ${reg2}, ${reg3}"), "fmax.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FmaxD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FmaxD, "fmax.d ${reg1}, ${reg2}, ${reg3}"), "fmax.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FminW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FminW, "fmin.w ${reg1}, ${reg2}, ${reg3}"), "fmin.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FminD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::FminD, "fmin.d ${reg1}, ${reg2}, ${reg3}"), "fmin.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ffint_sW) {
+ DriverStr(RepeatVV(&mips::MipsAssembler::Ffint_sW, "ffint_s.w ${reg1}, ${reg2}"), "ffint_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ffint_sD) {
+ DriverStr(RepeatVV(&mips::MipsAssembler::Ffint_sD, "ffint_s.d ${reg1}, ${reg2}"), "ffint_s.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ftint_sW) {
+ DriverStr(RepeatVV(&mips::MipsAssembler::Ftint_sW, "ftint_s.w ${reg1}, ${reg2}"), "ftint_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Ftint_sD) {
+ DriverStr(RepeatVV(&mips::MipsAssembler::Ftint_sD, "ftint_s.d ${reg1}, ${reg2}"), "ftint_s.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SllB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SllB, "sll.b ${reg1}, ${reg2}, ${reg3}"), "sll.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SllH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SllH, "sll.h ${reg1}, ${reg2}, ${reg3}"), "sll.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SllW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SllW, "sll.w ${reg1}, ${reg2}, ${reg3}"), "sll.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SllD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SllD, "sll.d ${reg1}, ${reg2}, ${reg3}"), "sll.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SraB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SraB, "sra.b ${reg1}, ${reg2}, ${reg3}"), "sra.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SraH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SraH, "sra.h ${reg1}, ${reg2}, ${reg3}"), "sra.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SraW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SraW, "sra.w ${reg1}, ${reg2}, ${reg3}"), "sra.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SraD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SraD, "sra.d ${reg1}, ${reg2}, ${reg3}"), "sra.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SrlB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SrlB, "srl.b ${reg1}, ${reg2}, ${reg3}"), "srl.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SrlH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SrlH, "srl.h ${reg1}, ${reg2}, ${reg3}"), "srl.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SrlW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SrlW, "srl.w ${reg1}, ${reg2}, ${reg3}"), "srl.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SrlD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::SrlD, "srl.d ${reg1}, ${reg2}, ${reg3}"), "srl.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SlliB) {
+ DriverStr(RepeatVVIb(&mips::MipsAssembler::SlliB, 3, "slli.b ${reg1}, ${reg2}, {imm}"), "slli.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SlliH) {
+ DriverStr(RepeatVVIb(&mips::MipsAssembler::SlliH, 4, "slli.h ${reg1}, ${reg2}, {imm}"), "slli.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SlliW) {
+ DriverStr(RepeatVVIb(&mips::MipsAssembler::SlliW, 5, "slli.w ${reg1}, ${reg2}, {imm}"), "slli.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SlliD) {
+ DriverStr(RepeatVVIb(&mips::MipsAssembler::SlliD, 6, "slli.d ${reg1}, ${reg2}, {imm}"), "slli.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, MoveV) {
+ DriverStr(RepeatVV(&mips::MipsAssembler::MoveV, "move.v ${reg1}, ${reg2}"), "move.v");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SplatiB) {
+ DriverStr(RepeatVVIb(&mips::MipsAssembler::SplatiB, 4, "splati.b ${reg1}, ${reg2}[{imm}]"),
+ "splati.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SplatiH) {
+ DriverStr(RepeatVVIb(&mips::MipsAssembler::SplatiH, 3, "splati.h ${reg1}, ${reg2}[{imm}]"),
+ "splati.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SplatiW) {
+ DriverStr(RepeatVVIb(&mips::MipsAssembler::SplatiW, 2, "splati.w ${reg1}, ${reg2}[{imm}]"),
+ "splati.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SplatiD) {
+ DriverStr(RepeatVVIb(&mips::MipsAssembler::SplatiD, 1, "splati.d ${reg1}, ${reg2}[{imm}]"),
+ "splati.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FillB) {
+ DriverStr(RepeatVR(&mips::MipsAssembler::FillB, "fill.b ${reg1}, ${reg2}"), "fill.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FillH) {
+ DriverStr(RepeatVR(&mips::MipsAssembler::FillH, "fill.h ${reg1}, ${reg2}"), "fill.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, FillW) {
+ DriverStr(RepeatVR(&mips::MipsAssembler::FillW, "fill.w ${reg1}, ${reg2}"), "fill.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, LdiB) {
+ DriverStr(RepeatVIb(&mips::MipsAssembler::LdiB, -8, "ldi.b ${reg}, {imm}"), "ldi.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, LdiH) {
+ DriverStr(RepeatVIb(&mips::MipsAssembler::LdiH, -10, "ldi.h ${reg}, {imm}"), "ldi.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, LdiW) {
+ DriverStr(RepeatVIb(&mips::MipsAssembler::LdiW, -10, "ldi.w ${reg}, {imm}"), "ldi.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, LdiD) {
+ DriverStr(RepeatVIb(&mips::MipsAssembler::LdiD, -10, "ldi.d ${reg}, {imm}"), "ldi.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, LdB) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::LdB, -10, "ld.b ${reg1}, {imm}(${reg2})"), "ld.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, LdH) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::LdH, -10, "ld.h ${reg1}, {imm}(${reg2})", 0, 2),
+ "ld.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, LdW) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::LdW, -10, "ld.w ${reg1}, {imm}(${reg2})", 0, 4),
+ "ld.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, LdD) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::LdD, -10, "ld.d ${reg1}, {imm}(${reg2})", 0, 8),
+ "ld.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, StB) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::StB, -10, "st.b ${reg1}, {imm}(${reg2})"), "st.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, StH) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::StH, -10, "st.h ${reg1}, {imm}(${reg2})", 0, 2),
+ "st.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, StW) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::StW, -10, "st.w ${reg1}, {imm}(${reg2})", 0, 4),
+ "st.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, StD) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::StD, -10, "st.d ${reg1}, {imm}(${reg2})", 0, 8),
+ "st.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvrB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrB, "ilvr.b ${reg1}, ${reg2}, ${reg3}"), "ilvr.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvrH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrH, "ilvr.h ${reg1}, ${reg2}, ${reg3}"), "ilvr.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvrW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrW, "ilvr.w ${reg1}, ${reg2}, ${reg3}"), "ilvr.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvrD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrD, "ilvr.d ${reg1}, ${reg2}, ${reg3}"), "ilvr.d");
+}
+
#undef __
} // namespace art
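
The tests above all follow the same pattern: a Repeat* template enumerates register (and immediate) combinations into a text template, and DriverStr assembles the same sequence through the new emitters and compares it against what the GNU assembler (now invoked with -mmsa) produces for that text. Roughly, the expansion for the AndV case is expected to look like the sketch below; ExpectedAndVText is illustrative, not part of the test framework.

    #include <string>

    // Rough illustration of the text RepeatVVV() hands to DriverStr() for AndV:
    // each "${regN}" placeholder is substituted with every combination of the
    // 32 vector registers registered in SetUp().
    std::string ExpectedAndVText() {
      std::string expected;
      for (int d = 0; d < 32; ++d) {
        for (int s = 0; s < 32; ++s) {
          for (int t = 0; t < 32; ++t) {
            expected += "and.v $w" + std::to_string(d) + ", $w" + std::to_string(s) +
                        ", $w" + std::to_string(t) + "\n";
          }
        }
      }
      return expected;
    }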
diff --git a/compiler/utils/mips/constants_mips.h b/compiler/utils/mips/constants_mips.h
index 44ed5cc124..b4dfdbd8d3 100644
--- a/compiler/utils/mips/constants_mips.h
+++ b/compiler/utils/mips/constants_mips.h
@@ -75,8 +75,37 @@ enum InstructionFields {
kFdShift = 6,
kFdBits = 5,
+ kMsaOperationShift = 23,
+ kMsaELMOperationShift = 22,
+ kMsa2ROperationShift = 18,
+ kMsa2RFOperationShift = 17,
+ kDfShift = 21,
+ kDfMShift = 16,
+ kDf2RShift = 16,
+ kDfNShift = 16,
+ kWtShift = 16,
+ kWtBits = 5,
+ kWsShift = 11,
+ kWsBits = 5,
+ kWdShift = 6,
+ kWdBits = 5,
+ kS10Shift = 16,
+ kI10Shift = 11,
+ kS10MinorShift = 2,
+
kBranchOffsetMask = 0x0000ffff,
kJumpOffsetMask = 0x03ffffff,
+
+ kMsaMajorOpcode = 0x1e,
+ kMsaDfMByteMask = 0x70,
+ kMsaDfMHalfwordMask = 0x60,
+ kMsaDfMWordMask = 0x40,
+ kMsaDfMDoublewordMask = 0x00,
+ kMsaDfNByteMask = 0x00,
+ kMsaDfNHalfwordMask = 0x20,
+ kMsaDfNWordMask = 0x30,
+ kMsaDfNDoublewordMask = 0x38,
+ kMsaS10Mask = 0x3ff,
};
enum ScaleFactor {
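
The kMsaDfM* values above are not independent masks but the element-size prefixes of the 7-bit df/m field used by the BIT-format immediate shifts: the prefix occupies the upper bits and the shift amount the lower bits, which is exactly why SlliB/SraiB/SrliB accept a 3-bit amount while the halfword, word and doubleword variants accept 4, 5 and 6 bits. A few illustrative checks (not ART code):

    // Element-size prefix OR'd with the shift amount, as done by the Slli*/Srai*/Srli* emitters.
    static_assert((0x70 | 5) == 0x75, "slli.b with shamt 5: byte prefix");        // kMsaDfMByteMask
    static_assert((0x60 | 5) == 0x65, "slli.h with shamt 5: halfword prefix");    // kMsaDfMHalfwordMask
    static_assert((0x40 | 5) == 0x45, "slli.w with shamt 5: word prefix");        // kMsaDfMWordMask
    static_assert((0x00 | 5) == 0x05, "slli.d with shamt 5: doubleword prefix");  // kMsaDfMDoublewordMask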
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index c03b98c5c2..24900a7f10 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -1456,6 +1456,86 @@ void Mips64Assembler::Aver_uD(VectorRegister wd, VectorRegister ws, VectorRegist
EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x10);
}
+void Mips64Assembler::Max_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Max_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Max_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Max_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Max_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x3, 0x0, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Max_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x3, 0x1, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Max_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x3, 0x2, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Max_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x3, 0x3, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Min_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x0, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Min_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x1, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Min_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x2, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Min_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x3, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Min_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x0, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Min_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x1, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Min_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x2, wt, ws, wd, 0xe);
+}
+
+void Mips64Assembler::Min_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x3, wt, ws, wd, 0xe);
+}
+
void Mips64Assembler::FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
CHECK(HasMsa());
EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1b);
@@ -1496,6 +1576,26 @@ void Mips64Assembler::FdivD(VectorRegister wd, VectorRegister ws, VectorRegister
EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x1b);
}
+void Mips64Assembler::FmaxW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FmaxD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FminW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x1b);
+}
+
+void Mips64Assembler::FminD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x1b);
+}
+
void Mips64Assembler::Ffint_sW(VectorRegister wd, VectorRegister ws) {
CHECK(HasMsa());
EmitMsa2RF(0x19e, 0x0, ws, wd, 0x1e);
@@ -1795,6 +1895,17 @@ void Mips64Assembler::IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister
EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x14);
}
+void Mips64Assembler::ReplicateFPToVectorRegister(VectorRegister dst,
+ FpuRegister src,
+ bool is_double) {
+ // A float or double in FPU register Fx can be viewed as the 0th element of vector register Wx.
+ if (is_double) {
+ SplatiD(dst, static_cast<VectorRegister>(src), 0);
+ } else {
+ SplatiW(dst, static_cast<VectorRegister>(src), 0);
+ }
+}
+
void Mips64Assembler::LoadConst32(GpuRegister rd, int32_t value) {
TemplateLoadConst32(this, rd, value);
}
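
A small usage sketch for the new ReplicateFPToVectorRegister helper (AddScalarToAllLanes is a made-up caller, not part of the patch): it relies on the fact, noted in the comment above, that the scalar in an FPU register occupies lane 0 of the overlapping vector register, so a single splati.d/splati.w broadcasts it.

    #include "utils/mips64/assembler_mips64.h"  // assumed include path

    // Illustrative only: broadcast the double in $f0 into both lanes of $w10,
    // then add it element-wise to $w12. Assumes an MSA-capable assembler.
    void AddScalarToAllLanes(art::mips64::Mips64Assembler* assembler) {
      assembler->ReplicateFPToVectorRegister(art::mips64::W10, art::mips64::F0, /* is_double */ true);
      assembler->FaddD(art::mips64::W12, art::mips64::W12, art::mips64::W10);
    }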
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index c92cf4c048..773db9b208 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -25,6 +25,7 @@
#include "base/arena_containers.h"
#include "base/enums.h"
#include "base/macros.h"
+#include "base/stl_util_identity.h"
#include "constants_mips64.h"
#include "globals.h"
#include "managed_register_mips64.h"
@@ -704,6 +705,22 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Aver_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void Aver_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void Aver_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Max_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Min_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
@@ -713,6 +730,10 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmaxW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FmaxD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FminW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void FminD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void Ffint_sW(VectorRegister wd, VectorRegister ws);
void Ffint_sD(VectorRegister wd, VectorRegister ws);
@@ -774,6 +795,9 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ // Helper for replicating a floating-point value across all destination elements.
+ void ReplicateFPToVectorRegister(VectorRegister dst, FpuRegister src, bool is_double);
+
// Higher level composite instructions.
int InstrCountForLoadReplicatedConst32(int64_t);
void LoadConst32(GpuRegister rd, int32_t value);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index fbebe0ce15..bdf9598ee7 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -2998,6 +2998,86 @@ TEST_F(AssemblerMIPS64Test, Aver_uD) {
"aver_u.d");
}
+TEST_F(AssemblerMIPS64Test, Max_sB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_sB, "max_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "max_s.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Max_sH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_sH, "max_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "max_s.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Max_sW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_sW, "max_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "max_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Max_sD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_sD, "max_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "max_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Max_uB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_uB, "max_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "max_u.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Max_uH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_uH, "max_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "max_u.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Max_uW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_uW, "max_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "max_u.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Max_uD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_uD, "max_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "max_u.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Min_sB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_sB, "min_s.b ${reg1}, ${reg2}, ${reg3}"),
+ "min_s.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Min_sH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_sH, "min_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "min_s.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Min_sW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_sW, "min_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "min_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Min_sD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_sD, "min_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "min_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Min_uB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_uB, "min_u.b ${reg1}, ${reg2}, ${reg3}"),
+ "min_u.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Min_uH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_uH, "min_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "min_u.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Min_uW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_uW, "min_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "min_u.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Min_uD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_uD, "min_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "min_u.d");
+}
+
TEST_F(AssemblerMIPS64Test, FaddW) {
DriverStr(RepeatVVV(&mips64::Mips64Assembler::FaddW, "fadd.w ${reg1}, ${reg2}, ${reg3}"),
"fadd.w");
@@ -3038,6 +3118,26 @@ TEST_F(AssemblerMIPS64Test, FdivD) {
"fdiv.d");
}
+TEST_F(AssemblerMIPS64Test, FmaxW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmaxW, "fmax.w ${reg1}, ${reg2}, ${reg3}"),
+ "fmax.w");
+}
+
+TEST_F(AssemblerMIPS64Test, FmaxD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmaxD, "fmax.d ${reg1}, ${reg2}, ${reg3}"),
+ "fmax.d");
+}
+
+TEST_F(AssemblerMIPS64Test, FminW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FminW, "fmin.w ${reg1}, ${reg2}, ${reg3}"),
+ "fmin.w");
+}
+
+TEST_F(AssemblerMIPS64Test, FminD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::FminD, "fmin.d ${reg1}, ${reg2}, ${reg3}"),
+ "fmin.d");
+}
+
TEST_F(AssemblerMIPS64Test, Ffint_sW) {
DriverStr(RepeatVV(&mips64::Mips64Assembler::Ffint_sW, "ffint_s.w ${reg1}, ${reg2}"),
"ffint_s.w");
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index a1eb08e041..621a652f0a 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -20,10 +20,11 @@
#include <numeric>
#include <sys/mman.h>
+#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 53e73c344e..b88fe09359 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -39,6 +39,7 @@
#include "arch/instruction_set_features.h"
#include "arch/mips/instruction_set_features_mips.h"
#include "art_method-inl.h"
+#include "base/callee_save_type.h"
#include "base/dumpable.h"
#include "base/macros.h"
#include "base/scoped_flock.h"
@@ -74,6 +75,7 @@
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "oat_file.h"
#include "oat_file_assistant.h"
#include "oat_writer.h"
#include "os.h"
@@ -112,12 +114,15 @@ static std::string CommandLine() {
static std::string StrippedCommandLine() {
std::vector<std::string> command;
- // Do a pre-pass to look for zip-fd.
+ // Do a pre-pass to look for zip-fd and the compiler filter.
bool saw_zip_fd = false;
+ bool saw_compiler_filter = false;
for (int i = 0; i < original_argc; ++i) {
if (android::base::StartsWith(original_argv[i], "--zip-fd=")) {
saw_zip_fd = true;
- break;
+ }
+ if (android::base::StartsWith(original_argv[i], "--compiler-filter=")) {
+ saw_compiler_filter = true;
}
}
@@ -161,6 +166,11 @@ static std::string StrippedCommandLine() {
command.push_back(original_argv[i]);
}
+ if (!saw_compiler_filter) {
+ command.push_back("--compiler-filter=" +
+ CompilerFilter::NameOfFilter(CompilerFilter::kDefaultCompilerFilter));
+ }
+
// Construct the final output.
if (command.size() <= 1U) {
// It seems only "/system/bin/dex2oat" is left, or not even that. Use a pretty line.
@@ -477,6 +487,16 @@ class WatchDog {
android::base::LogId::DEFAULT,
LogSeverity::FATAL,
message.c_str());
+ // If we're on the host, try to dump all threads to get a sense of what's going on. This is
+ // restricted to the host as the dump may itself go bad.
+ // TODO: Use a double watchdog timeout, so we can enable this on-device.
+ if (!kIsTargetBuild && Runtime::Current() != nullptr) {
+ Runtime::Current()->AttachCurrentThread("Watchdog thread attached for dumping",
+ true,
+ nullptr,
+ false);
+ Runtime::Current()->DumpForSigQuit(std::cerr);
+ }
exit(1);
}
@@ -503,13 +523,14 @@ class WatchDog {
CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_unlock, (&mutex_), reason);
}
- const int64_t timeout_in_milliseconds_;
- bool shutting_down_;
// TODO: Switch to Mutex when we can guarantee it won't prevent shutdown in error cases.
pthread_mutex_t mutex_;
pthread_cond_t cond_;
pthread_attr_t attr_;
pthread_t pthread_;
+
+ const int64_t timeout_in_milliseconds_;
+ bool shutting_down_;
};
class Dex2Oat FINAL {
@@ -1379,8 +1400,8 @@ class Dex2Oat FINAL {
// Note: we're only invalidating the magic data in the file, as dex2oat needs the rest of
// the information to remain valid.
if (update_input_vdex_) {
- std::unique_ptr<BufferedOutputStream> vdex_out(MakeUnique<BufferedOutputStream>(
- MakeUnique<FileOutputStream>(vdex_files_.back().get())));
+ std::unique_ptr<BufferedOutputStream> vdex_out = std::make_unique<BufferedOutputStream>(
+ std::make_unique<FileOutputStream>(vdex_files_.back().get()));
if (!vdex_out->WriteFully(&VdexFile::Header::kVdexInvalidMagic,
arraysize(VdexFile::Header::kVdexInvalidMagic))) {
PLOG(ERROR) << "Failed to invalidate vdex header. File: " << vdex_out->GetLocation();
@@ -1877,8 +1898,8 @@ class Dex2Oat FINAL {
verifier::VerifierDeps* verifier_deps = callbacks_->GetVerifierDeps();
for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
File* vdex_file = vdex_files_[i].get();
- std::unique_ptr<BufferedOutputStream> vdex_out(
- MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(vdex_file)));
+ std::unique_ptr<BufferedOutputStream> vdex_out =
+ std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(vdex_file));
if (!oat_writers_[i]->WriteVerifierDeps(vdex_out.get(), verifier_deps)) {
LOG(ERROR) << "Failed to write verifier dependencies into VDEX " << vdex_file->GetPath();
@@ -1912,6 +1933,7 @@ class Dex2Oat FINAL {
elf_writer->PrepareDynamicSection(rodata_size,
text_size,
oat_writer->GetBssSize(),
+ oat_writer->GetBssMethodsOffset(),
oat_writer->GetBssRootsOffset());
if (IsImage()) {
@@ -2489,8 +2511,8 @@ class Dex2Oat FINAL {
runtime_.reset(Runtime::Current());
runtime_->SetInstructionSet(instruction_set_);
- for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
+ for (uint32_t i = 0; i < static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
+ CalleeSaveType type = CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
@@ -2909,7 +2931,7 @@ static dex2oat::ReturnCode Dex2oat(int argc, char** argv) {
// might produce a stack frame too large for this function or for
// functions inlining it (such as main), that would not fit the
// requirements of the `-Wframe-larger-than` option.
- std::unique_ptr<Dex2Oat> dex2oat = MakeUnique<Dex2Oat>(&timings);
+ std::unique_ptr<Dex2Oat> dex2oat = std::make_unique<Dex2Oat>(&timings);
// Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
dex2oat->ParseArgs(argc, argv);
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 6420aa8759..b604e8b5f1 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -28,6 +28,7 @@
#include "base/logging.h"
#include "base/macros.h"
+#include "base/mutex-inl.h"
#include "dex_file-inl.h"
#include "dex2oat_environment_test.h"
#include "dex2oat_return_codes.h"
@@ -38,6 +39,8 @@
namespace art {
+static constexpr size_t kMaxMethodIds = 65535;
+
using android::base::StringPrintf;
class Dex2oatTest : public Dex2oatEnvironmentTest {
@@ -612,7 +615,7 @@ class Dex2oatLayoutTest : public Dex2oatTest {
ProfileCompilationInfo info;
std::string profile_key = ProfileCompilationInfo::GetProfileDexFileKey(dex_location);
for (size_t i = 0; i < num_classes; ++i) {
- info.AddClassIndex(profile_key, checksum, dex::TypeIndex(1 + i));
+ info.AddClassIndex(profile_key, checksum, dex::TypeIndex(1 + i), kMaxMethodIds);
}
bool result = info.Save(profile_test_fd);
close(profile_test_fd);
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 5656ddd59c..1541d7b39e 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -1747,9 +1747,8 @@ static void dumpCallSite(const DexFile* pDexFile, u4 idx) {
case EncodedArrayValueIterator::ValueType::kArray:
case EncodedArrayValueIterator::ValueType::kAnnotation:
// Unreachable based on current EncodedArrayValueIterator::Next().
- UNIMPLEMENTED(FATAL) << " type " << type;
+ UNIMPLEMENTED(FATAL) << " type " << it.GetValueType();
UNREACHABLE();
- break;
case EncodedArrayValueIterator::ValueType::kNull:
type = "Null";
value = "null";
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index cf453b9a16..62ee445085 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -57,31 +57,6 @@ static void GetLocalsCb(void* context, const DexFile::LocalInfo& entry) {
entry.reg_)));
}
-static uint32_t GetCodeItemSize(const DexFile::CodeItem& disk_code_item) {
- uintptr_t code_item_start = reinterpret_cast<uintptr_t>(&disk_code_item);
- uint32_t insns_size = disk_code_item.insns_size_in_code_units_;
- uint32_t tries_size = disk_code_item.tries_size_;
- if (tries_size == 0) {
- uintptr_t insns_end = reinterpret_cast<uintptr_t>(&disk_code_item.insns_[insns_size]);
- return insns_end - code_item_start;
- } else {
- // Get the start of the handler data.
- const uint8_t* handler_data = DexFile::GetCatchHandlerData(disk_code_item, 0);
- uint32_t handlers_size = DecodeUnsignedLeb128(&handler_data);
- // Manually read each handler.
- for (uint32_t i = 0; i < handlers_size; ++i) {
- int32_t uleb128_count = DecodeSignedLeb128(&handler_data) * 2;
- if (uleb128_count <= 0) {
- uleb128_count = -uleb128_count + 1;
- }
- for (int32_t j = 0; j < uleb128_count; ++j) {
- DecodeUnsignedLeb128(&handler_data);
- }
- }
- return reinterpret_cast<uintptr_t>(handler_data) - code_item_start;
- }
-}
-
static uint32_t GetDebugInfoStreamSize(const uint8_t* debug_info_stream) {
const uint8_t* stream = debug_info_stream;
DecodeUnsignedLeb128(&stream); // line_start
@@ -686,7 +661,7 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
}
}
- uint32_t size = GetCodeItemSize(disk_code_item);
+ uint32_t size = DexFile::GetCodeItemSize(disk_code_item);
CodeItem* code_item = new CodeItem(
registers_size, ins_size, outs_size, debug_info, insns_size, insns, tries, handler_list);
code_item->SetSize(size);
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 5692eb2b39..95e64bf3e7 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -23,6 +23,7 @@
#include <vector>
#include <stdint.h>
+#include "base/stl_util.h"
#include "dex_file-inl.h"
#include "leb128.h"
#include "utf.h"
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
index 829e9feda8..d279bcb65c 100644
--- a/dexlayout/dex_visualize.cc
+++ b/dexlayout/dex_visualize.cc
@@ -174,7 +174,7 @@ class Dumper {
ProfileCompilationInfo* profile_info) {
if (profile_info != nullptr) {
uint32_t method_idx = method->GetMethodId()->GetIndex();
- if (!profile_info->ContainsMethod(MethodReference(dex_file, method_idx))) {
+ if (!profile_info->ContainsHotMethod(MethodReference(dex_file, method_idx))) {
return;
}
}
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 205c0d1384..50dda88c55 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -1557,7 +1557,7 @@ void DexLayout::LayoutStringData(const DexFile* dex_file) {
(method->GetAccessFlags() & kAccConstructor) != 0 &&
(method->GetAccessFlags() & kAccStatic) != 0;
const bool method_executed = is_clinit ||
- info_->ContainsMethod(MethodReference(dex_file, method_id->GetIndex()));
+ info_->IsStartupOrHotMethod(MethodReference(dex_file, method_id->GetIndex()));
if (!method_executed) {
continue;
}
@@ -1665,8 +1665,9 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
enum CodeItemKind {
kMethodNotExecuted = 0,
- kMethodExecuted = 1,
- kSize = 2,
+ kMethodClinit = 1,
+ kMethodExecuted = 2,
+ kSize = 3,
};
static constexpr InvokeType invoke_types[] = {
@@ -1694,26 +1695,28 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
continue;
}
// Separate executed methods (clinits and profiled methods) from unexecuted methods.
- // TODO: clinits are executed only once, consider separating them further.
const bool is_clinit = is_profile_class &&
(method->GetAccessFlags() & kAccConstructor) != 0 &&
(method->GetAccessFlags() & kAccStatic) != 0;
- const bool is_method_executed = is_clinit ||
- info_->ContainsMethod(MethodReference(dex_file, method_id->GetIndex()));
- code_items[is_method_executed
- ? CodeItemKind::kMethodExecuted
- : CodeItemKind::kMethodNotExecuted]
- .insert(code_item);
+ const bool is_method_executed =
+ info_->IsStartupOrHotMethod(MethodReference(dex_file, method_id->GetIndex()));
+ CodeItemKind code_item_kind = CodeItemKind::kMethodNotExecuted;
+ if (is_clinit) {
+ code_item_kind = CodeItemKind::kMethodClinit;
+ } else if (is_method_executed) {
+ code_item_kind = CodeItemKind::kMethodExecuted;
+ }
+ code_items[code_item_kind].insert(code_item);
}
}
}
- // total_diff includes diffs generated by both executed and non-executed methods.
+ // total_diff includes diffs generated by clinits, executed, and non-executed methods.
int32_t total_diff = 0;
// The relative placement has no effect on correctness; it is used to ensure
// the layout is deterministic
for (std::unordered_set<dex_ir::CodeItem*>& code_items_set : code_items) {
- // diff is reset for executed and non-executed methods.
+ // diff is reset for each kind of code item.
int32_t diff = 0;
for (dex_ir::ClassData* data : new_class_data_order) {
data->SetOffset(data->GetOffset() + diff);
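As a distilled, standalone restatement of the bucketing rule introduced above (a sketch only; the real logic lives in DexLayout::LayoutCodeItems and operates on dex_ir types and the profile):

enum class CodeItemKind {
  kMethodNotExecuted = 0,
  kMethodClinit = 1,
  kMethodExecuted = 2,
};

// Clinits of profiled classes get their own bucket; otherwise startup/hot methods are
// separated from methods the profile never saw executing.
static CodeItemKind ClassifyCodeItem(bool is_clinit, bool is_method_executed) {
  if (is_clinit) {
    return CodeItemKind::kMethodClinit;
  }
  return is_method_executed ? CodeItemKind::kMethodExecuted : CodeItemKind::kMethodNotExecuted;
}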
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 1d09a7f72a..6fe8eeb66e 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -341,18 +341,30 @@ class DexLayoutTest : public CommonRuntimeTest {
if ((i & 3) != 0) {
pfi.AddMethodIndex(dex_location,
dex_file->GetLocationChecksum(),
- i);
+ i,
+ dex_file->NumMethodIds());
+ ++profile_methods;
+ } else if ((i & 2) != 0) {
+ pfi.AddSampledMethod(/*startup*/true,
+ dex_location,
+ dex_file->GetLocationChecksum(),
+ i,
+ dex_file->NumMethodIds());
++profile_methods;
}
}
DexCacheResolvedClasses cur_classes(dex_location,
dex_location,
- dex_file->GetLocationChecksum());
+ dex_file->GetLocationChecksum(),
+ dex_file->NumMethodIds());
// Add every even class too.
for (uint32_t i = 0; i < dex_file->NumClassDefs(); i += 1) {
- cur_classes.AddClass(dex_file->GetClassDef(i).class_idx_);
- ++profile_classes;
+ if ((i & 2) == 0) {
+ cur_classes.AddClass(dex_file->GetClassDef(i).class_idx_);
+ ++profile_classes;
+ }
}
+ classes.insert(cur_classes);
}
pfi.AddMethodsAndClasses(pmis, classes);
// Write to provided file.
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 8894cc9899..91203cb9f9 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -438,10 +438,16 @@ static const MipsInstruction gMipsInstructions[] = {
{ kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x10, "ave_u", "Vkmn" },
{ kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x10, "aver_s", "Vkmn" },
{ kMsaMask | (0x7 << 23), kMsa | (0x7 << 23) | 0x10, "aver_u", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0xe, "max_s", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x3 << 23) | 0xe, "max_u", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0xe, "min_s", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0xe, "min_u", "Vkmn" },
{ kMsaMask | (0xf << 22), kMsa | (0x0 << 22) | 0x1b, "fadd", "Ukmn" },
{ kMsaMask | (0xf << 22), kMsa | (0x1 << 22) | 0x1b, "fsub", "Ukmn" },
{ kMsaMask | (0xf << 22), kMsa | (0x2 << 22) | 0x1b, "fmul", "Ukmn" },
{ kMsaMask | (0xf << 22), kMsa | (0x3 << 22) | 0x1b, "fdiv", "Ukmn" },
+ { kMsaMask | (0xf << 22), kMsa | (0xe << 22) | 0x1b, "fmax", "Ukmn" },
+ { kMsaMask | (0xf << 22), kMsa | (0xc << 22) | 0x1b, "fmin", "Ukmn" },
{ kMsaMask | (0x1ff << 17), kMsa | (0x19e << 17) | 0x1e, "ffint_s", "ukm" },
{ kMsaMask | (0x1ff << 17), kMsa | (0x19c << 17) | 0x1e, "ftint_s", "ukm" },
{ kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0xd, "sll", "Vkmn" },
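The new rows follow the table's existing mask/value scheme: an instruction word matches a row when (word & mask) == value, and the first matching row supplies the mnemonic. A small standalone sketch of that matching step; the row struct here is illustrative and omits the argument-format string the real gMipsInstructions entries carry:

#include <cstddef>
#include <cstdint>

struct MaskValueRow {
  uint32_t mask;
  uint32_t value;
  const char* name;
};

// Linear scan, mirroring how a table-driven disassembler picks the mnemonic.
static const char* LookupMnemonic(const MaskValueRow* rows, size_t count, uint32_t instruction) {
  for (size_t i = 0; i < count; ++i) {
    if ((instruction & rows[i].mask) == rows[i].value) {
      return rows[i].name;
    }
  }
  return nullptr;  // Unknown encoding.
}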
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 0d46b2ea7a..c948d3cbe2 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -28,6 +28,7 @@
#include "runtime/utils.h"
#include "runtime/gc/space/image_space.h"
#include "runtime/gc/heap.h"
+#include "runtime/runtime.h"
#include <sys/types.h>
#include <unistd.h>
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index f07e0f9941..9b95de2fb0 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -63,6 +63,7 @@
#include "safe_map.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
+#include "stack.h"
#include "stack_map.h"
#include "string_reference.h"
#include "thread_list.h"
@@ -129,8 +130,8 @@ class OatSymbolizer FINAL {
if (elf_file == nullptr) {
return false;
}
- std::unique_ptr<BufferedOutputStream> output_stream(
- MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(elf_file.get())));
+ std::unique_ptr<BufferedOutputStream> output_stream =
+ std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(elf_file.get()));
builder_.reset(new ElfBuilder<ElfTypes>(isa, features.get(), output_stream.get()));
builder_->Start();
@@ -170,6 +171,7 @@ class OatSymbolizer FINAL {
rodata_size,
text_size,
oat_file_->BssSize(),
+ oat_file_->BssMethodsOffset(),
oat_file_->BssRootsOffset());
builder_->WriteDynamicSection();
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index e750ede8fa..ec3481b622 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -40,6 +40,7 @@
#include "elf_file_impl.h"
#include "gc/space/image_space.h"
#include "image-inl.h"
+#include "intern_table.h"
#include "mirror/dex_cache.h"
#include "mirror/executable.h"
#include "mirror/object-inl.h"
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 1c328987cb..ccf9ac6ad5 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -30,6 +30,8 @@
namespace art {
+static constexpr size_t kMaxMethodIds = 65535;
+
class ProfileAssistantTest : public CommonRuntimeTest {
public:
void PostRuntimeCreate() OVERRIDE {
@@ -56,15 +58,18 @@ class ProfileAssistantTest : public CommonRuntimeTest {
GetOfflineProfileMethodInfo(dex_location1, dex_location_checksum1,
dex_location2, dex_location_checksum2);
if (reverse_dex_write_order) {
- ASSERT_TRUE(info->AddMethod(dex_location2, dex_location_checksum2, i, pmi));
- ASSERT_TRUE(info->AddMethod(dex_location1, dex_location_checksum1, i, pmi));
+ ASSERT_TRUE(info->AddMethod(dex_location2, dex_location_checksum2, i, kMaxMethodIds, pmi));
+ ASSERT_TRUE(info->AddMethod(dex_location1, dex_location_checksum1, i, kMaxMethodIds, pmi));
} else {
- ASSERT_TRUE(info->AddMethod(dex_location1, dex_location_checksum1, i, pmi));
- ASSERT_TRUE(info->AddMethod(dex_location2, dex_location_checksum2, i, pmi));
+ ASSERT_TRUE(info->AddMethod(dex_location1, dex_location_checksum1, i, kMaxMethodIds, pmi));
+ ASSERT_TRUE(info->AddMethod(dex_location2, dex_location_checksum2, i, kMaxMethodIds, pmi));
}
}
for (uint16_t i = 0; i < number_of_classes; i++) {
- ASSERT_TRUE(info->AddClassIndex(dex_location1, dex_location_checksum1, dex::TypeIndex(i)));
+ ASSERT_TRUE(info->AddClassIndex(dex_location1,
+ dex_location_checksum1,
+ dex::TypeIndex(i),
+ kMaxMethodIds));
}
ASSERT_TRUE(info->Save(GetFd(profile)));
@@ -72,6 +77,29 @@ class ProfileAssistantTest : public CommonRuntimeTest {
ASSERT_TRUE(profile.GetFile()->ResetOffset());
}
+ void SetupBasicProfile(const std::string& id,
+ uint32_t checksum,
+ uint16_t number_of_methods,
+ const std::vector<uint32_t> hot_methods,
+ const std::vector<uint32_t> startup_methods,
+ const std::vector<uint32_t> post_startup_methods,
+ const ScratchFile& profile,
+ ProfileCompilationInfo* info) {
+ std::string dex_location = "location1" + id;
+ for (uint32_t idx : hot_methods) {
+ info->AddMethodIndex(dex_location, checksum, idx, number_of_methods);
+ }
+ for (uint32_t idx : startup_methods) {
+ info->AddSampledMethod(/*startup*/true, dex_location, checksum, idx, number_of_methods);
+ }
+ for (uint32_t idx : post_startup_methods) {
+ info->AddSampledMethod(/*startup*/false, dex_location, checksum, idx, number_of_methods);
+ }
+ ASSERT_TRUE(info->Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+ }
+
// Creates an inline cache which will be destructed at the end of the test.
ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
@@ -84,8 +112,8 @@ class ProfileAssistantTest : public CommonRuntimeTest {
const std::string& dex_location2, uint32_t dex_checksum2) {
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back(dex_location1, dex_checksum1);
- pmi.dex_references.emplace_back(dex_location2, dex_checksum2);
+ pmi.dex_references.emplace_back(dex_location1, dex_checksum1, kMaxMethodIds);
+ pmi.dex_references.emplace_back(dex_location2, dex_checksum2, kMaxMethodIds);
// Monomorphic
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
@@ -193,28 +221,42 @@ class ProfileAssistantTest : public CommonRuntimeTest {
return true;
}
- bool DumpClassesAndMethods(const std::string& filename, std::string* file_contents) {
- ScratchFile class_names_file;
+ bool RunProfman(const std::string& filename,
+ std::vector<std::string>& extra_args,
+ std::string* output) {
+ ScratchFile output_file;
std::string profman_cmd = GetProfmanCmd();
std::vector<std::string> argv_str;
argv_str.push_back(profman_cmd);
- argv_str.push_back("--dump-classes-and-methods");
+ argv_str.insert(argv_str.end(), extra_args.begin(), extra_args.end());
argv_str.push_back("--profile-file=" + filename);
argv_str.push_back("--apk=" + GetLibCoreDexFileNames()[0]);
argv_str.push_back("--dex-location=" + GetLibCoreDexFileNames()[0]);
- argv_str.push_back("--dump-output-to-fd=" + std::to_string(GetFd(class_names_file)));
+ argv_str.push_back("--dump-output-to-fd=" + std::to_string(GetFd(output_file)));
std::string error;
EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0);
- File* file = class_names_file.GetFile();
+ File* file = output_file.GetFile();
EXPECT_EQ(0, file->Flush());
EXPECT_TRUE(file->ResetOffset());
int64_t length = file->GetLength();
std::unique_ptr<char[]> buf(new char[length]);
EXPECT_EQ(file->Read(buf.get(), length, 0), length);
- *file_contents = std::string(buf.get(), length);
+ *output = std::string(buf.get(), length);
return true;
}
+ bool DumpClassesAndMethods(const std::string& filename, std::string* file_contents) {
+ std::vector<std::string> extra_args;
+ extra_args.push_back("--dump-classes-and-methods");
+ return RunProfman(filename, extra_args, file_contents);
+ }
+
+ bool DumpOnly(const std::string& filename, std::string* file_contents) {
+ std::vector<std::string> extra_args;
+ extra_args.push_back("--dump-only");
+ return RunProfman(filename, extra_args, file_contents);
+ }
+
bool CreateAndDump(const std::string& input_file_contents,
std::string* output_file_contents) {
ScratchFile profile_file;
@@ -520,10 +562,11 @@ TEST_F(ProfileAssistantTest, TestProfileGenerationWithIndexDex) {
TEST_F(ProfileAssistantTest, TestProfileCreationAllMatch) {
// Class names put here need to be in sorted order.
std::vector<std::string> class_names = {
+ "HLjava/lang/Object;-><init>()V",
"Ljava/lang/Comparable;",
"Ljava/lang/Math;",
"Ljava/lang/Object;",
- "Ljava/lang/Object;-><init>()V"
+ "SPLjava/lang/Comparable;->compareTo(Ljava/lang/Object;)I",
};
std::string file_contents;
for (std::string& class_name : class_names) {
@@ -807,15 +850,80 @@ TEST_F(ProfileAssistantTest, TestProfileCreateWithInvalidData) {
// Verify that the start-up classes contain the invalid class.
std::set<dex::TypeIndex> classes;
- std::set<uint16_t> methods;
- ASSERT_TRUE(info.GetClassesAndMethods(*dex_file, &classes, &methods));
+ std::set<uint16_t> hot_methods;
+ std::set<uint16_t> startup_methods;
+ std::set<uint16_t> post_start_methods;
+ ASSERT_TRUE(info.GetClassesAndMethods(*dex_file,
+ &classes,
+ &hot_methods,
+ &startup_methods,
+ &post_start_methods));
ASSERT_EQ(1u, classes.size());
ASSERT_TRUE(classes.find(invalid_class_index) != classes.end());
// Verify that the invalid method is in the profile.
- ASSERT_EQ(2u, methods.size());
+ ASSERT_EQ(2u, hot_methods.size());
uint16_t invalid_method_index = std::numeric_limits<uint16_t>::max() - 1;
- ASSERT_TRUE(methods.find(invalid_method_index) != methods.end());
+ ASSERT_TRUE(hot_methods.find(invalid_method_index) != hot_methods.end());
+}
+
+TEST_F(ProfileAssistantTest, DumpOnly) {
+ ScratchFile profile;
+
+ const uint32_t kNumberOfMethods = 64;
+ std::vector<uint32_t> hot_methods;
+ std::vector<uint32_t> startup_methods;
+ std::vector<uint32_t> post_startup_methods;
+ for (size_t i = 0; i < kNumberOfMethods; ++i) {
+ if (i % 2 == 0) {
+ hot_methods.push_back(i);
+ }
+ if (i % 3 == 1) {
+ startup_methods.push_back(i);
+ }
+ if (i % 4 == 2) {
+ post_startup_methods.push_back(i);
+ }
+ }
+ EXPECT_GT(hot_methods.size(), 0u);
+ EXPECT_GT(startup_methods.size(), 0u);
+ EXPECT_GT(post_startup_methods.size(), 0u);
+ ProfileCompilationInfo info1;
+ SetupBasicProfile("p1",
+ 1,
+ kNumberOfMethods,
+ hot_methods,
+ startup_methods,
+ post_startup_methods,
+ profile,
+ &info1);
+ std::string output;
+ DumpOnly(profile.GetFilename(), &output);
+ const size_t hot_offset = output.find("hot methods:");
+ const size_t startup_offset = output.find("startup methods:");
+ const size_t post_startup_offset = output.find("post startup methods:");
+ const size_t classes_offset = output.find("classes:");
+ ASSERT_NE(hot_offset, std::string::npos);
+ ASSERT_NE(startup_offset, std::string::npos);
+ ASSERT_NE(post_startup_offset, std::string::npos);
+ ASSERT_LT(hot_offset, startup_offset);
+ ASSERT_LT(startup_offset, post_startup_offset);
+ // Check the actual contents of the dump by looking at the offsets of the methods.
+ for (uint32_t m : hot_methods) {
+ const size_t pos = output.find(std::to_string(m) + "[],", hot_offset);
+ ASSERT_NE(pos, std::string::npos);
+ EXPECT_LT(pos, startup_offset);
+ }
+ for (uint32_t m : startup_methods) {
+ const size_t pos = output.find(std::to_string(m) + ",", startup_offset);
+ ASSERT_NE(pos, std::string::npos);
+ EXPECT_LT(pos, post_startup_offset);
+ }
+ for (uint32_t m : post_startup_methods) {
+ const size_t pos = output.find(std::to_string(m) + ",", post_startup_offset);
+ ASSERT_NE(pos, std::string::npos);
+ EXPECT_LT(pos, classes_offset);
+ }
}
} // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index afc21057b1..adef0d0332 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -43,6 +43,7 @@
#include "runtime.h"
#include "type_reference.h"
#include "utils.h"
#include "zip_archive.h"
namespace art {
@@ -150,6 +151,9 @@ static const std::string kClassAllMethods = "*";
static constexpr char kProfileParsingInlineChacheSep = '+';
static constexpr char kProfileParsingTypeSep = ',';
static constexpr char kProfileParsingFirstCharInSignature = '(';
+static constexpr char kMethodFlagStringHot = 'H';
+static constexpr char kMethodFlagStringStartup = 'S';
+static constexpr char kMethodFlagStringPostStartup = 'P';
// TODO(calin): This class has grown too much from its initial design. Split the functionality
// into smaller, more contained pieces.
@@ -426,18 +430,42 @@ class ProfMan FINAL {
}
for (const std::unique_ptr<const DexFile>& dex_file : *dex_files) {
std::set<dex::TypeIndex> class_types;
- std::set<uint16_t> methods;
- if (profile_info.GetClassesAndMethods(*dex_file.get(), &class_types, &methods)) {
+ std::set<uint16_t> hot_methods;
+ std::set<uint16_t> startup_methods;
+ std::set<uint16_t> post_startup_methods;
+ std::set<uint16_t> combined_methods;
+ if (profile_info.GetClassesAndMethods(*dex_file.get(),
+ &class_types,
+ &hot_methods,
+ &startup_methods,
+ &post_startup_methods)) {
for (const dex::TypeIndex& type_index : class_types) {
const DexFile::TypeId& type_id = dex_file->GetTypeId(type_index);
out_lines->insert(std::string(dex_file->GetTypeDescriptor(type_id)));
}
- for (uint16_t dex_method_idx : methods) {
+ combined_methods = hot_methods;
+ combined_methods.insert(startup_methods.begin(), startup_methods.end());
+ combined_methods.insert(post_startup_methods.begin(), post_startup_methods.end());
+ for (uint16_t dex_method_idx : combined_methods) {
const DexFile::MethodId& id = dex_file->GetMethodId(dex_method_idx);
std::string signature_string(dex_file->GetMethodSignature(id).ToString());
std::string type_string(dex_file->GetTypeDescriptor(dex_file->GetTypeId(id.class_idx_)));
std::string method_name(dex_file->GetMethodName(id));
- out_lines->insert(type_string + kMethodSep + method_name + signature_string);
+ std::string flags_string;
+ if (hot_methods.find(dex_method_idx) != hot_methods.end()) {
+ flags_string += kMethodFlagStringHot;
+ }
+ if (startup_methods.find(dex_method_idx) != startup_methods.end()) {
+ flags_string += kMethodFlagStringStartup;
+ }
+ if (post_startup_methods.find(dex_method_idx) != post_startup_methods.end()) {
+ flags_string += kMethodFlagStringPostStartup;
+ }
+ out_lines->insert(flags_string +
+ type_string +
+ kMethodSep +
+ method_name +
+ signature_string);
}
}
}
@@ -461,7 +489,7 @@ class ProfMan FINAL {
return true;
}
- int DumpClasses() {
+ int DumpClassesAndMethods() {
// Validate that at least one profile file or reference was specified.
if (profile_files_.empty() && profile_files_fd_.empty() &&
reference_profile_file_.empty() && !FdIsValid(reference_profile_file_fd_)) {
@@ -694,11 +722,30 @@ class ProfMan FINAL {
/*out*/ProfileCompilationInfo* profile) {
std::string klass;
std::string method_str;
- size_t method_sep_index = line.find(kMethodSep);
+ bool is_hot = false;
+ bool is_startup = false;
+ bool is_post_startup = false;
+ const size_t method_sep_index = line.find(kMethodSep, 0);
if (method_sep_index == std::string::npos) {
- klass = line;
+ klass = line.substr(0);
} else {
- klass = line.substr(0, method_sep_index);
+ // The method prefix flags are only valid for method strings.
+ size_t start_index = 0;
+ while (start_index < line.size() && line[start_index] != 'L') {
+ const char c = line[start_index];
+ if (c == kMethodFlagStringHot) {
+ is_hot = true;
+ } else if (c == kMethodFlagStringStartup) {
+ is_startup = true;
+ } else if (c == kMethodFlagStringPostStartup) {
+ is_post_startup = true;
+ } else {
+ LOG(WARNING) << "Invalid flag " << c;
+ return false;
+ }
+ ++start_index;
+ }
+ klass = line.substr(start_index, method_sep_index - start_index);
method_str = line.substr(method_sep_index + kMethodSep.size());
}
@@ -715,7 +762,8 @@ class ProfMan FINAL {
const auto& dex_resolved_classes = resolved_class_set.emplace(
dex_file->GetLocation(),
dex_file->GetBaseLocation(),
- dex_file->GetLocationChecksum());
+ dex_file->GetLocationChecksum(),
+ dex_file->NumMethodIds());
dex_resolved_classes.first->AddClass(class_ref.type_index);
std::vector<ProfileMethodInfo> methods;
if (method_str == kClassAllMethods) {
@@ -745,6 +793,9 @@ class ProfMan FINAL {
std::string method_spec;
std::vector<std::string> inline_cache_elems;
+ // If none of the flags are set, default to hot.
+ is_hot = is_hot || (!is_startup && !is_post_startup);
+
std::vector<std::string> method_elems;
bool is_missing_types = false;
Split(method_str, kProfileParsingInlineChacheSep, &method_elems);
@@ -766,7 +817,6 @@ class ProfMan FINAL {
return false;
}
- std::vector<ProfileMethodInfo> pmi;
std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
if (is_missing_types || !inline_cache_elems.empty()) {
uint32_t dex_pc;
@@ -783,8 +833,29 @@ class ProfMan FINAL {
}
inline_caches.emplace_back(dex_pc, is_missing_types, classes);
}
- pmi.emplace_back(class_ref.dex_file, method_index, inline_caches);
- profile->AddMethodsAndClasses(pmi, std::set<DexCacheResolvedClasses>());
+ ProfileMethodInfo pmi(class_ref.dex_file, method_index, inline_caches);
+ if (is_hot) {
+ profile->AddMethod(pmi);
+ }
+ if (is_startup) {
+ if (!profile->AddSampledMethod(/*is_startup*/ true,
+ pmi.dex_file->GetLocation(),
+ pmi.dex_file->GetLocationChecksum(),
+ method_index,
+ pmi.dex_file->NumMethodIds())) {
+ return false;
+ }
+ DCHECK(profile->IsStartupOrHotMethod(MethodReference(pmi.dex_file, method_index)));
+ }
+ if (is_post_startup) {
+ if (!profile->AddSampledMethod(/*is_startup*/ false,
+ pmi.dex_file->GetLocation(),
+ pmi.dex_file->GetLocationChecksum(),
+ method_index,
+ pmi.dex_file->NumMethodIds())) {
+ return false;
+ }
+ }
return true;
}
@@ -959,7 +1030,7 @@ static int profman(int argc, char** argv) {
return profman.DumpProfileInfo();
}
if (profman.ShouldOnlyDumpClassesAndMethods()) {
- return profman.DumpClasses();
+ return profman.DumpClassesAndMethods();
}
if (profman.ShouldCreateProfile()) {
return profman.CreateProfile();
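The text format accepted by CreateProfile now allows the optional H/S/P prefixes parsed above, e.g. "HLjava/lang/Object;-><init>()V" or "SPLjava/lang/Comparable;->compareTo(Ljava/lang/Object;)I" as used in profile_assistant_test.cc. A standalone sketch of that prefix scan follows; the struct and function names are illustrative, and unlike the real code it does not treat class-only lines separately:

#include <cstddef>
#include <string>

struct MethodLineFlags {
  bool is_hot = false;
  bool is_startup = false;
  bool is_post_startup = false;
  size_t descriptor_start = 0;  // Index of the 'L' starting the class descriptor.
};

// Consume H/S/P flag characters until the type descriptor begins; reject anything else.
static bool ParseMethodLineFlags(const std::string& line, MethodLineFlags* out) {
  size_t i = 0;
  while (i < line.size() && line[i] != 'L') {
    switch (line[i]) {
      case 'H': out->is_hot = true; break;
      case 'S': out->is_startup = true; break;
      case 'P': out->is_post_startup = true; break;
      default: return false;  // Invalid flag character.
    }
    ++i;
  }
  if (!out->is_hot && !out->is_startup && !out->is_post_startup) {
    out->is_hot = true;  // As above: a method line with no flags defaults to hot.
  }
  out->descriptor_start = i;
  return true;
}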
diff --git a/runtime/Android.bp b/runtime/Android.bp
index aa7dc65871..26e52e012e 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -54,6 +54,7 @@ cc_defaults {
"compiler_filter.cc",
"debugger.cc",
"dex_file.cc",
+ "dex_file_tracking_registrar.cc",
"dex_file_annotations.cc",
"dex_file_verifier.cc",
"dex_instruction.cc",
@@ -123,6 +124,7 @@ cc_defaults {
"jni_internal.cc",
"jobject_comparator.cc",
"linear_alloc.cc",
+ "managed_stack.cc",
"mem_map.cc",
"memory_region.cc",
"method_handles.cc",
@@ -243,7 +245,6 @@ cc_defaults {
"entrypoints/quick/quick_entrypoints_enum.cc",
"entrypoints/quick/quick_field_entrypoints.cc",
"entrypoints/quick/quick_fillarray_entrypoints.cc",
- "entrypoints/quick/quick_instrumentation_entrypoints.cc",
"entrypoints/quick/quick_jni_entrypoints.cc",
"entrypoints/quick/quick_lock_entrypoints.cc",
"entrypoints/quick/quick_math_entrypoints.cc",
@@ -427,6 +428,7 @@ gensrcs {
srcs: [
"arch/instruction_set.h",
"base/allocator.h",
+ "base/callee_save_type.h",
"base/enums.h",
"base/mutex.h",
"debugger.h",
@@ -454,7 +456,6 @@ gensrcs {
"oat.h",
"object_callbacks.h",
"process_state.h",
- "runtime.h",
"stack.h",
"thread.h",
"thread_state.h",
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index d6056c0ece..838ae40838 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -17,11 +17,30 @@
#include <stdint.h>
#include "art_method-inl.h"
+#include "base/callee_save_type.h"
#include "common_runtime_test.h"
#include "quick/quick_method_frame_info.h"
-// Common tests are declared next to the constants.
-#define ADD_TEST_EQ(x, y) EXPECT_EQ(x, y);
-#include "asm_support.h"
+
+
+// asm_support.h declares tests next to the #defines. We use asm_support_check.h to (safely)
+// generate CheckAsmSupportOffsetsAndSizes using gtest's EXPECT for the tests. We also use the
+// RETURN_TYPE, HEADER and FOOTER defines from asm_support_check.h to try to ensure that the
+// tests are actually generated.
+
+// Let CheckAsmSupportOffsetsAndSizes return a size_t (the count).
+#define ASM_SUPPORT_CHECK_RETURN_TYPE size_t
+
+// Declare the counter that will be updated per test.
+#define ASM_SUPPORT_CHECK_HEADER size_t count = 0;
+
+// Use EXPECT_EQ for tests, and increment the counter.
+#define ADD_TEST_EQ(x, y) EXPECT_EQ(x, y); count++;
+
+// Return the counter at the end of CheckAsmSupportOffsetsAndSizes.
+#define ASM_SUPPORT_CHECK_FOOTER return count;
+
+// Generate CheckAsmSupportOffsetsAndSizes().
+#include "asm_support_check.h"
namespace art {
@@ -40,7 +59,7 @@ class ArchTest : public CommonRuntimeTest {
ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
}
- static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
+ static void CheckFrameSize(InstructionSet isa, CalleeSaveType type, uint32_t save_size)
NO_THREAD_SAFETY_ANALYSIS {
Runtime* const runtime = Runtime::Current();
Thread* const self = Thread::Current();
@@ -57,7 +76,8 @@ class ArchTest : public CommonRuntimeTest {
};
TEST_F(ArchTest, CheckCommonOffsetsAndSizes) {
- CheckAsmSupportOffsetsAndSizes();
+ size_t test_count = CheckAsmSupportOffsetsAndSizes();
+ EXPECT_GT(test_count, 0u);
}
// Grab architecture specific constants.
@@ -151,16 +171,16 @@ static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
#define TEST_ARCH(Arch, arch) \
TEST_F(ArchTest, Arch) { \
CheckFrameSize(InstructionSet::k##Arch, \
- Runtime::kSaveAllCalleeSaves, \
+ CalleeSaveType::kSaveAllCalleeSaves, \
arch::kFrameSizeSaveAllCalleeSaves); \
CheckFrameSize(InstructionSet::k##Arch, \
- Runtime::kSaveRefsOnly, \
+ CalleeSaveType::kSaveRefsOnly, \
arch::kFrameSizeSaveRefsOnly); \
CheckFrameSize(InstructionSet::k##Arch, \
- Runtime::kSaveRefsAndArgs, \
+ CalleeSaveType::kSaveRefsAndArgs, \
arch::kFrameSizeSaveRefsAndArgs); \
CheckFrameSize(InstructionSet::k##Arch, \
- Runtime::kSaveEverything, \
+ CalleeSaveType::kSaveEverything, \
arch::kFrameSizeSaveEverything); \
}
TEST_ARCH(Arm, arm)
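The updated test relies on an include-time generation pattern: the includer defines the RETURN_TYPE/HEADER/FOOTER macros and ADD_TEST_EQ before pulling in asm_support_check.h, which expands them into the body of CheckAsmSupportOffsetsAndSizes. A self-contained sketch of the same pattern; the macro names, the checks, and the inlined function body below are illustrative stand-ins, since the real list comes from the #include:

#include <cassert>
#include <cstddef>
#include <cstdint>

#define CHECKS_RETURN_TYPE size_t
#define CHECKS_HEADER size_t count = 0;
#define ADD_TEST_EQ(x, y) assert((x) == (y)); ++count;
#define CHECKS_FOOTER return count;

// In arch_test.cc this body is not written out by hand; it is generated by the included header.
CHECKS_RETURN_TYPE RunIllustrativeChecks() {
  CHECKS_HEADER
  ADD_TEST_EQ(sizeof(int32_t), 4u)
  ADD_TEST_EQ(sizeof(int64_t), 8u)
  CHECKS_FOOTER
}

A caller can then assert that the returned count is non-zero, exactly as the rewritten CheckCommonOffsetsAndSizes test does, to catch the case where the checks silently fail to expand.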
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index 9cbec1e5bc..0db14fb8a5 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -17,8 +17,9 @@
#include "context_arm.h"
#include "base/bit_utils.h"
+#include "base/bit_utils_iterator.h"
#include "quick/quick_method_frame_info.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace arm {
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 919b0afc40..8a8d26466f 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -18,6 +18,7 @@
#include <string.h>
#include "arch/arm/asm_support_arm.h"
+#include "base/bit_utils.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 4c15450ff7..b4bca014f4 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -25,7 +25,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
//
// ARM specific fault handler functions.
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 31a7f6ae8e..676efc4a77 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1112,7 +1112,10 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_rosalloc, RosAlloc).
-.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+//
+// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
+// If isInitialized=0 the compiler can only assume it's been at least resolved.
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
ENTRY \c_name
// Fast path rosalloc allocation.
// r0: type/return value, r9: Thread::Current
@@ -1128,6 +1131,11 @@ ENTRY \c_name
cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation. Also does the
// initialized and finalizable checks.
+ // When isInitialized == 0, the class is potentially not yet initialized.
+ // If the class is not yet initialized, the object size will be very large to force the branch
+ // below to be taken.
+ //
+ // See InitializeClassVisitors in class-inl.h for more details.
bhs .Lslow_path\c_name
// Compute the rosalloc bracket index
// from the size. Since the size is
@@ -1157,18 +1165,6 @@ ENTRY \c_name
#endif
POISON_HEAP_REF r0
str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
- // Fence. This is "ish" not "ishst" so
- // that it also ensures ordering of
- // the class status load with respect
- // to later accesses to the class
- // object. Alternatively we could use
- // "ishst" if we use load-acquire for
- // the object size load.
- // Needs to be done before pushing on
- // allocation since Heap::VisitObjects
- // relies on seeing the class pointer.
- // b/28790624
- dmb ish
// Push the new object onto the thread
// local allocation stack and
// increment the thread local
@@ -1177,6 +1173,28 @@ ENTRY \c_name
str r3, [r1], #COMPRESSED_REFERENCE_SIZE // (Increment r1 as a side effect.)
str r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
// Decrement the size of the free list
+
+ // After this "STR" the object is published to the thread local allocation stack,
+ // and it will be observable from a runtime internal (e.g. Heap::VisitObjects) point of view.
+ // It is not yet visible to the running (user) compiled code until after the return.
+ //
+ // To avoid the memory barrier prior to the "STR", a trick is employed: we differentiate
+ // the state of the allocation stack slot. It can be a pointer to one of:
+ // 0) Null entry, because the stack was bumped but the new pointer wasn't written yet.
+ // (The stack initial state is "null" pointers).
+ // 1) A partially valid object, with an invalid class pointer to the next free rosalloc slot.
+ // 2) A fully valid object, with a valid class pointer pointing to a real class.
+ // Other states are not allowed.
+ //
+ // Such an object is invalid only temporarily; it will eventually become fully valid.
+ // The internal runtime code simply checks whether the object is null or only partially
+ // valid and, if so, ignores it.
+ //
+ // (Note: The actual check is done by seeing whether a non-null object has a class pointer
+ // pointing to ClassClass, and whether ClassClass's class pointer is self-cyclic. A rosalloc
+ // free slot's "next" pointer is not cyclic.)
+ //
+ // See also b/28790624 for a listing of CLs dealing with this race.
ldr r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
sub r1, #1
// TODO: consider combining this store
@@ -1185,6 +1203,19 @@ ENTRY \c_name
str r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
mov r0, r3 // Set the return value and return.
+.if \isInitialized == 0
+ // This barrier is only necessary when the allocation also requires
+ // a class initialization check.
+ //
+ // If the class is already observably initialized, then new-instance allocations are protected
+ // from publishing by the compiler which inserts its own StoreStore barrier.
+ dmb ish
+ // Use a "dmb ish" fence here because if there are later loads of statics (e.g. class size),
+ // they should happen-after the implicit initialization check.
+ //
+ // TODO: Remove this dmb for class initialization checks (b/36692143) by introducing
+ // a new observably-initialized class state.
+.endif
bx lr
.Lslow_path\c_name:
@@ -1196,18 +1227,21 @@ ENTRY \c_name
END \c_name
.endm
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc, /* isInitialized */ 0
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc, /* isInitialized */ 1
// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
// and art_quick_alloc_object_resolved/initialized_region_tlab.
//
// r0: type r9: Thread::Current, r1, r2, r3, r12: free.
// Need to preserve r0 to the slow path.
-.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel
- // Load thread_local_pos (r12) and
- // thread_local_end (r3) with ldrd.
- // Check constraints for ldrd.
+//
+// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
+// If isInitialized=0 the compiler can only assume it's been at least resolved.
+.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel isInitialized
+ // Load thread_local_pos (r12) and
+ // thread_local_end (r3) with ldrd.
+ // Check constraints for ldrd.
#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
#endif
@@ -1215,6 +1249,11 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
sub r12, r3, r12 // Compute the remaining buf size.
ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3).
cmp r3, r12 // Check if it fits.
+ // When isInitialized == 0, the class is potentially not yet initialized.
+ // If the class is not yet initialized, the object size will be very large to force the branch
+ // below to be taken.
+ //
+ // See InitializeClassVisitors in class-inl.h for more details.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
// Reload old thread_local_pos (r0)
@@ -1222,6 +1261,23 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
ldr r2, [r9, #THREAD_LOCAL_POS_OFFSET]
add r1, r2, r3
str r1, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ // After this "STR" the object is published to the thread local allocation stack,
+ // and it will be observable from a runtime internal (e.g. Heap::VisitObjects) point of view.
+ // It is not yet visible to the running (user) compiled code until after the return.
+ //
+ // To avoid the memory barrier prior to the "STR", a trick is employed: we differentiate
+ // the state of the object. It can be either:
+ // 1) A partially valid object, with a null class pointer
+ // (because the initial state of TLAB buffers is all 0s/nulls).
+ // 2) A fully valid object, with a valid class pointer pointing to a real class.
+ // Other states are not allowed.
+ //
+ // Such an object is invalid only temporarily; it will eventually become fully valid.
+ // The internal runtime code simply checks whether the object is null or only partially
+ // valid and, if so, ignores it.
+ //
+ // (Note: The actual check is done by checking that the object's class pointer is non-null.
+ // Also, unlike rosalloc, the object can never be observed as null).
ldr r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add r1, r1, #1
str r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
@@ -1231,21 +1287,29 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
// that the code after this allocation
// site will see the right values in
// the fields of the class.
- // Alternatively we could use "ishst"
- // if we use load-acquire for the
- // object size load.)
mov r0, r2
+.if \isInitialized == 0
+ // This barrier is only necessary when the allocation also requires
+ // a class initialization check.
+ //
+ // If the class is already observably initialized, then new-instance allocations are protected
+ // from publishing by the compiler which inserts its own StoreStore barrier.
dmb ish
+ // Use a "dmb ish" fence here because if there are later loads of statics (e.g. class size),
+ // they should happen-after the implicit initialization check.
+ //
+ // TODO: Remove dmb for class initialization checks (b/36692143)
+.endif
bx lr
.endm
// The common code for art_quick_alloc_object_*region_tlab
-.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint
+.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint, isInitialized
ENTRY \name
// Fast path tlab allocation.
// r0: type, r9: Thread::Current
// r1, r2, r3, r12: free.
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\name
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\name, \isInitialized
.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
mov r1, r9 // Pass Thread::Current.
@@ -1255,10 +1319,10 @@ ENTRY \name
END \name
.endm
-GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB
-GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB
-GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB
-GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, /* isInitialized */ 0
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, /* isInitialized */ 1
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB, /* isInitialized */ 0
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB, /* isInitialized */ 1
// The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
@@ -1279,6 +1343,8 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
ldrd r3, r12, [r9, #THREAD_LOCAL_POS_OFFSET]
sub r12, r12, r3 // Compute the remaining buf size.
cmp r2, r12 // Check if the total_size fits.
+ // The array class is always initialized here. Unlike new-instance, this size check
+ // does not double as a class-initialization check.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
add r2, r2, r3
@@ -1293,11 +1359,13 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
// that the code after this allocation
// site will see the right values in
// the fields of the class.
- // Alternatively we could use "ishst"
- // if we use load-acquire for the
- // object size load.)
mov r0, r3
- dmb ish
+// new-array is special. The class is loaded and immediately goes to the Initialized state
+// before it is published. Therefore the only fence needed is for the publication of the object.
+// See ClassLinker::CreateArrayClass() for more details.
+
+// For publication of the new array, we don't need a 'dmb ishst' here.
+// The compiler generates 'dmb ishst' for all new-array instructions.
bx lr
.endm
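The publication comments added in the hunks above describe the consumer side only in prose. Below is a minimal consumer-side sketch of what they mean (assuming the ART mirror headers; this is not the actual heap-walk code): a runtime-internal visitor tolerates the barrier-free publication simply by skipping entries that are still null or whose class pointer has not been stored yet. This mirrors the TLAB note; the rosalloc path needs the stronger ClassClass-based check described in its own comment.

#include "mirror/object-inl.h"

namespace art {

template <typename Visitor>
void VisitPublishedObjects(mirror::Object* const* begin,
                           mirror::Object* const* end,
                           const Visitor& visit) {
  for (mirror::Object* const* it = begin; it != end; ++it) {
    mirror::Object* obj = *it;
    if (obj == nullptr) {
      continue;  // Slot was bumped but the pointer is not written yet.
    }
    if (obj->GetClass() == nullptr) {
      continue;  // Partially published object; it will become fully valid shortly.
    }
    visit(obj);  // Fully valid object.
  }
}

}  // namespace art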
@@ -1630,8 +1698,10 @@ ENTRY art_quick_instrumentation_entry
@ preserve r0 (not normally an arg) knowing there is a spare slot in kSaveRefsAndArgs.
str r0, [sp, #4]
mov r2, r9 @ pass Thread::Current
- mov r3, lr @ pass LR
- blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, LR)
+ mov r3, sp @ pass SP
+ blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, SP)
+ cbz r0, .Ldeliver_instrumentation_entry_exception
+ @ Deliver exception if we got nullptr as function.
mov r12, r0 @ r12 holds reference to code
ldr r0, [sp, #4] @ restore r0
RESTORE_SAVE_REFS_AND_ARGS_FRAME
@@ -1647,19 +1717,13 @@ art_quick_instrumentation_exit:
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset r1, 4
+ mov r2, sp @ store gpr_res pointer.
vpush {d0} @ save fp return value
.cfi_adjust_cfa_offset 8
- sub sp, #8 @ space for return value argument. Note: AAPCS stack alignment is 8B, no
- @ need to align by 16.
- .cfi_adjust_cfa_offset 8
- vstr d0, [sp] @ d0 -> [sp] for fpr_res
- mov r2, r0 @ pass return value as gpr_res
- mov r3, r1
- mov r0, r9 @ pass Thread::Current
+ mov r3, sp @ store fpr_res pointer
mov r1, r12 @ pass SP
- blx artInstrumentationMethodExitFromCode @ (Thread*, SP, gpr_res, fpr_res)
- add sp, #8
- .cfi_adjust_cfa_offset -8
+ mov r0, r9 @ pass Thread::Current
+ blx artInstrumentationMethodExitFromCode @ (Thread*, SP, gpr_res*, fpr_res*)
mov r2, r0 @ link register saved by instrumentation
mov lr, r1 @ r1 is holding link register if we're to bounce to deoptimize
@@ -1669,9 +1733,16 @@ art_quick_instrumentation_exit:
.cfi_adjust_cfa_offset -8
.cfi_restore r0
.cfi_restore r1
- add sp, #32 @ remove callee save frame
- .cfi_adjust_cfa_offset -32
- bx r2 @ return
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ cbz r2, .Ldo_deliver_instrumentation_exception
+ @ Deliver exception if we got nullptr as function.
+ bx r2 @ Otherwise, return
+.Ldeliver_instrumentation_entry_exception:
+ @ Deliver exception for art_quick_instrumentation_entry placed after
+ @ art_quick_instrumentation_exit so that the fallthrough works.
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+.Ldo_deliver_instrumentation_exception:
+ DELIVER_PENDING_EXCEPTION
END art_quick_instrumentation_entry
/*
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
index 35f1948138..39061f0d4d 100644
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -17,10 +17,12 @@
#ifndef ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
#define ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
+#include "arch/instruction_set.h"
#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
#include "quick/quick_method_frame_info.h"
#include "registers_arm.h"
-#include "runtime.h" // for Runtime::CalleeSaveType.
namespace art {
namespace arm {
@@ -53,44 +55,44 @@ static constexpr uint32_t kArmCalleeSaveFpAllSpills =
static constexpr uint32_t kArmCalleeSaveFpEverythingSpills =
kArmCalleeSaveFpArgSpills | kArmCalleeSaveFpAllSpills;
-constexpr uint32_t ArmCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+constexpr uint32_t ArmCalleeSaveCoreSpills(CalleeSaveType type) {
return kArmCalleeSaveAlwaysSpills | kArmCalleeSaveRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAllCalleeSaves ? kArmCalleeSaveAllSpills : 0) |
- (type == Runtime::kSaveEverything ? kArmCalleeSaveEverythingSpills : 0);
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveEverythingSpills : 0);
}
-constexpr uint32_t ArmCalleeSaveFpSpills(Runtime::CalleeSaveType type) {
+constexpr uint32_t ArmCalleeSaveFpSpills(CalleeSaveType type) {
return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills : 0) |
- (type == Runtime::kSaveAllCalleeSaves ? kArmCalleeSaveFpAllSpills : 0) |
- (type == Runtime::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveFpAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
}
-constexpr uint32_t ArmCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+constexpr uint32_t ArmCalleeSaveFrameSize(CalleeSaveType type) {
return RoundUp((POPCOUNT(ArmCalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(ArmCalleeSaveFpSpills(type)) /* fprs */ +
1 /* Method* */) * static_cast<size_t>(kArmPointerSize), kStackAlignment);
}
-constexpr QuickMethodFrameInfo ArmCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+constexpr QuickMethodFrameInfo ArmCalleeSaveMethodFrameInfo(CalleeSaveType type) {
return QuickMethodFrameInfo(ArmCalleeSaveFrameSize(type),
ArmCalleeSaveCoreSpills(type),
ArmCalleeSaveFpSpills(type));
}
-constexpr size_t ArmCalleeSaveFpr1Offset(Runtime::CalleeSaveType type) {
+constexpr size_t ArmCalleeSaveFpr1Offset(CalleeSaveType type) {
return ArmCalleeSaveFrameSize(type) -
(POPCOUNT(ArmCalleeSaveCoreSpills(type)) +
POPCOUNT(ArmCalleeSaveFpSpills(type))) * static_cast<size_t>(kArmPointerSize);
}
-constexpr size_t ArmCalleeSaveGpr1Offset(Runtime::CalleeSaveType type) {
+constexpr size_t ArmCalleeSaveGpr1Offset(CalleeSaveType type) {
return ArmCalleeSaveFrameSize(type) -
POPCOUNT(ArmCalleeSaveCoreSpills(type)) * static_cast<size_t>(kArmPointerSize);
}
-constexpr size_t ArmCalleeSaveLrOffset(Runtime::CalleeSaveType type) {
+constexpr size_t ArmCalleeSaveLrOffset(CalleeSaveType type) {
return ArmCalleeSaveFrameSize(type) -
POPCOUNT(ArmCalleeSaveCoreSpills(type) & (-(1 << LR))) * static_cast<size_t>(kArmPointerSize);
}
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index d5d1ec7f07..0465c1e79d 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -19,8 +19,9 @@
#include "context_arm64.h"
#include "base/bit_utils.h"
+#include "base/bit_utils_iterator.h"
#include "quick/quick_method_frame_info.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace arm64 {
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 610cdee683..9bbcef307e 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -18,6 +18,7 @@
#include <string.h>
#include "arch/arm64/asm_support_arm64.h"
+#include "base/bit_utils.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index dc4e8f389e..0ead732cdd 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -26,7 +26,7 @@
#include "base/macros.h"
#include "globals.h"
#include "registers_arm64.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_throw_null_pointer_exception_from_signal();
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 18015b572e..ee91277417 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1643,7 +1643,9 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
-.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
+// If isInitialized=0 the compiler can only assume it's been at least resolved.
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
ENTRY \c_name
// Fast path rosalloc allocation.
// x0: type, xSELF(x19): Thread::Current
@@ -1659,6 +1661,11 @@ ENTRY \c_name
// local allocation. Also does the
// finalizable and initialization
// checks.
+ // When isInitialized == 0, then the class is potentially not yet initialized.
+ // If the class is not yet initialized, the object size will be very large to force the branch
+ // below to be taken.
+ //
+ // See InitializeClassVisitors in class-inl.h for more details.
bhs .Lslow_path\c_name
// Compute the rosalloc bracket index
// from the size. Since the size is
@@ -1682,23 +1689,12 @@ ENTRY \c_name
// header. This also overwrites the
// next pointer. The offsets are
// asserted to match.
+
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
POISON_HEAP_REF w0
str w0, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
- // Fence. This is "ish" not "ishst" so
- // that it also ensures ordering of
- // the object size load with respect
- // to later accesses to the class
- // object. Alternatively we could use
- // "ishst" if we use load-acquire for
- // the class status load.
- // Needs to be done before pushing on
- // allocation since Heap::VisitObjects
- // relies on seeing the class pointer.
- // b/28790624
- dmb ish
// Push the new object onto the thread
// local allocation stack and
// increment the thread local
@@ -1707,6 +1703,28 @@ ENTRY \c_name
str w3, [x1], #COMPRESSED_REFERENCE_SIZE // (Increment x1 as a side effect.)
str x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
// Decrement the size of the free list
+
+ // After this "STR" the object is published to the thread local allocation stack,
+ // and it will be observable from a runtime-internal (e.g. Heap::VisitObjects) point of view.
+ // It is not yet visible to the running (user) compiled code until after the return.
+ //
+ // To avoid the memory barrier prior to the "STR", a trick is employed, by differentiating
+ // the state of the allocation stack slot. It can be a pointer to one of:
+ // 0) Null entry, because the stack was bumped but the new pointer wasn't written yet.
+ // (The stack's initial state is all null pointers.)
+ // 1) A partially valid object, with an invalid class pointer to the next free rosalloc slot.
+ // 2) A fully valid object, with a valid class pointer pointing to a real class.
+ // Other states are not allowed.
+ //
+ // An object is invalid only temporarily; it will eventually become valid.
+ // The internal runtime code simply checks whether the object is null or only partially
+ // valid and, if so, ignores it.
+ //
+ // (Note: The actual check is done by seeing if a non-null object has a class pointer pointing
+ // to ClassClass, and that the ClassClass's class pointer is self-cyclic. A rosalloc free slot
+ // "next" pointer is not cyclic.)
+ //
+ // See also b/28790624 for a listing of CLs dealing with this race.
ldr w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
sub x1, x1, #1
// TODO: consider combining this store
@@ -1715,6 +1733,19 @@ ENTRY \c_name
str w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
mov x0, x3 // Set the return value and return.
+.if \isInitialized == 0
+ // This barrier is only necessary when the allocation also requires
+ // a class initialization check.
+ //
+ // If the class is already observably initialized, then new-instance allocations are protected
+ // from publishing by the compiler which inserts its own StoreStore barrier.
+ dmb ish
+ // Use a "dmb ish" fence here because if there are later loads of statics (e.g. class size),
+ // they should happen-after the implicit initialization check.
+ //
+ // TODO: Remove this dmb for class initialization checks (b/36692143) by introducing
+ // a new observably-initialized class state.
+.endif
ret
.Lslow_path\c_name:
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
@@ -1725,10 +1756,12 @@ ENTRY \c_name
END \c_name
.endm
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc, /* isInitialized */ 0
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc, /* isInitialized */ 1
-.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
+// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
+// If isInitialized=0 the compiler can only assume it's been at least resolved.
+.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel isInitialized
ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
ldr w7, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
@@ -1736,6 +1769,12 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
cmp x6, x5 // Check if it fits, overflow works
// since the tlab pos and end are 32
// bit values.
+
+ // When isInitialized == 0, then the class is potentially not yet initialized.
+ // If the class is not yet initialized, the object size will be very large to force the branch
+ // below to be taken.
+ //
+ // See InitializeClassVisitors in class-inl.h for more details.
bhi \slowPathLabel
str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
@@ -1747,21 +1786,30 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
// that the code after this allocation
// site will see the right values in
// the fields of the class.
- // Alternatively we could use "ishst"
- // if we use load-acquire for the
- // object size load.)
mov x0, x4
+.if \isInitialized == 0
+ // This barrier is only necessary when the allocation also requires
+ // a class initialization check.
+ //
+ // If the class is already observably initialized, then new-instance allocations are protected
+ // from publishing by the compiler which inserts its own StoreStore barrier.
dmb ish
+ // Use a "dmb ish" fence here because if there are later loads of statics (e.g. class size),
+ // they should happen-after the implicit initialization check.
+ //
+ // TODO: Remove this dmb for class initialization checks (b/36692143) by introducing
+ // a new observably-initialized class state.
+.endif
ret
.endm
// The common code for art_quick_alloc_object_*region_tlab
-.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint
+.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint, isInitialized
ENTRY \name
// Fast path region tlab allocation.
// x0: type, xSELF(x19): Thread::Current
// x1-x7: free.
- ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lslow_path\name
+ ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lslow_path\name, \isInitialized
.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
mov x1, xSELF // Pass Thread::Current.
@@ -1771,10 +1819,10 @@ ENTRY \name
END \name
.endm
-GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB
-GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB
-GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB
-GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, /* isInitialized */ 0
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, /* isInitialized */ 1
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB, /* isInitialized */ 0
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB, /* isInitialized */ 1
.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
and \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED64 // Apply alignment mask
@@ -1796,6 +1844,9 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
ldr \xTemp2, [xSELF, #THREAD_LOCAL_END_OFFSET]
sub \xTemp2, \xTemp2, \xTemp0
cmp \xTemp1, \xTemp2
+
+ // The array class is always initialized here. Unlike new-instance,
+ // this does not act as a double test.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
// Move old thread_local_pos to x0
@@ -1810,7 +1861,12 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
str \wClass, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
str \wCount, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length.
// Fence.
- dmb ishst
+// new-array is special. The class is loaded and immediately goes to the Initialized state
+// before it is published. Therefore the only fence needed is for the publication of the object.
+// See ClassLinker::CreateArrayClass() for more details.
+
+// For publication of the new array, we don't need a 'dmb ishst' here.
+// The compiler generates 'dmb ishst' for all new-array instructions.
ret
.endm
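The comments in the rosalloc and TLAB fast paths above describe three legal states for an allocation-stack slot (null, partially valid with a free-list "next" pointer in the class field, fully valid) and the class-pointer cycle check that lets runtime-internal heap walkers tolerate the unfenced publication. A rough C++ sketch of that check, using a made-up stand-in type rather than ART's mirror classes (no heap poisoning or read barriers are modelled):

struct HeapWord { HeapWord* klass; };  // stand-in for the object header's class field

// Slot states: (0) null, (1) partially valid - klass still holds a rosalloc free-list
// "next" pointer, (2) fully valid - klass points at a real class.
bool SlotHoldsValidObject(const HeapWord* slot) {
  if (slot == nullptr || slot->klass == nullptr) {
    return false;                       // state 0, or a cleared slot
  }
  const HeapWord* klass = slot->klass;
  // A real class's own class is ClassClass, whose class pointer is self-cyclic;
  // a free-list "next" pointer does not close such a cycle, so state 1 is rejected.
  return klass->klass != nullptr && klass->klass->klass == klass->klass;
}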
@@ -2168,15 +2224,19 @@ ENTRY art_quick_instrumentation_entry
mov x20, x0 // Preserve method reference in a callee-save.
mov x2, xSELF
- mov x3, xLR
- bl artInstrumentationMethodEntryFromCode // (Method*, Object*, Thread*, LR)
+ mov x3, sp // Pass SP
+ bl artInstrumentationMethodEntryFromCode // (Method*, Object*, Thread*, SP)
mov xIP0, x0 // x0 = result of call.
mov x0, x20 // Reload method reference.
RESTORE_SAVE_REFS_AND_ARGS_FRAME // Note: will restore xSELF
+ cbz xIP0, 1f // Deliver the pending exception if method is null.
adr xLR, art_quick_instrumentation_exit
br xIP0 // Tail-call method with lr set to art_quick_instrumentation_exit.
+
+1:
+ DELIVER_PENDING_EXCEPTION
END art_quick_instrumentation_entry
.extern artInstrumentationMethodExitFromCode
@@ -2185,30 +2245,28 @@ ENTRY art_quick_instrumentation_exit
SETUP_SAVE_REFS_ONLY_FRAME
- // We need to save x0 and d0. We could use a callee-save from SETUP_REF_ONLY, but then
- // we would need to fully restore it. As there are a lot of callee-save registers, it seems
- // easier to have an extra small stack area.
-
str x0, [sp, #-16]! // Save integer result.
.cfi_adjust_cfa_offset 16
- str d0, [sp, #8] // Save floating-point result.
+ str d0, [sp, #8] // Save floating-point result.
+ add x3, sp, #8 // Pass floating-point result pointer.
+ mov x2, sp // Pass integer result pointer.
add x1, sp, #16 // Pass SP.
- mov x2, x0 // Pass integer result.
- fmov x3, d0 // Pass floating-point result.
mov x0, xSELF // Pass Thread.
- bl artInstrumentationMethodExitFromCode // (Thread*, SP, gpr_res, fpr_res)
+ bl artInstrumentationMethodExitFromCode // (Thread*, SP, gpr_res*, fpr_res*)
mov xIP0, x0 // Return address from instrumentation call.
mov xLR, x1 // r1 is holding link register if we're to bounce to deoptimize
ldr d0, [sp, #8] // Restore floating-point result.
ldr x0, [sp], #16 // Restore integer result, and drop stack area.
- .cfi_adjust_cfa_offset 16
-
- POP_SAVE_REFS_ONLY_FRAME
+ .cfi_adjust_cfa_offset -16
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ cbz xIP0, 1f // Handle error
br xIP0 // Tail-call out.
+1:
+ DELIVER_PENDING_EXCEPTION
END art_quick_instrumentation_exit
/*
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
index 32d9d08739..c231d4d3d4 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -17,10 +17,13 @@
#ifndef ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
#define ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
+#include "arch/instruction_set.h"
#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
+#include "globals.h"
#include "quick/quick_method_frame_info.h"
#include "registers_arm64.h"
-#include "runtime.h" // for Runtime::CalleeSaveType.
namespace art {
namespace arm64 {
@@ -76,44 +79,44 @@ static constexpr uint32_t kArm64CalleeSaveFpEverythingSpills =
(1 << art::arm64::D27) | (1 << art::arm64::D28) | (1 << art::arm64::D29) |
(1 << art::arm64::D30) | (1 << art::arm64::D31);
-constexpr uint32_t Arm64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+constexpr uint32_t Arm64CalleeSaveCoreSpills(CalleeSaveType type) {
return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAllCalleeSaves ? kArm64CalleeSaveAllSpills : 0) |
- (type == Runtime::kSaveEverything ? kArm64CalleeSaveEverythingSpills : 0);
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveEverythingSpills : 0);
}
-constexpr uint32_t Arm64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
+constexpr uint32_t Arm64CalleeSaveFpSpills(CalleeSaveType type) {
return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills : 0) |
- (type == Runtime::kSaveAllCalleeSaves ? kArm64CalleeSaveFpAllSpills : 0) |
- (type == Runtime::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveFpAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
}
-constexpr uint32_t Arm64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+constexpr uint32_t Arm64CalleeSaveFrameSize(CalleeSaveType type) {
return RoundUp((POPCOUNT(Arm64CalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(Arm64CalleeSaveFpSpills(type)) /* fprs */ +
1 /* Method* */) * static_cast<size_t>(kArm64PointerSize), kStackAlignment);
}
-constexpr QuickMethodFrameInfo Arm64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+constexpr QuickMethodFrameInfo Arm64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
return QuickMethodFrameInfo(Arm64CalleeSaveFrameSize(type),
Arm64CalleeSaveCoreSpills(type),
Arm64CalleeSaveFpSpills(type));
}
-constexpr size_t Arm64CalleeSaveFpr1Offset(Runtime::CalleeSaveType type) {
+constexpr size_t Arm64CalleeSaveFpr1Offset(CalleeSaveType type) {
return Arm64CalleeSaveFrameSize(type) -
(POPCOUNT(Arm64CalleeSaveCoreSpills(type)) +
POPCOUNT(Arm64CalleeSaveFpSpills(type))) * static_cast<size_t>(kArm64PointerSize);
}
-constexpr size_t Arm64CalleeSaveGpr1Offset(Runtime::CalleeSaveType type) {
+constexpr size_t Arm64CalleeSaveGpr1Offset(CalleeSaveType type) {
return Arm64CalleeSaveFrameSize(type) -
POPCOUNT(Arm64CalleeSaveCoreSpills(type)) * static_cast<size_t>(kArm64PointerSize);
}
-constexpr size_t Arm64CalleeSaveLrOffset(Runtime::CalleeSaveType type) {
+constexpr size_t Arm64CalleeSaveLrOffset(CalleeSaveType type) {
return Arm64CalleeSaveFrameSize(type) -
POPCOUNT(Arm64CalleeSaveCoreSpills(type) & (-(1 << LR))) *
static_cast<size_t>(kArm64PointerSize);
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 98ed5e60e6..ca1de0ae2a 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -17,6 +17,7 @@
#include "context_mips.h"
#include "base/bit_utils.h"
+#include "base/bit_utils_iterator.h"
#include "quick/quick_method_frame_info.h"
namespace art {
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index 7072a8a613..52a3df55d6 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -18,13 +18,14 @@
#include <sys/ucontext.h>
#include "art_method.h"
+#include "base/callee_save_type.h"
#include "base/hex_dump.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"
#include "quick_method_frame_info_mips.h"
#include "registers_mips.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_throw_null_pointer_exception_from_signal();
@@ -80,7 +81,7 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void*
// Decrement $sp by the frame size of the kSaveEverything method and store
// the fault address in the padding right after the ArtMethod*.
- sc->sc_regs[mips::SP] -= mips::MipsCalleeSaveFrameSize(Runtime::kSaveEverything);
+ sc->sc_regs[mips::SP] -= mips::MipsCalleeSaveFrameSize(CalleeSaveType::kSaveEverything);
uintptr_t* padding = reinterpret_cast<uintptr_t*>(sc->sc_regs[mips::SP]) + /* ArtMethod* */ 1;
*padding = reinterpret_cast<uintptr_t>(info->si_addr);
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
index 3c5afc28a3..6540b44518 100644
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -47,7 +47,7 @@ static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kR2;
static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kBase;
#endif
-static void GetFlagsFromCppDefined(bool* mips_isa_gte2, bool* r6, bool* fpu_32bit) {
+static void GetFlagsFromCppDefined(bool* mips_isa_gte2, bool* r6, bool* fpu_32bit, bool* msa) {
// Override defaults based on compiler flags.
if (kRuntimeMipsLevel >= MipsLevel::kR2) {
*mips_isa_gte2 = true;
@@ -57,8 +57,10 @@ static void GetFlagsFromCppDefined(bool* mips_isa_gte2, bool* r6, bool* fpu_32bi
if (kRuntimeMipsLevel >= MipsLevel::kR5) {
*fpu_32bit = false;
+ *msa = true;
} else {
*fpu_32bit = true;
+ *msa = false;
}
if (kRuntimeMipsLevel >= MipsLevel::kR6) {
@@ -76,7 +78,8 @@ MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromVariant(
bool fpu_32bit;
bool mips_isa_gte2;
bool r6;
- GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit);
+ bool msa;
+ GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit, &msa);
// Override defaults based on variant string.
// Only care if it is R1, R2, R5 or R6 and we assume all CPUs will have a FP unit.
@@ -87,6 +90,7 @@ MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromVariant(
r6 = (variant[kPrefixLength] >= '6');
fpu_32bit = (variant[kPrefixLength] < '5');
mips_isa_gte2 = (variant[kPrefixLength] >= '2');
+ msa = (variant[kPrefixLength] >= '5');
} else if (variant == "default") {
// Default variant has FPU, is gte2. This is the traditional setting.
//
@@ -100,32 +104,57 @@ MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromVariant(
LOG(WARNING) << "Unexpected CPU variant for Mips32 using defaults: " << variant;
}
- return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6));
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
}
MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool fpu_32bit = (bitmap & kFpu32Bitfield) != 0;
bool mips_isa_gte2 = (bitmap & kIsaRevGte2Bitfield) != 0;
bool r6 = (bitmap & kR6) != 0;
- return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6));
+ bool msa = (bitmap & kMsaBitfield) != 0;
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
}
MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCppDefines() {
bool fpu_32bit;
bool mips_isa_gte2;
bool r6;
- GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit);
+ bool msa;
+ GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit, &msa);
- return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6));
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
}
MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCpuInfo() {
bool fpu_32bit;
bool mips_isa_gte2;
bool r6;
- GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit);
+ bool msa;
+ GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit, &msa);
+
+ msa = false;
+
+ std::ifstream in("/proc/cpuinfo");
+ if (!in.fail()) {
+ while (!in.eof()) {
+ std::string line;
+ std::getline(in, line);
+ if (!in.eof()) {
+ LOG(INFO) << "cpuinfo line: " << line;
+ if (line.find("ASEs") != std::string::npos) {
+ LOG(INFO) << "found Application Specific Extensions";
+ if (line.find("msa") != std::string::npos) {
+ msa = true;
+ }
+ }
+ }
+ }
+ in.close();
+ } else {
+ LOG(ERROR) << "Failed to open /proc/cpuinfo";
+ }
- return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6));
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
}
MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromHwcap() {
@@ -145,13 +174,15 @@ bool MipsInstructionSetFeatures::Equals(const InstructionSetFeatures* other) con
const MipsInstructionSetFeatures* other_as_mips = other->AsMipsInstructionSetFeatures();
return (fpu_32bit_ == other_as_mips->fpu_32bit_) &&
(mips_isa_gte2_ == other_as_mips->mips_isa_gte2_) &&
- (r6_ == other_as_mips->r6_);
+ (r6_ == other_as_mips->r6_) &&
+ (msa_ == other_as_mips->msa_);
}
uint32_t MipsInstructionSetFeatures::AsBitmap() const {
return (fpu_32bit_ ? kFpu32Bitfield : 0) |
(mips_isa_gte2_ ? kIsaRevGte2Bitfield : 0) |
- (r6_ ? kR6 : 0);
+ (r6_ ? kR6 : 0) |
+ (msa_ ? kMsaBitfield : 0);
}
std::string MipsInstructionSetFeatures::GetFeatureString() const {
@@ -169,6 +200,11 @@ std::string MipsInstructionSetFeatures::GetFeatureString() const {
if (r6_) {
result += ",r6";
} // Suppress non-r6.
+ if (msa_) {
+ result += ",msa";
+ } else {
+ result += ",-msa";
+ }
return result;
}
@@ -178,6 +214,7 @@ MipsInstructionSetFeatures::AddFeaturesFromSplitString(
bool fpu_32bit = fpu_32bit_;
bool mips_isa_gte2 = mips_isa_gte2_;
bool r6 = r6_;
+ bool msa = msa_;
for (auto i = features.begin(); i != features.end(); i++) {
std::string feature = android::base::Trim(*i);
if (feature == "fpu32") {
@@ -192,13 +229,17 @@ MipsInstructionSetFeatures::AddFeaturesFromSplitString(
r6 = true;
} else if (feature == "-r6") {
r6 = false;
+ } else if (feature == "msa") {
+ msa = true;
+ } else if (feature == "-msa") {
+ msa = false;
} else {
*error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
return nullptr;
}
}
return std::unique_ptr<const InstructionSetFeatures>(
- new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6));
+ new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
}
} // namespace art
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 1aec99fa73..1cb852e262 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -75,6 +75,11 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
return r6_;
}
+ // Does it have MSA (MIPS SIMD Architecture) support?
+ bool HasMsa() const {
+ return msa_;
+ }
+
virtual ~MipsInstructionSetFeatures() {}
protected:
@@ -84,11 +89,12 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
std::string* error_msg) const OVERRIDE;
private:
- MipsInstructionSetFeatures(bool fpu_32bit, bool mips_isa_gte2, bool r6)
+ MipsInstructionSetFeatures(bool fpu_32bit, bool mips_isa_gte2, bool r6, bool msa)
: InstructionSetFeatures(),
fpu_32bit_(fpu_32bit),
mips_isa_gte2_(mips_isa_gte2),
- r6_(r6) {
+ r6_(r6),
+ msa_(msa) {
// Sanity checks.
if (r6) {
CHECK(mips_isa_gte2);
@@ -104,11 +110,13 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
kFpu32Bitfield = 1 << 0,
kIsaRevGte2Bitfield = 1 << 1,
kR6 = 1 << 2,
+ kMsaBitfield = 1 << 3,
};
const bool fpu_32bit_;
const bool mips_isa_gte2_;
const bool r6_;
+ const bool msa_;
DISALLOW_COPY_AND_ASSIGN(MipsInstructionSetFeatures);
};
diff --git a/runtime/arch/mips/instruction_set_features_mips_test.cc b/runtime/arch/mips/instruction_set_features_mips_test.cc
index 6613b84365..54fd2c94c4 100644
--- a/runtime/arch/mips/instruction_set_features_mips_test.cc
+++ b/runtime/arch/mips/instruction_set_features_mips_test.cc
@@ -20,15 +20,109 @@
namespace art {
-TEST(MipsInstructionSetFeaturesTest, MipsFeatures) {
+TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromDefaultVariant) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> mips_features(
InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
ASSERT_TRUE(mips_features.get() != nullptr) << error_msg;
EXPECT_EQ(mips_features->GetInstructionSet(), kMips);
EXPECT_TRUE(mips_features->Equals(mips_features.get()));
- EXPECT_STREQ("fpu32,mips2", mips_features->GetFeatureString().c_str());
+ EXPECT_STREQ("fpu32,mips2,-msa", mips_features->GetFeatureString().c_str());
EXPECT_EQ(mips_features->AsBitmap(), 3U);
}
+TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR1Variant) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
+ InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+ ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(mips32r1_features->GetInstructionSet(), kMips);
+ EXPECT_TRUE(mips32r1_features->Equals(mips32r1_features.get()));
+ EXPECT_STREQ("fpu32,-mips2,-msa", mips32r1_features->GetFeatureString().c_str());
+ EXPECT_EQ(mips32r1_features->AsBitmap(), 1U);
+
+ std::unique_ptr<const InstructionSetFeatures> mips_default_features(
+ InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+ ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
+ EXPECT_FALSE(mips32r1_features->Equals(mips_default_features.get()));
+}
+
+TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR2Variant) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
+ InstructionSetFeatures::FromVariant(kMips, "mips32r2", &error_msg));
+ ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(mips32r2_features->GetInstructionSet(), kMips);
+ EXPECT_TRUE(mips32r2_features->Equals(mips32r2_features.get()));
+ EXPECT_STREQ("fpu32,mips2,-msa", mips32r2_features->GetFeatureString().c_str());
+ EXPECT_EQ(mips32r2_features->AsBitmap(), 3U);
+
+ std::unique_ptr<const InstructionSetFeatures> mips_default_features(
+ InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+ ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
+ EXPECT_TRUE(mips32r2_features->Equals(mips_default_features.get()));
+
+ std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
+ InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+ ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
+ EXPECT_FALSE(mips32r2_features->Equals(mips32r1_features.get()));
+}
+
+TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR5Variant) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> mips32r5_features(
+ InstructionSetFeatures::FromVariant(kMips, "mips32r5", &error_msg));
+ ASSERT_TRUE(mips32r5_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(mips32r5_features->GetInstructionSet(), kMips);
+ EXPECT_TRUE(mips32r5_features->Equals(mips32r5_features.get()));
+ EXPECT_STREQ("-fpu32,mips2,msa", mips32r5_features->GetFeatureString().c_str());
+ EXPECT_EQ(mips32r5_features->AsBitmap(), 10U);
+
+ std::unique_ptr<const InstructionSetFeatures> mips_default_features(
+ InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+ ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
+ EXPECT_FALSE(mips32r5_features->Equals(mips_default_features.get()));
+
+ std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
+ InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+ ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
+ EXPECT_FALSE(mips32r5_features->Equals(mips32r1_features.get()));
+
+ std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
+ InstructionSetFeatures::FromVariant(kMips, "mips32r2", &error_msg));
+ ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
+ EXPECT_FALSE(mips32r5_features->Equals(mips32r2_features.get()));
+}
+
+TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR6Variant) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> mips32r6_features(
+ InstructionSetFeatures::FromVariant(kMips, "mips32r6", &error_msg));
+ ASSERT_TRUE(mips32r6_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(mips32r6_features->GetInstructionSet(), kMips);
+ EXPECT_TRUE(mips32r6_features->Equals(mips32r6_features.get()));
+ EXPECT_STREQ("-fpu32,mips2,r6,msa", mips32r6_features->GetFeatureString().c_str());
+ EXPECT_EQ(mips32r6_features->AsBitmap(), 14U);
+
+ std::unique_ptr<const InstructionSetFeatures> mips_default_features(
+ InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+ ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
+ EXPECT_FALSE(mips32r6_features->Equals(mips_default_features.get()));
+
+ std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
+ InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+ ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
+ EXPECT_FALSE(mips32r6_features->Equals(mips32r1_features.get()));
+
+ std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
+ InstructionSetFeatures::FromVariant(kMips, "mips32r2", &error_msg));
+ ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
+ EXPECT_FALSE(mips32r6_features->Equals(mips32r2_features.get()));
+
+ std::unique_ptr<const InstructionSetFeatures> mips32r5_features(
+ InstructionSetFeatures::FromVariant(kMips, "mips32r5", &error_msg));
+ ASSERT_TRUE(mips32r5_features.get() != nullptr) << error_msg;
+ EXPECT_FALSE(mips32r6_features->Equals(mips32r5_features.get()));
+}
+
} // namespace art
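The bitmap expectations in these tests follow directly from the bitfield constants added in instruction_set_features_mips.h above (kFpu32Bitfield = 1 << 0, kIsaRevGte2Bitfield = 1 << 1, kR6 = 1 << 2, kMsaBitfield = 1 << 3). A small compile-time check of the same arithmetic; the helper name below is not from the source:

#include <cstdint>

constexpr uint32_t MipsBitmap(bool fpu_32bit, bool isa_gte2, bool r6, bool msa) {
  return (fpu_32bit ? 1u << 0 : 0u) | (isa_gte2 ? 1u << 1 : 0u) |
         (r6 ? 1u << 2 : 0u) | (msa ? 1u << 3 : 0u);
}

static_assert(MipsBitmap(true,  true,  false, false) == 3u,  "default/mips32r2");
static_assert(MipsBitmap(true,  false, false, false) == 1u,  "mips32r1");
static_assert(MipsBitmap(false, true,  false, true)  == 10u, "mips32r5");
static_assert(MipsBitmap(false, true,  true,  true)  == 14u, "mips32r6");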
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/quick_method_frame_info_mips.h
index 6f16352d91..01879a5cea 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/quick_method_frame_info_mips.h
@@ -17,10 +17,12 @@
#ifndef ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
#define ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
+#include "arch/instruction_set.h"
#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
#include "quick/quick_method_frame_info.h"
#include "registers_mips.h"
-#include "runtime.h" // for Runtime::CalleeSaveType.
namespace art {
namespace mips {
@@ -62,27 +64,27 @@ static constexpr uint32_t kMipsCalleeSaveFpEverythingSpills =
(1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
(1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1u << art::mips::F31);
-constexpr uint32_t MipsCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+constexpr uint32_t MipsCalleeSaveCoreSpills(CalleeSaveType type) {
return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAllCalleeSaves ? kMipsCalleeSaveAllSpills : 0) |
- (type == Runtime::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
}
-constexpr uint32_t MipsCalleeSaveFPSpills(Runtime::CalleeSaveType type) {
+constexpr uint32_t MipsCalleeSaveFPSpills(CalleeSaveType type) {
return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
- (type == Runtime::kSaveAllCalleeSaves ? kMipsCalleeSaveAllFPSpills : 0) |
- (type == Runtime::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllFPSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
}
-constexpr uint32_t MipsCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+constexpr uint32_t MipsCalleeSaveFrameSize(CalleeSaveType type) {
return RoundUp((POPCOUNT(MipsCalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(MipsCalleeSaveFPSpills(type)) /* fprs */ +
1 /* Method* */) * static_cast<size_t>(kMipsPointerSize), kStackAlignment);
}
-constexpr QuickMethodFrameInfo MipsCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+constexpr QuickMethodFrameInfo MipsCalleeSaveMethodFrameInfo(CalleeSaveType type) {
return QuickMethodFrameInfo(MipsCalleeSaveFrameSize(type),
MipsCalleeSaveCoreSpills(type),
MipsCalleeSaveFPSpills(type));
diff --git a/runtime/arch/mips/registers_mips.cc b/runtime/arch/mips/registers_mips.cc
index 5d31f2f910..92c2746d2a 100644
--- a/runtime/arch/mips/registers_mips.cc
+++ b/runtime/arch/mips/registers_mips.cc
@@ -45,5 +45,14 @@ std::ostream& operator<<(std::ostream& os, const FRegister& rhs) {
return os;
}
+std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs) {
+ if (rhs >= W0 && rhs < kNumberOfVectorRegisters) {
+ os << "w" << static_cast<int>(rhs);
+ } else {
+ os << "VectorRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
} // namespace mips
} // namespace art
diff --git a/runtime/arch/mips/registers_mips.h b/runtime/arch/mips/registers_mips.h
index 555f3f0473..57af150b33 100644
--- a/runtime/arch/mips/registers_mips.h
+++ b/runtime/arch/mips/registers_mips.h
@@ -106,6 +106,45 @@ enum FRegister {
};
std::ostream& operator<<(std::ostream& os, const FRegister& rhs);
+// Values for vector registers.
+enum VectorRegister {
+ W0 = 0,
+ W1 = 1,
+ W2 = 2,
+ W3 = 3,
+ W4 = 4,
+ W5 = 5,
+ W6 = 6,
+ W7 = 7,
+ W8 = 8,
+ W9 = 9,
+ W10 = 10,
+ W11 = 11,
+ W12 = 12,
+ W13 = 13,
+ W14 = 14,
+ W15 = 15,
+ W16 = 16,
+ W17 = 17,
+ W18 = 18,
+ W19 = 19,
+ W20 = 20,
+ W21 = 21,
+ W22 = 22,
+ W23 = 23,
+ W24 = 24,
+ W25 = 25,
+ W26 = 26,
+ W27 = 27,
+ W28 = 28,
+ W29 = 29,
+ W30 = 30,
+ W31 = 31,
+ kNumberOfVectorRegisters = 32,
+ kNoVectorRegister = -1,
+};
+std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs);
+
} // namespace mips
} // namespace art
diff --git a/runtime/arch/mips64/context_mips64.cc b/runtime/arch/mips64/context_mips64.cc
index bd1ac3b0a7..b14908fa04 100644
--- a/runtime/arch/mips64/context_mips64.cc
+++ b/runtime/arch/mips64/context_mips64.cc
@@ -17,6 +17,7 @@
#include "context_mips64.h"
#include "base/bit_utils.h"
+#include "base/bit_utils_iterator.h"
#include "quick/quick_method_frame_info.h"
namespace art {
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 763d93eb47..007f7b3915 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <math.h>
#include <string.h>
#include "atomic.h"
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index f9a92c834e..9d77ebcd22 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -19,13 +19,14 @@
#include <sys/ucontext.h>
#include "art_method.h"
+#include "base/callee_save_type.h"
#include "base/hex_dump.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"
#include "quick_method_frame_info_mips64.h"
#include "registers_mips64.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_throw_null_pointer_exception_from_signal();
@@ -82,7 +83,7 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void*
// Decrement $sp by the frame size of the kSaveEverything method and store
// the fault address in the padding right after the ArtMethod*.
- sc->sc_regs[mips64::SP] -= mips64::Mips64CalleeSaveFrameSize(Runtime::kSaveEverything);
+ sc->sc_regs[mips64::SP] -= mips64::Mips64CalleeSaveFrameSize(CalleeSaveType::kSaveEverything);
uintptr_t* padding = reinterpret_cast<uintptr_t*>(sc->sc_regs[mips64::SP]) + /* ArtMethod* */ 1;
*padding = reinterpret_cast<uintptr_t>(info->si_addr);
diff --git a/runtime/arch/mips64/quick_method_frame_info_mips64.h b/runtime/arch/mips64/quick_method_frame_info_mips64.h
index d774473289..a55ab0e196 100644
--- a/runtime/arch/mips64/quick_method_frame_info_mips64.h
+++ b/runtime/arch/mips64/quick_method_frame_info_mips64.h
@@ -17,10 +17,12 @@
#ifndef ART_RUNTIME_ARCH_MIPS64_QUICK_METHOD_FRAME_INFO_MIPS64_H_
#define ART_RUNTIME_ARCH_MIPS64_QUICK_METHOD_FRAME_INFO_MIPS64_H_
+#include "arch/instruction_set.h"
#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
#include "quick/quick_method_frame_info.h"
#include "registers_mips64.h"
-#include "runtime.h" // for Runtime::CalleeSaveType.
namespace art {
namespace mips64 {
@@ -69,27 +71,27 @@ static constexpr uint32_t kMips64CalleeSaveFpEverythingSpills =
(1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
(1 << art::mips64::F30) | (1 << art::mips64::F31);
-constexpr uint32_t Mips64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+constexpr uint32_t Mips64CalleeSaveCoreSpills(CalleeSaveType type) {
return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAllCalleeSaves ? kMips64CalleeSaveAllSpills : 0) |
- (type == Runtime::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
}
-constexpr uint32_t Mips64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
+constexpr uint32_t Mips64CalleeSaveFpSpills(CalleeSaveType type) {
return kMips64CalleeSaveFpRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
- (type == Runtime::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
- (type == Runtime::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
}
-constexpr uint32_t Mips64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+constexpr uint32_t Mips64CalleeSaveFrameSize(CalleeSaveType type) {
return RoundUp((POPCOUNT(Mips64CalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(Mips64CalleeSaveFpSpills(type)) /* fprs */ +
+ 1 /* Method* */) * static_cast<size_t>(kMips64PointerSize), kStackAlignment);
}
-constexpr QuickMethodFrameInfo Mips64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+constexpr QuickMethodFrameInfo Mips64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
return QuickMethodFrameInfo(Mips64CalleeSaveFrameSize(type),
Mips64CalleeSaveCoreSpills(type),
Mips64CalleeSaveFpSpills(type));
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 207bf9d365..bd51809c22 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/callee_save_type.h"
#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
@@ -43,8 +44,8 @@ class StubTest : public CommonRuntimeTest {
// Create callee-save methods
ScopedObjectAccess soa(Thread::Current());
runtime_->SetInstructionSet(kRuntimeISA);
- for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
+ for (uint32_t i = 0; i < static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
+ CalleeSaveType type = CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index cb3dfec5f5..5c3171299c 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -17,6 +17,7 @@
#include "context_x86.h"
#include "base/bit_utils.h"
+#include "base/bit_utils_iterator.h"
#include "base/memory_tool.h"
#include "quick/quick_method_frame_info.h"
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 7d8abb8cc5..798c500f18 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -26,7 +26,7 @@
#include "base/macros.h"
#include "base/safe_copy.h"
#include "globals.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#if defined(__APPLE__)
#define ucontext __darwin_ucontext
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index 578812297a..cc0bdf2a29 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -33,23 +33,28 @@ using android::base::StringPrintf;
static constexpr const char* x86_known_variants[] = {
"atom",
+ "sandybridge",
"silvermont",
};
static constexpr const char* x86_variants_with_ssse3[] = {
"atom",
+ "sandybridge",
"silvermont",
};
static constexpr const char* x86_variants_with_sse4_1[] = {
+ "sandybridge",
"silvermont",
};
static constexpr const char* x86_variants_with_sse4_2[] = {
+ "sandybridge",
"silvermont",
};
static constexpr const char* x86_variants_with_popcnt[] = {
+ "sandybridge",
"silvermont",
};
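Adding "sandybridge" to each list above is enough because the capability flags are derived from a membership test of the variant string against the corresponding array. A hedged sketch of that lookup (illustrative only; the helper name and signature here are assumptions, not this file's real code):

#include <cstddef>
#include <cstring>

static constexpr const char* kVariantsWithSse4_2[] = { "sandybridge", "silvermont" };

static bool VariantIn(const char* const* list, size_t count, const char* variant) {
  for (size_t i = 0; i < count; ++i) {
    if (std::strcmp(list[i], variant) == 0) {
      return true;
    }
  }
  return false;
}

// VariantIn(kVariantsWithSse4_2, 2, "sandybridge") is true, so a sandybridge variant is
// built with SSE4.2 support, which is what the new test below expects.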
diff --git a/runtime/arch/x86/instruction_set_features_x86_test.cc b/runtime/arch/x86/instruction_set_features_x86_test.cc
index 7e6ad3ecbf..c67b4ddfe0 100644
--- a/runtime/arch/x86/instruction_set_features_x86_test.cc
+++ b/runtime/arch/x86/instruction_set_features_x86_test.cc
@@ -69,6 +69,43 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromAtomVariant) {
EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
}
+TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSandybridgeVariant) {
+ // Build features for a 32-bit x86 sandybridge processor.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> x86_features(
+ InstructionSetFeatures::FromVariant(kX86, "sandybridge", &error_msg));
+ ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+ EXPECT_TRUE(x86_features->Equals(x86_features.get()));
+ EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt",
+ x86_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_features->AsBitmap(), 39U);
+
+ // Build features for a 32-bit x86 default processor.
+ std::unique_ptr<const InstructionSetFeatures> x86_default_features(
+ InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+ ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
+ EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
+ EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
+ x86_default_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_default_features->AsBitmap(), 0U);
+
+ // Build features for a 64-bit x86-64 sandybridge processor.
+ std::unique_ptr<const InstructionSetFeatures> x86_64_features(
+ InstructionSetFeatures::FromVariant(kX86_64, "sandybridge", &error_msg));
+ ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+ EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
+ EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt",
+ x86_64_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_64_features->AsBitmap(), 39U);
+
+ EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
+ EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
+ EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
+}
+
TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSilvermontVariant) {
// Build features for a 32-bit x86 silvermont processor.
std::string error_msg;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 2222f5cc3b..031b36bd8b 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1948,17 +1948,23 @@ ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMeth
DEFINE_FUNCTION art_quick_instrumentation_entry
SETUP_SAVE_REFS_AND_ARGS_FRAME ebx, edx
PUSH eax // Save eax which will be clobbered by the callee-save method.
- subl LITERAL(12), %esp // Align stack.
- CFI_ADJUST_CFA_OFFSET(12)
- pushl FRAME_SIZE_SAVE_REFS_AND_ARGS-4+16(%esp) // Pass LR.
- CFI_ADJUST_CFA_OFFSET(4)
+ subl LITERAL(16), %esp // Align stack (12 bytes) and reserve space for the SP argument
+ CFI_ADJUST_CFA_OFFSET(16) // (4 bytes). We lack the scratch registers to calculate the SP
+ // right now, so we will just fill it in later.
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // Pass receiver.
PUSH eax // Pass Method*.
- call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, LR)
+ leal 32(%esp), %eax // Put original SP into eax
+ movl %eax, 12(%esp) // set SP
+ call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP)
+
addl LITERAL(28), %esp // Pop arguments up to saved Method*.
CFI_ADJUST_CFA_OFFSET(-28)
+
+ testl %eax, %eax
+ jz 1f // Test for null return (indicating exception) and handle it.
+
movl 60(%esp), %edi // Restore edi.
movl %eax, 60(%esp) // Place code* over edi, just under return pc.
movl SYMBOL(art_quick_instrumentation_exit)@GOT(%ebx), %ebx
@@ -1980,9 +1986,15 @@ DEFINE_FUNCTION art_quick_instrumentation_entry
addl LITERAL(60), %esp // Wind stack back up to code*.
CFI_ADJUST_CFA_OFFSET(-60)
ret // Call method (and pop).
+1:
+ // Make caller handle exception
+ addl LITERAL(4), %esp
+ CFI_ADJUST_CFA_OFFSET(-4)
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_instrumentation_entry
-DEFINE_FUNCTION art_quick_instrumentation_exit
+DEFINE_FUNCTION_CUSTOM_CFA art_quick_instrumentation_exit, 0
pushl LITERAL(0) // Push a fake return PC as there will be none on the stack.
CFI_ADJUST_CFA_OFFSET(4)
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx
@@ -1992,18 +2004,19 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
movq %xmm0, (%esp)
PUSH edx // Save gpr return value.
PUSH eax
- subl LITERAL(16), %esp // Align stack
- CFI_ADJUST_CFA_OFFSET(16)
- movq %xmm0, (%esp) // Pass float return value.
- PUSH edx // Pass gpr return value.
- PUSH eax
+ leal 8(%esp), %eax // Get pointer to fpr_result
+ movl %esp, %edx // Get pointer to gpr_result
+ PUSH eax // Pass fpr_result
+ PUSH edx // Pass gpr_result
PUSH ecx // Pass SP.
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current.
CFI_ADJUST_CFA_OFFSET(4)
- call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result, fpr_result)
+ call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result*, fpr_result*)
+ testl %eax, %eax // Check if we returned error.
+ jz 1f
mov %eax, %ecx // Move returned link register.
- addl LITERAL(32), %esp // Pop arguments.
- CFI_ADJUST_CFA_OFFSET(-32)
+ addl LITERAL(16), %esp // Pop arguments.
+ CFI_ADJUST_CFA_OFFSET(-16)
movl %edx, %ebx // Move returned link register for deopt
// (ebx is pretending to be our LR).
POP eax // Restore gpr return value.
@@ -2015,6 +2028,11 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
addl LITERAL(4), %esp // Remove fake return pc.
CFI_ADJUST_CFA_OFFSET(-4)
jmp *%ecx // Return.
+1:
+ addl LITERAL(32), %esp
+ CFI_ADJUST_CFA_OFFSET(-32)
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_instrumentation_exit
/*
diff --git a/runtime/arch/x86/quick_method_frame_info_x86.h b/runtime/arch/x86/quick_method_frame_info_x86.h
index 9fcde35cec..8342c9fe03 100644
--- a/runtime/arch/x86/quick_method_frame_info_x86.h
+++ b/runtime/arch/x86/quick_method_frame_info_x86.h
@@ -17,10 +17,12 @@
#ifndef ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
#define ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
+#include "arch/instruction_set.h"
#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
#include "quick/quick_method_frame_info.h"
#include "registers_x86.h"
-#include "runtime.h" // for Runtime::CalleeSaveType.
namespace art {
namespace x86 {
@@ -54,24 +56,24 @@ static constexpr uint32_t kX86CalleeSaveFpEverythingSpills =
(1 << art::x86::XMM4) | (1 << art::x86::XMM5) |
(1 << art::x86::XMM6) | (1 << art::x86::XMM7);
-constexpr uint32_t X86CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+constexpr uint32_t X86CalleeSaveCoreSpills(CalleeSaveType type) {
return kX86CalleeSaveAlwaysSpills | kX86CalleeSaveRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveEverything ? kX86CalleeSaveEverythingSpills : 0);
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveEverythingSpills : 0);
}
-constexpr uint32_t X86CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
- return (type == Runtime::kSaveRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
- (type == Runtime::kSaveEverything ? kX86CalleeSaveFpEverythingSpills : 0);
+constexpr uint32_t X86CalleeSaveFpSpills(CalleeSaveType type) {
+ return (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveFpEverythingSpills : 0);
}
-constexpr uint32_t X86CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+constexpr uint32_t X86CalleeSaveFrameSize(CalleeSaveType type) {
return RoundUp((POPCOUNT(X86CalleeSaveCoreSpills(type)) /* gprs */ +
2 * POPCOUNT(X86CalleeSaveFpSpills(type)) /* fprs */ +
1 /* Method* */) * static_cast<size_t>(kX86PointerSize), kStackAlignment);
}
-constexpr QuickMethodFrameInfo X86CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+constexpr QuickMethodFrameInfo X86CalleeSaveMethodFrameInfo(CalleeSaveType type) {
return QuickMethodFrameInfo(X86CalleeSaveFrameSize(type),
X86CalleeSaveCoreSpills(type),
X86CalleeSaveFpSpills(type));
diff --git a/runtime/arch/x86/thread_x86.cc b/runtime/arch/x86/thread_x86.cc
index 241650eaf4..cc8f1fa00e 100644
--- a/runtime/arch/x86/thread_x86.cc
+++ b/runtime/arch/x86/thread_x86.cc
@@ -22,7 +22,7 @@
#include "asm_support_x86.h"
#include "base/enums.h"
#include "base/macros.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#if defined(__APPLE__)
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 7c49e9c2b2..a4db223b1a 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -17,6 +17,7 @@
#include "context_x86_64.h"
#include "base/bit_utils.h"
+#include "base/bit_utils_iterator.h"
#include "quick/quick_method_frame_info.h"
namespace art {
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 41651d8f1a..ad06873197 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1920,24 +1920,30 @@ DEFINE_FUNCTION art_quick_instrumentation_entry
movq %rdi, %r12 // Preserve method pointer in a callee-save.
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass thread.
- movq FRAME_SIZE_SAVE_REFS_AND_ARGS-8(%rsp), %rcx // Pass return PC.
+ movq %rsp, %rcx // Pass SP.
- call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, LR)
+ call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP)
// %rax = result of call.
- movq %r12, %rdi // Reload method pointer.
+ testq %rax, %rax
+ jz 1f
+ movq %r12, %rdi // Reload method pointer.
leaq art_quick_instrumentation_exit(%rip), %r12 // Set up return through instrumentation
movq %r12, FRAME_SIZE_SAVE_REFS_AND_ARGS-8(%rsp) // exit.
RESTORE_SAVE_REFS_AND_ARGS_FRAME
jmp *%rax // Tail call to intended method.
+1:
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ DELIVER_PENDING_EXCEPTION
#endif // __APPLE__
END_FUNCTION art_quick_instrumentation_entry
-DEFINE_FUNCTION art_quick_instrumentation_exit
+DEFINE_FUNCTION_CUSTOM_CFA art_quick_instrumentation_exit, 0
pushq LITERAL(0) // Push a fake return PC as there will be none on the stack.
+ CFI_ADJUST_CFA_OFFSET(8)
SETUP_SAVE_REFS_ONLY_FRAME
@@ -1948,15 +1954,16 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
movq %rsp, %rsi // Pass SP.
PUSH rax // Save integer result.
+ movq %rsp, %rdx // Pass integer result pointer.
+
subq LITERAL(8), %rsp // Save floating-point result.
CFI_ADJUST_CFA_OFFSET(8)
movq %xmm0, (%rsp)
+ movq %rsp, %rcx // Pass floating-point result pointer.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
- movq %rax, %rdx // Pass integer result.
- movq %xmm0, %rcx // Pass floating-point result.
- call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_res, fpr_res)
+ call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_res*, fpr_res*)
movq %rax, %rdi // Store return PC
movq %rdx, %rsi // Store second return PC in hidden arg.
@@ -1968,9 +1975,15 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
RESTORE_SAVE_REFS_ONLY_FRAME
+ testq %rdi, %rdi // Check if we have a return PC to go to. If we don't, then there
+ // was an exception.
+ jz 1f
+
addq LITERAL(8), %rsp // Drop fake return pc.
jmp *%rdi // Return.
+1:
+ DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_instrumentation_exit
/*
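For orientation, a minimal C++ sketch of the convention this stub now relies on (names and signature are placeholders, not ART's real entrypoint): the exit hook receives the integer and floating-point results by pointer so it can inspect or rewrite them, and a null return PC tells the stub to restore the frame and deliver a pending exception instead of returning normally.

#include <cstdint>

// The results are passed by pointer so the hook can both read and rewrite the
// method's return value; returning a null PC tells the stub to restore the
// frame and run DELIVER_PENDING_EXCEPTION instead of jumping back.
uintptr_t MethodExitHook(uint64_t* gpr_result,
                         double* fpr_result,
                         bool exception_pending,
                         uintptr_t normal_return_pc) {
  if (*fpr_result != *fpr_result) {  // example: scrub a NaN floating-point result
    *fpr_result = 0.0;
  }
  (void)gpr_result;
  return exception_pending ? 0u : normal_return_pc;
}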
diff --git a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
index 867522f3c1..425d616e76 100644
--- a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
+++ b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
@@ -17,10 +17,12 @@
#ifndef ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
#define ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
+#include "arch/instruction_set.h"
#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
#include "quick/quick_method_frame_info.h"
#include "registers_x86_64.h"
-#include "runtime.h" // for Runtime::CalleeSaveType.
namespace art {
namespace x86_64 {
@@ -53,25 +55,25 @@ static constexpr uint32_t kX86_64CalleeSaveFpEverythingSpills =
(1 << art::x86_64::XMM8) | (1 << art::x86_64::XMM9) |
(1 << art::x86_64::XMM10) | (1 << art::x86_64::XMM11);
-constexpr uint32_t X86_64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+constexpr uint32_t X86_64CalleeSaveCoreSpills(CalleeSaveType type) {
return kX86_64CalleeSaveAlwaysSpills | kX86_64CalleeSaveRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveEverything ? kX86_64CalleeSaveEverythingSpills : 0);
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveEverythingSpills : 0);
}
-constexpr uint32_t X86_64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
+constexpr uint32_t X86_64CalleeSaveFpSpills(CalleeSaveType type) {
return kX86_64CalleeSaveFpSpills |
- (type == Runtime::kSaveRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
- (type == Runtime::kSaveEverything ? kX86_64CalleeSaveFpEverythingSpills : 0);
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveFpEverythingSpills : 0);
}
-constexpr uint32_t X86_64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+constexpr uint32_t X86_64CalleeSaveFrameSize(CalleeSaveType type) {
return RoundUp((POPCOUNT(X86_64CalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(X86_64CalleeSaveFpSpills(type)) /* fprs */ +
1 /* Method* */) * static_cast<size_t>(kX86_64PointerSize), kStackAlignment);
}
-constexpr QuickMethodFrameInfo X86_64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+constexpr QuickMethodFrameInfo X86_64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
return QuickMethodFrameInfo(X86_64CalleeSaveFrameSize(type),
X86_64CalleeSaveCoreSpills(type),
X86_64CalleeSaveFpSpills(type));
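The frame-size formula above is easy to sanity-check in isolation. Below is a standalone sketch with hand-rolled stand-ins for POPCOUNT and RoundUp, assuming the x86-64 layout of 8-byte slots and 16-byte stack alignment:

#include <cstddef>
#include <cstdint>

constexpr uint32_t PopCount(uint32_t x) {
  uint32_t n = 0;
  for (uint32_t v = x; v != 0u; v &= v - 1u) ++n;
  return n;
}
constexpr size_t RoundUpTo(size_t x, size_t n) { return (x + n - 1) / n * n; }

// Frame = saved GPRs + saved FPRs + one slot for the ArtMethod*, each 8 bytes,
// rounded up to the 16-byte stack alignment.
constexpr size_t FrameSize(uint32_t core_spills, uint32_t fp_spills) {
  return RoundUpTo((PopCount(core_spills) + PopCount(fp_spills) + 1u) * 8u, 16u);
}

static_assert(FrameSize(0b111u, 0b1u) == 48u, "3 GPRs + 1 FPR + Method* -> 40 -> 48");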
diff --git a/runtime/arch/x86_64/thread_x86_64.cc b/runtime/arch/x86_64/thread_x86_64.cc
index 553b6569c8..19d25f6990 100644
--- a/runtime/arch/x86_64/thread_x86_64.cc
+++ b/runtime/arch/x86_64/thread_x86_64.cc
@@ -18,7 +18,7 @@
#include "asm_support_x86_64.h"
#include "base/macros.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#if defined(__linux__)
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 98002aed31..a8a58e135e 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -28,7 +28,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "primitive.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "well_known_classes.h"
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 9d0bfdee5d..40d7e5c3f3 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -20,6 +20,7 @@
#include "art_method.h"
#include "art_field.h"
+#include "base/callee_save_type.h"
#include "base/logging.h"
#include "class_linker-inl.h"
#include "common_throws.h"
@@ -27,6 +28,7 @@
#include "dex_file_annotations.h"
#include "dex_file-inl.h"
#include "gc_root-inl.h"
+#include "invoke_type.h"
#include "jit/profiling_info.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
@@ -35,10 +37,12 @@
#include "mirror/string.h"
#include "oat.h"
#include "obj_ptr-inl.h"
+#include "primitive.h"
#include "quick/quick_method_frame_info.h"
+#include "read_barrier-inl.h"
#include "runtime-inl.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils.h"
namespace art {
@@ -198,8 +202,8 @@ inline bool ArtMethod::IsCalleeSaveMethod() {
}
Runtime* runtime = Runtime::Current();
bool result = false;
- for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- if (this == runtime->GetCalleeSaveMethod(Runtime::CalleeSaveType(i))) {
+ for (uint32_t i = 0; i < static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType); i++) {
+ if (this == runtime->GetCalleeSaveMethod(CalleeSaveType(i))) {
result = true;
break;
}
@@ -270,12 +274,14 @@ inline const char* ArtMethod::GetName() {
return "<runtime internal resolution method>";
} else if (this == runtime->GetImtConflictMethod()) {
return "<runtime internal imt conflict method>";
- } else if (this == runtime->GetCalleeSaveMethod(Runtime::kSaveAllCalleeSaves)) {
+ } else if (this == runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves)) {
return "<runtime internal callee-save all registers method>";
- } else if (this == runtime->GetCalleeSaveMethod(Runtime::kSaveRefsOnly)) {
+ } else if (this == runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly)) {
return "<runtime internal callee-save reference registers method>";
- } else if (this == runtime->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs)) {
+ } else if (this == runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs)) {
return "<runtime internal callee-save reference and argument registers method>";
+ } else if (this == runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything)) {
+ return "<runtime internal save-every-register method>";
} else {
return "<unknown runtime internal method>";
}
@@ -339,6 +345,10 @@ inline const char* ArtMethod::GetReturnTypeDescriptor() {
return dex_file->GetTypeDescriptor(dex_file->GetTypeId(proto_id.return_type_idx_));
}
+inline Primitive::Type ArtMethod::GetReturnTypePrimitive() {
+ return Primitive::GetType(GetReturnTypeDescriptor()[0]);
+}
+
inline const char* ArtMethod::GetTypeDescriptorFromTypeIdx(dex::TypeIndex type_idx) {
DCHECK(!IsProxyMethod());
const DexFile* dex_file = GetDexFile();
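The new GetReturnTypePrimitive() relies on the first character of a dex type descriptor encoding the primitive kind. A self-contained sketch of that mapping (the enum and function names below are illustrative, not ART's):

enum class PrimKind { kBoolean, kByte, kChar, kShort, kInt, kLong, kFloat, kDouble, kVoid, kNot };

// The first character of a dex/JNI type descriptor encodes the kind: 'Z' bool,
// 'B' byte, 'C' char, 'S' short, 'I' int, 'J' long, 'F' float, 'D' double,
// 'V' void, and 'L'/'[' for reference and array types.
constexpr PrimKind PrimitiveFromDescriptor(char c) {
  switch (c) {
    case 'Z': return PrimKind::kBoolean;
    case 'B': return PrimKind::kByte;
    case 'C': return PrimKind::kChar;
    case 'S': return PrimKind::kShort;
    case 'I': return PrimKind::kInt;
    case 'J': return PrimKind::kLong;
    case 'F': return PrimKind::kFloat;
    case 'D': return PrimKind::kDouble;
    case 'V': return PrimKind::kVoid;
    default:  return PrimKind::kNot;  // reference or array type
  }
}

static_assert(PrimitiveFromDescriptor('J') == PrimKind::kLong, "'J' is the long descriptor");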
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 7de8916ad5..d591e0992c 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -664,7 +664,9 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
}
if (existing_entry_point == GetQuickInstrumentationEntryPoint()) {
// We are running the generic jni stub, but the method is being instrumented.
- DCHECK_EQ(pc, 0u) << "Should be a downcall";
+ // NB: We would normally expect the PC to be zero, but we can see a non-zero PC if
+ // instrumentation is installed or removed during a call that is using the generic JNI
+ // trampoline.
DCHECK(IsNative());
return nullptr;
}
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 856bfd23e5..3a8d279606 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -24,19 +24,17 @@
#include "base/enums.h"
#include "dex_file.h"
#include "gc_root.h"
-#include "invoke_type.h"
-#include "method_reference.h"
#include "modifiers.h"
-#include "mirror/dex_cache.h"
-#include "mirror/object.h"
#include "obj_ptr.h"
+#include "offsets.h"
+#include "primitive.h"
#include "read_barrier_option.h"
-#include "utils.h"
namespace art {
template<class T> class Handle;
class ImtConflictTable;
+enum InvokeType : uint32_t;
union JValue;
class OatQuickMethodHeader;
class ProfilingInfo;
@@ -47,8 +45,13 @@ class ShadowFrame;
namespace mirror {
class Array;
class Class;
+class ClassLoader;
+class DexCache;
class IfTable;
+class Object;
+template <typename MirrorType> class ObjectArray;
class PointerArray;
+class String;
} // namespace mirror
class ArtMethod FINAL {
@@ -318,11 +321,11 @@ class ArtMethod FINAL {
}
static MemberOffset DexMethodIndexOffset() {
- return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_);
+ return MemberOffset(OFFSETOF_MEMBER(ArtMethod, dex_method_index_));
}
static MemberOffset MethodIndexOffset() {
- return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
+ return MemberOffset(OFFSETOF_MEMBER(ArtMethod, method_index_));
}
uint32_t GetCodeItemOffset() {
@@ -524,10 +527,6 @@ class ArtMethod FINAL {
bool IsImtUnimplementedMethod() REQUIRES_SHARED(Locks::mutator_lock_);
- MethodReference ToMethodReference() REQUIRES_SHARED(Locks::mutator_lock_) {
- return MethodReference(GetDexFile(), GetDexMethodIndex());
- }
-
// Find the catch block for the given exception type and dex_pc. When a catch block is found,
// indicates whether the found catch block is responsible for clearing the exception or whether
// a move-exception instruction is present.
@@ -571,6 +570,8 @@ class ArtMethod FINAL {
const char* GetReturnTypeDescriptor() REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE Primitive::Type GetReturnTypePrimitive() REQUIRES_SHARED(Locks::mutator_lock_);
+
const char* GetTypeDescriptorFromTypeIdx(dex::TypeIndex type_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index fce6da4718..44c0661e3f 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -17,22 +17,6 @@
#ifndef ART_RUNTIME_ASM_SUPPORT_H_
#define ART_RUNTIME_ASM_SUPPORT_H_
-#if defined(__cplusplus)
-#include "art_method.h"
-#include "base/bit_utils.h"
-#include "gc/accounting/card_table.h"
-#include "gc/allocator/rosalloc.h"
-#include "gc/heap.h"
-#include "jit/jit.h"
-#include "lock_word.h"
-#include "mirror/class.h"
-#include "mirror/dex_cache.h"
-#include "mirror/string.h"
-#include "utils/dex_cache_arrays_layout.h"
-#include "runtime.h"
-#include "thread.h"
-#endif
-
#include "read_barrier_c.h"
#if defined(__arm__) || defined(__mips__)
@@ -49,14 +33,10 @@
#define SUSPEND_CHECK_INTERVAL 96
#endif
-#if defined(__cplusplus)
-
+// To generate tests related to the constants in this header, either define ADD_TEST_EQ before
+// including, or use asm_support_check.h.
#ifndef ADD_TEST_EQ // Allow #include-r to replace with their own.
-#define ADD_TEST_EQ(x, y) CHECK_EQ(x, y);
-#endif
-
-static inline void CheckAsmSupportOffsetsAndSizes() {
-#else
+#define DEFINED_ADD_TEST_EQ 1
#define ADD_TEST_EQ(x, y)
#endif
@@ -74,6 +54,7 @@ ADD_TEST_EQ(static_cast<size_t>(1U << POINTER_SIZE_SHIFT),
// Export new defines (for assembly use) by editing cpp-define-generator def files.
#define DEFINE_CHECK_EQ ADD_TEST_EQ
#include "asm_support_gen.h"
+#undef DEFINE_CHECK_EQ
// Offset of field Thread::tlsPtr_.exception.
#define THREAD_EXCEPTION_OFFSET (THREAD_CARD_TABLE_OFFSET + __SIZEOF_POINTER__)
@@ -250,8 +231,9 @@ ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32
#define STRING_COMPRESSION_FEATURE 1
ADD_TEST_EQ(STRING_COMPRESSION_FEATURE, art::mirror::kUseStringCompression);
-#if defined(__cplusplus)
-} // End of CheckAsmSupportOffsets.
+#ifdef DEFINED_ADD_TEST_EQ
+#undef ADD_TEST_EQ
+#undef DEFINED_ADD_TEST_EQ
#endif
#endif // ART_RUNTIME_ASM_SUPPORT_H_
diff --git a/runtime/asm_support_check.h b/runtime/asm_support_check.h
new file mode 100644
index 0000000000..cc6a578313
--- /dev/null
+++ b/runtime/asm_support_check.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ASM_SUPPORT_CHECK_H_
+#define ART_RUNTIME_ASM_SUPPORT_CHECK_H_
+
+#include "art_method.h"
+#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "gc/accounting/card_table.h"
+#include "gc/allocator/rosalloc.h"
+#include "gc/heap.h"
+#include "jit/jit.h"
+#include "lock_word.h"
+#include "mirror/class.h"
+#include "mirror/dex_cache.h"
+#include "mirror/string.h"
+#include "utils/dex_cache_arrays_layout.h"
+#include "runtime.h"
+#include "stack.h"
+#include "thread.h"
+
+#ifndef ADD_TEST_EQ
+#define ADD_TEST_EQ(x, y) CHECK_EQ(x, y);
+#endif
+
+#ifndef ASM_SUPPORT_CHECK_RETURN_TYPE
+#define ASM_SUPPORT_CHECK_RETURN_TYPE void
+#endif
+
+// Prepare for re-include of asm_support.h.
+#ifdef ART_RUNTIME_ASM_SUPPORT_H_
+#undef ART_RUNTIME_ASM_SUPPORT_H_
+#endif
+
+namespace art {
+
+static inline ASM_SUPPORT_CHECK_RETURN_TYPE CheckAsmSupportOffsetsAndSizes() {
+#ifdef ASM_SUPPORT_CHECK_HEADER
+ ASM_SUPPORT_CHECK_HEADER
+#endif
+
+#include "asm_support.h"
+
+#ifdef ASM_SUPPORT_CHECK_FOOTER
+ ASM_SUPPORT_CHECK_FOOTER
+#endif
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_ASM_SUPPORT_CHECK_H_
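The pattern used by asm_support_check.h is: asm_support.h defaults ADD_TEST_EQ to a no-op so assembly can include it, and the check header redefines the macro to a real assertion and re-includes the constants from C++. A compressed single-file sketch of the same idea (the struct and constant names below are made up for illustration):

#include <cassert>
#include <cstddef>
#include <cstdint>

struct FakeThread { int64_t flags; int64_t id; };  // made-up layout for the demo

// What asm_support.h now does: ADD_TEST_EQ defaults to a no-op.
#ifndef ADD_TEST_EQ
#define ADD_TEST_EQ(x, y)
#endif
#define FAKE_THREAD_ID_OFFSET 8
ADD_TEST_EQ(FAKE_THREAD_ID_OFFSET, offsetof(FakeThread, id))  // expands to nothing here

// What asm_support_check.h does: redefine the macro to a real assertion and
// re-evaluate the same constants from C++.
#undef ADD_TEST_EQ
#define ADD_TEST_EQ(x, y) assert((x) == (y));

void CheckOffsets() {
  ADD_TEST_EQ(FAKE_THREAD_ID_OFFSET, offsetof(FakeThread, id))  // now a real check
}

int main() {
  CheckOffsets();
  return 0;
}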
diff --git a/runtime/atomic.cc b/runtime/atomic.cc
index d5ae570c30..07aceb7cfc 100644
--- a/runtime/atomic.cc
+++ b/runtime/atomic.cc
@@ -17,7 +17,7 @@
#include "atomic.h"
#include "base/mutex.h"
#include "base/stl_util.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 45c3165b18..25dd1a3a5e 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -257,6 +257,13 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_relaxed);
}
+ // Atomically replace the value with the desired value if it matches the expected value. Prior writes
+ // to other memory locations become visible to the threads that do a consume or an acquire on the
+ // same location.
+ bool CompareExchangeStrongRelease(T expected_value, T desired_value) {
+ return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_release);
+ }
+
// The same, except it may fail spuriously.
bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
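A minimal illustration of why the release ordering matters for the new CompareExchangeStrongRelease, written against plain std::atomic rather than ART's wrapper: a release CAS publishes prior writes to any thread that later acquires the same location, which a relaxed CAS does not guarantee.

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> slot{0};
uint32_t payload = 0;  // plain data "published" by a successful CAS

bool PublishWithRelease(uint32_t value) {
  payload = value;  // this write is made visible by the release CAS below
  uint32_t expected = 0;
  return slot.compare_exchange_strong(expected, 1u, std::memory_order_release);
}

uint32_t ReadWithAcquire() {
  if (slot.load(std::memory_order_acquire) == 1u) {
    return payload;  // guaranteed to see the value written before the CAS
  }
  return 0;
}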
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index f68a5d42e4..25b6925fd8 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -22,7 +22,7 @@
#include "common_runtime_test.h"
#include "mirror/object_array-inl.h"
#include "thread_pool.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
class CheckWaitTask : public Task {
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 8d1c982f3d..fba9308e8e 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -17,12 +17,7 @@
#ifndef ART_RUNTIME_BASE_ALLOCATOR_H_
#define ART_RUNTIME_BASE_ALLOCATOR_H_
-#include <map>
-#include <set>
-#include <unordered_map>
-
#include "atomic.h"
-#include "base/hash_map.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/type_static_if.h"
@@ -156,29 +151,6 @@ using TrackingAllocator = typename TypeStaticIf<kEnableTrackingAllocator,
TrackingAllocatorImpl<T, kTag>,
std::allocator<T>>::type;
-template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
-using AllocationTrackingMultiMap = std::multimap<
- Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;
-
-template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
-using AllocationTrackingSet = std::set<Key, Compare, TrackingAllocator<Key, kTag>>;
-
-template<class Key,
- class T,
- AllocatorTag kTag,
- class Hash = std::hash<Key>,
- class Pred = std::equal_to<Key>>
-using AllocationTrackingUnorderedMap = std::unordered_map<
- Key, T, Hash, Pred, TrackingAllocator<std::pair<const Key, T>, kTag>>;
-
-template<class Key,
- class T,
- class EmptyFn,
- AllocatorTag kTag,
- class Hash = std::hash<Key>,
- class Pred = std::equal_to<Key>>
-using AllocationTrackingHashMap = HashMap<
- Key, T, EmptyFn, Hash, Pred, TrackingAllocator<std::pair<Key, T>, kTag>>;
} // namespace art
#endif // ART_RUNTIME_BASE_ALLOCATOR_H_
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 99c310af8c..54b40f28cf 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -26,7 +26,7 @@
#include "logging.h"
#include "mem_map.h"
#include "mutex.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "systrace.h"
namespace art {
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index f536c72bae..0844678b74 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -17,13 +17,11 @@
#ifndef ART_RUNTIME_BASE_BIT_UTILS_H_
#define ART_RUNTIME_BASE_BIT_UTILS_H_
-#include <iterator>
#include <limits>
#include <type_traits>
-#include "base/iteration_range.h"
#include "base/logging.h"
-#include "base/stl_util.h"
+#include "base/stl_util_identity.h"
namespace art {
@@ -312,85 +310,6 @@ constexpr T MinInt(size_t bits) {
: static_cast<T>(0);
}
-// Using the Curiously Recurring Template Pattern to implement everything shared
-// by LowToHighBitIterator and HighToLowBitIterator, i.e. everything but operator*().
-template <typename T, typename Iter>
-class BitIteratorBase
- : public std::iterator<std::forward_iterator_tag, uint32_t, ptrdiff_t, void, void> {
- static_assert(std::is_integral<T>::value, "T must be integral");
- static_assert(std::is_unsigned<T>::value, "T must be unsigned");
-
- static_assert(sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t), "Unsupported size");
-
- public:
- BitIteratorBase() : bits_(0u) { }
- explicit BitIteratorBase(T bits) : bits_(bits) { }
-
- Iter& operator++() {
- DCHECK_NE(bits_, 0u);
- uint32_t bit = *static_cast<Iter&>(*this);
- bits_ &= ~(static_cast<T>(1u) << bit);
- return static_cast<Iter&>(*this);
- }
-
- Iter& operator++(int) {
- Iter tmp(static_cast<Iter&>(*this));
- ++*this;
- return tmp;
- }
-
- protected:
- T bits_;
-
- template <typename U, typename I>
- friend bool operator==(const BitIteratorBase<U, I>& lhs, const BitIteratorBase<U, I>& rhs);
-};
-
-template <typename T, typename Iter>
-bool operator==(const BitIteratorBase<T, Iter>& lhs, const BitIteratorBase<T, Iter>& rhs) {
- return lhs.bits_ == rhs.bits_;
-}
-
-template <typename T, typename Iter>
-bool operator!=(const BitIteratorBase<T, Iter>& lhs, const BitIteratorBase<T, Iter>& rhs) {
- return !(lhs == rhs);
-}
-
-template <typename T>
-class LowToHighBitIterator : public BitIteratorBase<T, LowToHighBitIterator<T>> {
- public:
- using BitIteratorBase<T, LowToHighBitIterator<T>>::BitIteratorBase;
-
- uint32_t operator*() const {
- DCHECK_NE(this->bits_, 0u);
- return CTZ(this->bits_);
- }
-};
-
-template <typename T>
-class HighToLowBitIterator : public BitIteratorBase<T, HighToLowBitIterator<T>> {
- public:
- using BitIteratorBase<T, HighToLowBitIterator<T>>::BitIteratorBase;
-
- uint32_t operator*() const {
- DCHECK_NE(this->bits_, 0u);
- static_assert(std::numeric_limits<T>::radix == 2, "Unexpected radix!");
- return std::numeric_limits<T>::digits - 1u - CLZ(this->bits_);
- }
-};
-
-template <typename T>
-IterationRange<LowToHighBitIterator<T>> LowToHighBits(T bits) {
- return IterationRange<LowToHighBitIterator<T>>(
- LowToHighBitIterator<T>(bits), LowToHighBitIterator<T>());
-}
-
-template <typename T>
-IterationRange<HighToLowBitIterator<T>> HighToLowBits(T bits) {
- return IterationRange<HighToLowBitIterator<T>>(
- HighToLowBitIterator<T>(bits), HighToLowBitIterator<T>());
-}
-
// Returns value with bit set in lowest one-bit position or 0 if 0. (java.lang.X.lowestOneBit).
template <typename kind>
inline static kind LowestOneBitValue(kind opnd) {
diff --git a/runtime/base/bit_utils_iterator.h b/runtime/base/bit_utils_iterator.h
new file mode 100644
index 0000000000..8514de6b75
--- /dev/null
+++ b/runtime/base/bit_utils_iterator.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BIT_UTILS_ITERATOR_H_
+#define ART_RUNTIME_BASE_BIT_UTILS_ITERATOR_H_
+
+#include <iterator>
+#include <limits>
+#include <type_traits>
+
+#include "base/bit_utils.h"
+#include "base/iteration_range.h"
+#include "base/logging.h"
+#include "base/stl_util.h"
+
+namespace art {
+
+// Using the Curiously Recurring Template Pattern to implement everything shared
+// by LowToHighBitIterator and HighToLowBitIterator, i.e. everything but operator*().
+template <typename T, typename Iter>
+class BitIteratorBase
+ : public std::iterator<std::forward_iterator_tag, uint32_t, ptrdiff_t, void, void> {
+ static_assert(std::is_integral<T>::value, "T must be integral");
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+
+ static_assert(sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t), "Unsupported size");
+
+ public:
+ BitIteratorBase() : bits_(0u) { }
+ explicit BitIteratorBase(T bits) : bits_(bits) { }
+
+ Iter& operator++() {
+ DCHECK_NE(bits_, 0u);
+ uint32_t bit = *static_cast<Iter&>(*this);
+ bits_ &= ~(static_cast<T>(1u) << bit);
+ return static_cast<Iter&>(*this);
+ }
+
+ Iter& operator++(int) {
+ Iter tmp(static_cast<Iter&>(*this));
+ ++*this;
+ return tmp;
+ }
+
+ protected:
+ T bits_;
+
+ template <typename U, typename I>
+ friend bool operator==(const BitIteratorBase<U, I>& lhs, const BitIteratorBase<U, I>& rhs);
+};
+
+template <typename T, typename Iter>
+bool operator==(const BitIteratorBase<T, Iter>& lhs, const BitIteratorBase<T, Iter>& rhs) {
+ return lhs.bits_ == rhs.bits_;
+}
+
+template <typename T, typename Iter>
+bool operator!=(const BitIteratorBase<T, Iter>& lhs, const BitIteratorBase<T, Iter>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <typename T>
+class LowToHighBitIterator : public BitIteratorBase<T, LowToHighBitIterator<T>> {
+ public:
+ using BitIteratorBase<T, LowToHighBitIterator<T>>::BitIteratorBase;
+
+ uint32_t operator*() const {
+ DCHECK_NE(this->bits_, 0u);
+ return CTZ(this->bits_);
+ }
+};
+
+template <typename T>
+class HighToLowBitIterator : public BitIteratorBase<T, HighToLowBitIterator<T>> {
+ public:
+ using BitIteratorBase<T, HighToLowBitIterator<T>>::BitIteratorBase;
+
+ uint32_t operator*() const {
+ DCHECK_NE(this->bits_, 0u);
+ static_assert(std::numeric_limits<T>::radix == 2, "Unexpected radix!");
+ return std::numeric_limits<T>::digits - 1u - CLZ(this->bits_);
+ }
+};
+
+template <typename T>
+IterationRange<LowToHighBitIterator<T>> LowToHighBits(T bits) {
+ return IterationRange<LowToHighBitIterator<T>>(
+ LowToHighBitIterator<T>(bits), LowToHighBitIterator<T>());
+}
+
+template <typename T>
+IterationRange<HighToLowBitIterator<T>> HighToLowBits(T bits) {
+ return IterationRange<HighToLowBitIterator<T>>(
+ HighToLowBitIterator<T>(bits), HighToLowBitIterator<T>());
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_BIT_UTILS_ITERATOR_H_
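Typical use of these iterators is walking a spill mask from lowest to highest set bit, e.g. in the register-restoring code that context_x86_64.cc now pulls this header in for. A standalone sketch of the equivalent loop, using __builtin_ctz directly instead of the iterator class:

#include <cstdint>
#include <vector>

// Walk the set bits of a spill mask from least to most significant, which is
// what LowToHighBits() exposes as an iterator range.
std::vector<uint32_t> SetBitsLowToHigh(uint32_t bits) {
  std::vector<uint32_t> out;
  while (bits != 0u) {
    out.push_back(static_cast<uint32_t>(__builtin_ctz(bits)));  // lowest set bit index
    bits &= bits - 1u;                                          // clear that bit
  }
  return out;
}
// SetBitsLowToHigh(0b10110u) yields {1, 2, 4}.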
diff --git a/runtime/base/bit_utils_test.cc b/runtime/base/bit_utils_test.cc
index 77bd0b815e..9f22fb4670 100644
--- a/runtime/base/bit_utils_test.cc
+++ b/runtime/base/bit_utils_test.cc
@@ -17,6 +17,7 @@
#include <vector>
#include "bit_utils.h"
+#include "bit_utils_iterator.h"
#include "gtest/gtest.h"
diff --git a/runtime/base/callee_save_type.h b/runtime/base/callee_save_type.h
new file mode 100644
index 0000000000..501b296d4f
--- /dev/null
+++ b/runtime/base/callee_save_type.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_CALLEE_SAVE_TYPE_H_
+#define ART_RUNTIME_BASE_CALLEE_SAVE_TYPE_H_
+
+#include <cstddef>
+#include <ostream>
+
+namespace art {
+
+// Identifies a special runtime method that describes the set of callee-save registers spilled to the stack.
+enum class CalleeSaveType : uint32_t {
+ kSaveAllCalleeSaves, // All callee-save registers.
+ kSaveRefsOnly, // Only those callee-save registers that can hold references.
+ kSaveRefsAndArgs, // References (see above) and arguments (usually caller-save registers).
+ kSaveEverything, // All registers, including both callee-save and caller-save.
+ kLastCalleeSaveType // Value used for iteration.
+};
+std::ostream& operator<<(std::ostream& os, const CalleeSaveType& rhs);
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_CALLEE_SAVE_TYPE_H_
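Because CalleeSaveType is now an enum class, call sites iterate it by casting through the underlying type, as the updated ArtMethod::IsCalleeSaveMethod loop shows. A small standalone sketch of that pattern (SaveKind is a placeholder, not the real type):

#include <cstdint>

enum class SaveKind : uint32_t { kAllCalleeSaves, kRefsOnly, kRefsAndArgs, kEverything, kLast };

// enum class values do not convert implicitly, so iteration goes through the
// underlying type and casts back.
int CountSaveKinds() {
  int count = 0;
  for (uint32_t i = 0; i < static_cast<uint32_t>(SaveKind::kLast); ++i) {
    SaveKind kind = static_cast<SaveKind>(i);
    (void)kind;  // would be passed to something like GetCalleeSaveMethod(kind)
    ++count;
  }
  return count;  // 4
}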
diff --git a/runtime/base/dumpable-inl.h b/runtime/base/dumpable-inl.h
index 2cdf083f01..9d7fc39093 100644
--- a/runtime/base/dumpable-inl.h
+++ b/runtime/base/dumpable-inl.h
@@ -19,7 +19,7 @@
#include "base/dumpable.h"
#include "base/mutex.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index b28eb729d8..be2092040d 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -198,7 +198,7 @@ inline void Histogram<Value>::PrintConfidenceIntervals(std::ostream &os, double
kFractionalDigits)
<< "-" << FormatDuration(Percentile(per_1, data) * kAdjust, unit, kFractionalDigits) << " "
<< "Avg: " << FormatDuration(Mean() * kAdjust, unit, kFractionalDigits) << " Max: "
- << FormatDuration(Max() * kAdjust, unit, kFractionalDigits) << "\n";
+ << FormatDuration(Max() * kAdjust, unit, kFractionalDigits) << std::endl;
}
template <class Value>
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 553928d20a..adfd7d323c 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -21,7 +21,7 @@
#include <sstream>
#include "base/mutex.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils.h"
// Headers for LogMessage::LogLine.
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 08b370ec4e..0ac2399a5d 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -194,6 +194,16 @@ inline uint64_t Mutex::GetExclusiveOwnerTid() const {
return exclusive_owner_;
}
+inline void Mutex::AssertExclusiveHeld(const Thread* self) const {
+ if (kDebugLocking && (gAborting == 0)) {
+ CHECK(IsExclusiveHeld(self)) << *this;
+ }
+}
+
+inline void Mutex::AssertHeld(const Thread* self) const {
+ AssertExclusiveHeld(self);
+}
+
inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
DCHECK(self == nullptr || self == Thread::Current());
bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
@@ -221,6 +231,16 @@ inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
#endif
}
+inline void ReaderWriterMutex::AssertExclusiveHeld(const Thread* self) const {
+ if (kDebugLocking && (gAborting == 0)) {
+ CHECK(IsExclusiveHeld(self)) << *this;
+ }
+}
+
+inline void ReaderWriterMutex::AssertWriterHeld(const Thread* self) const {
+ AssertExclusiveHeld(self);
+}
+
inline void MutatorMutex::TransitionFromRunnableToSuspended(Thread* self) {
AssertSharedHeld(self);
RegisterAsUnlocked(self);
@@ -231,6 +251,19 @@ inline void MutatorMutex::TransitionFromSuspendedToRunnable(Thread* self) {
AssertSharedHeld(self);
}
+inline ReaderMutexLock::ReaderMutexLock(Thread* self, ReaderWriterMutex& mu)
+ : self_(self), mu_(mu) {
+ mu_.SharedLock(self_);
+}
+
+inline ReaderMutexLock::~ReaderMutexLock() {
+ mu_.SharedUnlock(self_);
+}
+
+// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
+// "ReaderMutexLock mu(lock)".
+#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
+
} // namespace art
#endif // ART_RUNTIME_BASE_MUTEX_INL_H_
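The ReaderMutexLock(x) macro moved into this inline header relies on function-like macro expansion rules: the macro only fires when the class name is immediately followed by '(', which is exactly the buggy unnamed-temporary form. A generic sketch of the same trick with a hypothetical Guard class:

#include <mutex>

class Guard {
 public:
  explicit Guard(std::mutex& m) : m_(m) { m_.lock(); }
  ~Guard() { m_.unlock(); }
 private:
  std::mutex& m_;
};

// A function-like macro only expands when the name is followed by '(' as the
// next token, so the correct form "Guard g(m);" is untouched while the buggy
// unnamed-temporary form "Guard (m);" becomes a compile error.
#define Guard(x) static_assert(false, "Guard declaration missing variable name")

std::mutex g_lock;

void Ok() {
  Guard g(g_lock);  // 'Guard' is followed by 'g', not '(', so no macro expansion
  (void)g;
}
// void Bad() { Guard (g_lock); }  // would expand the macro and fail to compile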
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 03ae63a068..e77d8d749d 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -244,15 +244,11 @@ class LOCKABLE Mutex : public BaseMutex {
void Unlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }
// Is the current thread the exclusive holder of the Mutex.
- bool IsExclusiveHeld(const Thread* self) const;
+ ALWAYS_INLINE bool IsExclusiveHeld(const Thread* self) const;
// Assert that the Mutex is exclusively held by the current thread.
- void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
- if (kDebugLocking && (gAborting == 0)) {
- CHECK(IsExclusiveHeld(self)) << *this;
- }
- }
- void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
+ ALWAYS_INLINE void AssertExclusiveHeld(const Thread* self) const ASSERT_CAPABILITY(this);
+ ALWAYS_INLINE void AssertHeld(const Thread* self) const ASSERT_CAPABILITY(this);
// Assert that the Mutex is not held by the current thread.
void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
@@ -349,15 +345,11 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }
// Is the current thread the exclusive holder of the ReaderWriterMutex.
- bool IsExclusiveHeld(const Thread* self) const;
+ ALWAYS_INLINE bool IsExclusiveHeld(const Thread* self) const;
// Assert the current thread has exclusive access to the ReaderWriterMutex.
- void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
- if (kDebugLocking && (gAborting == 0)) {
- CHECK(IsExclusiveHeld(self)) << *this;
- }
- }
- void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
+ ALWAYS_INLINE void AssertExclusiveHeld(const Thread* self) const ASSERT_CAPABILITY(this);
+ ALWAYS_INLINE void AssertWriterHeld(const Thread* self) const ASSERT_CAPABILITY(this);
// Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
@@ -517,23 +509,15 @@ class SCOPED_CAPABILITY MutexLock {
// construction and releases it upon destruction.
class SCOPED_CAPABILITY ReaderMutexLock {
public:
- ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) ALWAYS_INLINE :
- self_(self), mu_(mu) {
- mu_.SharedLock(self_);
- }
+ ALWAYS_INLINE ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu);
- ~ReaderMutexLock() RELEASE() ALWAYS_INLINE {
- mu_.SharedUnlock(self_);
- }
+ ALWAYS_INLINE ~ReaderMutexLock() RELEASE();
private:
Thread* const self_;
ReaderWriterMutex& mu_;
DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
-// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
-// "ReaderMutexLock mu(lock)".
-#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 340550f02e..752e77a7c0 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -14,10 +14,10 @@
* limitations under the License.
*/
-#include "mutex.h"
+#include "mutex-inl.h"
#include "common_runtime_test.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/base/stl_util.h b/runtime/base/stl_util.h
index cfe27f3811..b27297241d 100644
--- a/runtime/base/stl_util.h
+++ b/runtime/base/stl_util.h
@@ -25,13 +25,6 @@
namespace art {
-// Sort and remove duplicates of an STL vector or deque.
-template<class T>
-void STLSortAndRemoveDuplicates(T* v) {
- std::sort(v->begin(), v->end());
- v->erase(std::unique(v->begin(), v->end()), v->end());
-}
-
// STLDeleteContainerPointers()
// For a range within a container of pointers, calls delete
// (non-array version) on these pointers.
@@ -83,20 +76,6 @@ void STLDeleteValues(T *v) {
}
}
-template <class T>
-std::string ToString(const T& v) {
- std::ostringstream os;
- os << "[";
- for (size_t i = 0; i < v.size(); ++i) {
- os << v[i];
- if (i < v.size() - 1) {
- os << ", ";
- }
- }
- os << "]";
- return os.str();
-}
-
// Deleter using free() for use with std::unique_ptr<>. See also UniqueCPtr<> below.
struct FreeDelete {
// NOTE: Deleting a const object is valid but free() takes a non-const pointer.
@@ -109,13 +88,6 @@ struct FreeDelete {
template <typename T>
using UniqueCPtr = std::unique_ptr<T, FreeDelete>;
-// C++14 from-the-future import (std::make_unique)
-// Invoke the constructor of 'T' with the provided args, and wrap the result in a unique ptr.
-template <typename T, typename ... Args>
-std::unique_ptr<T> MakeUnique(Args&& ... args) {
- return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
-}
-
// Find index of the first element with the specified value known to be in the container.
template <typename Container, typename T>
size_t IndexOfElement(const Container& container, const T& value) {
@@ -150,13 +122,6 @@ bool ContainsElement(const Container& container, const T& value, size_t start_po
return it != container.end();
}
-// const char* compare function suitable for std::map or std::set.
-struct CStringLess {
- bool operator()(const char* lhs, const char* rhs) const {
- return strcmp(lhs, rhs) < 0;
- }
-};
-
// 32-bit FNV-1a hash function suitable for std::unordered_map.
// It can be used with any container which works with range-based for loop.
// See http://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
@@ -171,23 +136,6 @@ struct FNVHash {
}
};
-// Use to suppress type deduction for a function argument.
-// See std::identity<> for more background:
-// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1856.html#20.2.2 - move/forward helpers
-//
-// e.g. "template <typename X> void bar(identity<X>::type foo);
-// bar(5); // compilation error
-// bar<int>(5); // ok
-// or "template <typename T> void foo(T* x, typename Identity<T*>::type y);
-// Base b;
-// Derived d;
-// foo(&b, &d); // Use implicit Derived* -> Base* conversion.
-// If T was deduced from both &b and &d, there would be a mismatch, i.e. deduction failure.
-template <typename T>
-struct Identity {
- using type = T;
-};
-
// Merge `other` entries into `to_update`.
template <typename T>
static inline void MergeSets(std::set<T>& to_update, const std::set<T>& other) {
diff --git a/runtime/base/stl_util_identity.h b/runtime/base/stl_util_identity.h
new file mode 100644
index 0000000000..40a93f79c5
--- /dev/null
+++ b/runtime/base/stl_util_identity.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_STL_UTIL_IDENTITY_H_
+#define ART_RUNTIME_BASE_STL_UTIL_IDENTITY_H_
+
+namespace art {
+
+// Use to suppress type deduction for a function argument.
+// See std::identity<> for more background:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1856.html#20.2.2 - move/forward helpers
+//
+// e.g. "template <typename X> void bar(identity<X>::type foo);
+// bar(5); // compilation error
+// bar<int>(5); // ok
+// or "template <typename T> void foo(T* x, typename Identity<T*>::type y);
+// Base b;
+// Derived d;
+// foo(&b, &d); // Use implicit Derived* -> Base* conversion.
+// If T was deduced from both &b and &d, there would be a mismatch, i.e. deduction failure.
+template <typename T>
+struct Identity {
+ using type = T;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_STL_UTIL_IDENTITY_H_
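The Identity<T> trick works because a nested ::type is a non-deduced context. A compiling sketch of the scenario the comment above describes (NonDeduced, Register, Base and Derived are illustrative names):

template <typename T>
struct NonDeduced { using type = T; };  // stand-in for art::Identity

struct Base {};
struct Derived : Base {};

// T is deduced only from 'primary'; the nested ::type blocks deduction from 'secondary'.
template <typename T>
void Register(T* primary, typename NonDeduced<T*>::type secondary) {
  (void)primary;
  (void)secondary;
}

void Demo() {
  Base b;
  Derived d;
  Register(&b, &d);    // ok: T = Base from &b, &d converts via Derived* -> Base*
  // Register(&d, &b); // would fail: T = Derived, Base* does not convert to Derived*
}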
diff --git a/runtime/base/strlcpy.h b/runtime/base/strlcpy.h
new file mode 100644
index 0000000000..de135ea990
--- /dev/null
+++ b/runtime/base/strlcpy.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_STRLCPY_H_
+#define ART_RUNTIME_BASE_STRLCPY_H_
+
+#include <cstdio>
+#include <cstring>
+
+// Expose a simple implementation of strlcpy when we're not compiling against bionic. This keeps
+// static analyzers happy by avoiding strcpy.
+//
+// Bionic exposes this function, but the host glibc does not. Remove this shim when we compile
+// against bionic on the host, also.
+
+#if !defined(__BIONIC__) && !defined(__APPLE__)
+
+static inline size_t strlcpy(char* dst, const char* src, size_t size) {
+ // Extra-lazy implementation: this is only a host shim, and we don't have to call this often.
+ return snprintf(dst, size, "%s", src);
+}
+
+#endif
+
+#endif // ART_RUNTIME_BASE_STRLCPY_H_
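Assuming the snprintf-based shim above, strlcpy always NUL-terminates and returns the length of the source, so truncation can be detected by comparing the return value against the buffer size. A usage sketch (my_strlcpy mirrors the shim so the snippet stands alone):

#include <cstddef>
#include <cstdio>

// my_strlcpy mirrors the shim above so this snippet is self-contained.
static inline size_t my_strlcpy(char* dst, const char* src, size_t size) {
  return static_cast<size_t>(snprintf(dst, size, "%s", src));
}

bool CopyChecked(char (&dst)[8], const char* src) {
  size_t needed = my_strlcpy(dst, src, sizeof(dst));
  return needed < sizeof(dst);  // false means the source was truncated
}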
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index 06e2526294..aaa24317bb 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -25,7 +25,8 @@
#include "base/systrace.h"
#include "base/time_utils.h"
#include "gc/heap.h"
-#include "thread-inl.h"
+#include "runtime.h"
+#include "thread-current-inl.h"
#include <cmath>
#include <iomanip>
diff --git a/runtime/base/variant_map.h b/runtime/base/variant_map.h
index 531cb37355..fdb60c4141 100644
--- a/runtime/base/variant_map.h
+++ b/runtime/base/variant_map.h
@@ -22,7 +22,7 @@
#include <type_traits>
#include <utility>
-#include "base/stl_util.h"
+#include "base/stl_util_identity.h"
namespace art {
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index a955cb5acb..f6c8fa9659 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -20,6 +20,7 @@
#include "art_method-inl.h"
#include "oat_quick_method_header.h"
#include "scoped_thread_state_change-inl.h"
+#include "stack.h"
#include "stack_map.h"
namespace art {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c169ac0733..0fa25d15d2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -55,7 +55,6 @@
#include "gc_root-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
-#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/image_space.h"
@@ -89,7 +88,6 @@
#include "mirror/method_handles_lookup.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "mirror/object-refvisitor-inl.h"
#include "mirror/proxy.h"
#include "mirror/reference-inl.h"
#include "mirror/stack_trace_element.h"
@@ -109,6 +107,7 @@
#include "thread-inl.h"
#include "thread_list.h"
#include "trace.h"
+#include "utf.h"
#include "utils.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "verifier/method_verifier.h"
@@ -1194,63 +1193,6 @@ class VerifyDeclaringClassVisitor : public ArtMethodVisitor {
gc::accounting::HeapBitmap* const live_bitmap_;
};
-class FixupInternVisitor {
- public:
- ALWAYS_INLINE ObjPtr<mirror::Object> TryInsertIntern(mirror::Object* obj) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (obj != nullptr && obj->IsString()) {
- const auto intern = Runtime::Current()->GetInternTable()->InternStrong(obj->AsString());
- return intern;
- }
- return obj;
- }
-
- ALWAYS_INLINE void VisitRootIfNonNull(
- mirror::CompressedReference<mirror::Object>* root) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!root->IsNull()) {
- VisitRoot(root);
- }
- }
-
- ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- root->Assign(TryInsertIntern(root->AsMirrorPtr()));
- }
-
- // Visit Class Fields
- ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
- MemberOffset offset,
- bool is_static ATTRIBUTE_UNUSED) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // There could be overlap between ranges, we must avoid visiting the same reference twice.
- // Avoid the class field since we already fixed it up in FixupClassVisitor.
- if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
- // Updating images, don't do a read barrier.
- // Only string fields are fixed, don't do a verify.
- mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
- offset);
- obj->SetFieldObject<false, false>(offset, TryInsertIntern(ref));
- }
- }
-
- void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
- ObjPtr<mirror::Reference> ref) const
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
- this->operator()(ref, mirror::Reference::ReferentOffset(), false);
- }
-
- void operator()(mirror::Object* obj) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (obj->IsDexCache()) {
- obj->VisitReferences<true, kVerifyNone, kWithoutReadBarrier>(*this, *this);
- } else {
- // Don't visit native roots for non-dex-cache
- obj->VisitReferences<false, kVerifyNone, kWithoutReadBarrier>(*this, *this);
- }
- }
-};
-
// Copies data from one array to another array at the same position
// if pred returns false. If there is a page of continuous data in
// the src array for which pred consistently returns true then
@@ -1343,7 +1285,6 @@ bool AppImageClassLoadersAndDexCachesHelper::Update(
return false;
}
}
-
// Only add the classes to the class loader after the points where we can return false.
for (size_t i = 0; i < num_dex_caches; i++) {
ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
@@ -1507,21 +1448,6 @@ bool AppImageClassLoadersAndDexCachesHelper::Update(
}
}
}
- {
- // Fixup all the literal strings happens at app images which are supposed to be interned.
- ScopedTrace timing("Fixup String Intern in image and dex_cache");
- const auto& image_header = space->GetImageHeader();
- const auto bitmap = space->GetMarkBitmap(); // bitmap of objects
- const uint8_t* target_base = space->GetMemMap()->Begin();
- const ImageSection& objects_section =
- image_header.GetImageSection(ImageHeader::kSectionObjects);
-
- uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
- uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
-
- FixupInternVisitor fixup_intern_visitor;
- bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_intern_visitor);
- }
if (*out_forward_dex_cache_array) {
ScopedTrace timing("Fixup ArtMethod dex cache arrays");
FixupArtMethodArrayVisitor visitor(header);
@@ -3491,8 +3417,11 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
// Example dex_cache location is SettingsProvider.apk and
// dex file location is /system/priv-app/SettingsProvider/SettingsProvider.apk
CHECK_EQ(dex_cache_location, dex_file_suffix);
- // Clean up pass to remove null dex caches.
+ const OatFile* oat_file =
+ (dex_file.GetOatDexFile() != nullptr) ? dex_file.GetOatDexFile()->GetOatFile() : nullptr;
+ // Clean up pass to remove null dex caches. Also check if we need to initialize OatFile .bss.
// Null dex caches can occur due to class unloading and we are lazily removing null entries.
+ bool initialize_oat_file_bss = (oat_file != nullptr);
JavaVMExt* const vm = self->GetJniEnv()->vm;
for (auto it = dex_caches_.begin(); it != dex_caches_.end(); ) {
DexCacheData data = *it;
@@ -3500,9 +3429,21 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
vm->DeleteWeakGlobalRef(self, data.weak_root);
it = dex_caches_.erase(it);
} else {
+ if (initialize_oat_file_bss &&
+ it->dex_file->GetOatDexFile() != nullptr &&
+ it->dex_file->GetOatDexFile()->GetOatFile() == oat_file) {
+ initialize_oat_file_bss = false; // Already initialized.
+ }
++it;
}
}
+ if (initialize_oat_file_bss) {
+ // TODO: Pre-initialize from boot/app image?
+ ArtMethod* resolution_method = Runtime::Current()->GetResolutionMethod();
+ for (ArtMethod*& entry : oat_file->GetBssMethods()) {
+ entry = resolution_method;
+ }
+ }
jweak dex_cache_jweak = vm->AddWeakGlobalRef(self, dex_cache);
dex_cache->SetDexFile(&dex_file);
DexCacheData data;
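The intent of the new .bss initialization above is that every method slot starts out pointing at the resolution method, so the first call through a slot reaches the resolution trampoline, which then patches the slot with the resolved callee. A hedged sketch of that idea using stub types rather than ART's:

#include <vector>

struct MethodStub {};  // placeholder for ArtMethod

// Every slot starts out pointing at the resolution method; the first call
// through a slot resolves the real callee and patches the slot afterwards.
void InitializeBssSlots(std::vector<MethodStub*>& bss_methods, MethodStub* resolution_method) {
  for (MethodStub*& entry : bss_methods) {
    entry = resolution_method;
  }
}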
@@ -3535,6 +3476,39 @@ ObjPtr<mirror::DexCache> ClassLinker::EnsureSameClassLoader(
return dex_cache;
}
+void ClassLinker::RegisterExistingDexCache(ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader) {
+ Thread* self = Thread::Current();
+ StackHandleScope<2> hs(self);
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(dex_cache));
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ DCHECK(dex_file != nullptr) << "Attempt to register uninitialized dex_cache object!";
+ if (kIsDebugBuild) {
+ DexCacheData old_data;
+ {
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
+ old_data = FindDexCacheDataLocked(*dex_file);
+ }
+ ObjPtr<mirror::DexCache> old_dex_cache = DecodeDexCache(self, old_data);
+ DCHECK(old_dex_cache.IsNull()) << "Attempt to manually register a dex cache that's already "
+ << "been registered on dex file " << dex_file->GetLocation();
+ }
+ ClassTable* table;
+ {
+ WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ table = InsertClassTableForClassLoader(h_class_loader.Get());
+ }
+ WriterMutexLock mu(self, *Locks::dex_lock_);
+ RegisterDexFileLocked(*dex_file, h_dex_cache.Get(), h_class_loader.Get());
+ table->InsertStrongRoot(h_dex_cache.Get());
+ if (h_class_loader.Get() != nullptr) {
+ // Since we added a strong root to the class table, do the write barrier as required for
+ // remembered sets and generational GCs.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(h_class_loader.Get());
+ }
+}
+
ObjPtr<mirror::DexCache> ClassLinker::RegisterDexFile(const DexFile& dex_file,
ObjPtr<mirror::ClassLoader> class_loader) {
Thread* self = Thread::Current();
@@ -3949,6 +3923,12 @@ void ClassLinker::UpdateClassMethods(ObjPtr<mirror::Class> klass,
}
mirror::Class* ClassLinker::LookupClass(Thread* self,
+ const char* descriptor,
+ ObjPtr<mirror::ClassLoader> class_loader) {
+ return LookupClass(self, descriptor, ComputeModifiedUtf8Hash(descriptor), class_loader);
+}
+
+mirror::Class* ClassLinker::LookupClass(Thread* self,
const char* descriptor,
size_t hash,
ObjPtr<mirror::ClassLoader> class_loader) {
@@ -4571,7 +4551,10 @@ void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod*
DCHECK(out != nullptr);
out->CopyFrom(proxy_constructor, image_pointer_size_);
// Make this constructor public and fix the class to be our Proxy version
- out->SetAccessFlags((out->GetAccessFlags() & ~kAccProtected) | kAccPublic);
+ // Mark kAccCompileDontBother so that we don't take JIT samples for the method. b/62349349
+ out->SetAccessFlags((out->GetAccessFlags() & ~kAccProtected) |
+ kAccPublic |
+ kAccCompileDontBother);
out->SetDeclaringClass(klass.Get());
}
@@ -4605,7 +4588,8 @@ void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prot
// preference to the invocation handler.
const uint32_t kRemoveFlags = kAccAbstract | kAccDefault | kAccDefaultConflict;
// Make the method final.
- const uint32_t kAddFlags = kAccFinal;
+ // Mark kAccCompileDontBother so that we don't take JIT samples for the method. b/62349349
+ const uint32_t kAddFlags = kAccFinal | kAccCompileDontBother;
out->SetAccessFlags((out->GetAccessFlags() & ~kRemoveFlags) | kAddFlags);
// Clear the dex_code_item_offset_. It needs to be 0 since proxy methods have no CodeItems but the
@@ -8949,7 +8933,8 @@ class GetResolvedClassesVisitor : public ClassVisitor {
last_dex_file_ = &dex_file;
DexCacheResolvedClasses resolved_classes(dex_file.GetLocation(),
dex_file.GetBaseLocation(),
- dex_file.GetLocationChecksum());
+ dex_file.GetLocationChecksum(),
+ dex_file.NumMethodIds());
last_resolved_classes_ = result_->find(resolved_classes);
if (last_resolved_classes_ == result_->end()) {
last_resolved_classes_ = result_->insert(resolved_classes).first;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 205ea1e496..1e8125eb05 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -212,9 +212,7 @@ class ClassLinker {
const char* descriptor,
ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return LookupClass(self, descriptor, ComputeModifiedUtf8Hash(descriptor), class_loader);
- }
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds all the classes with the given descriptor, regardless of ClassLoader.
void LookupClasses(const char* descriptor, std::vector<ObjPtr<mirror::Class>>& classes)
@@ -385,6 +383,13 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
+ // Directly register an already existing dex cache. RegisterDexFile should be preferred since that
+ // de-duplicates DexCaches when possible. The DexCache given to this function must already be fully
+ // initialized and not already registered.
+ void RegisterExistingDexCache(ObjPtr<mirror::DexCache> cache,
+ ObjPtr<mirror::ClassLoader> class_loader)
+ REQUIRES(!Locks::dex_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
ObjPtr<mirror::DexCache> RegisterDexFile(const DexFile& dex_file,
ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES(!Locks::dex_lock_)
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index b421810113..684a261cca 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -50,7 +50,7 @@
#include "mirror/string-inl.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index dfe8949134..b15d82f5e4 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -18,6 +18,8 @@
#define ART_RUNTIME_CLASS_TABLE_INL_H_
#include "class_table.h"
+
+#include "gc_root-inl.h"
#include "oat_file.h"
namespace art {
@@ -93,7 +95,7 @@ inline mirror::Class* ClassTable::TableSlot::Read() const {
if (kReadBarrierOption != kWithoutReadBarrier && before_ptr != after_ptr) {
// If another thread raced and updated the reference, do not store the read barrier updated
// one.
- data_.CompareExchangeStrongRelaxed(before, Encode(after_ptr, MaskHash(before)));
+ data_.CompareExchangeStrongRelease(before, Encode(after_ptr, MaskHash(before)));
}
return after_ptr.Ptr();
}
@@ -108,7 +110,7 @@ inline void ClassTable::TableSlot::VisitRoot(const Visitor& visitor) const {
if (before_ptr != after_ptr) {
// If another thread raced and updated the reference, do not store the read barrier updated
// one.
- data_.CompareExchangeStrongRelaxed(before, Encode(after_ptr, MaskHash(before)));
+ data_.CompareExchangeStrongRelease(before, Encode(after_ptr, MaskHash(before)));
}
}
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index 0891d3f9f5..c45bbe5334 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -14,8 +14,9 @@
* limitations under the License.
*/
-#include "class_table.h"
+#include "class_table-inl.h"
+#include "base/stl_util.h"
#include "mirror/class-inl.h"
#include "oat_file.h"
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 01c6641ae9..f9259944b4 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -728,13 +728,3 @@ void CheckJniAbortCatcher::Hook(void* data, const std::string& reason) {
}
} // namespace art
-
-namespace std {
-
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const std::vector<T>& rhs) {
-os << ::art::ToString(rhs);
-return os;
-}
-
-} // namespace std
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 24dbd058ec..019770302d 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -132,8 +132,7 @@ class CommonRuntimeTestImpl {
std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name);
- std::unique_ptr<const DexFile> OpenTestDexFile(const char* name)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ std::unique_ptr<const DexFile> OpenTestDexFile(const char* name);
jobject LoadDex(const char* dex_name) REQUIRES_SHARED(Locks::mutator_lock_);
jobject LoadMultiDex(const char* first_dex_name, const char* second_dex_name)
@@ -258,12 +257,4 @@ class CheckJniAbortCatcher {
} // namespace art
-namespace std {
-
-// TODO: isn't gtest supposed to be able to print STL types for itself?
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const std::vector<T>& rhs);
-
-} // namespace std
-
#endif // ART_RUNTIME_COMMON_RUNTIME_TEST_H_
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index d0b50fe820..7e70b7564c 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -38,7 +38,7 @@
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
-#include "handle_scope.h"
+#include "handle_scope-inl.h"
#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
#include "jni_internal.h"
@@ -56,7 +56,7 @@
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
-#include "handle_scope-inl.h"
+#include "stack.h"
#include "thread_list.h"
#include "utf.h"
#include "well_known_classes.h"
@@ -149,7 +149,9 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
DebugInstrumentationListener() {}
virtual ~DebugInstrumentationListener() {}
- void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
+ void MethodEntered(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
uint32_t dex_pc)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
@@ -171,12 +173,15 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
// also group it with other events for this location like BREAKPOINT or SINGLE_STEP.
thread->SetDebugMethodEntry();
} else {
- Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
+ Dbg::UpdateDebugger(thread, this_object.Get(), method, 0, Dbg::kMethodEntry, nullptr);
}
}
- void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
- uint32_t dex_pc, const JValue& return_value)
+ void MethodExited(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ const JValue& return_value)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
@@ -189,18 +194,22 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
events |= Dbg::kMethodEntry;
thread->ClearDebugMethodEntry();
}
- Dbg::UpdateDebugger(thread, this_object, method, dex_pc, events, &return_value);
+ Dbg::UpdateDebugger(thread, this_object.Get(), method, dex_pc, events, &return_value);
}
- void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
- ArtMethod* method, uint32_t dex_pc)
+ void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected method unwind event in debugger " << ArtMethod::PrettyMethod(method)
<< " " << dex_pc;
}
- void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
+ void DexPcMoved(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
uint32_t new_dex_pc)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
@@ -217,26 +226,33 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
events = Dbg::kMethodEntry;
thread->ClearDebugMethodEntry();
}
- Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, events, nullptr);
+ Dbg::UpdateDebugger(thread, this_object.Get(), method, new_dex_pc, events, nullptr);
}
}
- void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field)
+ void FieldRead(Thread* thread ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
- Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
+ Dbg::PostFieldAccessEvent(method, dex_pc, this_object.Get(), field);
}
- void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field,
+ void FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field,
const JValue& field_value)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
- Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
+ Dbg::PostFieldModificationEvent(method, dex_pc, this_object.Get(), field, &field_value);
}
- void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
+ void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
+ Handle<mirror::Throwable> exception_object)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
- Dbg::PostException(exception_object);
+ Dbg::PostException(exception_object.Get());
}
// We only care about branches in the Jit.
@@ -248,10 +264,10 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
// We only care about invokes in the Jit.
void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Object*,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method,
uint32_t dex_pc,
- ArtMethod*)
+ ArtMethod* target ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected invoke event in debugger " << ArtMethod::PrettyMethod(method)
<< " " << dex_pc;
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 6765407949..93daa45519 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -23,6 +23,7 @@
#include <gtest/gtest.h>
+#include "base/stl_util.h"
#include "common_runtime_test.h"
#include "compiler_callbacks.h"
#include "exec_utils.h"
diff --git a/runtime/dex_cache_resolved_classes.h b/runtime/dex_cache_resolved_classes.h
index bebdf0dbfe..2278b052ed 100644
--- a/runtime/dex_cache_resolved_classes.h
+++ b/runtime/dex_cache_resolved_classes.h
@@ -30,10 +30,12 @@ class DexCacheResolvedClasses {
public:
DexCacheResolvedClasses(const std::string& dex_location,
const std::string& base_location,
- uint32_t location_checksum)
+ uint32_t location_checksum,
+ uint32_t num_method_ids)
: dex_location_(dex_location),
base_location_(base_location),
- location_checksum_(location_checksum) {}
+ location_checksum_(location_checksum),
+ num_method_ids_(num_method_ids) {}
// Only compare the key elements, ignore the resolved classes.
int Compare(const DexCacheResolvedClasses& other) const {
@@ -69,10 +71,15 @@ class DexCacheResolvedClasses {
return classes_;
}
+ size_t NumMethodIds() const {
+ return num_method_ids_;
+ }
+
private:
const std::string dex_location_;
const std::string base_location_;
const uint32_t location_checksum_;
+ const uint32_t num_method_ids_;
// Array of resolved class def indexes.
mutable std::unordered_set<dex::TypeIndex> classes_;
};
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index aace8eba43..b267e5f22a 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -35,6 +35,7 @@
#include "base/enums.h"
#include "base/file_magic.h"
#include "base/logging.h"
+#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
#include "dex_file-inl.h"
@@ -678,6 +679,32 @@ uint32_t DexFile::FindCodeItemOffset(const DexFile::ClassDef& class_def,
UNREACHABLE();
}
+uint32_t DexFile::GetCodeItemSize(const DexFile::CodeItem& code_item) {
+ uintptr_t code_item_start = reinterpret_cast<uintptr_t>(&code_item);
+ uint32_t insns_size = code_item.insns_size_in_code_units_;
+ uint32_t tries_size = code_item.tries_size_;
+ const uint8_t* handler_data = GetCatchHandlerData(code_item, 0);
+
+ if (tries_size == 0 || handler_data == nullptr) {
+ uintptr_t insns_end = reinterpret_cast<uintptr_t>(&code_item.insns_[insns_size]);
+ return insns_end - code_item_start;
+ } else {
+ // Get the start of the handler data.
+ uint32_t handlers_size = DecodeUnsignedLeb128(&handler_data);
+ // Manually read each handler.
+ for (uint32_t i = 0; i < handlers_size; ++i) {
+ int32_t uleb128_count = DecodeSignedLeb128(&handler_data) * 2;
+ if (uleb128_count <= 0) {
+ uleb128_count = -uleb128_count + 1;
+ }
+ for (int32_t j = 0; j < uleb128_count; ++j) {
+ DecodeUnsignedLeb128(&handler_data);
+ }
+ }
+ return reinterpret_cast<uintptr_t>(handler_data) - code_item_start;
+ }
+}
+
const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_klass,
const DexFile::StringId& name,
const DexFile::TypeId& type) const {
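// Illustrative sketch (not part of the patch): the handler walk that GetCodeItemSize() above
// performs, shown on a raw encoded_catch_handler_list. In the dex format, each handler starts
// with a signed LEB128 count; a count <= 0 means |count| type/addr pairs followed by one
// catch-all address, otherwise count pairs and no catch-all. DecodeUleb128/DecodeSleb128 are
// minimal stand-ins for the utilities in leb128.h, provided here only for self-containment.
#include <cstddef>
#include <cstdint>

static uint32_t DecodeUleb128(const uint8_t** data) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*data)++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  return result;
}

static int32_t DecodeSleb128(const uint8_t** data) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*data)++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  if (shift < 32 && (byte & 0x40) != 0) {
    result |= ~0u << shift;  // Sign-extend.
  }
  return static_cast<int32_t>(result);
}

// Returns the byte length of an encoded_catch_handler_list starting at |data|.
static size_t CatchHandlerListSize(const uint8_t* data) {
  const uint8_t* p = data;
  uint32_t handlers_size = DecodeUleb128(&p);
  for (uint32_t i = 0; i < handlers_size; ++i) {
    int32_t count = DecodeSleb128(&p) * 2;  // Two uleb128 values per type/addr pair.
    if (count <= 0) {
      count = -count + 1;                   // Plus one uleb128 for the catch-all address.
    }
    for (int32_t j = 0; j < count; ++j) {
      DecodeUleb128(&p);                    // Skip over the encoded values.
    }
  }
  return static_cast<size_t>(p - data);
}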
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 591ba42003..3249edbe83 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -28,7 +28,6 @@
#include "invoke_type.h"
#include "jni.h"
#include "modifiers.h"
-#include "utf.h"
namespace art {
@@ -637,6 +636,8 @@ class DexFile {
uint32_t FindCodeItemOffset(const DexFile::ClassDef& class_def,
uint32_t dex_method_idx) const;
+ static uint32_t GetCodeItemSize(const DexFile::CodeItem& disk_code_item);
+
// Returns the declaring class descriptor string of a field id.
const char* GetFieldDeclaringClassDescriptor(const FieldId& field_id) const {
const DexFile::TypeId& type_id = GetTypeId(field_id.class_idx_);
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 1d4507a9d6..78d5c5f4ba 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -27,7 +27,7 @@
#include "mem_map.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils.h"
namespace art {
diff --git a/runtime/dex_file_tracking_registrar.cc b/runtime/dex_file_tracking_registrar.cc
new file mode 100644
index 0000000000..f41a50bb80
--- /dev/null
+++ b/runtime/dex_file_tracking_registrar.cc
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_file_tracking_registrar.h"
+
+#include <deque>
+#include <tuple>
+
+// For dex tracking through poisoning. Note: Requires forcing sanitization. This is the reason for
+// the ifdefs and early include.
+#ifdef ART_DEX_FILE_ACCESS_TRACKING
+#ifndef ART_ENABLE_ADDRESS_SANITIZER
+#define ART_ENABLE_ADDRESS_SANITIZER
+#endif
+#endif
+#include "base/memory_tool.h"
+
+#include "base/logging.h"
+#include "dex_file-inl.h"
+
+namespace art {
+namespace dex {
+namespace tracking {
+
+// If true, poison dex files to track accesses.
+static constexpr bool kDexFileAccessTracking =
+#ifdef ART_DEX_FILE_ACCESS_TRACKING
+ true;
+#else
+ false;
+#endif
+
+// The following are configurations for poisoning certain sections of a Dex File.
+// More will be added.
+enum DexTrackingType {
+ // Poisons all of a Dex File when set.
+ kWholeDexTracking,
+ // Poisons all Code Items of a Dex File when set.
+ kCodeItemTracking,
+ // Poisons all subsections of a Code Item, except the Insns bytecode array
+ // section, when set for all Code Items in a Dex File.
+ kCodeItemNonInsnsTracking,
+ // Poisons all subsections of a Code Item, except the Insns bytecode array
+ // section, when set for all Code Items in a Dex File.
+ // Additionally unpoisons the entire Code Item when method is a class
+ // initializer.
+ kCodeItemNonInsnsNoClinitTracking,
+ // Poisons based on a custom tracking configuration, which can be specified in
+ // SetDexSections().
+ kCustomTracking,
+};
+
+// Intended for local changes only.
+// Represents the current configuration being run.
+static constexpr DexTrackingType kCurrentTrackingSystem = kWholeDexTracking;
+
+// Intended for local changes only.
+void DexFileTrackingRegistrar::SetDexSections() {
+ if (kDexFileAccessTracking || dex_file_ != nullptr) {
+ switch (kCurrentTrackingSystem) {
+ case kWholeDexTracking:
+ SetDexFileRegistration(true);
+ break;
+ case kCodeItemTracking:
+ SetAllCodeItemRegistration(true);
+ break;
+ case kCodeItemNonInsnsTracking:
+ SetAllCodeItemRegistration(true);
+ SetAllInsnsRegistration(false);
+ break;
+ case kCodeItemNonInsnsNoClinitTracking:
+ SetAllCodeItemRegistration(true);
+ SetAllInsnsRegistration(false);
+ SetCodeItemRegistration("<clinit>", false);
+ break;
+ case kCustomTracking:
+ // TODO: Add/remove additional calls here to (un)poison sections of
+ // dex_file_
+ break;
+ }
+ }
+}
+
+void RegisterDexFile(const DexFile* dex_file) {
+ DexFileTrackingRegistrar dex_tracking_registrar(dex_file);
+ dex_tracking_registrar.SetDexSections();
+ dex_tracking_registrar.SetCurrentRanges();
+}
+
+inline void SetRegistrationRange(const void* begin, size_t size, bool should_poison) {
+ if (should_poison) {
+ MEMORY_TOOL_MAKE_NOACCESS(begin, size);
+ } else {
+ // Note: MEMORY_TOOL_MAKE_UNDEFINED has the same functionality as MEMORY_TOOL_MAKE_DEFINED
+ // under AddressSanitizer; the difference has not been tested with Valgrind.
+ MEMORY_TOOL_MAKE_DEFINED(begin, size);
+ }
+}
+
+void DexFileTrackingRegistrar::SetCurrentRanges() {
+ // This also empties range_values_ to avoid redundant (un)poisoning upon
+ // subsequent calls.
+ while (!range_values_.empty()) {
+ const std::tuple<const void*, size_t, bool>& current_range = range_values_.front();
+ SetRegistrationRange(std::get<0>(current_range),
+ std::get<1>(current_range),
+ std::get<2>(current_range));
+ range_values_.pop_front();
+ }
+}
+
+void DexFileTrackingRegistrar::SetDexFileRegistration(bool should_poison) {
+ const void* dex_file_begin = reinterpret_cast<const void*>(dex_file_->Begin());
+ size_t dex_file_size = dex_file_->Size();
+ range_values_.push_back(std::make_tuple(dex_file_begin, dex_file_size, should_poison));
+}
+
+void DexFileTrackingRegistrar::SetAllCodeItemRegistration(bool should_poison) {
+ for (size_t classdef_ctr = 0; classdef_ctr < dex_file_->NumClassDefs(); ++classdef_ctr) {
+ const DexFile::ClassDef& cd = dex_file_->GetClassDef(classdef_ctr);
+ const uint8_t* class_data = dex_file_->GetClassData(cd);
+ if (class_data != nullptr) {
+ ClassDataItemIterator cdit(*dex_file_, class_data);
+ // Skipping Fields
+ while (cdit.HasNextStaticField() || cdit.HasNextInstanceField()) {
+ cdit.Next();
+ }
+ while (cdit.HasNextDirectMethod()) {
+ const DexFile::CodeItem* code_item = cdit.GetMethodCodeItem();
+ if (code_item != nullptr) {
+ const void* code_item_begin = reinterpret_cast<const void*>(code_item);
+ size_t code_item_size = DexFile::GetCodeItemSize(*code_item);
+ range_values_.push_back(std::make_tuple(code_item_begin, code_item_size, should_poison));
+ }
+ cdit.Next();
+ }
+ }
+ }
+}
+
+void DexFileTrackingRegistrar::SetAllInsnsRegistration(bool should_poison) {
+ for (size_t classdef_ctr = 0; classdef_ctr < dex_file_->NumClassDefs(); ++classdef_ctr) {
+ const DexFile::ClassDef& cd = dex_file_->GetClassDef(classdef_ctr);
+ const uint8_t* class_data = dex_file_->GetClassData(cd);
+ if (class_data != nullptr) {
+ ClassDataItemIterator cdit(*dex_file_, class_data);
+ // Skipping Fields
+ while (cdit.HasNextStaticField() || cdit.HasNextInstanceField()) {
+ cdit.Next();
+ }
+ while (cdit.HasNextDirectMethod()) {
+ const DexFile::CodeItem* code_item = cdit.GetMethodCodeItem();
+ if (code_item != nullptr) {
+ const void* insns_begin = reinterpret_cast<const void*>(&code_item->insns_);
+ // Member insns_size_in_code_units_ is in 2-byte units
+ size_t insns_size = code_item->insns_size_in_code_units_ * 2;
+ range_values_.push_back(std::make_tuple(insns_begin, insns_size, should_poison));
+ }
+ cdit.Next();
+ }
+ }
+ }
+}
+
+void DexFileTrackingRegistrar::SetCodeItemRegistration(const char* class_name, bool should_poison) {
+ for (size_t classdef_ctr = 0; classdef_ctr < dex_file_->NumClassDefs(); ++classdef_ctr) {
+ const DexFile::ClassDef& cd = dex_file_->GetClassDef(classdef_ctr);
+ const uint8_t* class_data = dex_file_->GetClassData(cd);
+ if (class_data != nullptr) {
+ ClassDataItemIterator cdit(*dex_file_, class_data);
+ // Skipping Fields
+ while (cdit.HasNextStaticField() || cdit.HasNextInstanceField()) {
+ cdit.Next();
+ }
+ while (cdit.HasNextDirectMethod()) {
+ const DexFile::MethodId& methodid_item = dex_file_->GetMethodId(cdit.GetMemberIndex());
+ const char * methodid_name = dex_file_->GetMethodName(methodid_item);
+ const DexFile::CodeItem* code_item = cdit.GetMethodCodeItem();
+ if (code_item != nullptr && strcmp(methodid_name, class_name) == 0) {
+ const void* code_item_begin = reinterpret_cast<const void*>(code_item);
+ size_t code_item_size = DexFile::GetCodeItemSize(*code_item);
+ range_values_.push_back(
+ std::make_tuple(code_item_begin, code_item_size, should_poison));
+ }
+ cdit.Next();
+ }
+ }
+ }
+}
+
+} // namespace tracking
+} // namespace dex
+} // namespace art
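// Illustrative sketch (not part of the patch): the poisoning primitive behind
// SetRegistrationRange() above, written against AddressSanitizer's public interface.
// ART wraps these calls as MEMORY_TOOL_MAKE_NOACCESS / MEMORY_TOOL_MAKE_DEFINED in
// base/memory_tool.h; this standalone version only assumes a -fsanitize=address build.
#include <sanitizer/asan_interface.h>
#include <cstddef>

void PoisonRange(const void* begin, size_t size, bool should_poison) {
  if (should_poison) {
    // Any later read or write inside [begin, begin + size) triggers an ASan report,
    // which is how the registrar surfaces access stack traces in logcat.
    ASAN_POISON_MEMORY_REGION(begin, size);
  } else {
    ASAN_UNPOISON_MEMORY_REGION(begin, size);
  }
}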
diff --git a/runtime/dex_file_tracking_registrar.h b/runtime/dex_file_tracking_registrar.h
new file mode 100644
index 0000000000..b0fa275b38
--- /dev/null
+++ b/runtime/dex_file_tracking_registrar.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_FILE_TRACKING_REGISTRAR_H_
+#define ART_RUNTIME_DEX_FILE_TRACKING_REGISTRAR_H_
+
+#include <deque>
+#include <tuple>
+
+#include "dex_file.h"
+
+namespace art {
+namespace dex {
+namespace tracking {
+
+// Class for (un)poisoning various sections of Dex Files
+//
+// This class provides the means to log accesses to only those sections that
+// need to be tracked. All accesses are displayed as stack traces in
+// logcat.
+class DexFileTrackingRegistrar {
+ public:
+ explicit DexFileTrackingRegistrar(const DexFile* const dex_file)
+ : dex_file_(dex_file) {
+ }
+
+ // Entry point that calls the registration functions below to actually
+ // (un)poison the selected sections.
+ void SetDexSections();
+
+ // Uses data contained inside range_values_ to poison memory through the
+ // memory tool.
+ void SetCurrentRanges();
+
+ private:
+ void SetDexFileRegistration(bool should_poison);
+
+ // Set of functions concerning Code Items of dex_file_
+ void SetAllCodeItemRegistration(bool should_poison);
+ // Sets the insns_ section of all code items.
+ void SetAllInsnsRegistration(bool should_poison);
+ // Registers (un)poisoning for the code items of methods whose name matches the given name.
+ void SetCodeItemRegistration(const char* class_name, bool should_poison);
+
+ // Contains tuples of all ranges of memory that need to be explicitly
+ // (un)poisoned by the memory tool.
+ std::deque<std::tuple<const void *, size_t, bool>> range_values_;
+
+ const DexFile* const dex_file_;
+};
+
+// This function is meant to be called externally to use DexFileTrackingRegistrar.
+void RegisterDexFile(const DexFile* dex_file);
+
+} // namespace tracking
+} // namespace dex
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_FILE_TRACKING_REGISTRAR_H_
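// Illustrative usage sketch (not part of the patch): how a dex loader might hook the
// registrar in. Assumes ART_DEX_FILE_ACCESS_TRACKING was defined at build time and that
// |dex_file| is a fully loaded DexFile; the sections to poison are chosen statically via
// kCurrentTrackingSystem in dex_file_tracking_registrar.cc.
#include "dex_file_tracking_registrar.h"

void OnDexFileLoaded(const art::DexFile* dex_file) {
  // Queues the ranges selected by SetDexSections() and then applies the (un)poisoning.
  art::dex::tracking::RegisterDexFile(dex_file);
}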
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index d1043c6841..74f82254b3 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -19,6 +19,8 @@
#include <unordered_set>
+#include "base/allocator.h"
+#include "base/hash_map.h"
#include "dex_file.h"
#include "dex_file_types.h"
#include "safe_map.h"
@@ -226,6 +228,15 @@ class DexFileVerifier {
}
};
// Map from offset to dex file type, HashMap for performance reasons.
+ template<class Key,
+ class T,
+ class EmptyFn,
+ AllocatorTag kTag,
+ class Hash = std::hash<Key>,
+ class Pred = std::equal_to<Key>>
+ using AllocationTrackingHashMap = HashMap<
+ Key, T, EmptyFn, Hash, Pred, TrackingAllocator<std::pair<Key, T>, kTag>>;
+
AllocationTrackingHashMap<uint32_t,
uint16_t,
OffsetTypeMapEmptyFn,
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 068e1223e5..0e58e6d564 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -29,7 +29,7 @@
#include "dex_file_types.h"
#include "leb128.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils.h"
namespace art {
diff --git a/runtime/dex_method_iterator_test.cc b/runtime/dex_method_iterator_test.cc
index cd8c39096d..e83829bb46 100644
--- a/runtime/dex_method_iterator_test.cc
+++ b/runtime/dex_method_iterator_test.cc
@@ -20,7 +20,7 @@
#include "common_runtime_test.h"
#include "oat_file.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/dex_reference_collection.h b/runtime/dex_reference_collection.h
new file mode 100644
index 0000000000..01b9b97786
--- /dev/null
+++ b/runtime/dex_reference_collection.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_REFERENCE_COLLECTION_H_
+#define ART_RUNTIME_DEX_REFERENCE_COLLECTION_H_
+
+#include "base/macros.h"
+
+#include <vector>
+#include <map>
+
+namespace art {
+
+class DexFile;
+
+// Collection of dex references that is more memory efficient than a vector of <dex, index> pairs.
+// Also allows quick lookups of all of the references for a single dex.
+template <class IndexType, template<typename Type> class Allocator>
+class DexReferenceCollection {
+ public:
+ using VectorAllocator = Allocator<IndexType>;
+ using IndexVector = std::vector<IndexType, VectorAllocator>;
+ using MapAllocator = Allocator<std::pair<const DexFile*, IndexVector>>;
+ using DexFileMap = std::map<
+ const DexFile*,
+ IndexVector,
+ std::less<const DexFile*>,
+ Allocator<std::pair<const DexFile* const, IndexVector>>>;
+
+ DexReferenceCollection(const MapAllocator& map_allocator = MapAllocator(),
+ const VectorAllocator& vector_allocator = VectorAllocator())
+ : map_(map_allocator),
+ vector_allocator_(vector_allocator) {}
+
+ void AddReference(const DexFile* dex, IndexType index) {
+ GetOrInsertVector(dex)->push_back(index);
+ }
+
+ DexFileMap& GetMap() {
+ return map_;
+ }
+
+ size_t NumReferences() const {
+ size_t ret = 0;
+ for (auto&& pair : map_) {
+ ret += pair.second.size();
+ }
+ return ret;
+ }
+
+ private:
+ DexFileMap map_;
+ const DexFile* current_dex_file_ = nullptr;
+ IndexVector* current_vector_ = nullptr;
+ VectorAllocator vector_allocator_;
+
+ ALWAYS_INLINE IndexVector* GetOrInsertVector(const DexFile* dex) {
+ // Optimize for adding to the same vector in succession; the cached dex file and vector
+ // avoid repeated map lookups.
+ if (UNLIKELY(current_dex_file_ != dex)) {
+ // There is an assumption that constructing an empty vector won't do any allocations. If this
+ // is incorrect, this might leak in the arena case.
+ current_vector_ = &map_.emplace(dex, IndexVector(vector_allocator_)).first->second;
+ current_dex_file_ = dex;
+ }
+ return current_vector_;
+ }
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_REFERENCE_COLLECTION_H_
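// Illustrative sketch (not part of the patch): grouping per-dex indices with
// DexReferenceCollection, assuming std::allocator for both the map and the index vectors.
// dex1 and dex2 stand in for already-loaded DexFile instances.
#include <cstddef>
#include <cstdint>
#include <memory>

#include "dex_reference_collection.h"

size_t Example(const art::DexFile* dex1, const art::DexFile* dex2) {
  art::DexReferenceCollection<uint32_t, std::allocator> refs;
  refs.AddReference(dex1, 5);
  refs.AddReference(dex1, 7);  // Same dex as the previous call: served by the cached vector.
  refs.AddReference(dex2, 1);
  size_t total = 0;
  for (const auto& pair : refs.GetMap()) {  // One IndexVector per DexFile.
    total += pair.second.size();
  }
  return total;  // 3, matching refs.NumReferences().
}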
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index c340a885c3..88a5a13246 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -177,7 +177,7 @@ bool FillArrayData(ObjPtr<mirror::Object> obj, const Instruction::ArrayDataPaylo
}
static inline std::pair<ArtMethod*, uintptr_t> DoGetCalleeSaveMethodOuterCallerAndPc(
- ArtMethod** sp, Runtime::CalleeSaveType type) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod** sp, CalleeSaveType type) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
@@ -232,9 +232,7 @@ static inline ArtMethod* DoGetCalleeSaveMethodCaller(ArtMethod* outer_method,
return caller;
}
-ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
- Runtime::CalleeSaveType type,
- bool do_caller_check)
+ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp, CalleeSaveType type, bool do_caller_check)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
auto outer_caller_and_pc = DoGetCalleeSaveMethodOuterCallerAndPc(sp, type);
@@ -244,8 +242,7 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
return caller;
}
-CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self,
- Runtime::CalleeSaveType type) {
+CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self, CalleeSaveType type) {
CallerAndOuterMethod result;
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
@@ -257,7 +254,7 @@ CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self,
return result;
}
-ArtMethod* GetCalleeSaveOuterMethod(Thread* self, Runtime::CalleeSaveType type) {
+ArtMethod* GetCalleeSaveOuterMethod(Thread* self, CalleeSaveType type) {
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
return DoGetCalleeSaveMethodOuterCallerAndPc(sp, type).first;
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 69ee3ebe75..eed08aabad 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -20,6 +20,7 @@
#include <jni.h>
#include <stdint.h>
+#include "base/callee_save_type.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_instruction.h"
@@ -28,7 +29,6 @@
#include "handle.h"
#include "invoke_type.h"
#include "jvalue.h"
-#include "runtime.h"
namespace art {
@@ -178,7 +178,7 @@ template <typename INT_TYPE, typename FLOAT_TYPE>
inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
- Runtime::CalleeSaveType type,
+ CalleeSaveType type,
bool do_caller_check = false)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -187,11 +187,10 @@ struct CallerAndOuterMethod {
ArtMethod* outer_method;
};
-CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self,
- Runtime::CalleeSaveType type)
+CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self, CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_);
-ArtMethod* GetCalleeSaveOuterMethod(Thread* self, Runtime::CalleeSaveType type)
+ArtMethod* GetCalleeSaveOuterMethod(Thread* self, CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace art
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index eeb138b295..dd0819ed8f 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -42,12 +42,11 @@ extern "C" const void* artFindNativeMethod(Thread* self) {
// otherwise we return the address of the method we found.
void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
if (native_code == nullptr) {
- DCHECK(self->IsExceptionPending());
+ self->AssertPendingException();
return nullptr;
- } else {
- // Register so that future calls don't come here
- return method->RegisterNative(native_code, false);
}
+ // Register so that future calls don't come here
+ return method->RegisterNative(native_code, false);
}
} // namespace art
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index df37f9586f..69e3fc1045 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -18,9 +18,9 @@
#define ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_
#include "arch/instruction_set.h"
+#include "base/callee_save_type.h"
#include "base/enums.h"
#include "base/mutex.h"
-#include "runtime.h"
#include "thread-inl.h"
// Specific frame size code is in architecture-specific files. We include this to compile-time
@@ -46,13 +46,6 @@ class ScopedQuickEntrypointChecks {
}
}
- ScopedQuickEntrypointChecks() REQUIRES_SHARED(Locks::mutator_lock_)
- : self_(kIsDebugBuild ? Thread::Current() : nullptr), exit_check_(kIsDebugBuild) {
- if (kIsDebugBuild) {
- TestsOnEntry();
- }
- }
-
~ScopedQuickEntrypointChecks() REQUIRES_SHARED(Locks::mutator_lock_) {
if (exit_check_) {
TestsOnExit();
@@ -74,7 +67,7 @@ class ScopedQuickEntrypointChecks {
bool exit_check_;
};
-static constexpr size_t GetCalleeSaveFrameSize(InstructionSet isa, Runtime::CalleeSaveType type) {
+static constexpr size_t GetCalleeSaveFrameSize(InstructionSet isa, CalleeSaveType type) {
// constexpr must be a return statement.
return (isa == kArm || isa == kThumb2) ? arm::ArmCalleeSaveFrameSize(type) :
isa == kArm64 ? arm64::Arm64CalleeSaveFrameSize(type) :
@@ -100,8 +93,7 @@ static constexpr PointerSize GetConstExprPointerSize(InstructionSet isa) {
}
// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
-static constexpr size_t GetCalleeSaveReturnPcOffset(InstructionSet isa,
- Runtime::CalleeSaveType type) {
+static constexpr size_t GetCalleeSaveReturnPcOffset(InstructionSet isa, CalleeSaveType type) {
return GetCalleeSaveFrameSize(isa, type) - static_cast<size_t>(GetConstExprPointerSize(isa));
}
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 5762e4f00a..53f0727a5f 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -21,6 +21,7 @@
#include "interpreter/interpreter.h"
#include "obj_ptr-inl.h" // TODO: Find the other include that isn't complete, and clean this up.
#include "quick_exception_handler.h"
+#include "runtime.h"
#include "thread.h"
namespace art {
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 6b965678c3..fe565430fe 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -15,6 +15,7 @@
*/
#include "art_method-inl.h"
+#include "base/callee_save_type.h"
#include "callee_save_frame.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "class_linker-inl.h"
@@ -64,7 +65,8 @@ extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx,
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveEverything);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self,
+ CalleeSaveType::kSaveEverything);
ArtMethod* caller = caller_and_outer.caller;
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, true, false);
@@ -78,7 +80,8 @@ extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* s
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveEverything);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self,
+ CalleeSaveType::kSaveEverything);
ArtMethod* caller = caller_and_outer.caller;
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, false);
@@ -93,7 +96,8 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveEverything);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self,
+ CalleeSaveType::kSaveEverything);
ArtMethod* caller = caller_and_outer.caller;
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, true);
@@ -106,7 +110,8 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type
extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveEverything);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self,
+ CalleeSaveType::kSaveEverything);
ArtMethod* caller = caller_and_outer.caller;
mirror::String* result = ResolveStringFromCode(caller, dex::StringIndex(string_idx));
if (LIKELY(result != nullptr)) {
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 822c5a8d9d..726bddd334 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/callee_save_type.h"
#include "callee_save_frame.h"
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
@@ -59,12 +60,12 @@ static ArtMethod* GetReferrer(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_
if (kIsDebugBuild) {
// stub_test doesn't call this code with a proper frame, so get the outer, and if
// it does not have compiled code return it.
- ArtMethod* outer = GetCalleeSaveOuterMethod(self, Runtime::kSaveRefsOnly);
+ ArtMethod* outer = GetCalleeSaveOuterMethod(self, CalleeSaveType::kSaveRefsOnly);
if (outer->GetEntryPointFromQuickCompiledCode() == nullptr) {
return outer;
}
}
- return GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly).caller;
+ return GetCalleeSaveMethodCallerAndOuterMethod(self, CalleeSaveType::kSaveRefsOnly).caller;
}
#define ART_GET_FIELD_FROM_CODE(Kind, PrimitiveType, RetType, SetType, \
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
deleted file mode 100644
index 81560ccbaf..0000000000
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "art_method.h"
-#include "base/enums.h"
-#include "callee_save_frame.h"
-#include "entrypoints/runtime_asm_entrypoints.h"
-#include "instrumentation.h"
-#include "mirror/object-inl.h"
-#include "runtime.h"
-#include "thread-inl.h"
-
-namespace art {
-
-extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method,
- mirror::Object* this_object,
- Thread* self,
- uintptr_t lr)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
- // that part.
- ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- const void* result;
- if (instrumentation->IsDeoptimized(method)) {
- result = GetQuickToInterpreterBridge();
- } else {
- result = instrumentation->GetQuickCodeFor(method, kRuntimePointerSize);
- DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result));
- }
- bool interpreter_entry = (result == GetQuickToInterpreterBridge());
- instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? nullptr : this_object,
- method, lr, interpreter_entry);
- CHECK(result != nullptr) << method->PrettyMethod();
- return result;
-}
-
-extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, ArtMethod** sp,
- uint64_t gpr_result,
- uint64_t fpr_result)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Instrumentation exit stub must not be entered with a pending exception.
- CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
- << self->GetException()->Dump();
- // Compute address of return PC and sanity check that it currently holds 0.
- size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kSaveRefsOnly);
- uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
- return_pc_offset);
- CHECK_EQ(*return_pc, 0U);
-
- // Pop the frame filling in the return pc. The low half of the return value is 0 when
- // deoptimization shouldn't be performed with the high-half having the return address. When
- // deoptimization should be performed the low half is zero and the high-half the address of the
- // deoptimization entry point.
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(
- self, return_pc, gpr_result, fpr_result);
- return return_or_deoptimize_pc;
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 2b349e39a0..b7cd39f107 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -15,9 +15,11 @@
*/
#include "art_method-inl.h"
+#include "base/callee_save_type.h"
#include "base/enums.h"
#include "callee_save_frame.h"
#include "common_throws.h"
+#include "debugger.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
@@ -26,7 +28,9 @@
#include "imt_conflict_table.h"
#include "imtable-inl.h"
#include "interpreter/interpreter.h"
+#include "instrumentation.h"
#include "linear_alloc.h"
+#include "method_bss_mapping.h"
#include "method_handles.h"
#include "method_reference.h"
#include "mirror/class-inl.h"
@@ -35,23 +39,24 @@
#include "mirror/method_handle_impl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "oat_file.h"
#include "oat_quick_method_header.h"
#include "quick_exception_handler.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
-#include "debugger.h"
+#include "thread-inl.h"
#include "well_known_classes.h"
namespace art {
-// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
+// Visits the arguments as saved to the stack by a CalleeSaveType::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
// Number of bytes for each out register in the caller method's frame.
static constexpr size_t kBytesStackArgLocation = 4;
// Frame size in bytes of a callee-save frame for RefsAndArgs.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
- GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
+ GetCalleeSaveFrameSize(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs);
#if defined(__arm__)
// The callee save frame is pointed to by SP.
// | argN | |
@@ -80,11 +85,11 @@ class QuickArgumentVisitor {
static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
static constexpr bool kGprFprLockstep = false;
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
- arm::ArmCalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first FPR arg.
+ arm::ArmCalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs); // Offset of first FPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
- arm::ArmCalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first GPR arg.
+ arm::ArmCalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs); // Offset of first GPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
- arm::ArmCalleeSaveLrOffset(Runtime::kSaveRefsAndArgs); // Offset of return address.
+ arm::ArmCalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs); // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -117,12 +122,15 @@ class QuickArgumentVisitor {
static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
static constexpr bool kGprFprLockstep = false;
+ // Offset of first FPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
- arm64::Arm64CalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first FPR arg.
+ arm64::Arm64CalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
+ // Offset of first GPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
- arm64::Arm64CalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs); // Offset of first GPR arg.
+ arm64::Arm64CalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
+ // Offset of return address.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
- arm64::Arm64CalleeSaveLrOffset(Runtime::kSaveRefsAndArgs); // Offset of return address.
+ arm64::Arm64CalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs);
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -322,7 +330,7 @@ class QuickArgumentVisitor {
static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
- return GetCalleeSaveMethodCaller(sp, Runtime::kSaveRefsAndArgs);
+ return GetCalleeSaveMethodCaller(sp, CalleeSaveType::kSaveRefsAndArgs);
}
static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -334,7 +342,8 @@ class QuickArgumentVisitor {
static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
- const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
+ const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA,
+ CalleeSaveType::kSaveRefsAndArgs);
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
@@ -361,7 +370,8 @@ class QuickArgumentVisitor {
static bool GetInvokeType(ArtMethod** sp, InvokeType* invoke_type, uint32_t* dex_method_index)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
- const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
+ const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA,
+ CalleeSaveType::kSaveRefsAndArgs);
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
@@ -886,7 +896,6 @@ void BuildQuickArgumentVisitor::FixupReferences() {
soa_->Env()->DeleteLocalRef(pair.first);
}
}
-
// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
@@ -979,6 +988,77 @@ void RememberForGcArgumentVisitor::FixupReferences() {
}
}
+extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method,
+ mirror::Object* this_object,
+ Thread* self,
+ ArtMethod** sp)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const void* result;
+ // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
+ // that part.
+ ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (instrumentation->IsDeoptimized(method)) {
+ result = GetQuickToInterpreterBridge();
+ } else {
+ result = instrumentation->GetQuickCodeFor(method, kRuntimePointerSize);
+ DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result));
+ }
+
+ bool interpreter_entry = (result == GetQuickToInterpreterBridge());
+ bool is_static = method->IsStatic();
+ uint32_t shorty_len;
+ const char* shorty =
+ method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
+
+ ScopedObjectAccessUnchecked soa(self);
+ RememberForGcArgumentVisitor visitor(sp, is_static, shorty, shorty_len, &soa);
+ visitor.VisitArguments();
+
+ instrumentation->PushInstrumentationStackFrame(self,
+ is_static ? nullptr : this_object,
+ method,
+ QuickArgumentVisitor::GetCallingPc(sp),
+ interpreter_entry);
+
+ visitor.FixupReferences();
+ if (UNLIKELY(self->IsExceptionPending())) {
+ return nullptr;
+ }
+ CHECK(result != nullptr) << method->PrettyMethod();
+ return result;
+}
+
+extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self,
+ ArtMethod** sp,
+ uint64_t* gpr_result,
+ uint64_t* fpr_result)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current()));
+ CHECK(gpr_result != nullptr);
+ CHECK(fpr_result != nullptr);
+ // Instrumentation exit stub must not be entered with a pending exception.
+ CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
+ << self->GetException()->Dump();
+ // Compute address of return PC and sanity check that it currently holds 0.
+ size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveRefsOnly);
+ uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
+ return_pc_offset);
+ CHECK_EQ(*return_pc, 0U);
+
+ // Pop the frame filling in the return pc. The low half of the return value is 0 when
+ // deoptimization shouldn't be performed with the high-half having the return address. When
+ // deoptimization should be performed the low half is zero and the high-half the address of the
+ // deoptimization entry point.
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(
+ self, return_pc, gpr_result, fpr_result);
+ if (self->IsExceptionPending()) {
+ return GetTwoWordFailureValue();
+ }
+ return return_or_deoptimize_pc;
+}
+
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(
ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
@@ -1104,6 +1184,32 @@ extern "C" const void* artQuickResolutionTrampoline(
DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
called = linker->ResolveMethod<ClassLinker::kForceICCECheck>(
self, called_method.dex_method_index, caller, invoke_type);
+
+ // Update .bss entry in oat file if any.
+ if (called != nullptr && called_method.dex_file->GetOatDexFile() != nullptr) {
+ const MethodBssMapping* mapping =
+ called_method.dex_file->GetOatDexFile()->GetMethodBssMapping();
+ if (mapping != nullptr) {
+ auto pp = std::partition_point(
+ mapping->begin(),
+ mapping->end(),
+ [called_method](const MethodBssMappingEntry& entry) {
+ return entry.method_index < called_method.dex_method_index;
+ });
+ if (pp != mapping->end() && pp->CoversIndex(called_method.dex_method_index)) {
+ size_t bss_offset = pp->GetBssOffset(called_method.dex_method_index,
+ static_cast<size_t>(kRuntimePointerSize));
+ DCHECK_ALIGNED(bss_offset, static_cast<size_t>(kRuntimePointerSize));
+ const OatFile* oat_file = called_method.dex_file->GetOatDexFile()->GetOatFile();
+ ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(const_cast<uint8_t*>(
+ oat_file->BssBegin() + bss_offset));
+ DCHECK_GE(method_entry, oat_file->GetBssMethods().data());
+ DCHECK_LT(method_entry,
+ oat_file->GetBssMethods().data() + oat_file->GetBssMethods().size());
+ *method_entry = called;
+ }
+ }
+ }
}
const void* code = nullptr;
if (LIKELY(!self->IsExceptionPending())) {
@@ -2235,7 +2341,7 @@ static TwoWordReturn artInvokeCommon(uint32_t method_idx,
Thread* self,
ArtMethod** sp) {
ScopedQuickEntrypointChecks sqec(self);
- DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
+ DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
if (UNLIKELY(method == nullptr)) {
@@ -2456,7 +2562,7 @@ extern "C" uintptr_t artInvokePolymorphic(
ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
+ DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
// Start new JNI local reference state
JNIEnvExt* env = self->GetJniEnv();
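// Illustrative sketch (not part of the patch): the lookup pattern used above to find the
// MethodBssMapping entry covering a resolved method's index. Entries are sorted by
// method_index, so std::partition_point() returns the first entry that is not below the
// index being resolved; either that entry covers it or no entry does. Entry is a
// simplified stand-in for MethodBssMappingEntry.
#include <algorithm>
#include <cstdint>
#include <vector>

struct Entry {
  uint32_t method_index;  // Highest method index covered by this entry (simplified).
  uint32_t bss_offset;    // .bss offset of the ArtMethod* slot for that method.
  bool CoversIndex(uint32_t idx) const { return idx <= method_index; }
};

const Entry* FindCoveringEntry(const std::vector<Entry>& mapping, uint32_t method_index) {
  auto pp = std::partition_point(
      mapping.begin(),
      mapping.end(),
      [method_index](const Entry& entry) { return entry.method_index < method_index; });
  if (pp != mapping.end() && pp->CoversIndex(method_index)) {
    return &*pp;  // Caller would then store the resolved ArtMethod* at bss_offset.
  }
  return nullptr;  // No .bss slot for this method; nothing to update.
}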
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 1cd641b962..7e08b7ace0 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -17,6 +17,7 @@
#include <stdint.h>
#include "art_method-inl.h"
+#include "base/callee_save_type.h"
#include "callee_save_frame.h"
#include "common_runtime_test.h"
#include "quick/quick_method_frame_info.h"
@@ -38,7 +39,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
}
- static ArtMethod* CreateCalleeSaveMethod(InstructionSet isa, Runtime::CalleeSaveType type)
+ static ArtMethod* CreateCalleeSaveMethod(InstructionSet isa, CalleeSaveType type)
NO_THREAD_SAFETY_ANALYSIS {
Runtime* r = Runtime::Current();
@@ -53,7 +54,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
return save_method;
}
- static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
+ static void CheckFrameSize(InstructionSet isa, CalleeSaveType type, uint32_t save_size)
NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
QuickMethodFrameInfo frame_info = Runtime::Current()->GetRuntimeMethodFrameInfo(save_method);
@@ -62,7 +63,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
<< frame_info.FpSpillMask() << std::dec << " ISA " << isa;
}
- static void CheckPCOffset(InstructionSet isa, Runtime::CalleeSaveType type, size_t pc_offset)
+ static void CheckPCOffset(InstructionSet isa, CalleeSaveType type, size_t pc_offset)
NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
QuickMethodFrameInfo frame_info = Runtime::Current()->GetRuntimeMethodFrameInfo(save_method);
@@ -80,16 +81,16 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
// This test ensures that kQuickCalleeSaveFrame_RefAndArgs_FrameSize is correct.
TEST_F(QuickTrampolineEntrypointsTest, FrameSize) {
// We have to use a define here as the callee_save_frame.h functions are constexpr.
-#define CHECK_FRAME_SIZE(isa) \
- CheckFrameSize(isa, \
- Runtime::kSaveRefsAndArgs, \
- GetCalleeSaveFrameSize(isa, Runtime::kSaveRefsAndArgs)); \
- CheckFrameSize(isa, \
- Runtime::kSaveRefsOnly, \
- GetCalleeSaveFrameSize(isa, Runtime::kSaveRefsOnly)); \
- CheckFrameSize(isa, \
- Runtime::kSaveAllCalleeSaves, \
- GetCalleeSaveFrameSize(isa, Runtime::kSaveAllCalleeSaves))
+#define CHECK_FRAME_SIZE(isa) \
+ CheckFrameSize(isa, \
+ CalleeSaveType::kSaveRefsAndArgs, \
+ GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveRefsAndArgs)); \
+ CheckFrameSize(isa, \
+ CalleeSaveType::kSaveRefsOnly, \
+ GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveRefsOnly)); \
+ CheckFrameSize(isa, \
+ CalleeSaveType::kSaveAllCalleeSaves, \
+ GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveAllCalleeSaves))
CHECK_FRAME_SIZE(kArm);
CHECK_FRAME_SIZE(kArm64);
@@ -116,12 +117,12 @@ TEST_F(QuickTrampolineEntrypointsTest, ReturnPC) {
// Ensure that the computation in callee_save_frame.h is correct.
// Note: we can only check against the kRuntimeISA, because the ArtMethod computation uses
// sizeof(void*), which is wrong when the target bitwidth is not the same as the host's.
- CheckPCOffset(kRuntimeISA, Runtime::kSaveRefsAndArgs,
- GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kSaveRefsAndArgs));
- CheckPCOffset(kRuntimeISA, Runtime::kSaveRefsOnly,
- GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kSaveRefsOnly));
- CheckPCOffset(kRuntimeISA, Runtime::kSaveAllCalleeSaves,
- GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kSaveAllCalleeSaves));
+ CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs,
+ GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs));
+ CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveRefsOnly,
+ GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveRefsOnly));
+ CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveAllCalleeSaves,
+ GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveAllCalleeSaves));
}
} // namespace art
diff --git a/runtime/exec_utils.cc b/runtime/exec_utils.cc
index 9efb1a353c..db1baa76f9 100644
--- a/runtime/exec_utils.cc
+++ b/runtime/exec_utils.cc
@@ -28,7 +28,6 @@
namespace art {
-using android::base::StringAppendF;
using android::base::StringPrintf;
int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg) {
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 5594f4dfc7..fd0cd5f0b2 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -27,7 +27,7 @@
#include "mirror/object_reference.h"
#include "oat_quick_method_header.h"
#include "sigchain.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "verify_object-inl.h"
namespace art {
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 351798efd1..3d0e8172b6 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -29,6 +29,13 @@
#include "mem_map.h"
#include "stack_reference.h"
+// This implements a double-ended queue (deque) with various flavors of PushBack operations,
+// as well as PopBack and PopFront operations. We expect that all calls are performed
+// by a single thread (normally the GC). There is one exception, which accounts for the
+// name:
+// - Multiple calls to AtomicPushBack*() and AtomicBumpBack() may be made concurrently,
+// provided no other calls are made at the same time.
+
namespace art {
namespace gc {
namespace accounting {
@@ -150,7 +157,7 @@ class AtomicStack {
// Pop a number of elements.
void PopBackCount(int32_t n) {
DCHECK_GE(Size(), static_cast<size_t>(n));
- back_index_.FetchAndSubSequentiallyConsistent(n);
+ back_index_.StoreRelaxed(back_index_.LoadRelaxed() - n);
}
bool IsEmpty() const {
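// Illustrative sketch (not part of the patch): the concurrency contract described in the new
// comment above, expressed with std::atomic. Concurrent producers must reserve slots with an
// atomic read-modify-write (the AtomicPushBack*/AtomicBumpBack case), while the single GC
// thread can use a plain relaxed load/store pair, which is why PopBackCount() above no longer
// needs FetchAndSubSequentiallyConsistent.
#include <atomic>
#include <cstdint>

std::atomic<int32_t> back_index{0};

// May be called concurrently by multiple threads: needs a single atomic RMW.
int32_t BumpBack(int32_t n) {
  return back_index.fetch_add(n, std::memory_order_relaxed);
}

// Called only by the GC thread while no other calls are in flight: load/store is enough.
void PopBackCount(int32_t n) {
  back_index.store(back_index.load(std::memory_order_relaxed) - n, std::memory_order_relaxed);
}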
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index eb004726df..d039d88770 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -25,7 +25,6 @@
#include "base/mutex.h"
#include "globals.h"
-#include "object_callbacks.h"
namespace art {
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 76247bce84..7097f87e91 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -19,7 +19,6 @@
#include "base/allocator.h"
#include "base/logging.h"
-#include "object_callbacks.h"
#include "space_bitmap.h"
namespace art {
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index c416b9cc3d..57c290ea94 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -28,7 +28,7 @@
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "space_bitmap-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 48a8742cc8..e5b8ea5609 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -21,7 +21,7 @@
#include "gc/space/space-inl.h"
#include "mirror/array-inl.h"
#include "space_bitmap-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
namespace art {
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index 5594781672..c332f969ad 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -19,7 +19,6 @@
#include "base/allocator.h"
#include "globals.h"
-#include "object_callbacks.h"
#include "safe_map.h"
#include <set>
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index b13648894d..889f57b333 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -25,7 +25,6 @@
#include "base/mutex.h"
#include "globals.h"
-#include "object_callbacks.h"
namespace art {
@@ -35,6 +34,9 @@ namespace mirror {
} // namespace mirror
class MemMap;
+// Same as in object_callbacks.h. Just avoid the include.
+typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
+
namespace gc {
namespace accounting {
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index d694a68b9c..21fa2142df 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -23,7 +23,6 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
#include "gc_root.h"
namespace art {
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 122f7799df..2257b81e09 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -20,6 +20,7 @@
#include "base/enums.h"
#include "base/stl_util.h"
#include "obj_ptr-inl.h"
+#include "object_callbacks.h"
#include "stack.h"
#ifdef ART_TARGET_ANDROID
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 227c7ad28c..d31e442cc9 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -22,12 +22,12 @@
#include "base/mutex.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
#include "gc_root.h"
namespace art {
class ArtMethod;
+class IsMarkedVisitor;
class Thread;
namespace mirror {
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 35a251fda8..d5d3540b1f 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -30,7 +30,7 @@
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
namespace art {
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 562fc750ed..b85d7dff5c 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -707,6 +707,9 @@ class RosAlloc {
// the end of the memory region that's ever managed by this allocator.
size_t max_capacity_;
+ template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
+ using AllocationTrackingSet = std::set<Key, Compare, TrackingAllocator<Key, kTag>>;
+
// The run sets that hold the runs whose slots are not all
// full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
AllocationTrackingSet<Run*, kAllocatorTagRosAlloc> non_full_runs_[kNumOfSizeBrackets];
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index ef843c6650..c0d648117c 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -359,7 +359,7 @@ class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
// We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
// only.
- thread->VisitRoots(this);
+ thread->VisitRoots(this, kVisitRootFlagAllRoots);
concurrent_copying_->GetBarrier().Pass(self);
}
@@ -2086,8 +2086,11 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset)
// It was updated by the mutator.
break;
}
- } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
- false, false, kVerifyNone>(offset, expected_ref, new_ref));
+ // Use a release CAS to make sure threads reading the reference see the contents of copied objects.
+ } while (!obj->CasFieldWeakReleaseObjectWithoutWriteBarrier<false, false, kVerifyNone>(
+ offset,
+ expected_ref,
+ new_ref));
}
// Process some roots.
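A small standalone sketch of the release-CAS publication pattern the comment above refers to, written against std::atomic rather than ART's Object field API (names are illustrative): the writes that fill in the copied object must happen-before the new reference becomes visible to readers.

#include <atomic>

struct Obj {
  int payload;
};

// Publish 'copy' into 'field' only if it still holds 'expected'. Release ordering on success
// guarantees that a reader observing 'copy' through an acquire load also observes the
// payload written below.
bool PublishCopy(std::atomic<Obj*>& field, Obj* expected, Obj* copy) {
  copy->payload = 42;  // initialize the copy before publishing it
  return field.compare_exchange_weak(expected, copy,
                                     std::memory_order_release,
                                     std::memory_order_relaxed);
}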
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index f8ca8dba42..7b4340ee09 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -21,9 +21,7 @@
#include "garbage_collector.h"
#include "immune_spaces.h"
#include "jni.h"
-#include "object_callbacks.h"
#include "offsets.h"
-#include "mirror/object.h"
#include "mirror/object_reference.h"
#include "safe_map.h"
@@ -34,6 +32,10 @@ namespace art {
class Closure;
class RootInfo;
+namespace mirror {
+class Object;
+} // namespace mirror
+
namespace gc {
namespace accounting {
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 1e4196b1ac..c5a341fc80 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -31,7 +31,8 @@
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
-#include "thread-inl.h"
+#include "runtime.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "utils.h"
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index acb4f575c8..9823708606 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -21,7 +21,7 @@
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "oat_file.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace mirror {
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 9d3d950a0f..aef98dee58 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -32,7 +32,7 @@
#include "mirror/object-refvisitor-inl.h"
#include "runtime.h"
#include "stack.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
namespace art {
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 85727c25c2..0bf4095ac3 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -28,7 +28,6 @@
#include "gc/accounting/heap_bitmap.h"
#include "immune_spaces.h"
#include "lock_word.h"
-#include "object_callbacks.h"
#include "offsets.h"
namespace art {
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index f591cf09ca..fb82b4d270 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -42,7 +42,7 @@
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
namespace art {
@@ -1141,7 +1141,7 @@ class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor
Thread* const self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
<< thread->GetState() << " thread " << thread << " self " << self;
- thread->VisitRoots(this);
+ thread->VisitRoots(this, kVisitRootFlagAllRoots);
if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
ScopedTrace trace2("RevokeRosAllocThreadLocalBuffers");
mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 5a9b9f8765..b9e06f9688 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -27,7 +27,6 @@
#include "gc_root.h"
#include "gc/accounting/heap_bitmap.h"
#include "immune_spaces.h"
-#include "object_callbacks.h"
#include "offsets.h"
namespace art {
diff --git a/runtime/gc/collector/partial_mark_sweep.cc b/runtime/gc/collector/partial_mark_sweep.cc
index 984779484e..f6ca867e69 100644
--- a/runtime/gc/collector/partial_mark_sweep.cc
+++ b/runtime/gc/collector/partial_mark_sweep.cc
@@ -19,7 +19,7 @@
#include "gc/heap.h"
#include "gc/space/space.h"
#include "partial_mark_sweep.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 9d6e74dde4..d3858baaf5 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -27,7 +27,6 @@
#include "gc/accounting/heap_bitmap.h"
#include "immune_spaces.h"
#include "mirror/object_reference.h"
-#include "object_callbacks.h"
#include "offsets.h"
namespace art {
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 428e387c21..98fdfac17b 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -21,7 +21,8 @@
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
-#include "thread-inl.h"
+#include "runtime.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1af3b57830..d944ce4904 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -90,8 +90,6 @@ namespace art {
namespace gc {
-using android::base::StringPrintf;
-
static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000; // Microseconds
// Minimum amount of remaining bytes before a concurrent GC is triggered.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 24f4ce29e2..0289250966 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -34,7 +34,6 @@
#include "globals.h"
#include "handle.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
#include "offsets.h"
#include "process_state.h"
#include "safe_map.h"
@@ -43,6 +42,7 @@
namespace art {
class ConditionVariable;
+class IsMarkedVisitor;
class Mutex;
class RootVisitor;
class StackVisitor;
@@ -51,6 +51,9 @@ class ThreadPool;
class TimingLogger;
class VariableSizedHandleScope;
+// Same as in object_callbacks.h. Just avoid the include.
+typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
+
namespace mirror {
class Class;
class Object;
diff --git a/runtime/gc/heap_verification_test.cc b/runtime/gc/heap_verification_test.cc
index 2cdfc16f62..8ea0459c89 100644
--- a/runtime/gc/heap_verification_test.cc
+++ b/runtime/gc/heap_verification_test.cc
@@ -17,7 +17,7 @@
#include "common_runtime_test.h"
#include "base/memory_tool.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
#include "handle_scope-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -141,5 +141,24 @@ TEST_F(VerificationTest, LogHeapCorruption) {
v->LogHeapCorruption(nullptr, MemberOffset(0), arr.Get(), false);
}
+TEST_F(VerificationTest, FindPathFromRootSet) {
+ TEST_DISABLED_FOR_MEMORY_TOOL();
+ ScopedLogSeverity sls(LogSeverity::INFO);
+ ScopedObjectAccess soa(Thread::Current());
+ Runtime* const runtime = Runtime::Current();
+ VariableSizedHandleScope hs(soa.Self());
+ Handle<mirror::ObjectArray<mirror::Object>> arr(
+ hs.NewHandle(AllocObjectArray<mirror::Object>(soa.Self(), 256)));
+ ObjPtr<mirror::String> str = mirror::String::AllocFromModifiedUtf8(soa.Self(), "obj");
+ arr->Set(0, str);
+ const Verification* const v = runtime->GetHeap()->GetVerification();
+ std::string path = v->FirstPathFromRootSet(str);
+ EXPECT_GT(path.length(), 0u);
+ std::ostringstream oss;
+ oss << arr.Get();
+ EXPECT_NE(path.find(oss.str()), std::string::npos);
+ LOG(INFO) << path;
+}
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/reference_processor-inl.h b/runtime/gc/reference_processor-inl.h
index f619a15f74..0f47d3dc9f 100644
--- a/runtime/gc/reference_processor-inl.h
+++ b/runtime/gc/reference_processor-inl.h
@@ -19,6 +19,8 @@
#include "reference_processor.h"
+#include "mirror/reference-inl.h"
+
namespace art {
namespace gc {
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 886c950710..52da7632f0 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -22,6 +22,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
+#include "object_callbacks.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 38b68cbbe8..a8135d9a3b 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -20,11 +20,11 @@
#include "base/mutex.h"
#include "globals.h"
#include "jni.h"
-#include "object_callbacks.h"
#include "reference_queue.h"
namespace art {
+class IsMarkedVisitor;
class TimingLogger;
namespace mirror {
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index fd5dcf9de6..321d22a592 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -22,6 +22,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
+#include "object_callbacks.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index b73a880a8a..c48d48c530 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -27,7 +27,6 @@
#include "globals.h"
#include "jni.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
#include "offsets.h"
#include "thread_pool.h"
@@ -36,6 +35,9 @@ namespace mirror {
class Reference;
} // namespace mirror
+class IsMarkedVisitor;
+class MarkObjectVisitor;
+
namespace gc {
namespace collector {
diff --git a/runtime/gc/scoped_gc_critical_section.cc b/runtime/gc/scoped_gc_critical_section.cc
index f937d2c778..2976dd0252 100644
--- a/runtime/gc/scoped_gc_critical_section.cc
+++ b/runtime/gc/scoped_gc_critical_section.cc
@@ -19,7 +19,7 @@
#include "gc/collector_type.h"
#include "gc/heap.h"
#include "runtime.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index e9982e9d3c..566dc5dc40 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -17,10 +17,17 @@
#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
-#include "object_callbacks.h"
#include "space.h"
namespace art {
+
+namespace mirror {
+class Object;
+}
+
+// Same as in object_callbacks.h. Just avoid the include.
+typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
+
namespace gc {
namespace collector {
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 9282ec7944..7ec54f59fe 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -26,6 +26,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index b6bff5b17c..9da2876416 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -27,6 +27,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/callee_save_type.h"
#include "base/enums.h"
#include "base/macros.h"
#include "base/stl_util.h"
@@ -714,13 +715,13 @@ class ImageSpaceLoader {
image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
CHECK_EQ(runtime->GetImtUnimplementedMethod(),
image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveAllCalleeSaves),
+ CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves),
image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveRefsOnly),
+ CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly),
image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs),
+ CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs),
image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveEverything),
+ CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything),
image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
} else if (!runtime->HasResolutionMethod()) {
runtime->SetInstructionSet(space->oat_file_non_owned_->GetOatHeader().GetInstructionSet());
@@ -730,14 +731,16 @@ class ImageSpaceLoader {
image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
runtime->SetCalleeSaveMethod(
image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod),
- Runtime::kSaveAllCalleeSaves);
+ CalleeSaveType::kSaveAllCalleeSaves);
runtime->SetCalleeSaveMethod(
- image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod), Runtime::kSaveRefsOnly);
+ image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod),
+ CalleeSaveType::kSaveRefsOnly);
runtime->SetCalleeSaveMethod(
image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod),
- Runtime::kSaveRefsAndArgs);
+ CalleeSaveType::kSaveRefsAndArgs);
runtime->SetCalleeSaveMethod(
- image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod), Runtime::kSaveEverything);
+ image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod),
+ CalleeSaveType::kSaveEverything);
}
VLOG(image) << "ImageSpace::Init exiting " << *space.get();
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 0f856b8ecc..4597a96ce2 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -31,7 +31,7 @@
#include "os.h"
#include "scoped_thread_state_change-inl.h"
#include "space-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 61161602ce..fc24fc2974 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -18,7 +18,7 @@
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#include "region_space.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 27f30e0719..8d8c4885ef 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -255,13 +255,28 @@ void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_obje
MutexLock mu(Thread::Current(), region_lock_);
VerifyNonFreeRegionLimit();
size_t new_non_free_region_index_limit = 0;
+
+ // Combine zeroing and releasing pages to reduce how often madvise is called. This helps
+ // reduce contention on the mmap semaphore. b/62194020
+ // clear_region adds a region to the current block. If the region is not adjacent to the
+ // current block, the block is zeroed, released, and a new block begins.
+ uint8_t* clear_block_begin = nullptr;
+ uint8_t* clear_block_end = nullptr;
+ auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
+ r->Clear(/*zero_and_release_pages*/false);
+ if (clear_block_end != r->Begin()) {
+ ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
+ clear_block_begin = r->Begin();
+ }
+ clear_block_end = r->End();
+ };
for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
Region* r = &regions_[i];
if (r->IsInFromSpace()) {
*cleared_bytes += r->BytesAllocated();
*cleared_objects += r->ObjectsAllocated();
--num_non_free_regions_;
- r->Clear();
+ clear_region(r);
} else if (r->IsInUnevacFromSpace()) {
if (r->LiveBytes() == 0) {
// Special case for 0 live bytes, this means all of the objects in the region are dead and
@@ -274,13 +289,13 @@ void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_obje
// Also release RAM for large tails.
while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
DCHECK(r->IsLarge());
- regions_[i + free_regions].Clear();
+ clear_region(&regions_[i + free_regions]);
++free_regions;
}
*cleared_bytes += r->BytesAllocated();
*cleared_objects += r->ObjectsAllocated();
num_non_free_regions_ -= free_regions;
- r->Clear();
+ clear_region(r);
GetLiveBitmap()->ClearRange(
reinterpret_cast<mirror::Object*>(r->Begin()),
reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
@@ -317,6 +332,8 @@ void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_obje
last_checked_region->Idx() + 1);
}
}
+ // Clear pages for the last block since clearing happens when a new block opens.
+ ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
// Update non_free_region_index_limit_.
SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
@@ -369,7 +386,7 @@ void RegionSpace::Clear() {
if (!r->IsFree()) {
--num_non_free_regions_;
}
- r->Clear();
+ r->Clear(/*zero_and_release_pages*/true);
}
SetNonFreeRegionLimit(0);
current_region_ = &full_region_;
@@ -395,7 +412,7 @@ void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
} else {
DCHECK(reg->IsLargeTail());
}
- reg->Clear();
+ reg->Clear(/*zero_and_release_pages*/true);
--num_non_free_regions_;
}
if (end_addr < Limit()) {
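A simplified sketch of the coalescing pattern introduced in ClearFromSpace() above, with generic stand-ins for ART's Region and ZeroAndReleasePages(): adjacent cleared regions are merged into one block so the zero-and-madvise cost is paid once per contiguous run rather than once per region.

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Region {
  uint8_t* begin;
  uint8_t* end;
};

// Stand-in for ART's helper: zero the range and tell the kernel the pages may be reclaimed.
// Assumes begin/size are page-aligned in real use.
void ZeroAndReleasePages(uint8_t* begin, size_t size) {
  if (size == 0u) {
    return;
  }
  std::memset(begin, 0, size);
  madvise(begin, size, MADV_DONTNEED);
}

void ClearRegions(const std::vector<Region*>& to_clear) {
  uint8_t* block_begin = nullptr;
  uint8_t* block_end = nullptr;
  for (Region* r : to_clear) {
    if (block_end != r->begin) {
      // Not adjacent to the current block: flush it and start a new block.
      ZeroAndReleasePages(block_begin, block_end - block_begin);
      block_begin = r->begin;
    }
    block_end = r->end;
  }
  // Flush the last block, since flushing otherwise only happens when a new block opens.
  ZeroAndReleasePages(block_begin, block_end - block_begin);
}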
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 1d1d27e0f4..323ccdbd74 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -284,14 +284,16 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return type_;
}
- void Clear() {
+ void Clear(bool zero_and_release_pages) {
top_.StoreRelaxed(begin_);
state_ = RegionState::kRegionStateFree;
type_ = RegionType::kRegionTypeNone;
objects_allocated_.StoreRelaxed(0);
alloc_time_ = 0;
live_bytes_ = static_cast<size_t>(-1);
- ZeroAndReleasePages(begin_, end_ - begin_);
+ if (zero_and_release_pages) {
+ ZeroAndReleasePages(begin_, end_ - begin_);
+ }
is_newly_allocated_ = false;
is_a_tlab_ = false;
thread_ = nullptr;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 8d8b745b71..9e900e4558 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -24,6 +24,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index 4a078b8f49..74ce273abf 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -21,7 +21,7 @@
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "runtime.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index bbfcb31ab1..fddb3f2dd2 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -16,10 +16,12 @@
#include "zygote_space.h"
+#include "base/mutex-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
-#include "thread-inl.h"
+#include "runtime.h"
+#include "thread-current-inl.h"
#include "utils.h"
namespace art {
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index f1d26d9a41..5a75b37b67 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -18,7 +18,7 @@
#include "common_runtime_test.h"
#include "task_processor.h"
#include "thread_pool.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index c14f250528..03b26a0a6b 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -21,6 +21,7 @@
#include "art_field-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/object-refvisitor-inl.h"
namespace art {
namespace gc {
@@ -138,5 +139,92 @@ bool Verification::IsValidClass(const void* addr) const {
return k1 == k2;
}
+using ObjectSet = std::set<mirror::Object*>;
+using WorkQueue = std::deque<std::pair<mirror::Object*, std::string>>;
+
+// Use for visiting the GcRoots held live by ArtFields, ArtMethods, and ClassLoaders.
+class Verification::BFSFindReachable {
+ public:
+ explicit BFSFindReachable(ObjectSet* visited) : visited_(visited) {}
+
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtField* field = obj->FindFieldByOffset(offset);
+ Visit(obj->GetFieldObject<mirror::Object>(offset),
+ field != nullptr ? field->GetName() : "");
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Visit(root->AsMirrorPtr(), "!nativeRoot");
+ }
+
+ void Visit(mirror::Object* ref, const std::string& field_name) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (ref != nullptr && visited_->insert(ref).second) {
+ new_visited_.emplace_back(ref, field_name);
+ }
+ }
+
+ const WorkQueue& NewlyVisited() const {
+ return new_visited_;
+ }
+
+ private:
+ ObjectSet* visited_;
+ mutable WorkQueue new_visited_;
+};
+
+class Verification::CollectRootVisitor : public SingleRootVisitor {
+ public:
+ CollectRootVisitor(ObjectSet* visited, WorkQueue* work) : visited_(visited), work_(work) {}
+
+ void VisitRoot(mirror::Object* obj, const RootInfo& info)
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (obj != nullptr && visited_->insert(obj).second) {
+ std::ostringstream oss;
+ oss << info.ToString() << " = " << obj << "(" << obj->PrettyTypeOf() << ")";
+ work_->emplace_back(obj, oss.str());
+ }
+ }
+
+ private:
+ ObjectSet* const visited_;
+ WorkQueue* const work_;
+};
+
+std::string Verification::FirstPathFromRootSet(ObjPtr<mirror::Object> target) const {
+ Runtime* const runtime = Runtime::Current();
+ std::set<mirror::Object*> visited;
+ std::deque<std::pair<mirror::Object*, std::string>> work;
+ {
+ CollectRootVisitor root_visitor(&visited, &work);
+ runtime->VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
+ }
+ while (!work.empty()) {
+ auto pair = work.front();
+ work.pop_front();
+ if (pair.first == target) {
+ return pair.second;
+ }
+ BFSFindReachable visitor(&visited);
+ pair.first->VisitReferences(visitor, VoidFunctor());
+ for (auto&& pair2 : visitor.NewlyVisited()) {
+ std::ostringstream oss;
+ mirror::Object* obj = pair2.first;
+ oss << pair.second << " -> " << obj << "(" << obj->PrettyTypeOf() << ")." << pair2.second;
+ work.emplace_back(obj, oss.str());
+ }
+ }
+ return "<no path found>";
+}
+
} // namespace gc
} // namespace art
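FirstPathFromRootSet() above is essentially a breadth-first search over the object graph that stores, alongside each newly visited object, the textual path by which it was reached. A generic sketch of the same algorithm using plain pointers and an explicit adjacency list instead of ART's root and reference visitors (all names here are illustrative):

#include <deque>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

struct Node {
  std::string name;
  std::vector<Node*> refs;
};

// Returns a "->"-separated description of the first path found from any root to target,
// or "<no path found>" if target is unreachable.
std::string FirstPathFromRoots(const std::vector<Node*>& roots, const Node* target) {
  std::set<const Node*> visited;
  std::deque<std::pair<const Node*, std::string>> work;
  for (const Node* root : roots) {
    if (root != nullptr && visited.insert(root).second) {
      work.emplace_back(root, "root:" + root->name);
    }
  }
  while (!work.empty()) {
    std::pair<const Node*, std::string> item = work.front();
    work.pop_front();
    if (item.first == target) {
      return item.second;
    }
    for (const Node* ref : item.first->refs) {
      if (ref != nullptr && visited.insert(ref).second) {
        std::ostringstream oss;
        oss << item.second << " -> " << ref->name;
        work.emplace_back(ref, oss.str());
      }
    }
  }
  return "<no path found>";
}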
diff --git a/runtime/gc/verification.h b/runtime/gc/verification.h
index 3d95d93015..903e159c5a 100644
--- a/runtime/gc/verification.h
+++ b/runtime/gc/verification.h
@@ -57,8 +57,16 @@ class Verification {
bool IsValidHeapObjectAddress(const void* addr, space::Space** out_space = nullptr) const
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Find the first path to the target from the root set. Should be called while paused since
+ // visiting roots is not safe otherwise.
+ std::string FirstPathFromRootSet(ObjPtr<mirror::Object> target) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
gc::Heap* const heap_;
+
+ class BFSFindReachable;
+ class CollectRootVisitor;
};
} // namespace gc
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 2de4f19222..3a7f21de67 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -27,13 +27,13 @@ DEFINE_CHECK_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE), (static_cast<siz
#define COMPRESSED_REFERENCE_SIZE_SHIFT 0x2
DEFINE_CHECK_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE_SHIFT), (static_cast<size_t>(art::WhichPowerOf2(sizeof(art::mirror::CompressedReference<art::mirror::Object>)))))
#define RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveAllCalleeSaves))))
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveAllCalleeSaves))))
#define RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET 0x8
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveRefsOnly))))
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveRefsOnly))))
#define RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET 0x10
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveRefsAndArgs))))
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveRefsAndArgs))))
#define RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET 0x18
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveEverything))))
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverything))))
#define THREAD_FLAGS_OFFSET 0
DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread:: ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_ID_OFFSET 12
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 492d4b4bd9..d091e7f371 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -22,7 +22,7 @@
#include "base/mutex.h"
#include "handle.h"
#include "obj_ptr-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "verify_object.h"
namespace art {
diff --git a/runtime/imtable_test.cc b/runtime/imtable_test.cc
index 17149dfe44..d482183d86 100644
--- a/runtime/imtable_test.cc
+++ b/runtime/imtable_test.cc
@@ -29,7 +29,7 @@
#include "mirror/class_loader.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index c852d5af3a..cff3ea7ecd 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -34,6 +34,9 @@ namespace art {
static constexpr bool kDumpStackOnNonLocalReference = false;
static constexpr bool kDebugIRT = false;
+// Maximum table size we allow.
+static constexpr size_t kMaxTableSizeInBytes = 128 * MB;
+
const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
switch (kind) {
case kHandleScopeOrInvalid:
@@ -71,6 +74,9 @@ IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
CHECK(error_msg != nullptr);
CHECK_NE(desired_kind, kHandleScopeOrInvalid);
+ // Overflow and maximum check.
+ CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));
+
const size_t table_bytes = max_count * sizeof(IrtEntry);
table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
PROT_READ | PROT_WRITE, false, false, error_msg));
@@ -203,6 +209,13 @@ static inline void CheckHoleCount(IrtEntry* table,
bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
CHECK_GT(new_size, max_entries_);
+ constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(IrtEntry);
+ if (new_size > kMaxEntries) {
+ *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
+ return false;
+ }
+ // Note: the above check also ensures that there is no overflow below.
+
const size_t table_bytes = new_size * sizeof(IrtEntry);
std::unique_ptr<MemMap> new_map(MemMap::MapAnonymous("indirect ref table",
nullptr,
@@ -247,6 +260,14 @@ IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
}
// Try to double space.
+ if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
+ LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
+ << "(max=" << max_entries_ << ")" << std::endl
+ << MutatorLockedDumpable<IndirectReferenceTable>(*this)
+ << " Resizing failed: exceeds size_t";
+ UNREACHABLE();
+ }
+
std::string error_msg;
if (!Resize(max_entries_ * 2, &error_msg)) {
LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
@@ -453,4 +474,38 @@ void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
segment_state_ = new_state;
}
+bool IndirectReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
+ size_t top_index = segment_state_.top_index;
+ if (top_index < max_entries_ && top_index + free_capacity <= max_entries_) {
+ return true;
+ }
+
+ // We only make a simple best-effort attempt here, ensuring the requested capacity at the end.
+ if (resizable_ == ResizableCapacity::kNo) {
+ *error_msg = "Table is not resizable";
+ return false;
+ }
+
+ // Try to increase the table size.
+
+ // Would this overflow?
+ if (std::numeric_limits<size_t>::max() - free_capacity < top_index) {
+ *error_msg = "Cannot resize table, overflow.";
+ return false;
+ }
+
+ if (!Resize(top_index + free_capacity, error_msg)) {
+ LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
+ << "): " << std::endl
+ << MutatorLockedDumpable<IndirectReferenceTable>(*this)
+ << " Resizing failed: " << *error_msg;
+ return false;
+ }
+ return true;
+}
+
+size_t IndirectReferenceTable::FreeCapacity() {
+ return max_entries_ - segment_state_.top_index;
+}
+
} // namespace art
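The size checks added above follow a common pattern for overflow-safe capacity arithmetic: bound the element count by a byte budget divided by the element size, and test additions against numeric_limits before performing them. A small sketch of the same checks with made-up constants (kEntrySize stands in for sizeof(IrtEntry)):

#include <cstddef>
#include <limits>
#include <string>

constexpr size_t kMB = 1024 * 1024;
constexpr size_t kMaxTableSizeInBytes = 128 * kMB;  // same budget as the patch
constexpr size_t kEntrySize = 8;                    // stand-in for sizeof(IrtEntry)
constexpr size_t kMaxEntries = kMaxTableSizeInBytes / kEntrySize;

// Reject sizes whose byte count would exceed the budget; this also guarantees that
// new_size * kEntrySize cannot overflow size_t afterwards.
bool CheckResize(size_t new_size, std::string* error_msg) {
  if (new_size > kMaxEntries) {
    *error_msg = "Requested size exceeds maximum: " + std::to_string(new_size);
    return false;
  }
  return true;
}

// Overflow-safe "is there room for free_capacity more entries above top_index?"
bool HasRoom(size_t top_index, size_t free_capacity, size_t max_entries) {
  if (std::numeric_limits<size_t>::max() - free_capacity < top_index) {
    return false;  // top_index + free_capacity would wrap around
  }
  return top_index + free_capacity <= max_entries;
}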
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 7e452a270a..6d52d959cb 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -28,7 +28,6 @@
#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
#include "offsets.h"
#include "read_barrier_option.h"
@@ -285,6 +284,13 @@ class IndirectReferenceTable {
return segment_state_.top_index;
}
+ // Ensure that at least free_capacity elements are available, or return false.
+ bool EnsureFreeCapacity(size_t free_capacity, std::string* error_msg)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // See the implementation of EnsureFreeCapacity. This only reports how much space is trivially
+ // free, without recovering holes, so it is a conservative estimate.
+ size_t FreeCapacity() REQUIRES_SHARED(Locks::mutator_lock_);
+
// Note IrtIterator does not have a read barrier as it's used to visit roots.
IrtIterator begin() {
return IrtIterator(table_, 0, Capacity());
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index d862ff2708..8120cc484e 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -20,7 +20,9 @@
#include "arch/context.h"
#include "art_method-inl.h"
+#include "art_field-inl.h"
#include "atomic.h"
+#include "base/callee_save_type.h"
#include "class_linker.h"
#include "debugger.h"
#include "dex_file-inl.h"
@@ -31,6 +33,7 @@
#include "interpreter/interpreter.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
+#include "jvalue-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
@@ -45,6 +48,30 @@ namespace instrumentation {
constexpr bool kVerboseInstrumentation = false;
+void InstrumentationListener::MethodExited(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ Handle<mirror::Object> return_value) {
+ DCHECK_EQ(method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetReturnTypePrimitive(),
+ Primitive::kPrimNot);
+ JValue v;
+ v.SetL(return_value.Get());
+ MethodExited(thread, this_object, method, dex_pc, v);
+}
+
+void InstrumentationListener::FieldWritten(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field,
+ Handle<mirror::Object> field_value) {
+ DCHECK(!field->IsPrimitiveType());
+ JValue v;
+ v.SetL(field_value.Get());
+ FieldWritten(thread, this_object, method, dex_pc, field, v);
+}
+
// Instrumentation works on non-inlined frames by updating returned PCs
// of compiled frames.
static constexpr StackVisitor::StackWalkKind kInstrumentationStackWalk =
@@ -357,7 +384,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
LOG(INFO) << " Removing exit stub in " << DescribeLocation();
}
if (instrumentation_frame.interpreter_entry_) {
- CHECK(m == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
+ CHECK(m == Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
} else {
CHECK(m == instrumentation_frame.method_) << ArtMethod::PrettyMethod(m);
}
@@ -916,48 +943,75 @@ const void* Instrumentation::GetQuickCodeFor(ArtMethod* method, PointerSize poin
return class_linker->GetQuickOatCodeFor(method);
}
-void Instrumentation::MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
+void Instrumentation::MethodEnterEventImpl(Thread* thread,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc) const {
if (HasMethodEntryListeners()) {
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> thiz(hs.NewHandle(this_object));
for (InstrumentationListener* listener : method_entry_listeners_) {
if (listener != nullptr) {
- listener->MethodEntered(thread, this_object, method, dex_pc);
+ listener->MethodEntered(thread, thiz, method, dex_pc);
}
}
}
}
-void Instrumentation::MethodExitEventImpl(Thread* thread, mirror::Object* this_object,
+void Instrumentation::MethodExitEventImpl(Thread* thread,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* method,
- uint32_t dex_pc, const JValue& return_value) const {
+ uint32_t dex_pc,
+ const JValue& return_value) const {
if (HasMethodExitListeners()) {
- for (InstrumentationListener* listener : method_exit_listeners_) {
- if (listener != nullptr) {
- listener->MethodExited(thread, this_object, method, dex_pc, return_value);
+ Thread* self = Thread::Current();
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Object> thiz(hs.NewHandle(this_object));
+ if (method->GetInterfaceMethodIfProxy(kRuntimePointerSize)
+ ->GetReturnTypePrimitive() != Primitive::kPrimNot) {
+ for (InstrumentationListener* listener : method_exit_listeners_) {
+ if (listener != nullptr) {
+ listener->MethodExited(thread, thiz, method, dex_pc, return_value);
+ }
+ }
+ } else {
+ Handle<mirror::Object> ret(hs.NewHandle(return_value.GetL()));
+ for (InstrumentationListener* listener : method_exit_listeners_) {
+ if (listener != nullptr) {
+ listener->MethodExited(thread, thiz, method, dex_pc, ret);
+ }
}
}
}
}
-void Instrumentation::MethodUnwindEvent(Thread* thread, mirror::Object* this_object,
+void Instrumentation::MethodUnwindEvent(Thread* thread,
+ mirror::Object* this_object,
ArtMethod* method,
uint32_t dex_pc) const {
if (HasMethodUnwindListeners()) {
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> thiz(hs.NewHandle(this_object));
for (InstrumentationListener* listener : method_unwind_listeners_) {
if (listener != nullptr) {
- listener->MethodUnwind(thread, this_object, method, dex_pc);
+ listener->MethodUnwind(thread, thiz, method, dex_pc);
}
}
}
}
-void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
+void Instrumentation::DexPcMovedEventImpl(Thread* thread,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc) const {
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> thiz(hs.NewHandle(this_object));
for (InstrumentationListener* listener : dex_pc_listeners_) {
if (listener != nullptr) {
- listener->DexPcMoved(thread, this_object, method, dex_pc);
+ listener->DexPcMoved(thread, thiz, method, dex_pc);
}
}
}
@@ -974,36 +1028,56 @@ void Instrumentation::BranchImpl(Thread* thread,
}
void Instrumentation::InvokeVirtualOrInterfaceImpl(Thread* thread,
- mirror::Object* this_object,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee) const {
- // We cannot have thread suspension since that would cause the this_object parameter to
- // potentially become a dangling pointer. An alternative could be to put it in a handle instead.
- ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> thiz(hs.NewHandle(this_object));
for (InstrumentationListener* listener : invoke_virtual_or_interface_listeners_) {
if (listener != nullptr) {
- listener->InvokeVirtualOrInterface(thread, this_object, caller, dex_pc, callee);
+ listener->InvokeVirtualOrInterface(thread, thiz, caller, dex_pc, callee);
}
}
}
-void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc,
+void Instrumentation::FieldReadEventImpl(Thread* thread,
+ ObjPtr<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
ArtField* field) const {
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> thiz(hs.NewHandle(this_object));
for (InstrumentationListener* listener : field_read_listeners_) {
if (listener != nullptr) {
- listener->FieldRead(thread, this_object, method, dex_pc, field);
+ listener->FieldRead(thread, thiz, method, dex_pc, field);
}
}
}
-void Instrumentation::FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc,
- ArtField* field, const JValue& field_value) const {
- for (InstrumentationListener* listener : field_write_listeners_) {
- if (listener != nullptr) {
- listener->FieldWritten(thread, this_object, method, dex_pc, field, field_value);
+void Instrumentation::FieldWriteEventImpl(Thread* thread,
+ ObjPtr<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field,
+ const JValue& field_value) const {
+ Thread* self = Thread::Current();
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Object> thiz(hs.NewHandle(this_object));
+ if (field->IsPrimitiveType()) {
+ for (InstrumentationListener* listener : field_write_listeners_) {
+ if (listener != nullptr) {
+ listener->FieldWritten(thread, thiz, method, dex_pc, field, field_value);
+ }
+ }
+ } else {
+ Handle<mirror::Object> val(hs.NewHandle(field_value.GetL()));
+ for (InstrumentationListener* listener : field_write_listeners_) {
+ if (listener != nullptr) {
+ listener->FieldWritten(thread, thiz, method, dex_pc, field, val);
+ }
}
}
}
@@ -1018,7 +1092,7 @@ void Instrumentation::ExceptionCaughtEvent(Thread* thread,
thread->ClearException();
for (InstrumentationListener* listener : exception_caught_listeners_) {
if (listener != nullptr) {
- listener->ExceptionCaught(thread, h_exception.Get());
+ listener->ExceptionCaught(thread, h_exception);
}
}
thread->SetException(h_exception.Get());
@@ -1049,25 +1123,40 @@ static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instr
void Instrumentation::PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
ArtMethod* method,
uintptr_t lr, bool interpreter_entry) {
- // We have a callee-save frame meaning this value is guaranteed to never be 0.
- size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk);
+ DCHECK(!self->IsExceptionPending());
std::deque<instrumentation::InstrumentationStackFrame>* stack = self->GetInstrumentationStack();
if (kVerboseInstrumentation) {
LOG(INFO) << "Entering " << ArtMethod::PrettyMethod(method) << " from PC "
<< reinterpret_cast<void*>(lr);
}
- instrumentation::InstrumentationStackFrame instrumentation_frame(this_object, method, lr,
- frame_id, interpreter_entry);
- stack->push_front(instrumentation_frame);
+ // We send the enter event before pushing the instrumentation frame to make cleanup easier. If the
+ // event causes an exception, we can simply send the unwind event and return.
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> h_this(hs.NewHandle(this_object));
if (!interpreter_entry) {
- MethodEnterEvent(self, this_object, method, 0);
+ MethodEnterEvent(self, h_this.Get(), method, 0);
+ if (self->IsExceptionPending()) {
+ MethodUnwindEvent(self, h_this.Get(), method, 0);
+ return;
+ }
}
+
+ // We have a callee-save frame meaning this value is guaranteed to never be 0.
+ DCHECK(!self->IsExceptionPending());
+ size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk);
+
+ instrumentation::InstrumentationStackFrame instrumentation_frame(h_this.Get(), method, lr,
+ frame_id, interpreter_entry);
+ stack->push_front(instrumentation_frame);
}
-TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc,
- uint64_t gpr_result,
- uint64_t fpr_result) {
+TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
+ uintptr_t* return_pc,
+ uint64_t* gpr_result,
+ uint64_t* fpr_result) {
+ DCHECK(gpr_result != nullptr);
+ DCHECK(fpr_result != nullptr);
// Do the pop.
std::deque<instrumentation::InstrumentationStackFrame>* stack = self->GetInstrumentationStack();
CHECK_GT(stack->size(), 0U);
@@ -1083,13 +1172,20 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintpt
uint32_t length;
const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
char return_shorty = method->GetInterfaceMethodIfProxy(pointer_size)->GetShorty(&length)[0];
+ bool is_ref = return_shorty == '[' || return_shorty == 'L';
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::Object> res(hs.NewHandle<mirror::Object>(nullptr));
JValue return_value;
if (return_shorty == 'V') {
return_value.SetJ(0);
} else if (return_shorty == 'F' || return_shorty == 'D') {
- return_value.SetJ(fpr_result);
+ return_value.SetJ(*fpr_result);
} else {
- return_value.SetJ(gpr_result);
+ return_value.SetJ(*gpr_result);
+ }
+ if (is_ref) {
+ // Take a handle to the return value so we won't lose it if we suspend.
+ res.Assign(return_value.GetL());
}
// TODO: improve the dex pc information here, requires knowledge of current PC as opposed to
// return_pc.
@@ -1106,6 +1202,10 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintpt
bool deoptimize = (visitor.caller != nullptr) &&
(interpreter_stubs_installed_ || IsDeoptimized(visitor.caller) ||
Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller));
+ if (is_ref) {
+ // Restore the return value if it's a reference since it might have moved.
+ *reinterpret_cast<mirror::Object**>(gpr_result) = res.Get();
+ }
if (deoptimize && Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
if (kVerboseInstrumentation) {
LOG(INFO) << "Deoptimizing "
@@ -1140,9 +1240,8 @@ uintptr_t Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimizati
// Do the pop.
std::deque<instrumentation::InstrumentationStackFrame>* stack = self->GetInstrumentationStack();
CHECK_GT(stack->size(), 0U);
+ size_t idx = stack->size();
InstrumentationStackFrame instrumentation_frame = stack->front();
- // TODO: bring back CheckStackDepth(self, instrumentation_frame, 2);
- stack->pop_front();
ArtMethod* method = instrumentation_frame.method_;
if (is_deoptimization) {
@@ -1160,6 +1259,10 @@ uintptr_t Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimizati
uint32_t dex_pc = DexFile::kDexNoIndex;
MethodUnwindEvent(self, instrumentation_frame.this_object_, method, dex_pc);
}
+ // TODO: bring back CheckStackDepth(self, instrumentation_frame, 2);
+ CHECK_EQ(stack->size(), idx);
+ DCHECK(instrumentation_frame.method_ == stack->front().method_);
+ stack->pop_front();
return instrumentation_frame.return_pc_;
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 01071a541f..90b5def9fe 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -36,6 +36,7 @@ namespace mirror {
} // namespace mirror
class ArtField;
class ArtMethod;
+template <typename T> class Handle;
union JValue;
class Thread;
@@ -62,37 +63,70 @@ struct InstrumentationListener {
virtual ~InstrumentationListener() {}
// Call-back for when a method is entered.
- virtual void MethodEntered(Thread* thread, mirror::Object* this_object,
+ virtual void MethodEntered(Thread* thread,
+ Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- // Call-back for when a method is exited.
- virtual void MethodExited(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc,
+ virtual void MethodExited(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ Handle<mirror::Object> return_value)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Call-back for when a method is exited. The implementor should either handle-ize the return
+ // value (if appropriate) or use the alternate MethodExited callback instead if they need to
+ // go through a suspend point.
+ virtual void MethodExited(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
const JValue& return_value)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when a method is popped due to an exception throw. A method will either cause a
// MethodExited call-back or a MethodUnwind call-back when its activation is removed.
- virtual void MethodUnwind(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc)
+ virtual void MethodUnwind(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when the dex pc moves in a method.
- virtual void DexPcMoved(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t new_dex_pc)
+ virtual void DexPcMoved(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t new_dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when we read from a field.
- virtual void FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method,
- uint32_t dex_pc, ArtField* field) = 0;
+ virtual void FieldRead(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field) = 0;
+
+ virtual void FieldWritten(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field,
+ Handle<mirror::Object> field_value)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Call-back for when we write into a field.
- virtual void FieldWritten(Thread* thread, mirror::Object* this_object, ArtMethod* method,
- uint32_t dex_pc, ArtField* field, const JValue& field_value) = 0;
+ virtual void FieldWritten(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field,
+ const JValue& field_value)
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back when an exception is caught.
- virtual void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
+ virtual void ExceptionCaught(Thread* thread,
+ Handle<mirror::Throwable> exception_object)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when we execute a branch.
@@ -104,11 +138,10 @@ struct InstrumentationListener {
// Call-back for when we get an invokevirtual or an invokeinterface.
virtual void InvokeVirtualOrInterface(Thread* thread,
- mirror::Object* this_object,
+ Handle<mirror::Object> this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee)
- REQUIRES(Roles::uninterruptible_)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
@@ -323,8 +356,10 @@ class Instrumentation {
}
// Inform listeners that a method has been exited.
- void MethodExitEvent(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc,
+ void MethodExitEvent(Thread* thread,
+ mirror::Object* this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
const JValue& return_value) const
REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasMethodExitListeners())) {
@@ -397,9 +432,13 @@ class Instrumentation {
REQUIRES_SHARED(Locks::mutator_lock_);
// Called when an instrumented method is exited. Removes the pushed instrumentation frame
- // returning the intended link register. Generates method exit events.
+ // returning the intended link register. Generates method exit events. The gpr_result and
+ // fpr_result arguments point to the locations where the integer/pointer and floating-point
+ // return values of the function are stored. Both pointers must always be valid, but the values
+ // held there are only meaningful when interpreted as the appropriate type for the function
+ // being returned from.
TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc,
- uint64_t gpr_result, uint64_t fpr_result)
+ uint64_t* gpr_result, uint64_t* fpr_result)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Pops an instrumentation frame from the current thread and generate an unwind event.
@@ -465,31 +504,42 @@ class Instrumentation {
// exclusive access to mutator lock which you can't get if the runtime isn't started.
void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS;
- void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc) const
+ void MethodEnterEventImpl(Thread* thread,
+ ObjPtr<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc) const
REQUIRES_SHARED(Locks::mutator_lock_);
- void MethodExitEventImpl(Thread* thread, mirror::Object* this_object,
+ void MethodExitEventImpl(Thread* thread,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* method,
- uint32_t dex_pc, const JValue& return_value) const
+ uint32_t dex_pc,
+ const JValue& return_value) const
REQUIRES_SHARED(Locks::mutator_lock_);
- void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc) const
+ void DexPcMovedEventImpl(Thread* thread,
+ ObjPtr<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc) const
REQUIRES_SHARED(Locks::mutator_lock_);
void BranchImpl(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
REQUIRES_SHARED(Locks::mutator_lock_);
void InvokeVirtualOrInterfaceImpl(Thread* thread,
- mirror::Object* this_object,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee) const
REQUIRES_SHARED(Locks::mutator_lock_);
- void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc,
- ArtField* field) const
+ void FieldReadEventImpl(Thread* thread,
+ ObjPtr<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field) const
REQUIRES_SHARED(Locks::mutator_lock_);
- void FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc,
- ArtField* field, const JValue& field_value) const
+ void FieldWriteEventImpl(Thread* thread,
+ ObjPtr<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field,
+ const JValue& field_value) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Read barrier-aware utility functions for accessing deoptimized_methods_
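A compact sketch (illustrative names, not ART's types) of why PopInstrumentationStackFrame now takes the result registers by pointer: when the return value is a reference, the listener callbacks may cross a suspend point where a moving collector relocates the object, so the possibly-updated pointer must be written back into the caller's result slot.

#include <cstdint>

struct Object {};  // opaque stand-in for mirror::Object

// Models "run the method-exit listeners". In ART this can reach a suspend point where a
// moving GC relocates the object; the sketch simply returns the pointer unchanged.
Object* NotifyMethodExitListeners(Object* ref) {
  return ref;
}

void PopFrame(char return_shorty, uint64_t* gpr_result, uint64_t* fpr_result) {
  const bool is_ref = (return_shorty == 'L' || return_shorty == '[');
  if (is_ref) {
    Object* ref = reinterpret_cast<Object*>(static_cast<uintptr_t>(*gpr_result));
    ref = NotifyMethodExitListeners(ref);
    // Write the (possibly moved) reference back so the caller sees a valid pointer.
    *gpr_result = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ref));
  }
  (void)fpr_result;  // floating-point results are never references; nothing to fix up
}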
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 7f9f04f435..2a601c9cf2 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -23,11 +23,13 @@
#include "dex_file.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle_scope-inl.h"
+#include "jni_internal.h"
#include "jvalue.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "thread-inl.h"
+#include "well_known_classes.h"
namespace art {
namespace instrumentation {
@@ -35,16 +37,22 @@ namespace instrumentation {
class TestInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
public:
TestInstrumentationListener()
- : received_method_enter_event(false), received_method_exit_event(false),
- received_method_unwind_event(false), received_dex_pc_moved_event(false),
- received_field_read_event(false), received_field_written_event(false),
- received_exception_caught_event(false), received_branch_event(false),
+ : received_method_enter_event(false),
+ received_method_exit_event(false),
+ received_method_exit_object_event(false),
+ received_method_unwind_event(false),
+ received_dex_pc_moved_event(false),
+ received_field_read_event(false),
+ received_field_written_event(false),
+ received_field_written_object_event(false),
+ received_exception_caught_event(false),
+ received_branch_event(false),
received_invoke_virtual_or_interface_event(false) {}
virtual ~TestInstrumentationListener() {}
void MethodEntered(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Object* this_object ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -52,7 +60,16 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
}
void MethodExited(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Object* this_object ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> return_value ATTRIBUTE_UNUSED)
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ received_method_exit_object_event = true;
+ }
+
+ void MethodExited(Thread* thread ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const JValue& return_value ATTRIBUTE_UNUSED)
@@ -61,7 +78,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
}
void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Object* this_object ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -69,7 +86,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
}
void DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Object* this_object ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t new_dex_pc ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -77,7 +94,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
}
void FieldRead(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Object* this_object ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED)
@@ -86,7 +103,17 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
}
void FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Object* this_object ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ ArtField* field ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> field_value ATTRIBUTE_UNUSED)
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ received_field_written_object_event = true;
+ }
+
+ void FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED,
@@ -96,7 +123,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
}
void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
+ Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_exception_caught_event = true;
}
@@ -110,7 +137,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
}
void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Object* this_object ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* caller ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtMethod* callee ATTRIBUTE_UNUSED)
@@ -121,10 +148,12 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
void Reset() {
received_method_enter_event = false;
received_method_exit_event = false;
+ received_method_exit_object_event = false;
received_method_unwind_event = false;
received_dex_pc_moved_event = false;
received_field_read_event = false;
received_field_written_event = false;
+ received_field_written_object_event = false;
received_exception_caught_event = false;
received_branch_event = false;
received_invoke_virtual_or_interface_event = false;
@@ -132,10 +161,12 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
bool received_method_enter_event;
bool received_method_exit_event;
+ bool received_method_exit_object_event;
bool received_method_unwind_event;
bool received_dex_pc_moved_event;
bool received_field_read_event;
bool received_field_written_event;
+ bool received_field_written_object_event;
bool received_exception_caught_event;
bool received_branch_event;
bool received_invoke_virtual_or_interface_event;
@@ -171,6 +202,13 @@ class InstrumentationTest : public CommonRuntimeTest {
}
void TestEvent(uint32_t instrumentation_event) {
+ TestEvent(instrumentation_event, nullptr, nullptr, false);
+ }
+
+ void TestEvent(uint32_t instrumentation_event,
+ ArtMethod* event_method,
+ ArtField* event_field,
+ bool with_object) {
ScopedObjectAccess soa(Thread::Current());
instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
TestInstrumentationListener listener;
@@ -180,15 +218,20 @@ class InstrumentationTest : public CommonRuntimeTest {
instr->AddListener(&listener, instrumentation_event);
}
- ArtMethod* const event_method = nullptr;
mirror::Object* const event_obj = nullptr;
const uint32_t event_dex_pc = 0;
// Check the listener is registered and is notified of the event.
EXPECT_TRUE(HasEventListener(instr, instrumentation_event));
- EXPECT_FALSE(DidListenerReceiveEvent(listener, instrumentation_event));
- ReportEvent(instr, instrumentation_event, soa.Self(), event_method, event_obj, event_dex_pc);
- EXPECT_TRUE(DidListenerReceiveEvent(listener, instrumentation_event));
+ EXPECT_FALSE(DidListenerReceiveEvent(listener, instrumentation_event, with_object));
+ ReportEvent(instr,
+ instrumentation_event,
+ soa.Self(),
+ event_method,
+ event_obj,
+ event_field,
+ event_dex_pc);
+ EXPECT_TRUE(DidListenerReceiveEvent(listener, instrumentation_event, with_object));
listener.Reset();
{
@@ -199,9 +242,15 @@ class InstrumentationTest : public CommonRuntimeTest {
// Check the listener is not registered and is not notified of the event.
EXPECT_FALSE(HasEventListener(instr, instrumentation_event));
- EXPECT_FALSE(DidListenerReceiveEvent(listener, instrumentation_event));
- ReportEvent(instr, instrumentation_event, soa.Self(), event_method, event_obj, event_dex_pc);
- EXPECT_FALSE(DidListenerReceiveEvent(listener, instrumentation_event));
+ EXPECT_FALSE(DidListenerReceiveEvent(listener, instrumentation_event, with_object));
+ ReportEvent(instr,
+ instrumentation_event,
+ soa.Self(),
+ event_method,
+ event_obj,
+ event_field,
+ event_dex_pc);
+ EXPECT_FALSE(DidListenerReceiveEvent(listener, instrumentation_event, with_object));
}
void DeoptimizeMethod(Thread* self, ArtMethod* method, bool enable_deoptimization)
@@ -317,8 +366,12 @@ class InstrumentationTest : public CommonRuntimeTest {
}
}
- static void ReportEvent(const instrumentation::Instrumentation* instr, uint32_t event_type,
- Thread* self, ArtMethod* method, mirror::Object* obj,
+ static void ReportEvent(const instrumentation::Instrumentation* instr,
+ uint32_t event_type,
+ Thread* self,
+ ArtMethod* method,
+ mirror::Object* obj,
+ ArtField* field,
uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) {
switch (event_type) {
@@ -337,11 +390,11 @@ class InstrumentationTest : public CommonRuntimeTest {
instr->DexPcMovedEvent(self, obj, method, dex_pc);
break;
case instrumentation::Instrumentation::kFieldRead:
- instr->FieldReadEvent(self, obj, method, dex_pc, nullptr);
+ instr->FieldReadEvent(self, obj, method, dex_pc, field);
break;
case instrumentation::Instrumentation::kFieldWritten: {
JValue value;
- instr->FieldWriteEvent(self, obj, method, dex_pc, nullptr, value);
+ instr->FieldWriteEvent(self, obj, method, dex_pc, field, value);
break;
}
case instrumentation::Instrumentation::kExceptionCaught: {
@@ -364,12 +417,14 @@ class InstrumentationTest : public CommonRuntimeTest {
}
static bool DidListenerReceiveEvent(const TestInstrumentationListener& listener,
- uint32_t event_type) {
+ uint32_t event_type,
+ bool with_object) {
switch (event_type) {
case instrumentation::Instrumentation::kMethodEntered:
return listener.received_method_enter_event;
case instrumentation::Instrumentation::kMethodExited:
- return listener.received_method_exit_event;
+ return (!with_object && listener.received_method_exit_event) ||
+ (with_object && listener.received_method_exit_object_event);
case instrumentation::Instrumentation::kMethodUnwind:
return listener.received_method_unwind_event;
case instrumentation::Instrumentation::kDexPcMoved:
@@ -377,7 +432,8 @@ class InstrumentationTest : public CommonRuntimeTest {
case instrumentation::Instrumentation::kFieldRead:
return listener.received_field_read_event;
case instrumentation::Instrumentation::kFieldWritten:
- return listener.received_field_written_event;
+ return (!with_object && listener.received_field_written_event) ||
+ (with_object && listener.received_field_written_object_event);
case instrumentation::Instrumentation::kExceptionCaught:
return listener.received_exception_caught_event;
case instrumentation::Instrumentation::kBranch:
@@ -419,8 +475,42 @@ TEST_F(InstrumentationTest, MethodEntryEvent) {
TestEvent(instrumentation::Instrumentation::kMethodEntered);
}
-TEST_F(InstrumentationTest, MethodExitEvent) {
- TestEvent(instrumentation::Instrumentation::kMethodExited);
+TEST_F(InstrumentationTest, MethodExitObjectEvent) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject class_loader = LoadDex("Instrumentation");
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
+ mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
+ ASSERT_TRUE(klass != nullptr);
+ ArtMethod* method = klass->FindDeclaredDirectMethod("returnReference",
+ "()Ljava/lang/Object;",
+ kRuntimePointerSize);
+ ASSERT_TRUE(method != nullptr);
+ TestEvent(instrumentation::Instrumentation::kMethodExited,
+ /*event_method*/ method,
+ /*event_field*/ nullptr,
+ /*with_object*/ true);
+}
+
+TEST_F(InstrumentationTest, MethodExitPrimEvent) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject class_loader = LoadDex("Instrumentation");
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
+ mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
+ ASSERT_TRUE(klass != nullptr);
+ ArtMethod* method = klass->FindDeclaredDirectMethod("returnPrimitive",
+ "()I",
+ kRuntimePointerSize);
+ ASSERT_TRUE(method != nullptr);
+ TestEvent(instrumentation::Instrumentation::kMethodExited,
+ /*event_method*/ method,
+ /*event_field*/ nullptr,
+ /*with_object*/ false);
}
TEST_F(InstrumentationTest, MethodUnwindEvent) {
@@ -435,8 +525,40 @@ TEST_F(InstrumentationTest, FieldReadEvent) {
TestEvent(instrumentation::Instrumentation::kFieldRead);
}
-TEST_F(InstrumentationTest, FieldWriteEvent) {
- TestEvent(instrumentation::Instrumentation::kFieldWritten);
+TEST_F(InstrumentationTest, FieldWriteObjectEvent) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject class_loader = LoadDex("Instrumentation");
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
+ mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
+ ASSERT_TRUE(klass != nullptr);
+ ArtField* field = klass->FindDeclaredStaticField("referenceField", "Ljava/lang/Object;");
+ ASSERT_TRUE(field != nullptr);
+
+ TestEvent(instrumentation::Instrumentation::kFieldWritten,
+ /*event_method*/ nullptr,
+ /*event_field*/ field,
+ /*with_object*/ true);
+}
+
+TEST_F(InstrumentationTest, FieldWritePrimEvent) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject class_loader = LoadDex("Instrumentation");
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
+ mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
+ ASSERT_TRUE(klass != nullptr);
+ ArtField* field = klass->FindDeclaredStaticField("primitiveField", "I");
+ ASSERT_TRUE(field != nullptr);
+
+ TestEvent(instrumentation::Instrumentation::kFieldWritten,
+ /*event_method*/ nullptr,
+ /*event_field*/ field,
+ /*with_object*/ false);
}
TEST_F(InstrumentationTest, ExceptionCaughtEvent) {
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 3e1914604d..2bac2312bf 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -27,6 +27,8 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/string-inl.h"
+#include "object_callbacks.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "utf.h"
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 68454fbfd4..2ec03be670 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -25,10 +25,11 @@
#include "base/mutex.h"
#include "gc_root.h"
#include "gc/weak_root_state.h"
-#include "object_callbacks.h"
namespace art {
+class IsMarkedVisitor;
+
namespace gc {
namespace space {
class ImageSpace;
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index 311515c7ed..bb27b34cf5 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -23,6 +23,7 @@
#include "handle_scope-inl.h"
#include "mirror/string.h"
#include "scoped_thread_state_change-inl.h"
+#include "utf.h"
namespace art {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index d2f5232de1..85cf73b044 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -22,15 +22,16 @@
#include "interpreter_common.h"
#include "interpreter_mterp_impl.h"
#include "interpreter_switch_impl.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "jvalue-inl.h"
#include "mirror/string-inl.h"
+#include "mterp/mterp.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
#include "stack.h"
+#include "thread-inl.h"
#include "unstarted_runtime.h"
-#include "mterp/mterp.h"
-#include "jit/jit.h"
-#include "jit/jit_code_cache.h"
namespace art {
namespace interpreter {
@@ -253,6 +254,13 @@ static inline JValue Execute(
if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
method, 0);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ instrumentation->MethodUnwindEvent(self,
+ shadow_frame.GetThisObject(code_item->ins_size_),
+ method,
+ 0);
+ return JValue();
+ }
}
if (!stay_in_interpreter) {
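// Editorial sketch, not part of the patch: the Execute() hunk above boils down to the
// following guard (this_object stands for shadow_frame.GetThisObject(code_item->ins_size_)).
// If a MethodEntered listener leaves an exception pending, report the unwind and bail out
// before running any bytecode.
if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
  instrumentation->MethodEnterEvent(self, this_object, method, /*dex_pc*/ 0);
  if (UNLIKELY(self->IsExceptionPending())) {
    instrumentation->MethodUnwindEvent(self, this_object, method, /*dex_pc*/ 0);
    return JValue();  // Zeroed result; the listener's exception stays pending.
  }
}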
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 084cb4218f..d06ac23d3c 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -32,6 +32,7 @@
#include "reflection.h"
#include "reflection-inl.h"
#include "stack.h"
+#include "thread-inl.h"
#include "well_known_classes.h"
namespace art {
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index b191dd79a1..45788e7617 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -26,13 +26,13 @@
namespace art {
namespace interpreter {
-#define HANDLE_PENDING_EXCEPTION() \
+#define HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(instr) \
do { \
DCHECK(self->IsExceptionPending()); \
self->AllowThreadSuspension(); \
uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame, \
inst->GetDexPc(insns), \
- instrumentation); \
+ instr); \
if (found_dex_pc == DexFile::kDexNoIndex) { \
/* Structured locking is to be enforced for abnormal termination, too. */ \
DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame); \
@@ -47,6 +47,8 @@ namespace interpreter {
} \
} while (false)
+#define HANDLE_PENDING_EXCEPTION() HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(instrumentation)
+
#define POSSIBLY_HANDLE_PENDING_EXCEPTION(_is_exception_pending, _next_function) \
do { \
if (UNLIKELY(_is_exception_pending)) { \
@@ -218,6 +220,10 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ // Don't send another method exit event.
+ HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(nullptr);
+ }
}
if (interpret_one_instruction) {
/* Signal mterp to return to caller */
@@ -235,6 +241,10 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ // Don't send another method exit event.
+ HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(nullptr);
+ }
}
if (interpret_one_instruction) {
/* Signal mterp to return to caller */
@@ -253,6 +263,10 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ // Don't send another method exit event.
+ HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(nullptr);
+ }
}
if (interpret_one_instruction) {
/* Signal mterp to return to caller */
@@ -270,6 +284,10 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ // Don't send another method exit event.
+ HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(nullptr);
+ }
}
if (interpret_one_instruction) {
/* Signal mterp to return to caller */
@@ -307,6 +325,12 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ // Don't send another method exit event.
+ HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(nullptr);
+ }
+ // Re-load since it might have moved during the MethodExitEvent.
+ result.SetL(shadow_frame.GetVRegReference(ref_idx));
}
if (interpret_one_instruction) {
/* Signal mterp to return to caller */
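// Editorial sketch, not part of the patch: the new macro parameter lets a RETURN opcode
// re-enter exception handling without an Instrumentation object when the MethodExitEvent
// listener itself threw; a null pointer means the exception path issues no further
// instrumentation callbacks, which is what the "Don't send another method exit event"
// comments above refer to.
instrumentation->MethodExitEvent(self, this_object, method, dex_pc, result);
if (UNLIKELY(self->IsExceptionPending())) {
  HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(nullptr);  // suppress duplicate events
}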
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 96934bc0ca..152cce4c60 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -50,7 +50,7 @@
#include "mirror/string-inl.h"
#include "nth_caller_visitor.h"
#include "reflection.h"
-#include "thread.h"
+#include "thread-inl.h"
#include "transaction.h"
#include "well_known_classes.h"
#include "zip_archive.h"
diff --git a/runtime/invoke_type.h b/runtime/invoke_type.h
index de07c72ef0..a003f7fe9e 100644
--- a/runtime/invoke_type.h
+++ b/runtime/invoke_type.h
@@ -21,7 +21,7 @@
namespace art {
-enum InvokeType {
+enum InvokeType : uint32_t {
kStatic, // <<static>>
kDirect, // <<direct>>
kVirtual, // <<virtual>>
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index bd0f8420aa..2ad3b29f17 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "jni_internal.h"
+#include "java_vm_ext.h"
#include <dlfcn.h>
@@ -22,7 +22,7 @@
#include "art_method-inl.h"
#include "base/dumpable.h"
-#include "base/mutex.h"
+#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "check_jni.h"
@@ -30,11 +30,12 @@
#include "fault_handler.h"
#include "gc_root-inl.h"
#include "indirect_reference_table-inl.h"
+#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "nativebridge/native_bridge.h"
#include "nativeloader/native_loader.h"
-#include "java_vm_ext.h"
+#include "object_callbacks.h"
#include "parsed_options.h"
#include "runtime-inl.h"
#include "runtime_options.h"
@@ -145,19 +146,24 @@ class SharedLibrary {
return needs_native_bridge_;
}
- void* FindSymbol(const std::string& symbol_name, const char* shorty = nullptr) {
+ // No mutator lock since dlsym may block for a while if another thread is doing dlopen.
+ void* FindSymbol(const std::string& symbol_name, const char* shorty = nullptr)
+ REQUIRES(!Locks::mutator_lock_) {
return NeedsNativeBridge()
? FindSymbolWithNativeBridge(symbol_name.c_str(), shorty)
: FindSymbolWithoutNativeBridge(symbol_name.c_str());
}
- void* FindSymbolWithoutNativeBridge(const std::string& symbol_name) {
+ // No mutator lock since dlsym may block for a while if another thread is doing dlopen.
+ void* FindSymbolWithoutNativeBridge(const std::string& symbol_name)
+ REQUIRES(!Locks::mutator_lock_) {
CHECK(!NeedsNativeBridge());
return dlsym(handle_, symbol_name.c_str());
}
- void* FindSymbolWithNativeBridge(const std::string& symbol_name, const char* shorty) {
+ void* FindSymbolWithNativeBridge(const std::string& symbol_name, const char* shorty)
+ REQUIRES(!Locks::mutator_lock_) {
CHECK(NeedsNativeBridge());
uint32_t len = 0;
@@ -236,8 +242,8 @@ class Libraries {
}
// See section 11.3 "Linking Native Methods" of the JNI spec.
- void* FindNativeMethod(ArtMethod* m, std::string& detail)
- REQUIRES(Locks::jni_libraries_lock_)
+ void* FindNativeMethod(Thread* self, ArtMethod* m, std::string& detail)
+ REQUIRES(!Locks::jni_libraries_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
std::string jni_short_name(m->JniShortName());
std::string jni_long_name(m->JniLongName());
@@ -246,6 +252,34 @@ class Libraries {
void* const declaring_class_loader_allocator =
Runtime::Current()->GetClassLinker()->GetAllocatorForClassLoader(declaring_class_loader);
CHECK(declaring_class_loader_allocator != nullptr);
+ // TODO: Avoid calling GetShorty here to prevent dirtying dex pages?
+ const char* shorty = m->GetShorty();
+ {
+ // Go to suspended since dlsym may block for a long time if other threads are using dlopen.
+ ScopedThreadSuspension sts(self, kNative);
+ void* native_code = FindNativeMethodInternal(self,
+ declaring_class_loader_allocator,
+ shorty,
+ jni_short_name,
+ jni_long_name);
+ if (native_code != nullptr) {
+ return native_code;
+ }
+ }
+ detail += "No implementation found for ";
+ detail += m->PrettyMethod();
+ detail += " (tried " + jni_short_name + " and " + jni_long_name + ")";
+ return nullptr;
+ }
+
+ void* FindNativeMethodInternal(Thread* self,
+ void* declaring_class_loader_allocator,
+ const char* shorty,
+ const std::string& jni_short_name,
+ const std::string& jni_long_name)
+ REQUIRES(!Locks::jni_libraries_lock_)
+ REQUIRES(!Locks::mutator_lock_) {
+ MutexLock mu(self, *Locks::jni_libraries_lock_);
for (const auto& lib : libraries_) {
SharedLibrary* const library = lib.second;
// Use the allocator address for class loader equality to avoid unnecessary weak root decode.
@@ -254,22 +288,17 @@ class Libraries {
continue;
}
// Try the short name then the long name...
- const char* shorty = library->NeedsNativeBridge()
- ? m->GetShorty()
- : nullptr;
- void* fn = library->FindSymbol(jni_short_name, shorty);
+ const char* arg_shorty = library->NeedsNativeBridge() ? shorty : nullptr;
+ void* fn = library->FindSymbol(jni_short_name, arg_shorty);
if (fn == nullptr) {
- fn = library->FindSymbol(jni_long_name, shorty);
+ fn = library->FindSymbol(jni_long_name, arg_shorty);
}
if (fn != nullptr) {
- VLOG(jni) << "[Found native code for " << m->PrettyMethod()
+ VLOG(jni) << "[Found native code for " << jni_long_name
<< " in \"" << library->GetPath() << "\"]";
return fn;
}
}
- detail += "No implementation found for ";
- detail += m->PrettyMethod();
- detail += " (tried " + jni_short_name + " and " + jni_long_name + ")";
return nullptr;
}
@@ -277,18 +306,17 @@ class Libraries {
void UnloadNativeLibraries()
REQUIRES(!Locks::jni_libraries_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedObjectAccessUnchecked soa(Thread::Current());
+ Thread* const self = Thread::Current();
std::vector<SharedLibrary*> unload_libraries;
{
- MutexLock mu(soa.Self(), *Locks::jni_libraries_lock_);
+ MutexLock mu(self, *Locks::jni_libraries_lock_);
for (auto it = libraries_.begin(); it != libraries_.end(); ) {
SharedLibrary* const library = it->second;
// If class loader is null then it was unloaded, call JNI_OnUnload.
const jweak class_loader = library->GetClassLoader();
// If class_loader is a null jobject then it is the boot class loader. We should not unload
// the native libraries of the boot class loader.
- if (class_loader != nullptr &&
- soa.Self()->IsJWeakCleared(class_loader)) {
+ if (class_loader != nullptr && self->IsJWeakCleared(class_loader)) {
unload_libraries.push_back(library);
it = libraries_.erase(it);
} else {
@@ -296,6 +324,7 @@ class Libraries {
}
}
}
+ ScopedThreadSuspension sts(self, kNative);
// Do this without holding the jni libraries lock to prevent possible deadlocks.
typedef void (*JNI_OnUnloadFn)(JavaVM*, void*);
for (auto library : unload_libraries) {
@@ -305,7 +334,7 @@ class Libraries {
} else {
VLOG(jni) << "[JNI_OnUnload found for \"" << library->GetPath() << "\"]: Calling...";
JNI_OnUnloadFn jni_on_unload = reinterpret_cast<JNI_OnUnloadFn>(sym);
- jni_on_unload(soa.Vm(), nullptr);
+ jni_on_unload(self->GetJniEnv()->vm, nullptr);
}
delete library;
}
@@ -956,12 +985,8 @@ void* JavaVMExt::FindCodeForNativeMethod(ArtMethod* m) {
// If this is a static method, it could be called before the class has been initialized.
CHECK(c->IsInitializing()) << c->GetStatus() << " " << m->PrettyMethod();
std::string detail;
- void* native_method;
- Thread* self = Thread::Current();
- {
- MutexLock mu(self, *Locks::jni_libraries_lock_);
- native_method = libraries_->FindNativeMethod(m, detail);
- }
+ Thread* const self = Thread::Current();
+ void* native_method = libraries_->FindNativeMethod(self, m, detail);
if (native_method == nullptr) {
// Lookup JNI native methods from native TI Agent libraries. See runtime/ti/agent.h for more
// information. Agent libraries are searched for native methods after all jni libraries.
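// Editorial sketch, not part of the patch (hypothetical helper name): the FindSymbol /
// FindNativeMethodInternal changes above follow this pattern of transitioning to kNative
// before dlsym(), since dlsym can block on the dynamic linker while another thread is
// inside dlopen(), and blocking while runnable would delay thread suspension and GC.
void* LookupSymbolSuspended(Thread* self, void* handle, const char* name)
    REQUIRES(!Locks::mutator_lock_) {
  ScopedThreadSuspension sts(self, kNative);  // leave the runnable state for the blocking call
  return dlsym(handle, name);
}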
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 7374920f2b..50aabdcdf5 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -32,6 +32,7 @@ namespace mirror {
} // namespace mirror
class ArtMethod;
+class IsMarkedVisitor;
class Libraries;
class ParsedOptions;
class Runtime;
diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc
index 0aa04c10ca..ede4f9edb7 100644
--- a/runtime/jdwp/jdwp_adb.cc
+++ b/runtime/jdwp/jdwp_adb.cc
@@ -24,7 +24,7 @@
#include "base/logging.h"
#include "jdwp/jdwp_priv.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#ifdef ART_TARGET_ANDROID
#include "cutils/sockets.h"
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 96249f9b58..4ab3d69e35 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -500,8 +500,8 @@ static bool ModsMatch(JdwpEvent* pEvent, const ModBasket& basket)
}
break;
case MK_CONDITIONAL:
- CHECK(false); // should not be getting these
- break;
+ LOG(FATAL) << "Unexpected MK_CONDITIONAL"; // should not be getting these
+ UNREACHABLE();
case MK_THREAD_ONLY:
if (!Dbg::MatchThread(pMod->threadOnly.threadId, basket.thread)) {
return false;
diff --git a/runtime/jdwp/jdwp_expand_buf.cc b/runtime/jdwp/jdwp_expand_buf.cc
index 961dd369c8..f0b8c918dc 100644
--- a/runtime/jdwp/jdwp_expand_buf.cc
+++ b/runtime/jdwp/jdwp_expand_buf.cc
@@ -152,7 +152,9 @@ void expandBufAdd8BE(ExpandBuf* pBuf, uint64_t val) {
static void SetUtf8String(uint8_t* buf, const char* str, size_t strLen) {
Set4BE(buf, strLen);
- memcpy(buf + sizeof(uint32_t), str, strLen);
+ if (str != nullptr) {
+ memcpy(buf + sizeof(uint32_t), str, strLen);
+ }
}
/*
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index e8a9904dc6..618332b7ef 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -33,7 +33,7 @@
#include "jdwp/jdwp_priv.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils.h"
namespace art {
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index ae0004426d..135d9b1f51 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -18,7 +18,7 @@
#include "base/logging.h"
#include "base/mutex.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread.h"
#include <unordered_map>
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ae474da7c0..969a5708c4 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -32,7 +32,9 @@
#include "profile_saver.h"
#include "runtime.h"
#include "runtime_options.h"
+#include "stack.h"
#include "stack_map.h"
+#include "thread-inl.h"
#include "thread_list.h"
#include "utils.h"
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 75f9b0ac76..f898d416c1 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -23,7 +23,6 @@
#include "base/timing_logger.h"
#include "jit/profile_saver_options.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
#include "profile_compilation_info.h"
#include "thread_pool.h"
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index e173ffe204..cd386c06fa 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -29,12 +29,16 @@
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "gc/scoped_gc_critical_section.h"
+#include "intern_table.h"
#include "jit/jit.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
+#include "oat_quick_method_header.h"
+#include "object_callbacks.h"
#include "scoped_thread_state_change-inl.h"
+#include "stack.h"
#include "thread_list.h"
namespace art {
@@ -532,7 +536,10 @@ static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
}
// We reset the counter to 1 so that the profile knows that the method was executed at least once.
// This is required for layout purposes.
- method->SetCounter(1);
+ // We also need to make sure we'll pass the warmup threshold again, so we set to 0 if
+ // the warmup threshold is 1.
+ uint16_t jit_warmup_threshold = Runtime::Current()->GetJITOptions()->GetWarmupThreshold();
+ method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
}
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
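// Editorial worked example, not part of the patch: a standalone check of the clamp used
// in ClearMethodCounter above. The reset value always stays below the warmup threshold,
// so the method can cross the threshold (and be re-profiled) again after the reset.
#include <algorithm>
#include <cassert>
#include <cstdint>
int main() {
  auto reset_value = [](uint16_t warmup_threshold) {
    return std::min(warmup_threshold - 1, 1);  // same expression as the patch
  };
  assert(reset_value(1) == 0);      // threshold 1: counter must drop to 0
  assert(reset_value(2) == 1);      // threshold >= 2: 1 still records "ran at least once"
  assert(reset_value(10000) == 1);
  return 0;
}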
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 811b3c7d32..daa1d616a6 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -29,7 +29,6 @@
#include "jni.h"
#include "method_reference.h"
#include "oat_file.h"
-#include "object_callbacks.h"
#include "profile_compilation_info.h"
#include "safe_map.h"
#include "thread_pool.h"
@@ -39,6 +38,8 @@ namespace art {
class ArtMethod;
class LinearAlloc;
class InlineCache;
+class IsMarkedVisitor;
+class OatQuickMethodHeader;
class ProfilingInfo;
namespace jit {
diff --git a/runtime/jit/profile_compilation_info-inl.h b/runtime/jit/profile_compilation_info-inl.h
new file mode 100644
index 0000000000..b71a95ed9b
--- /dev/null
+++ b/runtime/jit/profile_compilation_info-inl.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_INL_H_
+#define ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_INL_H_
+
+#include "profile_compilation_info.h"
+
+namespace art {
+
+template <class Iterator>
+inline bool ProfileCompilationInfo::AddMethodsForDex(bool startup,
+ bool hot,
+ const DexFile* dex_file,
+ Iterator index_begin,
+ Iterator index_end) {
+ DexFileData* data = GetOrAddDexFileData(dex_file);
+ if (data == nullptr) {
+ return false;
+ }
+ for (auto it = index_begin; it != index_end; ++it) {
+ DCHECK_LT(*it, data->num_method_ids);
+ data->AddSampledMethod(startup, *it);
+ if (hot) {
+ data->FindOrAddMethod(*it);
+ }
+ }
+ return true;
+}
+
+template <class Iterator>
+inline bool ProfileCompilationInfo::AddClassesForDex(const DexFile* dex_file,
+ Iterator index_begin,
+ Iterator index_end) {
+ DexFileData* data = GetOrAddDexFileData(dex_file);
+ if (data == nullptr) {
+ return false;
+ }
+ data->class_set.insert(index_begin, index_end);
+ return true;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_INL_H_
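// Editorial usage sketch, not part of the patch (the method/type indices are placeholders
// and the DexFile comes from elsewhere): both bulk helpers resolve the DexFileData once
// and then iterate, which is their advantage over per-index AddMethodIndex/AddClassIndex.
bool AddStartupSamples(ProfileCompilationInfo* info, const DexFile* dex_file) {
  std::vector<uint16_t> startup_methods = {1, 5, 42};         // placeholder method ids
  std::vector<dex::TypeIndex> classes = {dex::TypeIndex(3)};  // placeholder type id
  return info->AddMethodsForDex(/*startup*/ true, /*hot*/ false, dex_file,
                                startup_methods.begin(), startup_methods.end()) &&
         info->AddClassesForDex(dex_file, classes.begin(), classes.end());
}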
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 86c15e6227..a67fb38e05 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -47,9 +47,9 @@
namespace art {
const uint8_t ProfileCompilationInfo::kProfileMagic[] = { 'p', 'r', 'o', '\0' };
-// Last profile version: Instead of method index, put the difference with the last
-// method's index.
-const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '7', '\0' };
+// Last profile version: Move startup methods to use a bitmap. Also add support for post-startup
+// methods.
+const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '8', '\0' };
static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
@@ -132,6 +132,21 @@ std::string ProfileCompilationInfo::GetProfileDexFileKey(const std::string& dex_
}
}
+bool ProfileCompilationInfo::AddSampledMethod(bool startup,
+ const std::string& dex_location,
+ uint32_t checksum,
+ uint16_t method_idx,
+ uint32_t num_method_ids) {
+ DexFileData* data = GetOrAddDexFileData(GetProfileDexFileKey(dex_location),
+ checksum,
+ num_method_ids);
+ if (data == nullptr) {
+ return false;
+ }
+ data->AddSampledMethod(startup, method_idx);
+ return true;
+}
+
bool ProfileCompilationInfo::AddMethodsAndClasses(
const std::vector<ProfileMethodInfo>& methods,
const std::set<DexCacheResolvedClasses>& resolved_classes) {
@@ -252,15 +267,18 @@ static void AddUintToBuffer(std::vector<uint8_t>* buffer, T value) {
static constexpr size_t kLineHeaderSize =
2 * sizeof(uint16_t) + // class_set.size + dex_location.size
- 2 * sizeof(uint32_t); // method_map.size + checksum
+ 3 * sizeof(uint32_t); // method_map.size + checksum + num_method_ids
/**
* Serialization format:
* magic,version,number_of_dex_files,uncompressed_size_of_zipped_data,compressed_data_size,
- * zipped[dex_location1,number_of_classes1,methods_region_size,dex_location_checksum1, \
+ * zipped[dex_location1,number_of_classes1,methods_region_size,dex_location_checksum1
+ * num_method_ids,
* method_encoding_11,method_encoding_12...,class_id1,class_id2...
- * dex_location2,number_of_classes2,methods_region_size,dex_location_checksum2, \
+ * startup/post startup bitmap,
+ * dex_location2,number_of_classes2,methods_region_size,dex_location_checksum2, num_method_ids,
* method_encoding_21,method_encoding_22...,,class_id1,class_id2...
+ * startup/post startup bitmap,
* .....]
* The method_encoding is:
* method_id,number_of_inline_caches,inline_cache1,inline_cache2...
@@ -297,7 +315,8 @@ bool ProfileCompilationInfo::Save(int fd) {
required_capacity += kLineHeaderSize +
dex_data.profile_key.size() +
sizeof(uint16_t) * dex_data.class_set.size() +
- methods_region_size;
+ methods_region_size +
+ dex_data.bitmap_storage.size();
}
if (required_capacity > kProfileSizeErrorThresholdInBytes) {
LOG(ERROR) << "Profile data size exceeds "
@@ -335,10 +354,12 @@ bool ProfileCompilationInfo::Save(int fd) {
DCHECK_LE(dex_data.profile_key.size(), std::numeric_limits<uint16_t>::max());
DCHECK_LE(dex_data.class_set.size(), std::numeric_limits<uint16_t>::max());
+ // Write profile line header.
AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_data.profile_key.size()));
AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_data.class_set.size()));
AddUintToBuffer(&buffer, methods_region_size); // uint32_t
AddUintToBuffer(&buffer, dex_data.checksum); // uint32_t
+ AddUintToBuffer(&buffer, dex_data.num_method_ids); // uint32_t
AddStringToBuffer(&buffer, dex_data.profile_key);
@@ -362,6 +383,10 @@ bool ProfileCompilationInfo::Save(int fd) {
last_class_index = class_id.index_;
AddUintToBuffer(&buffer, diff_with_last_class_index);
}
+
+ buffer.insert(buffer.end(),
+ dex_data.bitmap_storage.begin(),
+ dex_data.bitmap_storage.end());
}
uint32_t output_size = 0;
@@ -476,7 +501,8 @@ void ProfileCompilationInfo::GroupClassesByDex(
ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData(
const std::string& profile_key,
- uint32_t checksum) {
+ uint32_t checksum,
+ uint32_t num_method_ids) {
const auto profile_index_it = profile_key_map_.FindOrAdd(profile_key, profile_key_map_.size());
if (profile_key_map_.size() > std::numeric_limits<uint8_t>::max()) {
// Allow only 255 dex files to be profiled. This allows us to save bytes
@@ -492,7 +518,11 @@ ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData
if (info_.size() <= profile_index) {
// This is a new addition. Add it to the info_ array.
DexFileData* dex_file_data = new (&arena_) DexFileData(
- &arena_, profile_key, checksum, profile_index);
+ &arena_,
+ profile_key,
+ checksum,
+ profile_index,
+ num_method_ids);
info_.push_back(dex_file_data);
}
DexFileData* result = info_[profile_index];
@@ -500,6 +530,7 @@ ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData
// This should always be the case since the cache map is managed by ProfileCompilationInfo.
DCHECK_EQ(profile_key, result->profile_key);
DCHECK_EQ(profile_index, result->profile_index);
+ DCHECK_EQ(num_method_ids, result->num_method_ids);
// Check that the checksum matches.
// This may be different if, for example, the dex file was updated and
@@ -528,7 +559,7 @@ const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexData(
bool ProfileCompilationInfo::AddResolvedClasses(const DexCacheResolvedClasses& classes) {
const std::string dex_location = GetProfileDexFileKey(classes.GetDexLocation());
const uint32_t checksum = classes.GetLocationChecksum();
- DexFileData* const data = GetOrAddDexFileData(dex_location, checksum);
+ DexFileData* const data = GetOrAddDexFileData(dex_location, checksum, classes.NumMethodIds());
if (data == nullptr) {
return false;
}
@@ -538,15 +569,23 @@ bool ProfileCompilationInfo::AddResolvedClasses(const DexCacheResolvedClasses& c
bool ProfileCompilationInfo::AddMethodIndex(const std::string& dex_location,
uint32_t dex_checksum,
- uint16_t method_index) {
- return AddMethod(dex_location, dex_checksum, method_index, OfflineProfileMethodInfo(nullptr));
+ uint16_t method_index,
+ uint32_t num_method_ids) {
+ return AddMethod(dex_location,
+ dex_checksum,
+ method_index,
+ num_method_ids,
+ OfflineProfileMethodInfo(nullptr));
}
bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
uint32_t dex_checksum,
uint16_t method_index,
+ uint32_t num_method_ids,
const OfflineProfileMethodInfo& pmi) {
- DexFileData* const data = GetOrAddDexFileData(GetProfileDexFileKey(dex_location), dex_checksum);
+ DexFileData* const data = GetOrAddDexFileData(GetProfileDexFileKey(dex_location),
+ dex_checksum,
+ num_method_ids);
if (data == nullptr) { // checksum mismatch
return false;
}
@@ -579,7 +618,8 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
const DexReference& dex_ref = pmi.dex_references[class_ref.dex_profile_index];
DexFileData* class_dex_data = GetOrAddDexFileData(
GetProfileDexFileKey(dex_ref.dex_location),
- dex_ref.dex_checksum);
+ dex_ref.dex_checksum,
+ dex_ref.num_method_ids);
if (class_dex_data == nullptr) { // checksum mismatch
return false;
}
@@ -590,9 +630,7 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
}
bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
- DexFileData* const data = GetOrAddDexFileData(
- GetProfileDexFileKey(pmi.dex_file->GetLocation()),
- pmi.dex_file->GetLocationChecksum());
+ DexFileData* const data = GetOrAddDexFileData(pmi.dex_file);
if (data == nullptr) { // checksum mismatch
return false;
}
@@ -604,9 +642,7 @@ bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
continue;
}
for (const TypeReference& class_ref : cache.classes) {
- DexFileData* class_dex_data = GetOrAddDexFileData(
- GetProfileDexFileKey(class_ref.dex_file->GetLocation()),
- class_ref.dex_file->GetLocationChecksum());
+ DexFileData* class_dex_data = GetOrAddDexFileData(class_ref.dex_file);
if (class_dex_data == nullptr) { // checksum mismatch
return false;
}
@@ -623,8 +659,9 @@ bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
bool ProfileCompilationInfo::AddClassIndex(const std::string& dex_location,
uint32_t checksum,
- dex::TypeIndex type_idx) {
- DexFileData* const data = GetOrAddDexFileData(dex_location, checksum);
+ dex::TypeIndex type_idx,
+ uint32_t num_method_ids) {
+ DexFileData* const data = GetOrAddDexFileData(dex_location, checksum, num_method_ids);
if (data == nullptr) {
return false;
}
@@ -694,7 +731,9 @@ bool ProfileCompilationInfo::ReadMethods(SafeBuffer& buffer,
- line_header.method_region_size_bytes;
uint16_t last_method_index = 0;
while (buffer.CountUnreadBytes() > expected_unread_bytes_after_operation) {
- DexFileData* const data = GetOrAddDexFileData(line_header.dex_location, line_header.checksum);
+ DexFileData* const data = GetOrAddDexFileData(line_header.dex_location,
+ line_header.checksum,
+ line_header.num_method_ids);
uint16_t diff_with_last_method_index;
READ_UINT(uint16_t, buffer, diff_with_last_method_index, error);
uint16_t method_index = last_method_index + diff_with_last_method_index;
@@ -729,7 +768,8 @@ bool ProfileCompilationInfo::ReadClasses(SafeBuffer& buffer,
last_class_index = type_index;
if (!AddClassIndex(line_header.dex_location,
line_header.checksum,
- dex::TypeIndex(type_index))) {
+ dex::TypeIndex(type_index),
+ line_header.num_method_ids)) {
return false;
}
}
@@ -863,6 +903,7 @@ bool ProfileCompilationInfo::ReadProfileLineHeaderElements(SafeBuffer& buffer,
READ_UINT(uint16_t, buffer, line_header->class_set_size, error);
READ_UINT(uint32_t, buffer, line_header->method_region_size_bytes, error);
READ_UINT(uint32_t, buffer, line_header->checksum, error);
+ READ_UINT(uint32_t, buffer, line_header->num_method_ids, error);
return true;
}
@@ -902,7 +943,10 @@ ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::ReadProfileLine
uint8_t number_of_dex_files,
const ProfileLineHeader& line_header,
/*out*/std::string* error) {
- if (GetOrAddDexFileData(line_header.dex_location, line_header.checksum) == nullptr) {
+ DexFileData* data = GetOrAddDexFileData(line_header.dex_location,
+ line_header.checksum,
+ line_header.num_method_ids);
+ if (data == nullptr) {
*error = "Error when reading profile file line header: checksum mismatch for "
+ line_header.dex_location;
return kProfileLoadBadData;
@@ -915,6 +959,16 @@ ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::ReadProfileLine
if (!ReadClasses(buffer, line_header, error)) {
return kProfileLoadBadData;
}
+
+ const size_t bytes = data->bitmap_storage.size();
+ if (buffer.CountUnreadBytes() < bytes) {
+ *error += "Profile EOF reached prematurely for ReadProfileHeaderDexLocation";
+ return kProfileLoadBadData;
+ }
+ const uint8_t* base_ptr = buffer.GetCurrentPtr();
+ std::copy_n(base_ptr, bytes, &data->bitmap_storage[0]);
+ buffer.Advance(bytes);
+ // Read method bitmap.
return kProfileLoadSuccess;
}
@@ -1110,7 +1164,8 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
SafeMap<uint8_t, uint8_t> dex_profile_index_remap;
for (const DexFileData* other_dex_data : other.info_) {
const DexFileData* dex_data = GetOrAddDexFileData(other_dex_data->profile_key,
- other_dex_data->checksum);
+ other_dex_data->checksum,
+ other_dex_data->num_method_ids);
if (dex_data == nullptr) {
return false; // Could happen if we exceed the number of allowed dex files.
}
@@ -1147,6 +1202,9 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
}
}
}
+
+ // Merge the bitmaps.
+ dex_data->MergeBitmap(*other_dex_data);
}
return true;
}
@@ -1159,7 +1217,46 @@ static bool ChecksumMatch(const DexFile& dex_file, uint32_t checksum) {
return ChecksumMatch(dex_file.GetLocationChecksum(), checksum);
}
-bool ProfileCompilationInfo::ContainsMethod(const MethodReference& method_ref) const {
+bool ProfileCompilationInfo::IsStartupOrHotMethod(const MethodReference& method_ref) const {
+ return IsStartupOrHotMethod(method_ref.dex_file->GetLocation(),
+ method_ref.dex_file->GetLocationChecksum(),
+ method_ref.dex_method_index);
+}
+
+const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexData(
+ const DexFile* dex_file) const {
+ const DexFileData* dex_data = FindDexData(GetProfileDexFileKey(dex_file->GetLocation()));
+ if (dex_data == nullptr || !ChecksumMatch(*dex_file, dex_data->checksum)) {
+ return nullptr;
+ }
+ return dex_data;
+}
+
+bool ProfileCompilationInfo::IsStartupOrHotMethod(const std::string& dex_location,
+ uint32_t dex_checksum,
+ uint16_t dex_method_index) const {
+ const DexFileData* dex_data = FindDexData(GetProfileDexFileKey(dex_location));
+ if (dex_data == nullptr || !ChecksumMatch(dex_checksum, dex_data->checksum)) {
+ return false;
+ }
+ if (dex_data->HasSampledMethod(/*startup*/ true, dex_method_index)) {
+ return true;
+ }
+ const MethodMap& methods = dex_data->method_map;
+ const auto method_it = methods.find(dex_method_index);
+ return method_it != methods.end();
+}
+
+bool ProfileCompilationInfo::ContainsSampledMethod(bool startup,
+ const MethodReference& method_ref) const {
+ const DexFileData* dex_data = FindDexData(method_ref.dex_file);
+ if (dex_data == nullptr) {
+ return false;
+ }
+ return dex_data->HasSampledMethod(startup, method_ref.dex_method_index);
+}
+
+bool ProfileCompilationInfo::ContainsHotMethod(const MethodReference& method_ref) const {
return FindMethod(method_ref.dex_file->GetLocation(),
method_ref.dex_file->GetLocationChecksum(),
method_ref.dex_method_index) != nullptr;
@@ -1196,6 +1293,7 @@ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> ProfileCompila
for (const DexFileData* dex_data : info_) {
pmi->dex_references[dex_data->profile_index].dex_location = dex_data->profile_key;
pmi->dex_references[dex_data->profile_index].dex_checksum = dex_data->checksum;
+ pmi->dex_references[dex_data->profile_index].num_method_ids = dex_data->num_method_ids;
}
return pmi;
@@ -1277,7 +1375,7 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
}
}
}
- os << "\n\tmethods: ";
+ os << "\n\thot methods: ";
for (const auto& method_it : dex_data->method_map) {
if (dex_file != nullptr) {
os << "\n\t\t" << dex_file->PrettyMethod(method_it.first, true);
@@ -1302,6 +1400,19 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
}
os << "], ";
}
+ bool startup = true;
+ while (true) {
+ os << "\n\t" << (startup ? "startup methods: " : "post startup methods: ");
+ for (uint32_t method_idx = 0; method_idx < dex_data->num_method_ids; ++method_idx) {
+ if (dex_data->HasSampledMethod(startup, method_idx)) {
+ os << method_idx << ", ";
+ }
+ }
+ if (startup == false) {
+ break;
+ }
+ startup = false;
+ }
os << "\n\tclasses: ";
for (const auto class_it : dex_data->class_set) {
if (dex_file != nullptr) {
@@ -1314,9 +1425,12 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
return os.str();
}
-bool ProfileCompilationInfo::GetClassesAndMethods(const DexFile& dex_file,
- std::set<dex::TypeIndex>* class_set,
- std::set<uint16_t>* method_set) const {
+bool ProfileCompilationInfo::GetClassesAndMethods(
+ const DexFile& dex_file,
+ /*out*/std::set<dex::TypeIndex>* class_set,
+ /*out*/std::set<uint16_t>* hot_method_set,
+ /*out*/std::set<uint16_t>* startup_method_set,
+ /*out*/std::set<uint16_t>* post_startup_method_method_set) const {
std::set<std::string> ret;
std::string profile_key = GetProfileDexFileKey(dex_file.GetLocation());
const DexFileData* dex_data = FindDexData(profile_key);
@@ -1324,7 +1438,15 @@ bool ProfileCompilationInfo::GetClassesAndMethods(const DexFile& dex_file,
return false;
}
for (const auto& it : dex_data->method_map) {
- method_set->insert(it.first);
+ hot_method_set->insert(it.first);
+ }
+ for (uint32_t method_idx = 0; method_idx < dex_data->num_method_ids; ++method_idx) {
+ if (dex_data->HasSampledMethod(/*startup*/ true, method_idx)) {
+ startup_method_set->insert(method_idx);
+ }
+ if (dex_data->HasSampledMethod(/*startup*/ false, method_idx)) {
+ post_startup_method_method_set->insert(method_idx);
+ }
}
for (const dex::TypeIndex& type_index : dex_data->class_set) {
class_set->insert(type_index);
@@ -1366,7 +1488,10 @@ std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses(
<< ", profile checksum=" << dex_data->checksum;
return std::set<DexCacheResolvedClasses>();
}
- DexCacheResolvedClasses classes(dex_location, dex_location, dex_data->checksum);
+ DexCacheResolvedClasses classes(dex_location,
+ dex_location,
+ dex_data->checksum,
+ dex_data->num_method_ids);
classes.AddClasses(dex_data->class_set.begin(), dex_data->class_set.end());
ret.insert(classes);
}
@@ -1383,8 +1508,8 @@ bool ProfileCompilationInfo::GenerateTestProfile(int fd,
const std::string base_dex_location = "base.apk";
ProfileCompilationInfo info;
// The limits are defined by the dex specification.
- uint16_t max_method = std::numeric_limits<uint16_t>::max();
- uint16_t max_classes = std::numeric_limits<uint16_t>::max();
+ const uint16_t max_method = std::numeric_limits<uint16_t>::max();
+ const uint16_t max_classes = std::numeric_limits<uint16_t>::max();
uint16_t number_of_methods = max_method * method_ratio / 100;
uint16_t number_of_classes = max_classes * class_ratio / 100;
@@ -1404,7 +1529,7 @@ bool ProfileCompilationInfo::GenerateTestProfile(int fd,
if (m < (number_of_methods / kFavorSplit)) {
method_idx %= kFavorFirstN;
}
- info.AddMethodIndex(profile_key, 0, method_idx);
+ info.AddMethodIndex(profile_key, 0, method_idx, max_method);
}
for (uint16_t c = 0; c < number_of_classes; c++) {
@@ -1412,7 +1537,7 @@ bool ProfileCompilationInfo::GenerateTestProfile(int fd,
if (c < (number_of_classes / kFavorSplit)) {
type_idx %= kFavorFirstN;
}
- info.AddClassIndex(profile_key, 0, dex::TypeIndex(type_idx));
+ info.AddClassIndex(profile_key, 0, dex::TypeIndex(type_idx), max_method);
}
}
return info.Save(fd);
@@ -1431,13 +1556,16 @@ bool ProfileCompilationInfo::GenerateTestProfile(
for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
// Randomly add a class from the dex file (with 50% chance).
if (std::rand() % 2 != 0) {
- info.AddClassIndex(location, checksum, dex::TypeIndex(dex_file->GetClassDef(i).class_idx_));
+ info.AddClassIndex(location,
+ checksum,
+ dex::TypeIndex(dex_file->GetClassDef(i).class_idx_),
+ dex_file->NumMethodIds());
}
}
for (uint32_t i = 0; i < dex_file->NumMethodIds(); ++i) {
// Randomly add a method from the dex file (with 50% chance).
if (std::rand() % 2 != 0) {
- info.AddMethodIndex(location, checksum, i);
+ info.AddMethodIndex(location, checksum, i, dex_file->NumMethodIds());
}
}
}
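// Editorial usage sketch, not part of the patch (names are illustrative): how a caller
// consumes the split sets returned by GetClassesAndMethods. The hot set comes from the
// method_map; the startup/post-startup sets come from the sampled-method bitmaps and are
// meant for layout rather than for deciding what gets compiled.
void DumpProfiledSets(const ProfileCompilationInfo& profile, const DexFile& dex_file) {
  std::set<dex::TypeIndex> classes;
  std::set<uint16_t> hot, startup, post_startup;
  if (profile.GetClassesAndMethods(dex_file, &classes, &hot, &startup, &post_startup)) {
    LOG(INFO) << hot.size() << " hot, " << startup.size() << " startup, "
              << post_startup.size() << " post-startup methods profiled";
  }
}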
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index ca5b28a809..7bcaffb74f 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -23,6 +23,7 @@
#include "atomic.h"
#include "base/arena_object.h"
#include "base/arena_containers.h"
+#include "bit_memory_region.h"
#include "dex_cache_resolved_classes.h"
#include "dex_file.h"
#include "dex_file_types.h"
@@ -54,7 +55,9 @@ struct ProfileMethodInfo {
ProfileMethodInfo(const DexFile* dex,
uint32_t method_index,
const std::vector<ProfileInlineCache>& caches)
- : dex_file(dex), dex_method_index(method_index), inline_caches(caches) {}
+ : dex_file(dex),
+ dex_method_index(method_index),
+ inline_caches(caches) {}
const DexFile* dex_file;
const uint32_t dex_method_index;
@@ -79,13 +82,15 @@ class ProfileCompilationInfo {
// A dex location together with its checksum.
struct DexReference {
- DexReference() : dex_checksum(0) {}
+ DexReference() : dex_checksum(0), num_method_ids(0) {}
- DexReference(const std::string& location, uint32_t checksum)
- : dex_location(location), dex_checksum(checksum) {}
+ DexReference(const std::string& location, uint32_t checksum, uint32_t num_methods)
+ : dex_location(location), dex_checksum(checksum), num_method_ids(num_methods) {}
bool operator==(const DexReference& other) const {
- return dex_checksum == other.dex_checksum && dex_location == other.dex_location;
+ return dex_checksum == other.dex_checksum &&
+ dex_location == other.dex_location &&
+ num_method_ids == other.num_method_ids;
}
bool MatchesDex(const DexFile* dex_file) const {
@@ -95,6 +100,7 @@ class ProfileCompilationInfo {
std::string dex_location;
uint32_t dex_checksum;
+ uint32_t num_method_ids;
};
// Encodes a class reference in the profile.
@@ -191,6 +197,37 @@ class ProfileCompilationInfo {
bool AddMethodsAndClasses(const std::vector<ProfileMethodInfo>& methods,
const std::set<DexCacheResolvedClasses>& resolved_classes);
+ // The iterator yields class type ids (dex::TypeIndex), not class defs.
+ template <class Iterator>
+ bool AddClassesForDex(const DexFile* dex_file, Iterator index_begin, Iterator index_end);
+
+ // Add a method index to the profile (without inline caches).
+ bool AddMethodIndex(const std::string& dex_location,
+ uint32_t checksum,
+ uint16_t method_idx,
+ uint32_t num_method_ids);
+
+ // Add a method to the profile using its online representation (containing runtime structures).
+ bool AddMethod(const ProfileMethodInfo& pmi);
+
+ // Add methods that have samples but are not necessarily hot. These are partitioned into two
+ // possibly intersecting sets: startup and post startup. Sampled methods are used for layout but
+ // not necessarily for determining what gets compiled.
+ bool AddSampledMethod(bool startup,
+ const std::string& dex_location,
+ uint32_t checksum,
+ uint16_t method_idx,
+ uint32_t num_method_ids);
+
+ // Bulk add sampled methods and/or hot methods for a single dex, fast since it only has one
+ // GetOrAddDexFileData call.
+ template <class Iterator>
+ ALWAYS_INLINE bool AddMethodsForDex(bool startup,
+ bool hot,
+ const DexFile* dex_file,
+ Iterator index_begin,
+ Iterator index_end);
+
// Load profile information from the given file descriptor.
// If the current profile is non-empty the load will fail.
bool Load(int fd);
@@ -216,8 +253,18 @@ class ProfileCompilationInfo {
// Return the number of resolved classes that were profiled.
uint32_t GetNumberOfResolvedClasses() const;
- // Return true if the method reference is present in the profiling info.
- bool ContainsMethod(const MethodReference& method_ref) const;
+ // Return true if the method reference is a hot or startup method in the profiling info.
+ bool IsStartupOrHotMethod(const MethodReference& method_ref) const;
+ bool IsStartupOrHotMethod(const std::string& dex_location,
+ uint32_t dex_checksum,
+ uint16_t dex_method_index) const;
+
+ // Return true if the method reference is present and hot in the profiling info.
+ bool ContainsHotMethod(const MethodReference& method_ref) const;
+
+
+ // Return true if the profile contains a startup or post startup method.
+ bool ContainsSampledMethod(bool startup, const MethodReference& method_ref) const;
// Return true if the class's type is present in the profiling info.
bool ContainsClass(const DexFile& dex_file, dex::TypeIndex type_idx) const;
@@ -244,7 +291,9 @@ class ProfileCompilationInfo {
// file is registered and has a matching checksum, false otherwise.
bool GetClassesAndMethods(const DexFile& dex_file,
/*out*/std::set<dex::TypeIndex>* class_set,
- /*out*/std::set<uint16_t>* method_set) const;
+ /*out*/std::set<uint16_t>* hot_method_set,
+ /*out*/std::set<uint16_t>* startup_method_set,
+ /*out*/std::set<uint16_t>* post_startup_method_method_set) const;
// Perform an equality test with the `other` profile information.
bool Equals(const ProfileCompilationInfo& other);
@@ -301,13 +350,43 @@ class ProfileCompilationInfo {
DexFileData(ArenaAllocator* arena,
const std::string& key,
uint32_t location_checksum,
- uint16_t index)
+ uint16_t index,
+ uint32_t num_methods)
: arena_(arena),
profile_key(key),
profile_index(index),
checksum(location_checksum),
method_map(std::less<uint16_t>(), arena->Adapter(kArenaAllocProfile)),
- class_set(std::less<dex::TypeIndex>(), arena->Adapter(kArenaAllocProfile)) {}
+ class_set(std::less<dex::TypeIndex>(), arena->Adapter(kArenaAllocProfile)),
+ num_method_ids(num_methods),
+ bitmap_storage(arena->Adapter(kArenaAllocProfile)) {
+ const size_t num_bits = num_method_ids * kBitmapIndexCount;
+ bitmap_storage.resize(RoundUp(num_bits, kBitsPerByte) / kBitsPerByte);
+ if (!bitmap_storage.empty()) {
+ method_bitmap =
+ BitMemoryRegion(MemoryRegion(&bitmap_storage[0], bitmap_storage.size()), 0, num_bits);
+ }
+ }
+
+ bool operator==(const DexFileData& other) const {
+ return checksum == other.checksum && method_map == other.method_map;
+ }
+
+ // Mark a method as executed at least once.
+ void AddSampledMethod(bool startup, size_t index) {
+ method_bitmap.StoreBit(MethodBitIndex(startup, index), true);
+ }
+
+ bool HasSampledMethod(bool startup, size_t index) const {
+ return method_bitmap.LoadBit(MethodBitIndex(startup, index));
+ }
+
+ void MergeBitmap(const DexFileData& other) {
+ DCHECK_EQ(bitmap_storage.size(), other.bitmap_storage.size());
+ for (size_t i = 0; i < bitmap_storage.size(); ++i) {
+ bitmap_storage[i] |= other.bitmap_storage[i];
+ }
+ }
// The arena used to allocate new inline cache maps.
ArenaAllocator* arena_;
@@ -322,32 +401,57 @@ class ProfileCompilationInfo {
// The classes which have been profiled. Note that these don't necessarily include
// all the classes that can be found in the inline caches reference.
ArenaSet<dex::TypeIndex> class_set;
-
- bool operator==(const DexFileData& other) const {
- return checksum == other.checksum && method_map == other.method_map;
- }
-
// Find the inline caches of the the given method index. Add an empty entry if
// no previous data is found.
InlineCacheMap* FindOrAddMethod(uint16_t method_index);
+ // Number of method ids in the corresponding dex file.
+ uint32_t num_method_ids;
+ ArenaVector<uint8_t> bitmap_storage;
+ BitMemoryRegion method_bitmap;
+
+ private:
+ enum BitmapIndex {
+ kBitmapIndexStartup,
+ kBitmapIndexPostStartup,
+ kBitmapIndexCount,
+ };
+
+ size_t MethodBitIndex(bool startup, size_t index) const {
+ DCHECK_LT(index, num_method_ids);
+ // The format is [startup bitmap][post startup bitmap].
+ // This compresses better than ([startup bit][post startup bit])*.
+
+ return index + (startup
+ ? kBitmapIndexStartup * num_method_ids
+ : kBitmapIndexPostStartup * num_method_ids);
+ }
};
// Return the profile data for the given profile key or null if the dex location
// already exists but has a different checksum
- DexFileData* GetOrAddDexFileData(const std::string& profile_key, uint32_t checksum);
+ DexFileData* GetOrAddDexFileData(const std::string& profile_key,
+ uint32_t checksum,
+ uint32_t num_method_ids);
- // Add a method to the profile using its online representation (containing runtime structures).
- bool AddMethod(const ProfileMethodInfo& pmi);
+ DexFileData* GetOrAddDexFileData(const DexFile* dex_file) {
+ return GetOrAddDexFileData(GetProfileDexFileKey(dex_file->GetLocation()),
+ dex_file->GetLocationChecksum(),
+ dex_file->NumMethodIds());
+ }
// Add a method to the profile using its offline representation.
// This is mostly used to facilitate testing.
bool AddMethod(const std::string& dex_location,
uint32_t dex_checksum,
uint16_t method_index,
+ uint32_t num_method_ids,
const OfflineProfileMethodInfo& pmi);
// Add a class index to the profile.
- bool AddClassIndex(const std::string& dex_location, uint32_t checksum, dex::TypeIndex type_idx);
+ bool AddClassIndex(const std::string& dex_location,
+ uint32_t checksum,
+ dex::TypeIndex type_idx,
+ uint32_t num_method_ids);
// Add all classes from the given dex cache to the the profile.
bool AddResolvedClasses(const DexCacheResolvedClasses& classes);
@@ -366,6 +470,10 @@ class ProfileCompilationInfo {
// doesn't contain the key.
const DexFileData* FindDexData(const std::string& profile_key) const;
+ // Return the dex data associated with the given dex file or null if the profile doesn't contain
+ // the key or the checksum does not match.
+ const DexFileData* FindDexData(const DexFile* dex_file) const;
+
// Checks if the profile is empty.
bool IsEmpty() const;
@@ -392,6 +500,7 @@ class ProfileCompilationInfo {
uint16_t class_set_size;
uint32_t method_region_size_bytes;
uint32_t checksum;
+ uint32_t num_method_ids;
};
// A helper structure to make sure we don't read past our buffers in the loops.
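The DexFileData additions above pack two flag sets into one per-dex bitmap. As a rough illustration of the MethodBitIndex layout (a sketch only, not code from this change; BitIndex and the 1000-method dex file are invented for the example):

    // Startup flags occupy bits [0, n), post-startup flags bits [n, 2n), with n = num_method_ids.
    constexpr size_t BitIndex(bool startup, size_t method_index, size_t num_method_ids) {
      return method_index + (startup ? 0u : num_method_ids);
    }
    static_assert(BitIndex(/*startup*/ true, 5, 1000) == 5, "startup bit of method 5");
    static_assert(BitIndex(/*startup*/ false, 5, 1000) == 1005, "post-startup bit of method 5");
    // Per-dex storage: RoundUp(2 * 1000 bits, kBitsPerByte) / kBitsPerByte == 250 bytes.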
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 1cfa3552b9..5528366512 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -25,13 +25,15 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "handle_scope-inl.h"
-#include "jit/profile_compilation_info.h"
+#include "jit/profile_compilation_info-inl.h"
#include "linear_alloc.h"
#include "scoped_thread_state_change-inl.h"
#include "type_reference.h"
namespace art {
+static constexpr size_t kMaxMethodIds = 65535;
+
class ProfileCompilationInfoTest : public CommonRuntimeTest {
public:
void PostRuntimeCreate() OVERRIDE {
@@ -61,7 +63,7 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
uint32_t checksum,
uint16_t method_index,
ProfileCompilationInfo* info) {
- return info->AddMethodIndex(dex_location, checksum, method_index);
+ return info->AddMethodIndex(dex_location, checksum, method_index, kMaxMethodIds);
}
bool AddMethod(const std::string& dex_location,
@@ -69,14 +71,14 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
uint16_t method_index,
const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi,
ProfileCompilationInfo* info) {
- return info->AddMethod(dex_location, checksum, method_index, pmi);
+ return info->AddMethod(dex_location, checksum, method_index, kMaxMethodIds, pmi);
}
bool AddClass(const std::string& dex_location,
uint32_t checksum,
uint16_t class_index,
ProfileCompilationInfo* info) {
- return info->AddMethodIndex(dex_location, checksum, class_index);
+ return info->AddMethodIndex(dex_location, checksum, class_index, kMaxMethodIds);
}
uint32_t GetFd(const ScratchFile& file) {
@@ -149,7 +151,9 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
std::vector<TypeReference> classes;
caches.emplace_back(dex_pc, /*is_missing_types*/true, classes);
}
- ProfileMethodInfo pmi(method->GetDexFile(), method->GetDexMethodIndex(), caches);
+ ProfileMethodInfo pmi(method->GetDexFile(),
+ method->GetDexMethodIndex(),
+ caches);
profile_methods.push_back(pmi);
profile_methods_map->Put(method, pmi);
}
@@ -191,7 +195,8 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
const std::string& dex_key = ProfileCompilationInfo::GetProfileDexFileKey(
class_ref.dex_file->GetLocation());
offline_pmi.dex_references.emplace_back(dex_key,
- class_ref.dex_file->GetLocationChecksum());
+ class_ref.dex_file->GetLocationChecksum(),
+ class_ref.dex_file->NumMethodIds());
}
}
}
@@ -201,6 +206,7 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
// Creates an offline profile used for testing inline caches.
ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo() {
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+
// Monomorphic
for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
@@ -231,9 +237,9 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */1);
- pmi.dex_references.emplace_back("dex_location2", /* checksum */2);
- pmi.dex_references.emplace_back("dex_location3", /* checksum */3);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */1, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location2", /* checksum */2, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location3", /* checksum */3, kMaxMethodIds);
return pmi;
}
@@ -292,7 +298,8 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethods) {
{
ScopedObjectAccess soa(self);
for (ArtMethod* m : main_methods) {
- ASSERT_TRUE(info1.ContainsMethod(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
+ ASSERT_TRUE(info1.ContainsHotMethod(
+ MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
}
}
@@ -308,10 +315,12 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethods) {
{
ScopedObjectAccess soa(self);
for (ArtMethod* m : main_methods) {
- ASSERT_TRUE(info2.ContainsMethod(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
+ ASSERT_TRUE(
+ info2.ContainsHotMethod(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
}
for (ArtMethod* m : second_methods) {
- ASSERT_TRUE(info2.ContainsMethod(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
+ ASSERT_TRUE(
+ info2.ContainsHotMethod(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
}
}
}
@@ -657,7 +666,8 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) {
{
ScopedObjectAccess soa(self);
for (ArtMethod* m : main_methods) {
- ASSERT_TRUE(info.ContainsMethod(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
+ ASSERT_TRUE(
+ info.ContainsHotMethod(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())));
const ProfileMethodInfo& pmi = profile_methods_map.find(m)->second;
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> offline_pmi =
info.GetMethod(m->GetDexFile()->GetLocation(),
@@ -694,8 +704,8 @@ TEST_F(ProfileCompilationInfoTest, MergeInlineCacheTriggerReindex) {
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
- pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
@@ -705,8 +715,8 @@ TEST_F(ProfileCompilationInfoTest, MergeInlineCacheTriggerReindex) {
ProfileCompilationInfo::InlineCacheMap* ic_map_reindexed = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi_reindexed(ic_map_reindexed);
- pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum */ 2);
- pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum */ 1);
+ pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
+ pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.AddClass(1, dex::TypeIndex(0));
@@ -761,7 +771,7 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
// Create a megamorphic inline cache.
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMegamorphic();
ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
@@ -791,7 +801,7 @@ TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCachesMerge) {
// Create an inline cache with missing types
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
ProfileCompilationInfo::DexPcData dex_pc_data(arena_.get());
dex_pc_data.SetIsMissingTypes();
ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
@@ -839,4 +849,91 @@ TEST_F(ProfileCompilationInfoTest, LoadShouldClearExistingDataFromProfiles) {
// This should fail since the test_info already contains data and the load would overwrite it.
ASSERT_FALSE(test_info.Load(GetFd(profile)));
}
+
+TEST_F(ProfileCompilationInfoTest, SampledMethodsTest) {
+ ProfileCompilationInfo test_info;
+ static constexpr size_t kNumMethods = 1000;
+ static constexpr size_t kChecksum1 = 1234;
+ static constexpr size_t kChecksum2 = 4321;
+ static const std::string kDex1 = "dex1";
+ static const std::string kDex2 = "dex2";
+ test_info.AddSampledMethod(true, kDex1, kChecksum1, 1, kNumMethods);
+ test_info.AddSampledMethod(true, kDex1, kChecksum1, 5, kNumMethods);
+ test_info.AddSampledMethod(false, kDex2, kChecksum2, 1, kNumMethods);
+ test_info.AddSampledMethod(false, kDex2, kChecksum2, 5, kNumMethods);
+ auto run_test = [](const ProfileCompilationInfo& info) {
+ EXPECT_FALSE(info.IsStartupOrHotMethod(kDex1, kChecksum1, 0));
+ EXPECT_TRUE(info.IsStartupOrHotMethod(kDex1, kChecksum1, 1));
+ EXPECT_FALSE(info.IsStartupOrHotMethod(kDex1, kChecksum1, 3));
+ EXPECT_TRUE(info.IsStartupOrHotMethod(kDex1, kChecksum1, 5));
+ EXPECT_FALSE(info.IsStartupOrHotMethod(kDex1, kChecksum1, 6));
+ EXPECT_FALSE(info.IsStartupOrHotMethod(kDex2, kChecksum2, 5));
+ EXPECT_FALSE(info.IsStartupOrHotMethod(kDex2, kChecksum2, 5));
+ };
+ run_test(test_info);
+
+ // Save the profile.
+ ScratchFile profile;
+ ASSERT_TRUE(test_info.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+
+ // Load the profile and make sure we can read the data and it matches what we expect.
+ ProfileCompilationInfo loaded_info;
+ ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+ run_test(loaded_info);
+
+ // Test that the bitmap gets merged properly.
+ EXPECT_FALSE(test_info.IsStartupOrHotMethod(kDex1, kChecksum1, 11));
+ {
+ ProfileCompilationInfo merge_info;
+ merge_info.AddSampledMethod(true, kDex1, kChecksum1, 11, kNumMethods);
+ test_info.MergeWith(merge_info);
+ }
+ EXPECT_TRUE(test_info.IsStartupOrHotMethod(kDex1, kChecksum1, 11));
+
+ // Test bulk adding.
+ {
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+ ProfileCompilationInfo info;
+ std::vector<uint16_t> hot_methods = {1, 3, 5};
+ std::vector<uint16_t> startup_methods = {1, 2};
+ std::vector<uint16_t> post_methods = {0, 2, 6};
+ ASSERT_GE(dex->NumMethodIds(), 7u);
+ info.AddMethodsForDex(/*startup*/true,
+ /*hot*/true,
+ dex.get(),
+ hot_methods.begin(),
+ hot_methods.end());
+ info.AddMethodsForDex(/*startup*/true,
+ /*hot*/false,
+ dex.get(),
+ startup_methods.begin(),
+ startup_methods.end());
+ info.AddMethodsForDex(/*startup*/false,
+ /*hot*/false,
+ dex.get(),
+ post_methods.begin(),
+ post_methods.end());
+ for (uint16_t id : hot_methods) {
+ EXPECT_TRUE(info.ContainsHotMethod(MethodReference(dex.get(), id)));
+ EXPECT_TRUE(info.ContainsSampledMethod(/*startup*/true, MethodReference(dex.get(), id)));
+ }
+ for (uint16_t id : startup_methods) {
+ EXPECT_TRUE(info.ContainsSampledMethod(/*startup*/true, MethodReference(dex.get(), id)));
+ }
+ for (uint16_t id : post_methods) {
+ EXPECT_TRUE(info.ContainsSampledMethod(/*startup*/false, MethodReference(dex.get(), id)));
+ }
+ EXPECT_TRUE(info.ContainsSampledMethod(/*startup*/false, MethodReference(dex.get(), 6)));
+ // Check that methods that shouldn't have been touched are OK.
+ EXPECT_FALSE(info.ContainsHotMethod(MethodReference(dex.get(), 0)));
+ EXPECT_FALSE(info.ContainsHotMethod(MethodReference(dex.get(), 2)));
+ EXPECT_FALSE(info.ContainsHotMethod(MethodReference(dex.get(), 4)));
+ EXPECT_FALSE(info.ContainsSampledMethod(/*startup*/false, MethodReference(dex.get(), 1)));
+ EXPECT_FALSE(info.ContainsSampledMethod(/*startup*/true, MethodReference(dex.get(), 4)));
+ EXPECT_FALSE(info.ContainsSampledMethod(/*startup*/true, MethodReference(dex.get(), 6)));
+ }
+}
+
} // namespace art
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index bc829cf9a7..c6c46de213 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -25,12 +25,16 @@
#include "art_method-inl.h"
#include "base/enums.h"
+#include "base/scoped_arena_containers.h"
+#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "compiler_filter.h"
+#include "dex_reference_collection.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc/scoped_gc_critical_section.h"
+#include "jit/profile_compilation_info-inl.h"
#include "oat_file_manager.h"
#include "scoped_thread_state_change-inl.h"
@@ -179,99 +183,151 @@ void ProfileSaver::NotifyJitActivityInternal() {
}
}
+using MethodReferenceCollection = DexReferenceCollection<uint16_t, ScopedArenaAllocatorAdapter>;
+using TypeReferenceCollection = DexReferenceCollection<dex::TypeIndex,
+ ScopedArenaAllocatorAdapter>;
+
// Get resolved methods that have a profile info or more than kStartupMethodSamples samples.
// Excludes native methods and classes in the boot image.
-class GetMethodsVisitor : public ClassVisitor {
+class GetClassesAndMethodsVisitor : public ClassVisitor {
public:
- GetMethodsVisitor(std::vector<MethodReference>* methods, uint32_t startup_method_samples)
- : methods_(methods),
- startup_method_samples_(startup_method_samples) {}
+ GetClassesAndMethodsVisitor(MethodReferenceCollection* hot_methods,
+ MethodReferenceCollection* sampled_methods,
+ TypeReferenceCollection* resolved_classes,
+ uint32_t hot_method_sample_threshold,
+ bool profile_boot_class_path)
+ : hot_methods_(hot_methods),
+ sampled_methods_(sampled_methods),
+ resolved_classes_(resolved_classes),
+ hot_method_sample_threshold_(hot_method_sample_threshold),
+ profile_boot_class_path_(profile_boot_class_path) {}
virtual bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
+ if (klass->IsProxyClass() ||
+ klass->IsArrayClass() ||
+ klass->IsPrimitive() ||
+ !klass->IsResolved() ||
+ klass->IsErroneousResolved() ||
+ (!profile_boot_class_path_ && klass->GetClassLoader() == nullptr)) {
return true;
}
+ CHECK(klass->GetDexCache() != nullptr) << klass->PrettyClass();
+ resolved_classes_->AddReference(&klass->GetDexFile(), klass->GetDexTypeIndex());
for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
if (!method.IsNative()) {
- if (method.GetCounter() >= startup_method_samples_ ||
- method.GetProfilingInfo(kRuntimePointerSize) != nullptr ||
- (method.GetAccessFlags() & kAccPreviouslyWarm) != 0) {
- // Have samples, add to profile.
- const DexFile* dex_file =
- method.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetDexFile();
- methods_->push_back(MethodReference(dex_file, method.GetDexMethodIndex()));
+ DCHECK(!method.IsProxyMethod());
+ const uint16_t counter = method.GetCounter();
+ // Mark startup methods as hot if they have at least hot_method_sample_threshold_ samples.
+ // This means they will get compiled by the compiler driver.
+ if (method.GetProfilingInfo(kRuntimePointerSize) != nullptr ||
+ (method.GetAccessFlags() & kAccPreviouslyWarm) != 0 ||
+ counter >= hot_method_sample_threshold_) {
+ hot_methods_->AddReference(method.GetDexFile(), method.GetDexMethodIndex());
+ } else if (counter != 0) {
+ sampled_methods_->AddReference(method.GetDexFile(), method.GetDexMethodIndex());
}
+ } else {
+ CHECK_EQ(method.GetCounter(), 0u);
}
}
return true;
}
private:
- std::vector<MethodReference>* const methods_;
- uint32_t startup_method_samples_;
+ MethodReferenceCollection* const hot_methods_;
+ MethodReferenceCollection* const sampled_methods_;
+ TypeReferenceCollection* const resolved_classes_;
+ uint32_t hot_method_sample_threshold_;
+ const bool profile_boot_class_path_;
};
void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() {
ScopedTrace trace(__PRETTY_FUNCTION__);
+ const uint64_t start_time = NanoTime();
// Resolve any new registered locations.
ResolveTrackedLocations();
Thread* const self = Thread::Current();
- std::vector<MethodReference> methods;
- std::set<DexCacheResolvedClasses> resolved_classes;
+ Runtime* const runtime = Runtime::Current();
+ ArenaStack stack(runtime->GetArenaPool());
+ ScopedArenaAllocator allocator(&stack);
+ MethodReferenceCollection hot_methods(allocator.Adapter(), allocator.Adapter());
+ MethodReferenceCollection startup_methods(allocator.Adapter(), allocator.Adapter());
+ TypeReferenceCollection resolved_classes(allocator.Adapter(), allocator.Adapter());
+ const bool is_low_ram = Runtime::Current()->GetHeap()->IsLowMemoryMode();
+ const size_t hot_threshold = options_.GetHotStartupMethodSamples(is_low_ram);
{
ScopedObjectAccess soa(self);
gc::ScopedGCCriticalSection sgcs(self,
gc::kGcCauseProfileSaver,
gc::kCollectorTypeCriticalSection);
-
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- resolved_classes = class_linker->GetResolvedClasses(/*ignore boot classes*/ true);
-
{
ScopedTrace trace2("Get hot methods");
- GetMethodsVisitor visitor(&methods, options_.GetStartupMethodSamples());
- class_linker->VisitClasses(&visitor);
- VLOG(profiler) << "Methods with samples greater than "
- << options_.GetStartupMethodSamples() << " = " << methods.size();
+ GetClassesAndMethodsVisitor visitor(&hot_methods,
+ &startup_methods,
+ &resolved_classes,
+ hot_threshold,
+ options_.GetProfileBootClassPath());
+ runtime->GetClassLinker()->VisitClasses(&visitor);
}
}
+
MutexLock mu(self, *Locks::profiler_lock_);
uint64_t total_number_of_profile_entries_cached = 0;
for (const auto& it : tracked_dex_base_locations_) {
std::set<DexCacheResolvedClasses> resolved_classes_for_location;
const std::string& filename = it.first;
+ auto info_it = profile_cache_.Put(
+ filename,
+ new ProfileCompilationInfo(Runtime::Current()->GetArenaPool()));
+ ProfileCompilationInfo* cached_info = info_it->second;
+
const std::set<std::string>& locations = it.second;
- std::vector<ProfileMethodInfo> profile_methods_for_location;
- for (const MethodReference& ref : methods) {
- if (locations.find(ref.dex_file->GetBaseLocation()) != locations.end()) {
- profile_methods_for_location.emplace_back(ref.dex_file, ref.dex_method_index);
+ for (const auto& pair : hot_methods.GetMap()) {
+ const DexFile* const dex_file = pair.first;
+ if (locations.find(dex_file->GetBaseLocation()) != locations.end()) {
+ const MethodReferenceCollection::IndexVector& indices = pair.second;
+ cached_info->AddMethodsForDex(/*startup*/ true,
+ /*hot*/ true,
+ dex_file,
+ indices.begin(),
+ indices.end());
}
}
- for (const DexCacheResolvedClasses& classes : resolved_classes) {
- if (locations.find(classes.GetBaseLocation()) != locations.end()) {
- VLOG(profiler) << "Added " << classes.GetClasses().size() << " classes for location "
- << classes.GetBaseLocation() << " (" << classes.GetDexLocation() << ")";
- resolved_classes_for_location.insert(classes);
+ for (const auto& pair : startup_methods.GetMap()) {
+ const DexFile* const dex_file = pair.first;
+ if (locations.find(dex_file->GetBaseLocation()) != locations.end()) {
+ const MethodReferenceCollection::IndexVector& indices = pair.second;
+ cached_info->AddMethodsForDex(/*startup*/ true,
+ /*hot*/ false,
+ dex_file,
+ indices.begin(),
+ indices.end());
+ }
+ }
+ for (const auto& pair : resolved_classes.GetMap()) {
+ const DexFile* const dex_file = pair.first;
+ if (locations.find(dex_file->GetBaseLocation()) != locations.end()) {
+ const TypeReferenceCollection::IndexVector& classes = pair.second;
+ VLOG(profiler) << "Added " << classes.size() << " classes for location "
+ << dex_file->GetBaseLocation()
+ << " (" << dex_file->GetLocation() << ")";
+ cached_info->AddClassesForDex(dex_file, classes.begin(), classes.end());
} else {
- VLOG(profiler) << "Location not found " << classes.GetBaseLocation()
- << " (" << classes.GetDexLocation() << ")";
+ VLOG(profiler) << "Location not found " << dex_file->GetBaseLocation()
+ << " (" << dex_file->GetLocation() << ")";
}
}
- auto info_it = profile_cache_.Put(
- filename,
- new ProfileCompilationInfo(Runtime::Current()->GetArenaPool()));
-
- ProfileCompilationInfo* cached_info = info_it->second;
- cached_info->AddMethodsAndClasses(profile_methods_for_location,
- resolved_classes_for_location);
total_number_of_profile_entries_cached += resolved_classes_for_location.size();
}
max_number_of_profile_entries_cached_ = std::max(
max_number_of_profile_entries_cached_,
total_number_of_profile_entries_cached);
+ VLOG(profiler) << "Profile saver recorded " << hot_methods.NumReferences() << " hot methods and "
+ << startup_methods.NumReferences() << " startup methods with threshold "
+ << hot_threshold << " in " << PrettyDuration(NanoTime() - start_time);
}
bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number_of_new_methods) {
@@ -317,8 +373,7 @@ bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number
uint64_t last_save_number_of_methods = info.GetNumberOfMethods();
uint64_t last_save_number_of_classes = info.GetNumberOfResolvedClasses();
- info.AddMethodsAndClasses(profile_methods,
- std::set<DexCacheResolvedClasses>());
+ info.AddMethodsAndClasses(profile_methods, std::set<DexCacheResolvedClasses>());
auto profile_cache_it = profile_cache_.find(filename);
if (profile_cache_it != profile_cache_.end()) {
info.MergeWith(*(profile_cache_it->second));
@@ -421,24 +476,49 @@ void ProfileSaver::Start(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
const std::vector<std::string>& code_paths) {
+ Runtime* const runtime = Runtime::Current();
DCHECK(options.IsEnabled());
- DCHECK(Runtime::Current()->GetJit() != nullptr);
+ DCHECK(runtime->GetJit() != nullptr);
DCHECK(!output_filename.empty());
DCHECK(jit_code_cache != nullptr);
std::vector<std::string> code_paths_to_profile;
-
for (const std::string& location : code_paths) {
if (ShouldProfileLocation(location)) {
code_paths_to_profile.push_back(location);
}
}
+
+ MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
+ // Support getting profile samples for the boot class path. This will be used to generate the boot
+ // image profile. The intention is to use this code to generate the boot image profile but not use it in
+ // production. b/37966211
+ if (options.GetProfileBootClassPath()) {
+ std::set<std::string> code_paths_keys;
+ for (const std::string& location : code_paths) {
+ code_paths_keys.insert(ProfileCompilationInfo::GetProfileDexFileKey(location));
+ }
+ for (const DexFile* dex_file : runtime->GetClassLinker()->GetBootClassPath()) {
+ // Don't check ShouldProfileLocation since the boot class path may be speed compiled.
+ const std::string& location = dex_file->GetLocation();
+ const std::string key = ProfileCompilationInfo::GetProfileDexFileKey(location);
+ VLOG(profiler) << "Registering boot dex file " << location;
+ if (code_paths_keys.find(key) != code_paths_keys.end()) {
+ LOG(WARNING) << "Boot class path location key conflicts with code path " << location;
+ } else if (instance_ == nullptr) {
+ // Only add the boot class path once since Start may be called multiple times for secondary
+ // dexes.
+ // We still do the collision check above. This handles any secondary dexes that conflict
+ // with the boot class path dex files.
+ code_paths_to_profile.push_back(location);
+ }
+ }
+ }
if (code_paths_to_profile.empty()) {
VLOG(profiler) << "No code paths should be profiled.";
return;
}
- MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
if (instance_ != nullptr) {
// If we already have an instance, make sure it uses the same jit_code_cache.
// This may be called multiple times via Runtime::registerAppInfo (e.g. for
@@ -601,7 +681,7 @@ bool ProfileSaver::HasSeenMethod(const std::string& profile,
if (!info.Load(profile, /*clear_if_invalid*/false)) {
return false;
}
- return info.ContainsMethod(MethodReference(dex_file, method_idx));
+ return info.ContainsHotMethod(MethodReference(dex_file, method_idx));
}
return false;
}
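The DexReferenceCollection pattern above groups method indices per DexFile so each dex file needs only one GetOrAddDexFileData lookup. A minimal sketch of that pattern, assuming `allocator`, `dex_file` and `info` stand in for the saver's locals:

    MethodReferenceCollection hot_methods(allocator.Adapter(), allocator.Adapter());
    hot_methods.AddReference(dex_file, /*method_index*/ 42u);  // grouped by DexFile internally
    for (const auto& pair : hot_methods.GetMap()) {
      const MethodReferenceCollection::IndexVector& indices = pair.second;
      // Bulk add: one dex-file lookup instead of one lookup per method.
      info->AddMethodsForDex(/*startup*/ true, /*hot*/ true, pair.first, indices.begin(), indices.end());
    }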
diff --git a/runtime/jit/profile_saver_options.h b/runtime/jit/profile_saver_options.h
index 07aeb66eb6..251227e89c 100644
--- a/runtime/jit/profile_saver_options.h
+++ b/runtime/jit/profile_saver_options.h
@@ -22,43 +22,48 @@ struct ProfileSaverOptions {
public:
static constexpr uint32_t kMinSavePeriodMs = 40 * 1000; // 40 seconds
static constexpr uint32_t kSaveResolvedClassesDelayMs = 5 * 1000; // 5 seconds
- // Minimum number of JIT samples during launch to include a method into the profile.
- static constexpr uint32_t kStartupMethodSamples = 1;
+ // Minimum number of JIT samples during launch to mark a method as hot in the profile.
+ static constexpr uint32_t kHotStartupMethodSamples = 1;
+ static constexpr uint32_t kHotStartupMethodSamplesLowRam = 256;
static constexpr uint32_t kMinMethodsToSave = 10;
static constexpr uint32_t kMinClassesToSave = 10;
static constexpr uint32_t kMinNotificationBeforeWake = 10;
static constexpr uint32_t kMaxNotificationBeforeWake = 50;
+ static constexpr uint32_t kHotStartupMethodSamplesNotSet = std::numeric_limits<uint32_t>::max();
ProfileSaverOptions() :
enabled_(false),
min_save_period_ms_(kMinSavePeriodMs),
save_resolved_classes_delay_ms_(kSaveResolvedClassesDelayMs),
- startup_method_samples_(kStartupMethodSamples),
+ hot_startup_method_samples_(kHotStartupMethodSamplesNotSet),
min_methods_to_save_(kMinMethodsToSave),
min_classes_to_save_(kMinClassesToSave),
min_notification_before_wake_(kMinNotificationBeforeWake),
max_notification_before_wake_(kMaxNotificationBeforeWake),
- profile_path_("") {}
+ profile_path_(""),
+ profile_boot_class_path_(false) {}
ProfileSaverOptions(
bool enabled,
uint32_t min_save_period_ms,
uint32_t save_resolved_classes_delay_ms,
- uint32_t startup_method_samples,
+ uint32_t hot_startup_method_samples,
uint32_t min_methods_to_save,
uint32_t min_classes_to_save,
uint32_t min_notification_before_wake,
uint32_t max_notification_before_wake,
- const std::string& profile_path):
- enabled_(enabled),
+ const std::string& profile_path,
+ bool profile_boot_class_path)
+ : enabled_(enabled),
min_save_period_ms_(min_save_period_ms),
save_resolved_classes_delay_ms_(save_resolved_classes_delay_ms),
- startup_method_samples_(startup_method_samples),
+ hot_startup_method_samples_(hot_startup_method_samples),
min_methods_to_save_(min_methods_to_save),
min_classes_to_save_(min_classes_to_save),
min_notification_before_wake_(min_notification_before_wake),
max_notification_before_wake_(max_notification_before_wake),
- profile_path_(profile_path) {}
+ profile_path_(profile_path),
+ profile_boot_class_path_(profile_boot_class_path) {}
bool IsEnabled() const {
return enabled_;
@@ -73,8 +78,12 @@ struct ProfileSaverOptions {
uint32_t GetSaveResolvedClassesDelayMs() const {
return save_resolved_classes_delay_ms_;
}
- uint32_t GetStartupMethodSamples() const {
- return startup_method_samples_;
+ uint32_t GetHotStartupMethodSamples(bool is_low_ram) const {
+ uint32_t ret = hot_startup_method_samples_;
+ if (ret == kHotStartupMethodSamplesNotSet) {
+ ret = is_low_ram ? kHotStartupMethodSamplesLowRam : kHotStartupMethodSamples;
+ }
+ return ret;
}
uint32_t GetMinMethodsToSave() const {
return min_methods_to_save_;
@@ -91,28 +100,35 @@ struct ProfileSaverOptions {
std::string GetProfilePath() const {
return profile_path_;
}
+ bool GetProfileBootClassPath() const {
+ return profile_boot_class_path_;
+ }
friend std::ostream & operator<<(std::ostream &os, const ProfileSaverOptions& pso) {
os << "enabled_" << pso.enabled_
<< ", min_save_period_ms_" << pso.min_save_period_ms_
<< ", save_resolved_classes_delay_ms_" << pso.save_resolved_classes_delay_ms_
- << ", startup_method_samples_" << pso.startup_method_samples_
+ << ", hot_startup_method_samples_" << pso.hot_startup_method_samples_
<< ", min_methods_to_save_" << pso.min_methods_to_save_
<< ", min_classes_to_save_" << pso.min_classes_to_save_
<< ", min_notification_before_wake_" << pso.min_notification_before_wake_
- << ", max_notification_before_wake_" << pso.max_notification_before_wake_;
+ << ", max_notification_before_wake_" << pso.max_notification_before_wake_
+ << ", profile_boot_class_path_" << pso.profile_boot_class_path_;
return os;
}
bool enabled_;
uint32_t min_save_period_ms_;
uint32_t save_resolved_classes_delay_ms_;
- uint32_t startup_method_samples_;
+ // Do not access hot_startup_method_samples_ directly for reading since it may be set to the
+ // placeholder default.
+ uint32_t hot_startup_method_samples_;
uint32_t min_methods_to_save_;
uint32_t min_classes_to_save_;
uint32_t min_notification_before_wake_;
uint32_t max_notification_before_wake_;
std::string profile_path_;
+ bool profile_boot_class_path_;
};
} // namespace art
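The renamed threshold is now resolved lazily: when left at the kHotStartupMethodSamplesNotSet placeholder, GetHotStartupMethodSamples picks a default based on the device's RAM class. A small illustrative check, assuming nothing overrode the default:

    ProfileSaverOptions options;  // hot_startup_method_samples_ == kHotStartupMethodSamplesNotSet
    CHECK_EQ(options.GetHotStartupMethodSamples(/*is_low_ram*/ false), 1u);   // kHotStartupMethodSamples
    CHECK_EQ(options.GetHotStartupMethodSamples(/*is_low_ram*/ true), 256u);  // kHotStartupMethodSamplesLowRam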
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 0148a1c3b0..3ff94f995d 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -28,7 +28,7 @@
#include "lock_word.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
namespace art {
@@ -123,8 +123,8 @@ void JNIEnvExt::DumpReferenceTables(std::ostream& os) {
monitors.Dump(os);
}
-void JNIEnvExt::PushFrame(int capacity ATTRIBUTE_UNUSED) {
- // TODO: take 'capacity' into account.
+void JNIEnvExt::PushFrame(int capacity) {
+ DCHECK_GE(locals.FreeCapacity(), static_cast<size_t>(capacity));
stacked_local_ref_cookies.push_back(local_ref_cookie);
local_ref_cookie = locals.GetSegmentState();
}
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index 60e4295e40..af933ae835 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -22,7 +22,6 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "indirect_reference_table.h"
-#include "object_callbacks.h"
#include "obj_ptr.h"
#include "reference_table.h"
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 0fde41bd4f..6be0953727 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -106,10 +106,9 @@ static void ThrowNoSuchMethodError(ScopedObjectAccess& soa,
static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa,
ObjPtr<mirror::Class> c,
const char* kind,
- jint idx,
- bool return_errors)
+ jint idx)
REQUIRES_SHARED(Locks::mutator_lock_) {
- LOG(return_errors ? ::android::base::ERROR : ::android::base::FATAL)
+ LOG(ERROR)
<< "Failed to register native method in " << c->PrettyDescriptor()
<< " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< ": " << kind << " is null at index " << idx;
@@ -2145,13 +2144,10 @@ class JNI {
buf);
}
- static jint RegisterNatives(JNIEnv* env, jclass java_class, const JNINativeMethod* methods,
+ static jint RegisterNatives(JNIEnv* env,
+ jclass java_class,
+ const JNINativeMethod* methods,
jint method_count) {
- return RegisterNativeMethods(env, java_class, methods, method_count, true);
- }
-
- static jint RegisterNativeMethods(JNIEnv* env, jclass java_class, const JNINativeMethod* methods,
- jint method_count, bool return_errors) {
if (UNLIKELY(method_count < 0)) {
JavaVmExtFromEnv(env)->JniAbortF("RegisterNatives", "negative method count: %d",
method_count);
@@ -2172,13 +2168,13 @@ class JNI {
const char* sig = methods[i].signature;
const void* fnPtr = methods[i].fnPtr;
if (UNLIKELY(name == nullptr)) {
- ReportInvalidJNINativeMethod(soa, c.Get(), "method name", i, return_errors);
+ ReportInvalidJNINativeMethod(soa, c.Get(), "method name", i);
return JNI_ERR;
} else if (UNLIKELY(sig == nullptr)) {
- ReportInvalidJNINativeMethod(soa, c.Get(), "method signature", i, return_errors);
+ ReportInvalidJNINativeMethod(soa, c.Get(), "method signature", i);
return JNI_ERR;
} else if (UNLIKELY(fnPtr == nullptr)) {
- ReportInvalidJNINativeMethod(soa, c.Get(), "native function", i, return_errors);
+ ReportInvalidJNINativeMethod(soa, c.Get(), "native function", i);
return JNI_ERR;
}
bool is_fast = false;
@@ -2244,19 +2240,15 @@ class JNI {
}
if (m == nullptr) {
- c->DumpClass(
- LOG_STREAM(return_errors
- ? ::android::base::ERROR
- : ::android::base::FATAL_WITHOUT_ABORT),
- mirror::Class::kDumpClassFullDetail);
- LOG(return_errors ? ::android::base::ERROR : ::android::base::FATAL)
+ c->DumpClass(LOG_STREAM(ERROR), mirror::Class::kDumpClassFullDetail);
+ LOG(ERROR)
<< "Failed to register native method "
<< c->PrettyDescriptor() << "." << name << sig << " in "
<< c->GetDexCache()->GetLocation()->ToModifiedUtf8();
ThrowNoSuchMethodError(soa, c.Get(), name, sig, "static or non-static");
return JNI_ERR;
} else if (!m->IsNative()) {
- LOG(return_errors ? ::android::base::ERROR : ::android::base::FATAL)
+ LOG(ERROR)
<< "Failed to register non-native method "
<< c->PrettyDescriptor() << "." << name << sig
<< " as native";
@@ -2407,18 +2399,18 @@ class JNI {
static jint EnsureLocalCapacityInternal(ScopedObjectAccess& soa, jint desired_capacity,
const char* caller)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // TODO: we should try to expand the table if necessary.
- if (desired_capacity < 0 || desired_capacity > static_cast<jint>(kLocalsInitial)) {
+ if (desired_capacity < 0) {
LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity;
return JNI_ERR;
}
- // TODO: this isn't quite right, since "capacity" includes holes.
- const size_t capacity = soa.Env()->locals.Capacity();
- bool okay = (static_cast<jint>(kLocalsInitial - capacity) >= desired_capacity);
- if (!okay) {
- soa.Self()->ThrowOutOfMemoryError(caller);
+
+ std::string error_msg;
+ if (!soa.Env()->locals.EnsureFreeCapacity(static_cast<size_t>(desired_capacity), &error_msg)) {
+ std::string caller_error = android::base::StringPrintf("%s: %s", caller, error_msg.c_str());
+ soa.Self()->ThrowOutOfMemoryError(caller_error.c_str());
+ return JNI_ERR;
}
- return okay ? JNI_OK : JNI_ERR;
+ return JNI_OK;
}
template<typename JniT, typename ArtT>
@@ -3051,16 +3043,6 @@ const JNINativeInterface* GetRuntimeShutdownNativeInterface() {
return reinterpret_cast<JNINativeInterface*>(&gJniSleepForeverStub);
}
-void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
- jint method_count) {
- ScopedLocalRef<jclass> c(env, env->FindClass(jni_class_name));
- if (c.get() == nullptr) {
- LOG(FATAL) << "Couldn't find class: " << jni_class_name;
- }
- jint jni_result = env->RegisterNatives(c.get(), methods, method_count);
- CHECK_EQ(JNI_OK, jni_result);
-}
-
} // namespace art
std::ostream& operator<<(std::ostream& os, const jobjectRefType& rhs) {
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index 24bee6fb1d..2c90b3ba78 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -22,9 +22,6 @@
#include "base/macros.h"
-#define REGISTER_NATIVE_METHODS(jni_class_name) \
- RegisterNativeMethods(env, jni_class_name, gMethods, arraysize(gMethods))
-
namespace art {
class ArtField;
@@ -33,11 +30,6 @@ class ArtMethod;
const JNINativeInterface* GetJniNativeInterface();
const JNINativeInterface* GetRuntimeShutdownNativeInterface();
-// Similar to RegisterNatives except its passed a descriptor for a class name and failures are
-// fatal.
-void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
- jint method_count);
-
int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause);
namespace jni {
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 08d1eeb95d..e1e4f9c7d6 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1908,9 +1908,6 @@ TEST_F(JniInternalTest, PushLocalFrame_10395422) {
// Negative capacities are not allowed.
ASSERT_EQ(JNI_ERR, env_->PushLocalFrame(-1));
-
- // And it's okay to have an upper limit. Ours is currently 512.
- ASSERT_EQ(JNI_ERR, env_->PushLocalFrame(8192));
}
TEST_F(JniInternalTest, PushLocalFrame_PopLocalFrame) {
@@ -1962,6 +1959,28 @@ TEST_F(JniInternalTest, PushLocalFrame_PopLocalFrame) {
check_jni_abort_catcher.Check("use of deleted local reference");
}
+TEST_F(JniInternalTest, PushLocalFrame_LimitAndOverflow) {
+ // Try a very large value that should fail.
+ ASSERT_NE(JNI_OK, env_->PushLocalFrame(std::numeric_limits<jint>::max()));
+ ASSERT_TRUE(env_->ExceptionCheck());
+ env_->ExceptionClear();
+
+ // On 32-bit, also check for some overflow conditions.
+#ifndef __LP64__
+ ASSERT_EQ(JNI_OK, env_->PushLocalFrame(10));
+ ASSERT_NE(JNI_OK, env_->PushLocalFrame(std::numeric_limits<jint>::max() - 10));
+ ASSERT_TRUE(env_->ExceptionCheck());
+ env_->ExceptionClear();
+ EXPECT_EQ(env_->PopLocalFrame(nullptr), nullptr);
+#endif
+}
+
+TEST_F(JniInternalTest, PushLocalFrame_b62223672) {
+ // The 512-entry limit has been lifted; try a larger value.
+ ASSERT_EQ(JNI_OK, env_->PushLocalFrame(1024));
+ EXPECT_EQ(env_->PopLocalFrame(nullptr), nullptr);
+}
+
TEST_F(JniInternalTest, NewGlobalRef_nullptr) {
EXPECT_EQ(env_->NewGlobalRef(nullptr), nullptr);
}
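Since the fixed 512-entry cap is gone and capacity requests now grow the local reference table on demand, native code can reserve larger local frames. A hedged sketch of ordinary JNI usage (the surrounding native method and `some_obj` are hypothetical; only standard JNI calls are used):

    // Reserve room for 1024 local references; an OutOfMemoryError is pending on failure.
    if (env->PushLocalFrame(1024) != JNI_OK) {
      return nullptr;
    }
    jobject local = env->NewLocalRef(some_obj);
    // ... create and use up to 1024 locals ...
    return env->PopLocalFrame(local);  // frees the frame, returns `local` in the caller's frame.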
diff --git a/runtime/linear_alloc.cc b/runtime/linear_alloc.cc
index e9db9b8b4c..3f01fc329a 100644
--- a/runtime/linear_alloc.cc
+++ b/runtime/linear_alloc.cc
@@ -16,7 +16,7 @@
#include "linear_alloc.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/managed_stack-inl.h b/runtime/managed_stack-inl.h
new file mode 100644
index 0000000000..f3f31cf8e8
--- /dev/null
+++ b/runtime/managed_stack-inl.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MANAGED_STACK_INL_H_
+#define ART_RUNTIME_MANAGED_STACK_INL_H_
+
+#include "managed_stack.h"
+
+#include <cstring>
+#include <stdint.h>
+#include <string>
+
+#include "stack.h"
+
+namespace art {
+
+inline ShadowFrame* ManagedStack::PushShadowFrame(ShadowFrame* new_top_frame) {
+ DCHECK(top_quick_frame_ == nullptr);
+ ShadowFrame* old_frame = top_shadow_frame_;
+ top_shadow_frame_ = new_top_frame;
+ new_top_frame->SetLink(old_frame);
+ return old_frame;
+}
+
+inline ShadowFrame* ManagedStack::PopShadowFrame() {
+ DCHECK(top_quick_frame_ == nullptr);
+ CHECK(top_shadow_frame_ != nullptr);
+ ShadowFrame* frame = top_shadow_frame_;
+ top_shadow_frame_ = frame->GetLink();
+ return frame;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_MANAGED_STACK_INL_H_
diff --git a/runtime/managed_stack.cc b/runtime/managed_stack.cc
new file mode 100644
index 0000000000..be609c325d
--- /dev/null
+++ b/runtime/managed_stack.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_stack-inl.h"
+
+#include "android-base/stringprintf.h"
+
+#include "art_method.h"
+#include "mirror/object.h"
+#include "stack_reference.h"
+
+namespace art {
+
+size_t ManagedStack::NumJniShadowFrameReferences() const {
+ size_t count = 0;
+ for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
+ current_fragment = current_fragment->GetLink()) {
+ for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_;
+ current_frame != nullptr;
+ current_frame = current_frame->GetLink()) {
+ if (current_frame->GetMethod()->IsNative()) {
+ // The JNI ShadowFrame only contains references. (For indirect references.)
+ count += current_frame->NumberOfVRegs();
+ }
+ }
+ }
+ return count;
+}
+
+bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
+ for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
+ current_fragment = current_fragment->GetLink()) {
+ for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_;
+ current_frame != nullptr;
+ current_frame = current_frame->GetLink()) {
+ if (current_frame->Contains(shadow_frame_entry)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+} // namespace art
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
new file mode 100644
index 0000000000..8337f968ac
--- /dev/null
+++ b/runtime/managed_stack.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MANAGED_STACK_H_
+#define ART_RUNTIME_MANAGED_STACK_H_
+
+#include <cstring>
+#include <stdint.h>
+#include <string>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+
+namespace art {
+
+namespace mirror {
+class Object;
+} // namespace mirror
+
+class ArtMethod;
+class ShadowFrame;
+template <typename T> class StackReference;
+
+// The managed stack is used to record fragments of managed code stacks. Managed code stacks
+// may either be shadow frames or lists of frames using fixed frame sizes. Transition records are
+// necessary for transitions between code using different frame layouts and transitions into native
+// code.
+class PACKED(4) ManagedStack {
+ public:
+ ManagedStack()
+ : top_quick_frame_(nullptr), link_(nullptr), top_shadow_frame_(nullptr) {}
+
+ void PushManagedStackFragment(ManagedStack* fragment) {
+ // Copy this top fragment into given fragment.
+ memcpy(fragment, this, sizeof(ManagedStack));
+ // Clear this fragment, which has become the top.
+ memset(this, 0, sizeof(ManagedStack));
+ // Link our top fragment onto the given fragment.
+ link_ = fragment;
+ }
+
+ void PopManagedStackFragment(const ManagedStack& fragment) {
+ DCHECK(&fragment == link_);
+ // Copy this given fragment back to the top.
+ memcpy(this, &fragment, sizeof(ManagedStack));
+ }
+
+ ManagedStack* GetLink() const {
+ return link_;
+ }
+
+ ArtMethod** GetTopQuickFrame() const {
+ return top_quick_frame_;
+ }
+
+ void SetTopQuickFrame(ArtMethod** top) {
+ DCHECK(top_shadow_frame_ == nullptr);
+ top_quick_frame_ = top;
+ }
+
+ static size_t TopQuickFrameOffset() {
+ return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
+ }
+
+ ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
+ ALWAYS_INLINE ShadowFrame* PopShadowFrame();
+
+ ShadowFrame* GetTopShadowFrame() const {
+ return top_shadow_frame_;
+ }
+
+ void SetTopShadowFrame(ShadowFrame* top) {
+ DCHECK(top_quick_frame_ == nullptr);
+ top_shadow_frame_ = top;
+ }
+
+ static size_t TopShadowFrameOffset() {
+ return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
+ }
+
+ size_t NumJniShadowFrameReferences() const REQUIRES_SHARED(Locks::mutator_lock_);
+
+ bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
+
+ private:
+ ArtMethod** top_quick_frame_;
+ ManagedStack* link_;
+ ShadowFrame* top_shadow_frame_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_MANAGED_STACK_H_
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 6c39361e24..c847942fd1 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -23,6 +23,7 @@
#include <sys/resource.h>
#endif
+#include <map>
#include <memory>
#include <sstream>
@@ -32,6 +33,7 @@
#include "cutils/ashmem.h"
#include "base/allocator.h"
+#include "base/bit_utils.h"
#include "base/memory_tool.h"
#include "globals.h"
#include "utils.h"
@@ -46,6 +48,10 @@ namespace art {
using android::base::StringPrintf;
using android::base::unique_fd;
+template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
+using AllocationTrackingMultiMap =
+ std::multimap<Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;
+
using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;
// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
@@ -187,7 +193,7 @@ static bool CheckNonOverlapping(uintptr_t begin,
*error_msg = StringPrintf("Failed to build process map");
return false;
}
- ScopedBacktraceMapIteratorLock(map.get());
+ ScopedBacktraceMapIteratorLock lock(map.get());
for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
if ((begin >= it->start && begin < it->end) // start of new within old
|| (end > it->start && end < it->end) // end of new within old
@@ -952,6 +958,9 @@ void MemMap::TryReadable() {
}
void ZeroAndReleasePages(void* address, size_t length) {
+ if (length == 0) {
+ return;
+ }
uint8_t* const mem_begin = reinterpret_cast<uint8_t*>(address);
uint8_t* const mem_end = mem_begin + length;
uint8_t* const page_begin = AlignUp(mem_begin, kPageSize);
diff --git a/runtime/method_bss_mapping.h b/runtime/method_bss_mapping.h
new file mode 100644
index 0000000000..1476f93e21
--- /dev/null
+++ b/runtime/method_bss_mapping.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_METHOD_BSS_MAPPING_H_
+#define ART_RUNTIME_METHOD_BSS_MAPPING_H_
+
+#include "base/bit_utils.h"
+#include "base/length_prefixed_array.h"
+
+namespace art {
+
+// MethodBssMappingEntry describes a mapping of up to 17 method indexes to their offsets
+// in the .bss. The highest index and its associated .bss offset are stored in plain form
+// as `method_index` and `bss_offset`, respectively, while the additional indexes can be
+// stored in compressed form if their associated .bss entries are consecutive and in
+// method index order. Each of the 16 bits of the `index_mask` corresponds to one of the
+// previous 16 method indexes and indicates whether there is a .bss entry for that index.
+//
+struct MethodBssMappingEntry {
+ bool CoversIndex(uint32_t method_idx) const {
+ uint32_t diff = method_index - method_idx;
+ return (diff == 0) || (diff <= 16 && ((index_mask >> (16u - diff)) & 1u) != 0);
+ }
+
+ uint32_t GetBssOffset(uint32_t method_idx, size_t entry_size) const {
+ DCHECK(CoversIndex(method_idx));
+ uint32_t diff = method_index - method_idx;
+ if (diff == 0) {
+ return bss_offset;
+ } else {
+ return bss_offset - POPCOUNT(index_mask >> (16u - diff)) * entry_size;
+ }
+ }
+
+ uint16_t method_index;
+ uint16_t index_mask;
+ uint32_t bss_offset;
+};
+
+using MethodBssMapping = LengthPrefixedArray<MethodBssMappingEntry>;
+
+} // namespace art
+
+#endif // ART_RUNTIME_METHOD_BSS_MAPPING_H_
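To make the index_mask encoding concrete, consider a hypothetical entry covering method indexes 16, 18 and 20 with 8-byte .bss entries (the values are invented for illustration; the calls are the ones defined above):

    MethodBssMappingEntry entry;
    entry.method_index = 20u;    // highest covered index, stored in plain form
    entry.index_mask = 0x5000u;  // bits 14 and 12 set -> indexes 18 and 16 are also covered
    entry.bss_offset = 0x100u;   // .bss offset of the entry for method index 20

    CHECK(entry.CoversIndex(20u) && entry.CoversIndex(18u) && entry.CoversIndex(16u));
    CHECK(!entry.CoversIndex(17u));                               // bit 13 is clear
    CHECK_EQ(entry.GetBssOffset(20u, /*entry_size*/ 8u), 0x100u);
    CHECK_EQ(entry.GetBssOffset(18u, /*entry_size*/ 8u), 0xf8u);  // one 8-byte entry below
    CHECK_EQ(entry.GetBssOffset(16u, /*entry_size*/ 8u), 0xf0u);  // two 8-byte entries below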
diff --git a/runtime/method_handles.h b/runtime/method_handles.h
index e8a2dce42e..e02e62052c 100644
--- a/runtime/method_handles.h
+++ b/runtime/method_handles.h
@@ -23,6 +23,7 @@
#include "handle.h"
#include "jvalue.h"
#include "mirror/class.h"
+#include "stack.h"
namespace art {
diff --git a/runtime/method_reference.h b/runtime/method_reference.h
index 0b0afe64a6..3948ed5bb9 100644
--- a/runtime/method_reference.h
+++ b/runtime/method_reference.h
@@ -44,6 +44,56 @@ struct MethodReferenceComparator {
}
};
+// Compare the actual referenced method signatures. Used for method reference deduplication.
+struct MethodReferenceValueComparator {
+ bool operator()(MethodReference mr1, MethodReference mr2) const {
+ if (mr1.dex_file == mr2.dex_file) {
+ DCHECK_EQ(mr1.dex_method_index < mr2.dex_method_index, SlowCompare(mr1, mr2));
+ return mr1.dex_method_index < mr2.dex_method_index;
+ } else {
+ return SlowCompare(mr1, mr2);
+ }
+ }
+
+ bool SlowCompare(MethodReference mr1, MethodReference mr2) const {
+ // The order is the same as for method ids in a single dex file.
+ // Compare the class descriptors first.
+ const DexFile::MethodId& mid1 = mr1.dex_file->GetMethodId(mr1.dex_method_index);
+ const DexFile::MethodId& mid2 = mr2.dex_file->GetMethodId(mr2.dex_method_index);
+ int descriptor_diff = strcmp(mr1.dex_file->StringByTypeIdx(mid1.class_idx_),
+ mr2.dex_file->StringByTypeIdx(mid2.class_idx_));
+ if (descriptor_diff != 0) {
+ return descriptor_diff < 0;
+ }
+ // Compare names second.
+ int name_diff = strcmp(mr1.dex_file->GetMethodName(mid1), mr2.dex_file->GetMethodName(mid2));
+ if (name_diff != 0) {
+ return name_diff < 0;
+ }
+ // And then compare proto ids, starting with return type comparison.
+ const DexFile::ProtoId& prid1 = mr1.dex_file->GetProtoId(mid1.proto_idx_);
+ const DexFile::ProtoId& prid2 = mr2.dex_file->GetProtoId(mid2.proto_idx_);
+ int return_type_diff = strcmp(mr1.dex_file->StringByTypeIdx(prid1.return_type_idx_),
+ mr2.dex_file->StringByTypeIdx(prid2.return_type_idx_));
+ if (return_type_diff != 0) {
+ return return_type_diff < 0;
+ }
+ // And finishing with lexicographical parameter comparison.
+ const DexFile::TypeList* params1 = mr1.dex_file->GetProtoParameters(prid1);
+ size_t param1_size = (params1 != nullptr) ? params1->Size() : 0u;
+ const DexFile::TypeList* params2 = mr2.dex_file->GetProtoParameters(prid2);
+ size_t param2_size = (params2 != nullptr) ? params2->Size() : 0u;
+ for (size_t i = 0, num = std::min(param1_size, param2_size); i != num; ++i) {
+ int param_diff = strcmp(mr1.dex_file->StringByTypeIdx(params1->GetTypeItem(i).type_idx_),
+ mr2.dex_file->StringByTypeIdx(params2->GetTypeItem(i).type_idx_));
+ if (param_diff != 0) {
+ return param_diff < 0;
+ }
+ }
+ return param1_size < param2_size;
+ }
+};
+
} // namespace art
#endif // ART_RUNTIME_METHOD_REFERENCE_H_
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index 2581ac214f..a217193522 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -20,7 +20,6 @@
#include "class.h"
#include "gc_root.h"
#include "object.h"
-#include "object_callbacks.h"
#include "read_barrier_option.h"
#include "thread.h"
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 51d9d24619..99565c6f5d 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -22,7 +22,6 @@
#include "gc/allocator_type.h"
#include "obj_ptr.h"
#include "object.h"
-#include "object_callbacks.h"
namespace art {
@@ -189,6 +188,16 @@ class MANAGED PrimitiveArray : public Array {
DISALLOW_IMPLICIT_CONSTRUCTORS(PrimitiveArray);
};
+// Declare the different primitive arrays. Instantiations will be in array.cc.
+extern template class PrimitiveArray<uint8_t>; // BooleanArray
+extern template class PrimitiveArray<int8_t>; // ByteArray
+extern template class PrimitiveArray<uint16_t>; // CharArray
+extern template class PrimitiveArray<double>; // DoubleArray
+extern template class PrimitiveArray<float>; // FloatArray
+extern template class PrimitiveArray<int32_t>; // IntArray
+extern template class PrimitiveArray<int64_t>; // LongArray
+extern template class PrimitiveArray<int16_t>; // ShortArray
+
// Either an IntArray or a LongArray.
class PointerArray : public Array {
public:
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 5122b37aa6..c8d455711d 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -23,13 +23,14 @@
#include "art_method.h"
#include "base/array_slice.h"
#include "base/length_prefixed_array.h"
-#include "class_linker-inl.h"
+#include "class_linker.h"
#include "class_loader.h"
#include "common_throws.h"
+#include "dex_cache.h"
#include "dex_file-inl.h"
#include "gc/heap-inl.h"
#include "iftable.h"
-#include "object_array-inl.h"
+#include "object_array.h"
#include "object-inl.h"
#include "read_barrier-inl.h"
#include "reference-inl.h"
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 06ee3d36fe..e4b53209e9 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1143,9 +1143,7 @@ uint32_t Class::Depth() {
dex::TypeIndex Class::FindTypeIndexInOtherDexFile(const DexFile& dex_file) {
std::string temp;
const DexFile::TypeId* type_id = dex_file.FindTypeId(GetDescriptor(&temp));
- return (type_id == nullptr)
- ? dex::TypeIndex(DexFile::kDexNoIndex)
- : dex_file.GetIndexForTypeId(*type_id);
+ return (type_id == nullptr) ? dex::TypeIndex() : dex_file.GetIndexForTypeId(*type_id);
}
template <PointerSize kPointerSize, bool kTransactionActive>
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index dfb2788c51..913ab796a1 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_CLASS_H_
#define ART_RUNTIME_MIRROR_CLASS_H_
+#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/iteration_range.h"
#include "dex_file.h"
@@ -29,7 +30,6 @@
#include "modifiers.h"
#include "object.h"
#include "object_array.h"
-#include "object_callbacks.h"
#include "primitive.h"
#include "read_barrier_option.h"
#include "stride_iterator.h"
diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h
index 708665d46b..75a3800989 100644
--- a/runtime/mirror/class_ext.h
+++ b/runtime/mirror/class_ext.h
@@ -23,7 +23,6 @@
#include "gc_root.h"
#include "object.h"
#include "object_array.h"
-#include "object_callbacks.h"
#include "string.h"
namespace art {
diff --git a/runtime/mirror/class_loader-inl.h b/runtime/mirror/class_loader-inl.h
index f5ecdaea26..39c8ee0d60 100644
--- a/runtime/mirror/class_loader-inl.h
+++ b/runtime/mirror/class_loader-inl.h
@@ -19,9 +19,7 @@
#include "class_loader.h"
-#include "base/mutex-inl.h"
#include "class_table-inl.h"
-#include "obj_ptr-inl.h"
namespace art {
namespace mirror {
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 6e1f44bb46..381d96b541 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -17,7 +17,10 @@
#ifndef ART_RUNTIME_MIRROR_CLASS_LOADER_H_
#define ART_RUNTIME_MIRROR_CLASS_LOADER_H_
+#include "base/mutex.h"
#include "object.h"
+#include "object_reference.h"
+#include "obj_ptr.h"
namespace art {
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index c95d92e34b..96e347576a 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -23,6 +23,7 @@
#include "gc/heap.h"
#include "globals.h"
#include "linear_alloc.h"
+#include "oat_file.h"
#include "object.h"
#include "object-inl.h"
#include "object_array-inl.h"
diff --git a/runtime/mirror/executable.h b/runtime/mirror/executable.h
index 6c465f6bbb..8a28f66868 100644
--- a/runtime/mirror/executable.h
+++ b/runtime/mirror/executable.h
@@ -20,7 +20,6 @@
#include "accessible_object.h"
#include "gc_root.h"
#include "object.h"
-#include "object_callbacks.h"
#include "read_barrier_option.h"
namespace art {
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index 222d709cef..40186a689b 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -22,7 +22,6 @@
#include "gc_root.h"
#include "obj_ptr.h"
#include "object.h"
-#include "object_callbacks.h"
#include "read_barrier_option.h"
namespace art {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index baed5f167c..95f829dc23 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -26,8 +26,7 @@
#include "class-inl.h"
#include "class_flags.h"
#include "class_linker.h"
-#include "class_loader-inl.h"
-#include "dex_cache-inl.h"
+#include "dex_cache.h"
#include "lock_word-inl.h"
#include "monitor.h"
#include "object_array-inl.h"
@@ -899,6 +898,36 @@ inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
return success;
}
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakReleaseObjectWithoutWriteBarrier(
+ MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(new_value);
+ }
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(old_value);
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+ }
+ HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+ bool success = atomic_addr->CompareExchangeWeakRelease(old_ref.reference_,
+ new_ref.reference_);
+ return success;
+}
+
template<bool kIsStatic,
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 58e7c20667..69365af7fd 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -221,6 +221,36 @@ inline bool Object::CasFieldStrongRelaxedObjectWithoutWriteBarrier(
return success;
}
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldStrongReleaseObjectWithoutWriteBarrier(
+ MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(new_value);
+ }
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(old_value);
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+ }
+ HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+ bool success = atomic_addr->CompareExchangeStrongRelease(old_ref.reference_,
+ new_ref.reference_);
+ return success;
+}
+
} // namespace mirror
} // namespace art
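Both new CAS helpers (the weak variant in object-inl.h above and the strong one here) mirror the existing relaxed versions but perform the exchange with release semantics, so stores made to the new referent before the CAS are published together with the reference itself. The runtime's Atomic<uint32_t> is a thin wrapper over std::atomic, so the standard-library sketch below shows the same ordering contract; it is illustrative only and not ART code.

#include <atomic>
#include <cstdint>

// Release compare-exchange idiom used by the new helpers: success uses
// release ordering, failure only needs relaxed ordering because nothing
// was published.
inline bool CasRefWeakRelease(std::atomic<uint32_t>* slot,
                              uint32_t expected, uint32_t desired) {
  // Weak form may fail spuriously; callers typically retry in a loop.
  return slot->compare_exchange_weak(expected, desired,
                                     std::memory_order_release,
                                     std::memory_order_relaxed);
}

inline bool CasRefStrongRelease(std::atomic<uint32_t>* slot,
                                uint32_t expected, uint32_t desired) {
  return slot->compare_exchange_strong(expected, desired,
                                       std::memory_order_release,
                                       std::memory_order_relaxed);
}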
diff --git a/runtime/mirror/object-refvisitor-inl.h b/runtime/mirror/object-refvisitor-inl.h
index 49ab7c2a53..f5ab4dd8db 100644
--- a/runtime/mirror/object-refvisitor-inl.h
+++ b/runtime/mirror/object-refvisitor-inl.h
@@ -19,7 +19,9 @@
#include "object-inl.h"
+#include "class_loader-inl.h"
#include "class-refvisitor-inl.h"
+#include "dex_cache-inl.h"
namespace art {
namespace mirror {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 35a1b733e1..9cf42522d1 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -350,10 +350,25 @@ class MANAGED LOCKABLE Object {
template<bool kTransactionActive,
bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldWeakReleaseObjectWithoutWriteBarrier(MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
ObjPtr<Object> old_value,
ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldStrongReleaseObjectWithoutWriteBarrier(MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value)
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index a449b41087..84e54948dd 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -19,7 +19,9 @@
#include "reference.h"
+#include "gc_root-inl.h"
#include "obj_ptr-inl.h"
+#include "runtime.h"
namespace art {
namespace mirror {
@@ -47,6 +49,12 @@ inline void FinalizerReference::SetZombie(ObjPtr<Object> zombie) {
return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
}
+template<ReadBarrierOption kReadBarrierOption>
+inline Class* Reference::GetJavaLangRefReference() {
+ DCHECK(!java_lang_ref_Reference_.IsNull());
+ return java_lang_ref_Reference_.Read<kReadBarrierOption>();
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index cfcbd5ae1f..b10c29444e 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -18,6 +18,8 @@
#define ART_RUNTIME_MIRROR_REFERENCE_H_
#include "base/enums.h"
+#include "base/macros.h"
+#include "base/mutex.h"
#include "class.h"
#include "gc_root.h"
#include "obj_ptr.h"
@@ -97,10 +99,7 @@ class MANAGED Reference : public Object {
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- static Class* GetJavaLangRefReference() REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(!java_lang_ref_Reference_.IsNull());
- return java_lang_ref_Reference_.Read<kReadBarrierOption>();
- }
+ static ALWAYS_INLINE Class* GetJavaLangRefReference() REQUIRES_SHARED(Locks::mutator_lock_);
static void SetClass(ObjPtr<Class> klass);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index c00cf916a8..53de821498 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -19,6 +19,7 @@
#include "class.h"
#include "class-inl.h"
#include "gc/accounting/card_table-inl.h"
+#include "gc_root-inl.h"
#include "object-inl.h"
#include "handle_scope-inl.h"
#include "string.h"
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index d32d8dca26..87e8a1f659 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -19,7 +19,6 @@
#include "gc_root.h"
#include "object.h"
-#include "object_callbacks.h"
namespace art {
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 57b20a193b..75606391ad 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -26,7 +26,6 @@
#include "common_throws.h"
#include "gc/heap-inl.h"
#include "globals.h"
-#include "intern_table.h"
#include "runtime.h"
#include "thread.h"
#include "utf.h"
@@ -161,10 +160,6 @@ class SetStringCountAndValueVisitorFromString {
const int32_t offset_;
};
-inline ObjPtr<String> String::Intern() {
- return Runtime::Current()->GetInternTable()->InternWeak(this);
-}
-
inline uint16_t String::CharAt(int32_t index) {
int32_t count = GetLength();
if (UNLIKELY((index < 0) || (index >= count))) {
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index de0e75b083..82ff6ddead 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -18,8 +18,11 @@
#include "arch/memcmp16.h"
#include "array.h"
+#include "base/array_ref.h"
+#include "base/stl_util.h"
#include "class-inl.h"
#include "gc/accounting/card_table-inl.h"
+#include "gc_root-inl.h"
#include "handle_scope-inl.h"
#include "intern_table.h"
#include "object-inl.h"
@@ -418,5 +421,9 @@ std::string String::PrettyStringDescriptor() {
return PrettyDescriptor(ToModifiedUtf8().c_str());
}
+ObjPtr<String> String::Intern() {
+ return Runtime::Current()->GetInternTable()->InternWeak(this);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index b59bbfbd68..7fbe8bd3a6 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -20,7 +20,6 @@
#include "gc_root.h"
#include "gc/allocator_type.h"
#include "object.h"
-#include "object_callbacks.h"
namespace art {
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index e50409f2c5..7027410ca6 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -26,7 +26,9 @@
#include "object-inl.h"
#include "object_array.h"
#include "object_array-inl.h"
+#include "object_callbacks.h"
#include "stack_trace_element.h"
+#include "string.h"
#include "utils.h"
#include "well_known_classes.h"
@@ -169,5 +171,17 @@ void Throwable::VisitRoots(RootVisitor* visitor) {
java_lang_Throwable_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
+Object* Throwable::GetStackState() {
+ return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
+}
+
+Object* Throwable::GetStackTrace() {
+ return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
+}
+
+String* Throwable::GetDetailMessage() {
+ return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 0a4ab6fe5c..fb45228f49 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -19,23 +19,22 @@
#include "gc_root.h"
#include "object.h"
-#include "object_callbacks.h"
-#include "string.h"
namespace art {
+class RootVisitor;
struct ThrowableOffsets;
namespace mirror {
+class String;
+
// C++ mirror of java.lang.Throwable
class MANAGED Throwable : public Object {
public:
void SetDetailMessage(ObjPtr<String> new_detail_message) REQUIRES_SHARED(Locks::mutator_lock_);
- String* GetDetailMessage() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
- }
+ String* GetDetailMessage() REQUIRES_SHARED(Locks::mutator_lock_);
std::string Dump() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -59,12 +58,8 @@ class MANAGED Throwable : public Object {
REQUIRES_SHARED(Locks::mutator_lock_);
private:
- Object* GetStackState() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
- }
- Object* GetStackTrace() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
- }
+ Object* GetStackState() REQUIRES_SHARED(Locks::mutator_lock_);
+ Object* GetStackTrace() REQUIRES_SHARED(Locks::mutator_lock_);
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
HeapReference<Object> backtrace_; // Note this is Java volatile:
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index bb33047895..a617818c3f 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -31,7 +31,9 @@
#include "lock_word-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
+#include "object_callbacks.h"
#include "scoped_thread_state_change-inl.h"
+#include "stack.h"
#include "thread.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"
@@ -437,17 +439,11 @@ void Monitor::Lock(Thread* self) {
<< " in " << ArtMethod::PrettyMethod(m) << " for "
<< PrettyDuration(MsToNs(wait_ms));
}
- const char* owners_filename;
- int32_t owners_line_number;
- TranslateLocation(owners_method,
- owners_dex_pc,
- &owners_filename,
- &owners_line_number);
LogContentionEvent(self,
wait_ms,
sample_percent,
- owners_filename,
- owners_line_number);
+ owners_method,
+ owners_dex_pc);
}
}
}
diff --git a/runtime/monitor.h b/runtime/monitor.h
index e80d31cdd5..96c5a5b7cc 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -30,13 +30,13 @@
#include "base/mutex.h"
#include "gc_root.h"
#include "lock_word.h"
-#include "object_callbacks.h"
#include "read_barrier_option.h"
#include "thread_state.h"
namespace art {
class ArtMethod;
+class IsMarkedVisitor;
class LockWord;
template<class T> class Handle;
class StackVisitor;
@@ -181,8 +181,11 @@ class Monitor {
REQUIRES_SHARED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS; // For m->Install(self)
- void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
- const char* owner_filename, int32_t owner_line_number)
+ void LogContentionEvent(Thread* self,
+ uint32_t wait_ms,
+ uint32_t sample_percent,
+ ArtMethod* owner_method,
+ uint32_t owner_dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_);
static void FailedUnlock(mirror::Object* obj,
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index 1dd60f8d78..74623dab31 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -15,96 +15,94 @@
*/
#include "monitor.h"
-#include "thread.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <log/log.h>
+#include <log/log_event_list.h>
+
+#include "art_method.h"
+#include "thread.h"
#define EVENT_LOG_TAG_dvm_lock_sample 20003
namespace art {
-static void Set4LE(uint8_t* buf, uint32_t val) {
- *buf++ = (uint8_t)(val);
- *buf++ = (uint8_t)(val >> 8);
- *buf++ = (uint8_t)(val >> 16);
- *buf = (uint8_t)(val >> 24);
-}
+void Monitor::LogContentionEvent(Thread* self,
+ uint32_t wait_ms,
+ uint32_t sample_percent,
+ ArtMethod* owner_method,
+ uint32_t owner_dex_pc) {
+ android_log_event_list ctx(EVENT_LOG_TAG_dvm_lock_sample);
-static char* EventLogWriteInt(char* dst, int value) {
- *dst++ = EVENT_TYPE_INT;
- Set4LE(reinterpret_cast<uint8_t*>(dst), value);
- return dst + 4;
-}
+ const char* owner_filename;
+ int32_t owner_line_number;
+ TranslateLocation(owner_method, owner_dex_pc, &owner_filename, &owner_line_number);
-static char* EventLogWriteString(char* dst, const char* value, size_t len) {
- *dst++ = EVENT_TYPE_STRING;
- len = len < 32 ? len : 32;
- Set4LE(reinterpret_cast<uint8_t*>(dst), len);
- dst += 4;
- memcpy(dst, value, len);
- return dst + len;
-}
+ // Emit the process name, <= 37 bytes.
+ {
+ int fd = open("/proc/self/cmdline", O_RDONLY);
+ char procName[33];
+ memset(procName, 0, sizeof(procName));
+ read(fd, procName, sizeof(procName) - 1);
+ close(fd);
+ ctx << procName;
+ }
-void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
- const char* owner_filename, int32_t owner_line_number) {
- // Emit the event list length, 1 byte.
- char eventBuffer[174];
- char* cp = eventBuffer;
- *cp++ = 9;
+ // Emit the sensitive thread ("main thread") status. We follow tradition that this corresponds
+ // to a C++ bool's value, but be explicit.
+ constexpr uint32_t kIsSensitive = 1u;
+ constexpr uint32_t kIsNotSensitive = 0u;
+ ctx << (Thread::IsSensitiveThread() ? kIsSensitive : kIsNotSensitive);
+
+ // Emit self thread name string.
+ {
+ std::string thread_name;
+ self->GetThreadName(thread_name);
+ ctx << thread_name;
+ }
- // Emit the process name, <= 37 bytes.
- int fd = open("/proc/self/cmdline", O_RDONLY);
- char procName[33];
- memset(procName, 0, sizeof(procName));
- read(fd, procName, sizeof(procName) - 1);
- close(fd);
- size_t len = strlen(procName);
- cp = EventLogWriteString(cp, procName, len);
-
- // Emit the sensitive thread ("main thread") status, 5 bytes.
- cp = EventLogWriteInt(cp, Thread::IsSensitiveThread());
-
- // Emit self thread name string, <= 37 bytes.
- std::string thread_name;
- self->GetThreadName(thread_name);
- cp = EventLogWriteString(cp, thread_name.c_str(), thread_name.size());
-
- // Emit the wait time, 5 bytes.
- cp = EventLogWriteInt(cp, wait_ms);
-
- // Emit the source code file name, <= 37 bytes.
- uint32_t pc;
- ArtMethod* m = self->GetCurrentMethod(&pc);
- const char* filename;
- int32_t line_number;
- TranslateLocation(m, pc, &filename, &line_number);
- cp = EventLogWriteString(cp, filename, strlen(filename));
-
- // Emit the source code line number, 5 bytes.
- cp = EventLogWriteInt(cp, line_number);
-
- // Emit the lock owner source code file name, <= 37 bytes.
+ // Emit the wait time.
+ ctx << wait_ms;
+
+ const char* filename = nullptr;
+ {
+ uint32_t pc;
+ ArtMethod* m = self->GetCurrentMethod(&pc);
+ int32_t line_number;
+ TranslateLocation(m, pc, &filename, &line_number);
+
+ // Emit the source code file name.
+ ctx << filename;
+
+ // Emit the source code line number.
+ ctx << line_number;
+
+ // Emit the method name.
+ ctx << ArtMethod::PrettyMethod(m);
+ }
+
+ // Emit the lock owner source code file name.
if (owner_filename == nullptr) {
owner_filename = "";
} else if (strcmp(filename, owner_filename) == 0) {
// Common case, so save on log space.
owner_filename = "-";
}
- cp = EventLogWriteString(cp, owner_filename, strlen(owner_filename));
+ ctx << owner_filename;
+
+ // Emit the source code line number.
+ ctx << owner_line_number;
- // Emit the source code line number, 5 bytes.
- cp = EventLogWriteInt(cp, owner_line_number);
+ // Emit the owner method name.
+ ctx << ArtMethod::PrettyMethod(owner_method);
- // Emit the sample percentage, 5 bytes.
- cp = EventLogWriteInt(cp, sample_percent);
+ // Emit the sample percentage.
+ ctx << sample_percent;
- CHECK_LE((size_t)(cp - eventBuffer), sizeof(eventBuffer));
- android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample, EVENT_TYPE_LIST, eventBuffer,
- (size_t)(cp - eventBuffer));
+ ctx << LOG_ID_EVENTS;
}
} // namespace art
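The Android implementation of LogContentionEvent drops the hand-rolled packing helpers (Set4LE, EventLogWriteInt, EventLogWriteString and the fixed 174-byte buffer) in favor of liblog's android_log_event_list, which types and sizes each appended element itself, and it now derives the owner's file and line from the ArtMethod* and dex pc passed in by Monitor::Lock. A minimal sketch of the streaming pattern, reusing the tag defined above and only the operators already exercised in this file, is shown below; the logged values are placeholders.

#include <cstdint>

#include <log/log_event_list.h>

#define EVENT_LOG_TAG_dvm_lock_sample 20003  // Same tag as above.

// Each operator<< appends one typed list element; streaming a log id
// writes the assembled list to that log buffer.
static void LogSampleSketch(uint32_t wait_ms, uint32_t sample_percent) {
  android_log_event_list ctx(EVENT_LOG_TAG_dvm_lock_sample);
  ctx << "placeholder-process-name" << wait_ms << sample_percent;
  ctx << LOG_ID_EVENTS;  // Flush to the 'events' buffer.
}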
diff --git a/runtime/monitor_linux.cc b/runtime/monitor_linux.cc
index 1c77ac0eb3..667866149b 100644
--- a/runtime/monitor_linux.cc
+++ b/runtime/monitor_linux.cc
@@ -18,7 +18,7 @@
namespace art {
-void Monitor::LogContentionEvent(Thread*, uint32_t, uint32_t, const char*, int32_t) {
+void Monitor::LogContentionEvent(Thread*, uint32_t, uint32_t, ArtMethod*, uint32_t) {
}
} // namespace art
diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc
index 0f4e2387cc..48e9a6b47d 100644
--- a/runtime/monitor_pool.cc
+++ b/runtime/monitor_pool.cc
@@ -18,7 +18,7 @@
#include "base/logging.h"
#include "base/mutex-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "monitor.h"
namespace art {
diff --git a/runtime/monitor_pool_test.cc b/runtime/monitor_pool_test.cc
index a111c6c16a..5463877b83 100644
--- a/runtime/monitor_pool_test.cc
+++ b/runtime/monitor_pool_test.cc
@@ -18,7 +18,7 @@
#include "common_runtime_test.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 870402d301..ad009668bf 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -31,6 +31,7 @@
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/string.h"
+#include "native_util.h"
#include "oat_file.h"
#include "oat_file_assistant.h"
#include "oat_file_manager.h"
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index bb8233b9e8..e1eae21df9 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -39,6 +39,7 @@
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/object_array-inl.h"
+#include "native_util.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "scoped_fast_native_object_access-inl.h"
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 18b871cca3..fed9c1cf5b 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -52,6 +52,7 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version);
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
+#include "native_util.h"
#include "runtime.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 6c41d515de..e86e64ed6a 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -25,6 +25,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 4c6f53081a..31aeba06f9 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -28,10 +28,12 @@
#include "jit/jit.h"
#include "jni_internal.h"
#include "JNIHelp.h"
+#include "native_util.h"
#include "non_debuggable_classes.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedUtfChars.h"
-#include "thread-inl.h"
+#include "stack.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "trace.h"
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 9e07a5c1a4..d3377be393 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -36,6 +36,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
+#include "native_util.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/native/java_lang_Object.cc b/runtime/native/java_lang_Object.cc
index c9841d1b23..d52bf0490b 100644
--- a/runtime/native/java_lang_Object.cc
+++ b/runtime/native/java_lang_Object.cc
@@ -20,6 +20,7 @@
#include "jni_internal.h"
#include "mirror/object-inl.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
namespace art {
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 4928c01c96..ac0d6337b2 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -24,6 +24,7 @@
#include "mirror/object-inl.h"
#include "mirror/string.h"
#include "mirror/string-inl.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc
index c1292ef6c4..9c2e91843e 100644
--- a/runtime/native/java_lang_StringFactory.cc
+++ b/runtime/native/java_lang_StringFactory.cc
@@ -22,6 +22,7 @@
#include "jni_internal.h"
#include "mirror/object-inl.h"
#include "mirror/string.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 264b427460..0e5d740cab 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -26,6 +26,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
namespace art {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index f3254c4b18..e4d1705d28 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -22,6 +22,7 @@
#include "jni_internal.h"
#include "monitor.h"
#include "mirror/object.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedUtfChars.h"
diff --git a/runtime/native/java_lang_Throwable.cc b/runtime/native/java_lang_Throwable.cc
index b69fbef8d8..03b7f9dfba 100644
--- a/runtime/native/java_lang_Throwable.cc
+++ b/runtime/native/java_lang_Throwable.cc
@@ -19,6 +19,7 @@
#include "nativehelper/jni_macros.h"
#include "jni_internal.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
#include "thread.h"
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 55955e7c57..fc50d5584d 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -22,6 +22,7 @@
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
+#include "native_util.h"
#include "obj_ptr.h"
#include "scoped_fast_native_object_access-inl.h"
#include "ScopedLocalRef.h"
diff --git a/runtime/native/java_lang_Void.cc b/runtime/native/java_lang_Void.cc
index b0d63ef222..af83dd1a79 100644
--- a/runtime/native/java_lang_Void.cc
+++ b/runtime/native/java_lang_Void.cc
@@ -20,6 +20,7 @@
#include "class_linker-inl.h"
#include "jni_internal.h"
+#include "native_util.h"
#include "runtime.h"
#include "scoped_fast_native_object_access-inl.h"
diff --git a/runtime/native/java_lang_invoke_MethodHandleImpl.cc b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
index 63168cec24..2e3b4d41ef 100644
--- a/runtime/native/java_lang_invoke_MethodHandleImpl.cc
+++ b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
@@ -24,6 +24,7 @@
#include "mirror/field.h"
#include "mirror/method.h"
#include "mirror/method_handle_impl.h"
+#include "native_util.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/native/java_lang_ref_FinalizerReference.cc b/runtime/native/java_lang_ref_FinalizerReference.cc
index c75837aa96..72af5f7ea7 100644
--- a/runtime/native/java_lang_ref_FinalizerReference.cc
+++ b/runtime/native/java_lang_ref_FinalizerReference.cc
@@ -23,6 +23,7 @@
#include "jni_internal.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
namespace art {
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index 606e656f3c..524a18ca20 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -23,6 +23,7 @@
#include "jni_internal.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
namespace art {
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index 96623950aa..5be317147b 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -21,11 +21,12 @@
#include "class_linker-inl.h"
#include "common_throws.h"
#include "dex_file-inl.h"
+#include "handle_scope-inl.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
-#include "handle_scope-inl.h"
namespace art {
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index d1953adacf..242e87afa9 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -27,6 +27,7 @@
#include "mirror/class-inl.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
+#include "native_util.h"
#include "reflection.h"
#include "scoped_fast_native_object_access-inl.h"
#include "well_known_classes.h"
diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc
index 256a3d04de..2aad12d3b8 100644
--- a/runtime/native/java_lang_reflect_Executable.cc
+++ b/runtime/native/java_lang_reflect_Executable.cc
@@ -27,6 +27,7 @@
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "native_util.h"
#include "reflection.h"
#include "scoped_fast_native_object_access-inl.h"
#include "well_known_classes.h"
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index e38bcd691a..f19004dab5 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -28,6 +28,7 @@
#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/field.h"
+#include "native_util.h"
#include "reflection-inl.h"
#include "scoped_fast_native_object_access-inl.h"
#include "utils.h"
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index c9e8dba551..cbbb6a8ea3 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -27,6 +27,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "native_util.h"
#include "reflection.h"
#include "scoped_fast_native_object_access-inl.h"
#include "well_known_classes.h"
diff --git a/runtime/native/java_lang_reflect_Parameter.cc b/runtime/native/java_lang_reflect_Parameter.cc
index 92a7ac9836..c4ab5d69fc 100644
--- a/runtime/native/java_lang_reflect_Parameter.cc
+++ b/runtime/native/java_lang_reflect_Parameter.cc
@@ -24,6 +24,7 @@
#include "dex_file-inl.h"
#include "dex_file_annotations.h"
#include "jni_internal.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
#include "utils.h"
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index 518aaa7317..691ed28b0b 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -23,6 +23,7 @@
#include "mirror/class_loader.h"
#include "mirror/object_array.h"
#include "mirror/string.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
#include "verify_object.h"
diff --git a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
index 101f386ff8..bd4b0fec70 100644
--- a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
+++ b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
@@ -21,6 +21,7 @@
#include "arch/instruction_set.h"
#include "atomic.h"
#include "jni_internal.h"
+#include "native_util.h"
namespace art {
diff --git a/runtime/native/libcore_util_CharsetUtils.cc b/runtime/native/libcore_util_CharsetUtils.cc
index c388ea1438..38634e6d0c 100644
--- a/runtime/native/libcore_util_CharsetUtils.cc
+++ b/runtime/native/libcore_util_CharsetUtils.cc
@@ -23,6 +23,7 @@
#include "jni_internal.h"
#include "mirror/string.h"
#include "mirror/string-inl.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
#include "ScopedPrimitiveArray.h"
#include "unicode/utf16.h"
diff --git a/runtime/native/native_util.h b/runtime/native/native_util.h
new file mode 100644
index 0000000000..98384e0178
--- /dev/null
+++ b/runtime/native/native_util.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_NATIVE_UTIL_H_
+#define ART_RUNTIME_NATIVE_NATIVE_UTIL_H_
+
+#include <jni.h>
+
+#include "android-base/logging.h"
+#include "base/macros.h"
+#include "ScopedLocalRef.h"
+
+namespace art {
+
+ALWAYS_INLINE inline void RegisterNativeMethodsInternal(JNIEnv* env,
+ const char* jni_class_name,
+ const JNINativeMethod* methods,
+ jint method_count) {
+ ScopedLocalRef<jclass> c(env, env->FindClass(jni_class_name));
+ if (c.get() == nullptr) {
+ LOG(FATAL) << "Couldn't find class: " << jni_class_name;
+ }
+ jint jni_result = env->RegisterNatives(c.get(), methods, method_count);
+ CHECK_EQ(JNI_OK, jni_result);
+}
+
+#define REGISTER_NATIVE_METHODS(jni_class_name) \
+ RegisterNativeMethodsInternal(env, (jni_class_name), gMethods, arraysize(gMethods))
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_NATIVE_UTIL_H_
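The new native_util.h centralizes the FindClass-plus-RegisterNatives boilerplate that each runtime/native/*.cc previously repeated, which is why the files above now include it. REGISTER_NATIVE_METHODS assumes the including file defines a gMethods table; the sketch below shows the intended shape of a caller, with a hypothetical class, method, and register function used purely for illustration.

#include <jni.h>

#include "native_util.h"

namespace art {

// Hypothetical native method; real files implement their own natives.
static jboolean Example_isEnabled(JNIEnv*, jclass) {
  return JNI_TRUE;
}

static JNINativeMethod gMethods[] = {
  { "isEnabled", "()Z", reinterpret_cast<void*>(Example_isEnabled) },
};

// The macro expands to RegisterNativeMethodsInternal(env, name, gMethods,
// arraysize(gMethods)), aborting if the class cannot be found.
void register_example_natives(JNIEnv* env) {
  REGISTER_NATIVE_METHODS("dalvik/system/Example");
}

}  // namespace art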
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
index a860977c4c..925b90931c 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
@@ -21,6 +21,7 @@
#include "base/logging.h"
#include "debugger.h"
#include "jni_internal.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
#include "ScopedPrimitiveArray.h"
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 71c5b50216..0a254aca54 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -23,6 +23,7 @@
#include "debugger.h"
#include "gc/heap.h"
#include "jni_internal.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index b42cedfaf0..e78c9da5e5 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -29,9 +29,9 @@
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
+#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
-
namespace art {
static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj, jlong offset,
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index cbc502487f..cbff0bb2f2 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -45,7 +45,7 @@
#include "base/unix_file/fd_file.h"
#include "oat_quick_method_header.h"
#include "os.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils.h"
#endif
diff --git a/runtime/non_debuggable_classes.cc b/runtime/non_debuggable_classes.cc
index 829ea65876..9cc7e60fa8 100644
--- a/runtime/non_debuggable_classes.cc
+++ b/runtime/non_debuggable_classes.cc
@@ -21,7 +21,7 @@
#include "mirror/class-inl.h"
#include "obj_ptr-inl.h"
#include "ScopedLocalRef.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/oat.cc b/runtime/oat.cc
index d14b399a9a..21e20e9b74 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -23,6 +23,7 @@
#include "arch/instruction_set_features.h"
#include "base/bit_utils.h"
+#include "base/strlcpy.h"
namespace art {
@@ -71,6 +72,7 @@ OatHeader::OatHeader(InstructionSet instruction_set,
instruction_set_(instruction_set),
instruction_set_features_bitmap_(instruction_set_features->AsBitmap()),
dex_file_count_(dex_file_count),
+ oat_dex_files_offset_(0),
executable_offset_(0),
interpreter_to_interpreter_bridge_offset_(0),
interpreter_to_compiled_code_bridge_offset_(0),
@@ -203,6 +205,20 @@ uint32_t OatHeader::GetInstructionSetFeaturesBitmap() const {
return instruction_set_features_bitmap_;
}
+uint32_t OatHeader::GetOatDexFilesOffset() const {
+ DCHECK(IsValid());
+ DCHECK_GT(oat_dex_files_offset_, sizeof(OatHeader));
+ return oat_dex_files_offset_;
+}
+
+void OatHeader::SetOatDexFilesOffset(uint32_t oat_dex_files_offset) {
+ DCHECK_GT(oat_dex_files_offset, sizeof(OatHeader));
+ DCHECK(IsValid());
+ DCHECK_EQ(oat_dex_files_offset_, 0u);
+
+ oat_dex_files_offset_ = oat_dex_files_offset;
+}
+
uint32_t OatHeader::GetExecutableOffset() const {
DCHECK(IsValid());
DCHECK_ALIGNED(executable_offset_, kPageSize);
@@ -505,9 +521,9 @@ void OatHeader::Flatten(const SafeMap<std::string, std::string>* key_value_store
SafeMap<std::string, std::string>::const_iterator it = key_value_store->begin();
SafeMap<std::string, std::string>::const_iterator end = key_value_store->end();
for ( ; it != end; ++it) {
- strcpy(data_ptr, it->first.c_str());
+ strlcpy(data_ptr, it->first.c_str(), it->first.length() + 1);
data_ptr += it->first.length() + 1;
- strcpy(data_ptr, it->second.c_str());
+ strlcpy(data_ptr, it->second.c_str(), it->second.length() + 1);
data_ptr += it->second.length() + 1;
}
}
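Swapping strcpy for strlcpy in OatHeader::Flatten is behavior-preserving here, since the bound passed is the source length plus the terminator, but it makes the bound explicit; base/strlcpy.h (included above) is expected to supply strlcpy on hosts whose libc lacks it. A small sketch of the truncation-detection idiom strlcpy enables, with a hypothetical helper name:

#include <cstddef>

#include "base/strlcpy.h"  // Supplies strlcpy() where libc does not.

// strlcpy copies at most dst_size - 1 bytes, always NUL-terminates, and
// returns the length of the source, so truncation is detectable from the
// return value.
static bool CopyKeyBounded(char* dst, size_t dst_size, const char* key) {
  return strlcpy(dst, key, dst_size) < dst_size;  // False if truncated.
}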
diff --git a/runtime/oat.h b/runtime/oat.h
index 57c2f9f6e6..521cc40764 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '1', '2', '6', '\0' }; // Shuffle access flags.
+ static constexpr uint8_t kOatVersion[] = { '1', '2', '7', '\0' }; // .bss ArtMethod* section.
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
@@ -64,6 +64,8 @@ class PACKED(4) OatHeader {
DCHECK(IsValid());
return dex_file_count_;
}
+ uint32_t GetOatDexFilesOffset() const;
+ void SetOatDexFilesOffset(uint32_t oat_dex_files_offset);
uint32_t GetExecutableOffset() const;
void SetExecutableOffset(uint32_t executable_offset);
@@ -135,6 +137,7 @@ class PACKED(4) OatHeader {
InstructionSet instruction_set_;
uint32_t instruction_set_features_bitmap_;
uint32_t dex_file_count_;
+ uint32_t oat_dex_files_offset_;
uint32_t executable_offset_;
uint32_t interpreter_to_interpreter_bridge_offset_;
uint32_t interpreter_to_compiled_code_bridge_offset_;
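Adding oat_dex_files_offset_ changes the OatHeader layout, which is why kOatVersion moves from 126 to 127: a runtime built with the new header must not interpret oat files written with the old one, and the embedded version bytes are the guard. A trivial sketch of that guard, with the constant mirrored from oat.h and a hypothetical helper name:

#include <cstdint>
#include <cstring>

// Mirrors the new value in oat.h; files carrying any other version string
// are rejected before their header fields are trusted.
static constexpr uint8_t kSketchOatVersion[] = { '1', '2', '7', '\0' };

inline bool OatVersionMatches(const uint8_t* file_version) {
  return memcmp(file_version, kSketchOatVersion, sizeof(kSketchOatVersion)) == 0;
}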
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index a816522b9e..888de457dc 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -246,6 +246,9 @@ bool OatFileBase::ComputeFields(uint8_t* requested_base,
}
// Readjust to be non-inclusive upper bound.
bss_end_ += sizeof(uint32_t);
+ // Find bss methods if present.
+ bss_methods_ =
+ const_cast<uint8_t*>(FindDynamicSymbolAddress("oatbssmethods", &symbol_error_msg));
// Find bss roots if present.
bss_roots_ = const_cast<uint8_t*>(FindDynamicSymbolAddress("oatbssroots", &symbol_error_msg));
}
@@ -311,51 +314,63 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) {
cause.c_str());
return false;
}
- const uint8_t* oat = Begin();
- oat += sizeof(OatHeader);
- if (oat > End()) {
- *error_msg = StringPrintf("In oat file '%s' found truncated OatHeader", GetLocation().c_str());
+ PointerSize pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
+ size_t key_value_store_size =
+ (Size() >= sizeof(OatHeader)) ? GetOatHeader().GetKeyValueStoreSize() : 0u;
+ if (Size() < sizeof(OatHeader) + key_value_store_size) {
+ *error_msg = StringPrintf("In oat file '%s' found truncated OatHeader, "
+ "size = %zu < %zu + %zu",
+ GetLocation().c_str(),
+ Size(),
+ sizeof(OatHeader),
+ key_value_store_size);
return false;
}
- oat += GetOatHeader().GetKeyValueStoreSize();
- if (oat > End()) {
- *error_msg = StringPrintf("In oat file '%s' found truncated variable-size data: "
- "%p + %zu + %u <= %p",
+ size_t oat_dex_files_offset = GetOatHeader().GetOatDexFilesOffset();
+ if (oat_dex_files_offset < GetOatHeader().GetHeaderSize() || oat_dex_files_offset > Size()) {
+ *error_msg = StringPrintf("In oat file '%s' found invalid oat dex files offset: "
+ "%zu is not in [%zu, %zu]",
GetLocation().c_str(),
- Begin(),
- sizeof(OatHeader),
- GetOatHeader().GetKeyValueStoreSize(),
- End());
+ oat_dex_files_offset,
+ GetOatHeader().GetHeaderSize(),
+ Size());
return false;
}
+ const uint8_t* oat = Begin() + oat_dex_files_offset; // Jump to the OatDexFile records.
- if (!IsAligned<alignof(GcRoot<mirror::Object>)>(bss_begin_) ||
- !IsAligned<alignof(GcRoot<mirror::Object>)>(bss_roots_) ||
+ DCHECK_GE(static_cast<size_t>(pointer_size), alignof(GcRoot<mirror::Object>));
+ if (!IsAligned<kPageSize>(bss_begin_) ||
+ !IsAlignedParam(bss_methods_, static_cast<size_t>(pointer_size)) ||
+ !IsAlignedParam(bss_roots_, static_cast<size_t>(pointer_size)) ||
!IsAligned<alignof(GcRoot<mirror::Object>)>(bss_end_)) {
*error_msg = StringPrintf("In oat file '%s' found unaligned bss symbol(s): "
- "begin = %p, roots = %p, end = %p",
+ "begin = %p, methods_ = %p, roots = %p, end = %p",
GetLocation().c_str(),
bss_begin_,
+ bss_methods_,
bss_roots_,
bss_end_);
return false;
}
- if (bss_roots_ != nullptr && (bss_roots_ < bss_begin_ || bss_roots_ > bss_end_)) {
- *error_msg = StringPrintf("In oat file '%s' found bss roots outside .bss: "
- "%p is outside range [%p, %p]",
+ if ((bss_methods_ != nullptr && (bss_methods_ < bss_begin_ || bss_methods_ > bss_end_)) ||
+ (bss_roots_ != nullptr && (bss_roots_ < bss_begin_ || bss_roots_ > bss_end_)) ||
+ (bss_methods_ != nullptr && bss_roots_ != nullptr && bss_methods_ > bss_roots_)) {
+ *error_msg = StringPrintf("In oat file '%s' found bss symbol(s) outside .bss or unordered: "
+ "begin = %p, methods_ = %p, roots = %p, end = %p",
GetLocation().c_str(),
- bss_roots_,
bss_begin_,
+ bss_methods_,
+ bss_roots_,
bss_end_);
return false;
}
- PointerSize pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
- uint8_t* dex_cache_arrays = (bss_begin_ == bss_roots_) ? nullptr : bss_begin_;
+ uint8_t* after_arrays = (bss_methods_ != nullptr) ? bss_methods_ : bss_roots_; // May be null.
+ uint8_t* dex_cache_arrays = (bss_begin_ == after_arrays) ? nullptr : bss_begin_;
uint8_t* dex_cache_arrays_end =
- (bss_begin_ == bss_roots_) ? nullptr : (bss_roots_ != nullptr) ? bss_roots_ : bss_end_;
+ (bss_begin_ == after_arrays) ? nullptr : (after_arrays != nullptr) ? after_arrays : bss_end_;
DCHECK_EQ(dex_cache_arrays != nullptr, dex_cache_arrays_end != nullptr);
uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
oat_dex_files_storage_.reserve(dex_file_count);
@@ -529,6 +544,55 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) {
return false;
}
+ uint32_t method_bss_mapping_offset;
+ if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &method_bss_mapping_offset))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' truncated "
+ "after method bss mapping offset",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str());
+ return false;
+ }
+ const bool readable_method_bss_mapping_size =
+ method_bss_mapping_offset != 0u &&
+ method_bss_mapping_offset <= Size() &&
+ IsAligned<alignof(MethodBssMapping)>(method_bss_mapping_offset) &&
+ Size() - method_bss_mapping_offset >= MethodBssMapping::ComputeSize(0);
+ const MethodBssMapping* method_bss_mapping = readable_method_bss_mapping_size
+ ? reinterpret_cast<const MethodBssMapping*>(Begin() + method_bss_mapping_offset)
+ : nullptr;
+ if (method_bss_mapping_offset != 0u &&
+ (UNLIKELY(method_bss_mapping == nullptr) ||
+ UNLIKELY(method_bss_mapping->size() == 0u) ||
+ UNLIKELY(Size() - method_bss_mapping_offset <
+ MethodBssMapping::ComputeSize(method_bss_mapping->size())))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with unaligned or "
+ " truncated method bss mapping, offset %u of %zu, length %zu",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str(),
+ method_bss_mapping_offset,
+ Size(),
+ method_bss_mapping != nullptr ? method_bss_mapping->size() : 0u);
+ return false;
+ }
+ if (kIsDebugBuild && method_bss_mapping != nullptr) {
+ const MethodBssMappingEntry* prev_entry = nullptr;
+ for (const MethodBssMappingEntry& entry : *method_bss_mapping) {
+ CHECK_ALIGNED_PARAM(entry.bss_offset, static_cast<size_t>(pointer_size));
+ CHECK_LT(entry.bss_offset, BssSize());
+ CHECK_LE(POPCOUNT(entry.index_mask) * static_cast<size_t>(pointer_size), entry.bss_offset);
+ size_t index_mask_span = (entry.index_mask != 0u) ? 16u - CTZ(entry.index_mask) : 0u;
+ CHECK_LE(index_mask_span, entry.method_index);
+ if (prev_entry != nullptr) {
+ CHECK_LT(prev_entry->method_index, entry.method_index - index_mask_span);
+ }
+ prev_entry = &entry;
+ }
+ CHECK_LT(prev_entry->method_index,
+ reinterpret_cast<const DexFile::Header*>(dex_file_pointer)->method_ids_size_);
+ }
+
uint8_t* current_dex_cache_arrays = nullptr;
if (dex_cache_arrays != nullptr) {
// All DexCache types except for CallSite have their instance counts in the
@@ -569,6 +633,7 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) {
dex_file_checksum,
dex_file_pointer,
lookup_table_data,
+ method_bss_mapping,
class_offsets_pointer,
current_dex_cache_arrays);
oat_dex_files_storage_.push_back(oat_dex_file);
@@ -1158,6 +1223,7 @@ OatFile::OatFile(const std::string& location, bool is_executable)
end_(nullptr),
bss_begin_(nullptr),
bss_end_(nullptr),
+ bss_methods_(nullptr),
bss_roots_(nullptr),
is_executable_(is_executable),
secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
@@ -1198,6 +1264,17 @@ const uint8_t* OatFile::DexEnd() const {
return kIsVdexEnabled ? vdex_->End() : End();
}
+ArrayRef<ArtMethod*> OatFile::GetBssMethods() const {
+ if (bss_methods_ != nullptr) {
+ ArtMethod** methods = reinterpret_cast<ArtMethod**>(bss_methods_);
+ ArtMethod** methods_end =
+ reinterpret_cast<ArtMethod**>(bss_roots_ != nullptr ? bss_roots_ : bss_end_);
+ return ArrayRef<ArtMethod*>(methods, methods_end - methods);
+ } else {
+ return ArrayRef<ArtMethod*>();
+ }
+}
+
ArrayRef<GcRoot<mirror::Object>> OatFile::GetBssGcRoots() const {
if (bss_roots_ != nullptr) {
auto* roots = reinterpret_cast<GcRoot<mirror::Object>*>(bss_roots_);
@@ -1283,6 +1360,7 @@ OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
uint32_t dex_file_location_checksum,
const uint8_t* dex_file_pointer,
const uint8_t* lookup_table_data,
+ const MethodBssMapping* method_bss_mapping_data,
const uint32_t* oat_class_offsets_pointer,
uint8_t* dex_cache_arrays)
: oat_file_(oat_file),
@@ -1291,6 +1369,7 @@ OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
dex_file_location_checksum_(dex_file_location_checksum),
dex_file_pointer_(dex_file_pointer),
lookup_table_data_(lookup_table_data),
+ method_bss_mapping_(method_bss_mapping_data),
oat_class_offsets_pointer_(oat_class_offsets_pointer),
dex_cache_arrays_(dex_cache_arrays) {
// Initialize TypeLookupTable.
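GetBssMethods() exposes the ArtMethod* slots in .bss, from the oatbssmethods symbol up to the GC roots (or to the end of the section when there are no roots), as an ArrayRef. A hypothetical consumer is sketched below; only the accessor and ArrayRef iteration come from this change, and the counting itself is purely illustrative.

#include "art_method.h"
#include "oat_file.h"

namespace art {

// Counts how many .bss ArtMethod* slots have been filled in. Slots start
// out zero-initialized and are populated as methods get resolved.
static size_t CountResolvedBssMethods(const OatFile& oat_file) {
  size_t resolved = 0;
  for (ArtMethod* method : oat_file.GetBssMethods()) {
    if (method != nullptr) {
      ++resolved;
    }
  }
  return resolved;
}

}  // namespace art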
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 06c76b5464..66ed44f1b9 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -26,6 +26,7 @@
#include "base/stringpiece.h"
#include "compiler_filter.h"
#include "dex_file.h"
+#include "method_bss_mapping.h"
#include "mirror/class.h"
#include "oat.h"
#include "os.h"
@@ -39,9 +40,10 @@ class BitVector;
class ElfFile;
template <class MirrorType> class GcRoot;
class MemMap;
-class OatMethodOffsets;
-class OatHeader;
class OatDexFile;
+class OatHeader;
+class OatMethodOffsets;
+class OatQuickMethodHeader;
class VdexFile;
namespace gc {
@@ -256,8 +258,14 @@ class OatFile {
return BssEnd() - BssBegin();
}
+ size_t BssMethodsOffset() const {
+ // Note: This is used only for symbolizer and needs to return a valid .bss offset.
+ return (bss_methods_ != nullptr) ? bss_methods_ - BssBegin() : BssRootsOffset();
+ }
+
size_t BssRootsOffset() const {
- return bss_roots_ - BssBegin();
+ // Note: This is used only for symbolizer and needs to return a valid .bss offset.
+ return (bss_roots_ != nullptr) ? bss_roots_ - BssBegin() : BssSize();
}
size_t DexSize() const {
@@ -273,6 +281,7 @@ class OatFile {
const uint8_t* DexBegin() const;
const uint8_t* DexEnd() const;
+ ArrayRef<ArtMethod*> GetBssMethods() const;
ArrayRef<GcRoot<mirror::Object>> GetBssGcRoots() const;
// Returns the absolute dex location for the encoded relative dex location.
@@ -324,6 +333,9 @@ class OatFile {
// Pointer to the end of the .bss section, if present, otherwise null.
uint8_t* bss_end_;
+ // Pointer to the beginning of the ArtMethod*s in .bss section, if present, otherwise null.
+ uint8_t* bss_methods_;
+
// Pointer to the beginning of the GC roots in .bss section, if present, otherwise null.
uint8_t* bss_roots_;
@@ -421,6 +433,10 @@ class OatDexFile FINAL {
return lookup_table_data_;
}
+ const MethodBssMapping* GetMethodBssMapping() const {
+ return method_bss_mapping_;
+ }
+
const uint8_t* GetDexFilePointer() const {
return dex_file_pointer_;
}
@@ -447,6 +463,7 @@ class OatDexFile FINAL {
uint32_t dex_file_checksum,
const uint8_t* dex_file_pointer,
const uint8_t* lookup_table_data,
+ const MethodBssMapping* method_bss_mapping,
const uint32_t* oat_class_offsets_pointer,
uint8_t* dex_cache_arrays);
@@ -457,7 +474,8 @@ class OatDexFile FINAL {
const std::string canonical_dex_file_location_;
const uint32_t dex_file_location_checksum_ = 0u;
const uint8_t* const dex_file_pointer_ = nullptr;
- const uint8_t* lookup_table_data_ = nullptr;
+ const uint8_t* const lookup_table_data_ = nullptr;
+ const MethodBssMapping* const method_bss_mapping_ = nullptr;
const uint32_t* const oat_class_offsets_pointer_ = 0u;
uint8_t* const dex_cache_arrays_ = nullptr;
mutable std::unique_ptr<TypeLookupTable> lookup_table_;
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 9e08b34b27..2e2e8c3ef6 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -24,6 +24,7 @@
#include "android-base/strings.h"
#include "base/logging.h"
+#include "base/stl_util.h"
#include "compiler_filter.h"
#include "class_linker.h"
#include "exec_utils.h"
@@ -239,7 +240,7 @@ OatFileAssistant::MakeUpToDate(bool profile_changed, std::string* error_msg) {
case kDex2OatForBootImage:
case kDex2OatForRelocation:
case kDex2OatForFilter:
- return GenerateOatFileNoChecks(info, error_msg);
+ return GenerateOatFileNoChecks(info, target, error_msg);
}
UNREACHABLE();
}
@@ -614,7 +615,7 @@ static bool PrepareOdexDirectories(const std::string& dex_location,
}
OatFileAssistant::ResultOfAttemptToUpdate OatFileAssistant::GenerateOatFileNoChecks(
- OatFileAssistant::OatFileInfo& info, std::string* error_msg) {
+ OatFileAssistant::OatFileInfo& info, CompilerFilter::Filter filter, std::string* error_msg) {
CHECK(error_msg != nullptr);
Runtime* runtime = Runtime::Current();
@@ -689,6 +690,7 @@ OatFileAssistant::ResultOfAttemptToUpdate OatFileAssistant::GenerateOatFileNoChe
args.push_back("--output-vdex-fd=" + std::to_string(vdex_file->Fd()));
args.push_back("--oat-fd=" + std::to_string(oat_file->Fd()));
args.push_back("--oat-location=" + oat_file_name);
+ args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter));
if (!Dex2Oat(args, error_msg)) {
// Manually delete the oat and vdex files. This ensures there is no garbage
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 7e2385ec6c..03d9ca38a8 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -366,14 +366,16 @@ class OatFileAssistant {
};
// Generate the oat file for the given info from the dex file using the
- // current runtime compiler options.
+ // current runtime compiler options and the specified filter.
// This does not check the current status before attempting to generate the
// oat file.
//
// If the result is not kUpdateSucceeded, the value of error_msg will be set
// to a string describing why there was a failure or the update was not
// attempted. error_msg must not be null.
- ResultOfAttemptToUpdate GenerateOatFileNoChecks(OatFileInfo& info, std::string* error_msg);
+ ResultOfAttemptToUpdate GenerateOatFileNoChecks(OatFileInfo& info,
+ CompilerFilter::Filter target,
+ std::string* error_msg);
// Return info for the best oat file.
OatFileInfo& GetBestInfo();
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index b2b86ee289..3619129718 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -28,7 +28,7 @@
#include "oat_file_manager.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "utils.h"
namespace art {
@@ -1232,6 +1232,25 @@ TEST_F(OatFileAssistantTest, DexOptStatusValues) {
}
}
+// Verify that when no compiler filter is passed the default one from OatFileAssistant is used.
+TEST_F(OatFileAssistantTest, DefaultMakeUpToDateFilter) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ const CompilerFilter::Filter default_filter =
+ OatFileAssistant::kDefaultCompilerFilterForDexLoading;
+ std::string error_msg;
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
+ oat_file_assistant.MakeUpToDate(false, &error_msg)) << error_msg;
+ EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(default_filter));
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ EXPECT_NE(nullptr, oat_file.get());
+ EXPECT_EQ(default_filter, oat_file->GetCompilerFilter());
+}
+
// TODO: More Tests:
// * Test class linker falls back to unquickened dex for DexNoOat
// * Test class linker falls back to unquickened dex for MultiDexNoOat
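Note: the --compiler-filter argument added above is what carries the requested filter down to dex2oat, and the new test checks that the default filter is used when none is passed explicitly. A minimal, hypothetical sketch (BuildDex2OatArgs and its parameters are illustrative, not ART code) of how such an argument vector is assembled:

    #include <string>
    #include <vector>

    // Hypothetical helper: build a dex2oat argument vector with an explicit
    // compiler filter name such as "speed".
    std::vector<std::string> BuildDex2OatArgs(int vdex_fd,
                                              int oat_fd,
                                              const std::string& oat_location,
                                              const std::string& filter_name) {
      std::vector<std::string> args;
      args.push_back("--output-vdex-fd=" + std::to_string(vdex_fd));
      args.push_back("--oat-fd=" + std::to_string(oat_fd));
      args.push_back("--oat-location=" + oat_location);
      args.push_back("--compiler-filter=" + filter_name);
      return args;
    }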
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index c1cf800e5d..630945a829 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -29,6 +29,7 @@
#include "base/systrace.h"
#include "class_linker.h"
#include "dex_file-inl.h"
+#include "dex_file_tracking_registrar.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/image_space.h"
#include "handle_scope-inl.h"
@@ -38,7 +39,7 @@
#include "oat_file_assistant.h"
#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"
@@ -737,6 +738,11 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
// Successfully added image space to heap, release the map so that it does not get
// freed.
image_space.release();
+
+ // Register for tracking.
+ for (const auto& dex_file : dex_files) {
+ dex::tracking::RegisterDexFile(dex_file.get());
+ }
} else {
LOG(INFO) << "Failed to add image file " << temp_error_msg;
dex_files.clear();
@@ -756,6 +762,11 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
if (!added_image_space) {
DCHECK(dex_files.empty());
dex_files = oat_file_assistant.LoadDexFiles(*source_oat_file, dex_location);
+
+ // Register for tracking.
+ for (const auto& dex_file : dex_files) {
+ dex::tracking::RegisterDexFile(dex_file.get());
+ }
}
if (dex_files.empty()) {
error_msgs->push_back("Failed to open dex files from " + source_oat_file->GetLocation());
diff --git a/runtime/obj_ptr-inl.h b/runtime/obj_ptr-inl.h
index f2921daeaa..f1e3b5053b 100644
--- a/runtime/obj_ptr-inl.h
+++ b/runtime/obj_ptr-inl.h
@@ -17,8 +17,9 @@
#ifndef ART_RUNTIME_OBJ_PTR_INL_H_
#define ART_RUNTIME_OBJ_PTR_INL_H_
+#include "base/bit_utils.h"
#include "obj_ptr.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 9be486e269..3ec5b323c8 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -46,7 +46,7 @@
#include "object_tagging.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "ti_class.h"
#include "ti_dump.h"
@@ -1731,6 +1731,7 @@ extern "C" bool ArtPlugin_Initialize() {
}
extern "C" bool ArtPlugin_Deinitialize() {
+ gEventHandler.Shutdown();
PhaseUtil::Unregister();
ThreadUtil::Unregister();
ClassUtil::Unregister();
diff --git a/runtime/openjdkjvmti/art_jvmti.h b/runtime/openjdkjvmti/art_jvmti.h
index 2a2aa4c199..af85fb0f4c 100644
--- a/runtime/openjdkjvmti/art_jvmti.h
+++ b/runtime/openjdkjvmti/art_jvmti.h
@@ -41,6 +41,7 @@
#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/strlcpy.h"
#include "events.h"
#include "java_vm_ext.h"
#include "jni_env_ext.h"
@@ -187,7 +188,7 @@ static inline JvmtiUniquePtr<char[]> CopyString(jvmtiEnv* env, const char* src,
size_t len = strlen(src) + 1;
JvmtiUniquePtr<char[]> ret = AllocJvmtiUniquePtr<char[]>(env, len, error);
if (ret != nullptr) {
- strcpy(ret.get(), src);
+ strlcpy(ret.get(), src, len);
}
return ret;
}
@@ -217,8 +218,8 @@ const jvmtiCapabilities kPotentialCapabilities = {
.can_redefine_any_class = 0,
.can_get_current_thread_cpu_time = 0,
.can_get_thread_cpu_time = 0,
- .can_generate_method_entry_events = 0,
- .can_generate_method_exit_events = 0,
+ .can_generate_method_entry_events = 1,
+ .can_generate_method_exit_events = 1,
.can_generate_all_class_hook_events = 0,
.can_generate_compiled_method_load_events = 0,
.can_generate_monitor_events = 0,
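Note: the CopyString change above replaces strcpy with a length-bounded copy. A minimal sketch of strlcpy-style semantics, assuming the usual BSD contract (stand-in code, not ART's base/strlcpy.h):

    #include <cstddef>
    #include <cstring>

    // Bounded copy with strlcpy-like semantics: copies at most dst_size - 1 bytes,
    // always NUL-terminates when dst_size > 0, and returns strlen(src) so callers
    // can detect truncation.
    size_t BoundedCopy(char* dst, const char* src, size_t dst_size) {
      const size_t src_len = strlen(src);
      if (dst_size != 0) {
        const size_t copy_len = (src_len < dst_size) ? src_len : dst_size - 1;
        memcpy(dst, src, copy_len);
        dst[copy_len] = '\0';
      }
      return src_len;
    }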
diff --git a/runtime/openjdkjvmti/events-inl.h b/runtime/openjdkjvmti/events-inl.h
index 57abf3142d..cb7e6a9ad0 100644
--- a/runtime/openjdkjvmti/events-inl.h
+++ b/runtime/openjdkjvmti/events-inl.h
@@ -20,6 +20,7 @@
#include <array>
#include "events.h"
+#include "ScopedLocalRef.h"
#include "art_jvmti.h"
@@ -135,6 +136,8 @@ inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread* thread,
continue;
}
if (ShouldDispatch<kEvent>(env, thread)) {
+ ScopedLocalRef<jthrowable> thr(jnienv, jnienv->ExceptionOccurred());
+ jnienv->ExceptionClear();
jint new_len = 0;
unsigned char* new_data = nullptr;
auto callback = impl::GetCallback<kEvent>(env);
@@ -148,6 +151,9 @@ inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread* thread,
current_class_data,
&new_len,
&new_data);
+ if (thr.get() != nullptr && !jnienv->ExceptionCheck()) {
+ jnienv->Throw(thr.get());
+ }
if (new_data != nullptr && new_data != current_class_data) {
// Destroy the data the last transformer made. We skip this if the previous state was the
// initial one since we don't know here which jvmtiEnv allocated it.
@@ -180,6 +186,25 @@ inline void EventHandler::DispatchEvent(art::Thread* thread, Args... args) const
}
}
+// Events with JNIEnvs need to stash pending exceptions since they can cause new ones to be thrown.
+// In accordance with the JVMTI specification we allow exceptions originating from events to
+// overwrite the current exception, including exceptions originating from earlier events.
+// TODO It would be nice to add the overwritten exceptions to the suppressed exceptions list of the
+// newest exception.
+template <ArtJvmtiEvent kEvent, typename ...Args>
+inline void EventHandler::DispatchEvent(art::Thread* thread, JNIEnv* jnienv, Args... args) const {
+ for (ArtJvmTiEnv* env : envs) {
+ if (env != nullptr) {
+ ScopedLocalRef<jthrowable> thr(jnienv, jnienv->ExceptionOccurred());
+ jnienv->ExceptionClear();
+ DispatchEvent<kEvent, JNIEnv*, Args...>(env, thread, jnienv, args...);
+ if (thr.get() != nullptr && !jnienv->ExceptionCheck()) {
+ jnienv->Throw(thr.get());
+ }
+ }
+ }
+}
+
template <ArtJvmtiEvent kEvent, typename ...Args>
inline void EventHandler::DispatchEvent(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const {
using FnType = void(jvmtiEnv*, Args...);
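Note: the additions above stash any pending JNI exception, run the callback, and re-throw the stashed exception only if the callback did not raise its own. A standalone sketch of that pattern using plain JNI calls (CallWithStashedException is illustrative, not an ART utility):

    #include <jni.h>

    // Run a callback with any pending exception stashed away; if the callback did
    // not raise its own exception, re-throw the stashed one afterwards.
    template <typename Fn>
    void CallWithStashedException(JNIEnv* env, Fn fn) {
      jthrowable pending = env->ExceptionOccurred();
      env->ExceptionClear();
      fn();
      if (pending != nullptr) {
        if (!env->ExceptionCheck()) {
          env->Throw(pending);  // Restore the original exception.
        }
        env->DeleteLocalRef(pending);  // Drop our local reference in either case.
      }
    }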
diff --git a/runtime/openjdkjvmti/events.cc b/runtime/openjdkjvmti/events.cc
index 0ec92b7c60..90bc122220 100644
--- a/runtime/openjdkjvmti/events.cc
+++ b/runtime/openjdkjvmti/events.cc
@@ -32,19 +32,24 @@
#include "events-inl.h"
#include "art_jvmti.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "gc/allocation_listener.h"
#include "gc/gc_pause_listener.h"
#include "gc/heap.h"
+#include "gc/scoped_gc_critical_section.h"
#include "handle_scope-inl.h"
#include "instrumentation.h"
#include "jni_env_ext-inl.h"
+#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
+#include "thread_list.h"
+#include "ti_phase.h"
namespace openjdkjvmti {
@@ -294,6 +299,222 @@ static void SetupGcPauseTracking(JvmtiGcPauseListener* listener, ArtJvmtiEvent e
}
}
+template<typename Type>
+static Type AddLocalRef(art::JNIEnvExt* e, art::mirror::Object* obj)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return (obj == nullptr) ? nullptr : e->AddLocalReference<Type>(obj);
+}
+
+class JvmtiMethodTraceListener FINAL : public art::instrumentation::InstrumentationListener {
+ public:
+ explicit JvmtiMethodTraceListener(EventHandler* handler) : event_handler_(handler) {}
+
+ template<ArtJvmtiEvent kEvent, typename ...Args>
+ void RunEventCallback(art::Thread* self, art::JNIEnvExt* jnienv, Args... args)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ ScopedLocalRef<jthread> thread_jni(jnienv, AddLocalRef<jthread>(jnienv, self->GetPeer()));
+    // Give the event a good-sized JNI local reference frame; 100 slots should be plenty.
+ jnienv->PushFrame(100);
+ {
+      // Transition to native state for the duration of the dispatch so the callback runs as if
+      // called from native code.
+ art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
+ event_handler_->DispatchEvent<kEvent>(self,
+ static_cast<JNIEnv*>(jnienv),
+ thread_jni.get(),
+ args...);
+ }
+ jnienv->PopFrame();
+ }
+
+ // Call-back for when a method is entered.
+ void MethodEntered(art::Thread* self,
+ art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+ art::ArtMethod* method,
+ uint32_t dex_pc ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ if (!method->IsRuntimeMethod() &&
+ event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodEntry)) {
+ art::JNIEnvExt* jnienv = self->GetJniEnv();
+ RunEventCallback<ArtJvmtiEvent::kMethodEntry>(self,
+ jnienv,
+ art::jni::EncodeArtMethod(method));
+ }
+ }
+
+ // Callback for when a method is exited with a reference return value.
+ void MethodExited(art::Thread* self,
+ art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+ art::ArtMethod* method,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ art::Handle<art::mirror::Object> return_value)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ if (!method->IsRuntimeMethod() &&
+ event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
+ DCHECK_EQ(method->GetReturnTypePrimitive(), art::Primitive::kPrimNot)
+ << method->PrettyMethod();
+ DCHECK(!self->IsExceptionPending());
+ jvalue val;
+ art::JNIEnvExt* jnienv = self->GetJniEnv();
+ ScopedLocalRef<jobject> return_jobj(jnienv, AddLocalRef<jobject>(jnienv, return_value.Get()));
+ val.l = return_jobj.get();
+ RunEventCallback<ArtJvmtiEvent::kMethodExit>(
+ self,
+ jnienv,
+ art::jni::EncodeArtMethod(method),
+ /*was_popped_by_exception*/ static_cast<jboolean>(JNI_FALSE),
+ val);
+ }
+ }
+
+ // Call-back for when a method is exited.
+ void MethodExited(art::Thread* self,
+ art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+ art::ArtMethod* method,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ const art::JValue& return_value)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ if (!method->IsRuntimeMethod() &&
+ event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
+ DCHECK_NE(method->GetReturnTypePrimitive(), art::Primitive::kPrimNot)
+ << method->PrettyMethod();
+ DCHECK(!self->IsExceptionPending());
+ jvalue val;
+ art::JNIEnvExt* jnienv = self->GetJniEnv();
+      // A 64-bit integer is the largest member of the jvalue union, so copying the raw 64-bit
+      // value covers every primitive return type.
+ val.j = return_value.GetJ();
+ RunEventCallback<ArtJvmtiEvent::kMethodExit>(
+ self,
+ jnienv,
+ art::jni::EncodeArtMethod(method),
+ /*was_popped_by_exception*/ static_cast<jboolean>(JNI_FALSE),
+ val);
+ }
+ }
+
+ // Call-back for when a method is popped due to an exception throw. A method will either cause a
+ // MethodExited call-back or a MethodUnwind call-back when its activation is removed.
+ void MethodUnwind(art::Thread* self,
+ art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+ art::ArtMethod* method,
+ uint32_t dex_pc ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ if (!method->IsRuntimeMethod() &&
+ event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
+ jvalue val;
+ // Just set this to 0xffffffffffffffff so it's not uninitialized.
+ val.j = static_cast<jlong>(-1);
+ art::JNIEnvExt* jnienv = self->GetJniEnv();
+ art::StackHandleScope<1> hs(self);
+ art::Handle<art::mirror::Throwable> old_exception(hs.NewHandle(self->GetException()));
+ CHECK(!old_exception.IsNull());
+ self->ClearException();
+ RunEventCallback<ArtJvmtiEvent::kMethodExit>(
+ self,
+ jnienv,
+ art::jni::EncodeArtMethod(method),
+ /*was_popped_by_exception*/ static_cast<jboolean>(JNI_TRUE),
+ val);
+ // Match RI behavior of just throwing away original exception if a new one is thrown.
+ if (LIKELY(!self->IsExceptionPending())) {
+ self->SetException(old_exception.Get());
+ }
+ }
+ }
+
+ // Call-back for when the dex pc moves in a method. We don't currently have any events associated
+ // with this.
+ void DexPcMoved(art::Thread* self ATTRIBUTE_UNUSED,
+ art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+ art::ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t new_dex_pc ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ return;
+ }
+
+ // Call-back for when we read from a field.
+ void FieldRead(art::Thread* self ATTRIBUTE_UNUSED,
+ art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+ art::ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ art::ArtField* field ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ return;
+ }
+
+ // Call-back for when we write into a field.
+ void FieldWritten(art::Thread* self ATTRIBUTE_UNUSED,
+ art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+ art::ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ art::ArtField* field ATTRIBUTE_UNUSED,
+ const art::JValue& field_value ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ return;
+ }
+
+ // Call-back when an exception is caught.
+ void ExceptionCaught(art::Thread* self ATTRIBUTE_UNUSED,
+ art::Handle<art::mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ return;
+ }
+
+ // Call-back for when we execute a branch.
+ void Branch(art::Thread* self ATTRIBUTE_UNUSED,
+ art::ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ int32_t dex_pc_offset ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ return;
+ }
+
+ // Call-back for when we get an invokevirtual or an invokeinterface.
+ void InvokeVirtualOrInterface(art::Thread* self ATTRIBUTE_UNUSED,
+ art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+ art::ArtMethod* caller ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ art::ArtMethod* callee ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+ return;
+ }
+
+ private:
+ EventHandler* const event_handler_;
+};
+
+static uint32_t GetInstrumentationEventsFor(ArtJvmtiEvent event) {
+ switch (event) {
+ case ArtJvmtiEvent::kMethodEntry:
+ return art::instrumentation::Instrumentation::kMethodEntered;
+ case ArtJvmtiEvent::kMethodExit:
+ return art::instrumentation::Instrumentation::kMethodExited |
+ art::instrumentation::Instrumentation::kMethodUnwind;
+ default:
+ LOG(FATAL) << "Unknown event ";
+ return 0;
+ }
+}
+
+static void SetupMethodTraceListener(JvmtiMethodTraceListener* listener,
+ ArtJvmtiEvent event,
+ bool enable) {
+ uint32_t new_events = GetInstrumentationEventsFor(event);
+ art::instrumentation::Instrumentation* instr = art::Runtime::Current()->GetInstrumentation();
+ art::gc::ScopedGCCriticalSection gcs(art::Thread::Current(),
+ art::gc::kGcCauseInstrumentation,
+ art::gc::kCollectorTypeInstrumentation);
+ art::ScopedSuspendAll ssa("jvmti method tracing installation");
+ if (enable) {
+ if (!instr->AreAllMethodsDeoptimized()) {
+ instr->EnableMethodTracing("jvmti-tracing", /*needs_interpreter*/true);
+ }
+ instr->AddListener(listener, new_events);
+ } else {
+ instr->RemoveListener(listener, new_events);
+ }
+}
+
// Handle special work for the given event type, if necessary.
void EventHandler::HandleEventType(ArtJvmtiEvent event, bool enable) {
switch (event) {
@@ -306,6 +527,11 @@ void EventHandler::HandleEventType(ArtJvmtiEvent event, bool enable) {
SetupGcPauseTracking(gc_pause_listener_.get(), event, enable);
return;
+ case ArtJvmtiEvent::kMethodEntry:
+ case ArtJvmtiEvent::kMethodExit:
+ SetupMethodTraceListener(method_trace_listener_.get(), event, enable);
+ return;
+
default:
break;
}
@@ -419,9 +645,21 @@ jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
return ERR(NONE);
}
+void EventHandler::Shutdown() {
+ // Need to remove the method_trace_listener_ if it's there.
+ art::Thread* self = art::Thread::Current();
+ art::gc::ScopedGCCriticalSection gcs(self,
+ art::gc::kGcCauseInstrumentation,
+ art::gc::kCollectorTypeInstrumentation);
+ art::ScopedSuspendAll ssa("jvmti method tracing uninstallation");
+ // Just remove every possible event.
+ art::Runtime::Current()->GetInstrumentation()->RemoveListener(method_trace_listener_.get(), ~0);
+}
+
EventHandler::EventHandler() {
alloc_listener_.reset(new JvmtiAllocationListener(this));
gc_pause_listener_.reset(new JvmtiGcPauseListener(this));
+ method_trace_listener_.reset(new JvmtiMethodTraceListener(this));
}
EventHandler::~EventHandler() {
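Note: GetInstrumentationEventsFor above maps one JVMTI-level event onto a mask of instrumentation events; a method exit has to subscribe to both the normal-return and the unwind case. A self-contained sketch of that mapping shape (names are illustrative, not ART's):

    #include <cstdint>

    enum class TraceEvent { kMethodEntry, kMethodExit };

    constexpr uint32_t kMethodEnteredBit = 1u << 0;
    constexpr uint32_t kMethodExitedBit  = 1u << 1;
    constexpr uint32_t kMethodUnwindBit  = 1u << 2;

    // A method exit is delivered either as a normal return or as an exception
    // unwind, so kMethodExit subscribes to both instrumentation bits.
    uint32_t InstrumentationBitsFor(TraceEvent event) {
      switch (event) {
        case TraceEvent::kMethodEntry:
          return kMethodEnteredBit;
        case TraceEvent::kMethodExit:
          return kMethodExitedBit | kMethodUnwindBit;
      }
      return 0u;
    }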
diff --git a/runtime/openjdkjvmti/events.h b/runtime/openjdkjvmti/events.h
index b9e3cf0b09..5f37dcf0a7 100644
--- a/runtime/openjdkjvmti/events.h
+++ b/runtime/openjdkjvmti/events.h
@@ -29,6 +29,7 @@ namespace openjdkjvmti {
struct ArtJvmTiEnv;
class JvmtiAllocationListener;
class JvmtiGcPauseListener;
+class JvmtiMethodTraceListener;
// an enum for ArtEvents. This differs from the JVMTI events only in that we distinguish between
// retransformation capable and incapable loading
@@ -137,6 +138,9 @@ class EventHandler {
EventHandler();
~EventHandler();
+  // Do cleanup for the event handler.
+ void Shutdown();
+
// Register an env. It is assumed that this happens on env creation, that is, no events are
// enabled, yet.
void RegisterArtJvmTiEnv(ArtJvmTiEnv* env);
@@ -160,6 +164,12 @@ class EventHandler {
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
inline void DispatchEvent(art::Thread* thread, Args... args) const;
+  // Dispatch the event to all registered environments, stashing pending exceptions as needed.
+  // This overload works because JNIEnv* is always the second argument whenever an event receives
+  // one; it exists since C++ does not allow partial specialization of function templates.
+ template <ArtJvmtiEvent kEvent, typename ...Args>
+ ALWAYS_INLINE
+ void DispatchEvent(art::Thread* thread, JNIEnv* jnienv, Args... args) const;
// Dispatch event to the given environment, only.
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
@@ -211,6 +221,7 @@ class EventHandler {
std::unique_ptr<JvmtiAllocationListener> alloc_listener_;
std::unique_ptr<JvmtiGcPauseListener> gc_pause_listener_;
+ std::unique_ptr<JvmtiMethodTraceListener> method_trace_listener_;
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/jvmti_weak_table.h b/runtime/openjdkjvmti/jvmti_weak_table.h
index be6edefae2..01c24b1917 100644
--- a/runtime/openjdkjvmti/jvmti_weak_table.h
+++ b/runtime/openjdkjvmti/jvmti_weak_table.h
@@ -41,7 +41,7 @@
#include "globals.h"
#include "jvmti.h"
#include "mirror/object.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace openjdkjvmti {
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index dd90a71240..ed54cd13c3 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -63,7 +63,7 @@
#include "runtime_callbacks.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "ti_class_loader.h"
#include "ti_phase.h"
@@ -103,7 +103,8 @@ static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self,
return nullptr;
}
uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
- std::unique_ptr<const art::DexFile> dex_file(art::DexFile::Open(map->GetName(),
+ std::string map_name = map->GetName();
+ std::unique_ptr<const art::DexFile> dex_file(art::DexFile::Open(map_name,
checksum,
std::move(map),
/*verify*/true,
diff --git a/runtime/openjdkjvmti/ti_dump.cc b/runtime/openjdkjvmti/ti_dump.cc
index d9e3ef1bcf..7a1e53f6e5 100644
--- a/runtime/openjdkjvmti/ti_dump.cc
+++ b/runtime/openjdkjvmti/ti_dump.cc
@@ -39,7 +39,7 @@
#include "events-inl.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
namespace openjdkjvmti {
diff --git a/runtime/openjdkjvmti/ti_field.cc b/runtime/openjdkjvmti/ti_field.cc
index 1e5fbda35b..342d8be2b0 100644
--- a/runtime/openjdkjvmti/ti_field.cc
+++ b/runtime/openjdkjvmti/ti_field.cc
@@ -39,7 +39,7 @@
#include "mirror/object_array-inl.h"
#include "modifiers.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace openjdkjvmti {
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index 99774c67b5..319b1c2a9c 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -35,6 +35,7 @@
#include "primitive.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
+#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
diff --git a/runtime/openjdkjvmti/ti_jni.cc b/runtime/openjdkjvmti/ti_jni.cc
index 88f0395ba5..dd2dda118a 100644
--- a/runtime/openjdkjvmti/ti_jni.cc
+++ b/runtime/openjdkjvmti/ti_jni.cc
@@ -38,7 +38,7 @@
#include "java_vm_ext.h"
#include "jni_env_ext.h"
#include "runtime.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace openjdkjvmti {
diff --git a/runtime/openjdkjvmti/ti_method.cc b/runtime/openjdkjvmti/ti_method.cc
index f7e53474aa..beb639e208 100644
--- a/runtime/openjdkjvmti/ti_method.cc
+++ b/runtime/openjdkjvmti/ti_method.cc
@@ -42,7 +42,7 @@
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "ti_phase.h"
diff --git a/runtime/openjdkjvmti/ti_monitor.cc b/runtime/openjdkjvmti/ti_monitor.cc
index 645faea41b..61bf533eb7 100644
--- a/runtime/openjdkjvmti/ti_monitor.cc
+++ b/runtime/openjdkjvmti/ti_monitor.cc
@@ -39,7 +39,7 @@
#include "art_jvmti.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace openjdkjvmti {
diff --git a/runtime/openjdkjvmti/ti_object.cc b/runtime/openjdkjvmti/ti_object.cc
index bf84499035..2506acac3a 100644
--- a/runtime/openjdkjvmti/ti_object.cc
+++ b/runtime/openjdkjvmti/ti_object.cc
@@ -34,7 +34,7 @@
#include "art_jvmti.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace openjdkjvmti {
diff --git a/runtime/openjdkjvmti/ti_phase.cc b/runtime/openjdkjvmti/ti_phase.cc
index 941cf7b73b..3c8bdc61d0 100644
--- a/runtime/openjdkjvmti/ti_phase.cc
+++ b/runtime/openjdkjvmti/ti_phase.cc
@@ -38,7 +38,7 @@
#include "runtime_callbacks.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "ti_thread.h"
diff --git a/runtime/openjdkjvmti/ti_properties.cc b/runtime/openjdkjvmti/ti_properties.cc
index 8ee5366140..e399b484ec 100644
--- a/runtime/openjdkjvmti/ti_properties.cc
+++ b/runtime/openjdkjvmti/ti_properties.cc
@@ -40,7 +40,7 @@
#include "art_jvmti.h"
#include "runtime.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "ti_phase.h"
#include "well_known_classes.h"
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index ca3a0e631a..b382a3e7c3 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -48,6 +48,7 @@
#include "gc/allocation_listener.h"
#include "gc/heap.h"
#include "instrumentation.h"
+#include "intern_table.h"
#include "jdwp/jdwp.h"
#include "jdwp/jdwp_constants.h"
#include "jdwp/jdwp_event.h"
@@ -452,7 +453,30 @@ art::mirror::ClassLoader* Redefiner::ClassRedefinition::GetClassLoader() {
art::mirror::DexCache* Redefiner::ClassRedefinition::CreateNewDexCache(
art::Handle<art::mirror::ClassLoader> loader) {
- return driver_->runtime_->GetClassLinker()->RegisterDexFile(*dex_file_, loader.Get()).Ptr();
+ art::StackHandleScope<2> hs(driver_->self_);
+ art::ClassLinker* cl = driver_->runtime_->GetClassLinker();
+ art::Handle<art::mirror::DexCache> cache(hs.NewHandle(
+ art::ObjPtr<art::mirror::DexCache>::DownCast(
+ cl->GetClassRoot(art::ClassLinker::kJavaLangDexCache)->AllocObject(driver_->self_))));
+ if (cache.IsNull()) {
+ driver_->self_->AssertPendingOOMException();
+ return nullptr;
+ }
+ art::Handle<art::mirror::String> location(hs.NewHandle(
+ cl->GetInternTable()->InternStrong(dex_file_->GetLocation().c_str())));
+ if (location.IsNull()) {
+ driver_->self_->AssertPendingOOMException();
+ return nullptr;
+ }
+ art::WriterMutexLock mu(driver_->self_, *art::Locks::dex_lock_);
+ art::mirror::DexCache::InitializeDexCache(driver_->self_,
+ cache.Get(),
+ location.Get(),
+ dex_file_.get(),
+ loader.IsNull() ? driver_->runtime_->GetLinearAlloc()
+ : loader->GetAllocator(),
+ art::kRuntimePointerSize);
+ return cache.Get();
}
void Redefiner::RecordFailure(jvmtiError result,
@@ -1293,8 +1317,10 @@ jvmtiError Redefiner::Run() {
// At this point we can no longer fail without corrupting the runtime state.
for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ art::ClassLinker* cl = runtime_->GetClassLinker();
+ cl->RegisterExistingDexCache(data.GetNewDexCache(), data.GetSourceClassLoader());
if (data.GetSourceClassLoader() == nullptr) {
- runtime_->GetClassLinker()->AppendToBootClassPath(self_, data.GetRedefinition().GetDexFile());
+ cl->AppendToBootClassPath(self_, data.GetRedefinition().GetDexFile());
}
}
UnregisterAllBreakpoints();
diff --git a/runtime/openjdkjvmti/ti_search.cc b/runtime/openjdkjvmti/ti_search.cc
index ec139f2004..6e0196edc3 100644
--- a/runtime/openjdkjvmti/ti_search.cc
+++ b/runtime/openjdkjvmti/ti_search.cc
@@ -49,7 +49,7 @@
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
#include "ti_phase.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index 1ddf04feb4..550b97272d 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -52,20 +52,25 @@
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
#include "stack.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "well_known_classes.h"
namespace openjdkjvmti {
+template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
GetStackTraceVisitor(art::Thread* thread_in,
size_t start_,
- size_t stop_)
+ size_t stop_,
+ FrameFn fn_)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ fn(fn_),
start(start_),
stop(stop_) {}
+ GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
+ GetStackTraceVisitor(GetStackTraceVisitor&&) = default;
bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::ArtMethod* m = GetMethod();
@@ -81,7 +86,7 @@ struct GetStackTraceVisitor : public art::StackVisitor {
jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);
jvmtiFrameInfo info = { id, dex_location };
- frames.push_back(info);
+ fn(info);
if (stop == 1) {
return false; // We're done.
@@ -95,24 +100,34 @@ struct GetStackTraceVisitor : public art::StackVisitor {
return true;
}
- std::vector<jvmtiFrameInfo> frames;
+ FrameFn fn;
size_t start;
size_t stop;
};
-struct GetStackTraceClosure : public art::Closure {
+template <typename FrameFn>
+GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
+ size_t start,
+ size_t stop,
+ FrameFn fn) {
+ return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
+}
+
+struct GetStackTraceVectorClosure : public art::Closure {
public:
- GetStackTraceClosure(size_t start, size_t stop)
+ GetStackTraceVectorClosure(size_t start, size_t stop)
: start_input(start),
stop_input(stop),
start_result(0),
stop_result(0) {}
void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
- GetStackTraceVisitor visitor(self, start_input, stop_input);
- visitor.WalkStack(false);
+ auto frames_fn = [&](jvmtiFrameInfo info) {
+ frames.push_back(info);
+ };
+ auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
+ visitor.WalkStack(/* include_transitions */ false);
- frames.swap(visitor.frames);
start_result = visitor.start;
stop_result = visitor.stop;
}
@@ -163,6 +178,33 @@ static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames
return ERR(NONE);
}
+struct GetStackTraceDirectClosure : public art::Closure {
+ public:
+ GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
+ : frame_buffer(frame_buffer_),
+ start_input(start),
+ stop_input(stop),
+ index(0) {
+ DCHECK_GE(start_input, 0u);
+ }
+
+ void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ auto frames_fn = [&](jvmtiFrameInfo info) {
+ frame_buffer[index] = info;
+ ++index;
+ };
+ auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
+ visitor.WalkStack(/* include_transitions */ false);
+ }
+
+ jvmtiFrameInfo* frame_buffer;
+
+ const size_t start_input;
+ const size_t stop_input;
+
+ size_t index = 0;
+};
+
static jvmtiError GetThread(JNIEnv* env, jthread java_thread, art::Thread** thread) {
if (java_thread == nullptr) {
*thread = art::Thread::Current();
@@ -220,8 +262,20 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
return ERR(NONE);
}
- GetStackTraceClosure closure(start_depth >= 0 ? static_cast<size_t>(start_depth) : 0,
- start_depth >= 0 ? static_cast<size_t>(max_frame_count) : 0);
+ if (start_depth >= 0) {
+ // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
+ GetStackTraceDirectClosure closure(frame_buffer,
+ static_cast<size_t>(start_depth),
+ static_cast<size_t>(max_frame_count));
+ thread->RequestSynchronousCheckpoint(&closure);
+ *count_ptr = static_cast<jint>(closure.index);
+ if (closure.index < static_cast<size_t>(start_depth)) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ return ERR(NONE);
+ }
+
+ GetStackTraceVectorClosure closure(0, 0);
thread->RequestSynchronousCheckpoint(&closure);
return TranslateFrameVector(closure.frames,
@@ -232,42 +286,6 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
count_ptr);
}
-struct GetAllStackTraceClosure : public art::Closure {
- public:
- explicit GetAllStackTraceClosure(size_t stop)
- : start_input(0),
- stop_input(stop),
- frames_lock("GetAllStackTraceGuard", art::LockLevel::kAbortLock),
- start_result(0),
- stop_result(0) {}
-
- void Run(art::Thread* self)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) REQUIRES(!frames_lock) {
- // self should be live here (so it could be suspended). No need to filter.
-
- art::Thread* current = art::Thread::Current();
- std::vector<jvmtiFrameInfo> self_frames;
-
- GetStackTraceVisitor visitor(self, start_input, stop_input);
- visitor.WalkStack(false);
-
- self_frames.swap(visitor.frames);
-
- art::MutexLock mu(current, frames_lock);
- frames.emplace(self, self_frames);
- }
-
- const size_t start_input;
- const size_t stop_input;
-
- art::Mutex frames_lock;
- std::unordered_map<art::Thread*, std::vector<jvmtiFrameInfo>> frames GUARDED_BY(frames_lock);
- size_t start_result;
- size_t stop_result;
-};
-
-
-
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr,
@@ -300,7 +318,7 @@ jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
continue;
}
- GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
+ GetStackTraceVectorClosure closure(0u, static_cast<size_t>(max_frame_count));
thread->RequestSynchronousCheckpoint(&closure);
threads.push_back(thread);
@@ -460,7 +478,7 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
for (size_t index = 0; index != handles.size(); ++index) {
if (peer == handles[index].Get()) {
// Found the thread.
- GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
+ GetStackTraceVectorClosure closure(0u, static_cast<size_t>(max_frame_count));
thread->RequestSynchronousCheckpoint(&closure);
threads.push_back(thread);
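Note: the ti_stack.cc rework above parameterizes the stack walk on a FrameFn callback so a single visitor backs both the vector-collecting closure and the direct-to-buffer closure. A reduced sketch of that shape (FrameInfo and the helpers are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct FrameInfo { int64_t method_id; int64_t location; };

    // One walk implementation, parameterized on what to do per frame.
    template <typename FrameFn>
    void WalkFrames(const std::vector<FrameInfo>& stack, FrameFn fn) {
      for (const FrameInfo& frame : stack) {
        fn(frame);
      }
    }

    void CollectIntoVector(const std::vector<FrameInfo>& stack,
                           std::vector<FrameInfo>* out) {
      // Mirrors GetStackTraceVectorClosure: append every frame.
      WalkFrames(stack, [&](const FrameInfo& info) { out->push_back(info); });
    }

    size_t FillBuffer(const std::vector<FrameInfo>& stack,
                      FrameInfo* buffer, size_t capacity) {
      // Mirrors GetStackTraceDirectClosure: write straight into the caller's buffer.
      size_t index = 0;
      WalkFrames(stack, [&](const FrameInfo& info) {
        if (index < capacity) {
          buffer[index++] = info;
        }
      });
      return index;
    }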
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
index 3dfa63313d..2cc2a26c3b 100644
--- a/runtime/openjdkjvmti/ti_thread.cc
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -49,7 +49,7 @@
#include "runtime_callbacks.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"
diff --git a/runtime/openjdkjvmti/ti_threadgroup.cc b/runtime/openjdkjvmti/ti_threadgroup.cc
index dd7be113d6..c0597ad0cc 100644
--- a/runtime/openjdkjvmti/ti_threadgroup.cc
+++ b/runtime/openjdkjvmti/ti_threadgroup.cc
@@ -45,7 +45,7 @@
#include "object_lock.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index ef4957c0ba..abb6f8c018 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -709,7 +709,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -Xmethod-trace-file-size:integervalue\n");
UsageMessage(stream, " -Xps-min-save-period-ms:integervalue\n");
UsageMessage(stream, " -Xps-save-resolved-classes-delay-ms:integervalue\n");
- UsageMessage(stream, " -Xps-startup-method-samples:integervalue\n");
+ UsageMessage(stream, " -Xps-hot-startup-method-samples:integervalue\n");
UsageMessage(stream, " -Xps-min-methods-to-save:integervalue\n");
UsageMessage(stream, " -Xps-min-classes-to-save:integervalue\n");
UsageMessage(stream, " -Xps-min-notification-before-wake:integervalue\n");
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index db774909dd..2d06e54f78 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -63,7 +63,7 @@ inline MirrorType* ReadBarrier::Barrier(
// If kAlwaysUpdateField is true, update the field atomically. This may fail if mutator
// updates before us, but it's OK.
if (kAlwaysUpdateField && ref != old_ref) {
- obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
+ obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
offset, old_ref, ref);
}
}
@@ -81,7 +81,7 @@ inline MirrorType* ReadBarrier::Barrier(
ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
if (ref != old_ref) {
- obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
+ obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
offset, old_ref, ref);
}
}
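Note: the read barrier change above strengthens the reference-field CAS from relaxed to release ordering, so writes made to the new object before the swap become visible to threads that later acquire-load the field. Roughly the same idea with plain std::atomic (a sketch, not ART's Object field accessors):

    #include <atomic>

    struct Node { int payload = 0; };

    // Try to swing 'field' from old_ref to new_ref. Release ordering on success
    // publishes the writes made to *new_ref before the CAS. Losing the race to a
    // concurrent updater is fine; the caller simply keeps going.
    bool PublishRef(std::atomic<Node*>& field, Node* old_ref, Node* new_ref) {
      Node* expected = old_ref;
      return field.compare_exchange_strong(expected, new_ref,
                                           std::memory_order_release,
                                           std::memory_order_relaxed);
    }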
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 8423e04e88..010c6f8fde 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -26,7 +26,6 @@
#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
namespace art {
namespace mirror {
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index e809ecf1f6..260be8f41f 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -29,7 +29,7 @@
#include "primitive.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index e16ef1d77c..532da2b16e 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -31,6 +31,7 @@
#include "nth_caller_visitor.h"
#include "scoped_thread_state_change-inl.h"
#include "stack_reference.h"
+#include "ScopedLocalRef.h"
#include "well_known_classes.h"
namespace art {
@@ -668,7 +669,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
// Wrap any exception with "Ljava/lang/reflect/InvocationTargetException;" and return early.
if (soa.Self()->IsExceptionPending()) {
// If we get another exception when we are trying to wrap, then just use that instead.
- jthrowable th = soa.Env()->ExceptionOccurred();
+ ScopedLocalRef<jthrowable> th(soa.Env(), soa.Env()->ExceptionOccurred());
soa.Self()->ClearException();
jclass exception_class = soa.Env()->FindClass("java/lang/reflect/InvocationTargetException");
if (exception_class == nullptr) {
@@ -677,7 +678,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
}
jmethodID mid = soa.Env()->GetMethodID(exception_class, "<init>", "(Ljava/lang/Throwable;)V");
CHECK(mid != nullptr);
- jobject exception_instance = soa.Env()->NewObject(exception_class, mid, th);
+ jobject exception_instance = soa.Env()->NewObject(exception_class, mid, th.get());
if (exception_instance == nullptr) {
soa.Self()->AssertPendingException();
return nullptr;
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 9281577b3e..609f0d658d 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -20,6 +20,7 @@
#include "runtime.h"
#include "art_method.h"
+#include "base/callee_save_type.h"
#include "gc_root-inl.h"
#include "obj_ptr-inl.h"
@@ -41,15 +42,15 @@ inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(ArtMethod* method
DCHECK_NE(method, GetImtConflictMethod());
DCHECK_NE(method, GetResolutionMethod());
// Don't use GetCalleeSaveMethod(), some tests don't set all callee save methods.
- if (method == GetCalleeSaveMethodUnchecked(Runtime::kSaveRefsAndArgs)) {
- return GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);
- } else if (method == GetCalleeSaveMethodUnchecked(Runtime::kSaveAllCalleeSaves)) {
- return GetCalleeSaveMethodFrameInfo(Runtime::kSaveAllCalleeSaves);
- } else if (method == GetCalleeSaveMethodUnchecked(Runtime::kSaveRefsOnly)) {
- return GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsOnly);
+ if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveRefsAndArgs)) {
+ return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+ } else if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveAllCalleeSaves)) {
+ return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveAllCalleeSaves);
+ } else if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveRefsOnly)) {
+ return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsOnly);
} else {
- DCHECK_EQ(method, GetCalleeSaveMethodUnchecked(Runtime::kSaveEverything));
- return GetCalleeSaveMethodFrameInfo(Runtime::kSaveEverything);
+ DCHECK_EQ(method, GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveEverything));
+ return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveEverything);
}
}
@@ -76,7 +77,7 @@ inline ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type)
inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return reinterpret_cast<ArtMethod*>(callee_save_methods_[type]);
+ return reinterpret_cast<ArtMethod*>(callee_save_methods_[static_cast<size_t>(type)]);
}
} // namespace art
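Note: with CalleeSaveType now a scoped enum, every array access needs an explicit static_cast, as the runtime-inl.h hunk above shows. The basic pattern, sketched with illustrative names:

    #include <cstddef>
    #include <cstdint>

    enum class SaveKind : uint32_t {
      kAllCalleeSaves,
      kRefsOnly,
      kRefsAndArgs,
      kEverything,
      kLastSaveKind  // Count sentinel used only for sizing and iteration.
    };

    constexpr size_t kSaveKindCount = static_cast<size_t>(SaveKind::kLastSaveKind);
    static_assert(kSaveKindCount == 4u, "Unexpected number of save kinds");

    struct SaveMethodTable {
      uint64_t methods[kSaveKindCount] = {};

      // Scoped enums do not convert implicitly to integers, hence the casts.
      uint64_t Get(SaveKind kind) const {
        return methods[static_cast<size_t>(kind)];
      }
      void Set(SaveKind kind, uint64_t value) {
        methods[static_cast<size_t>(kind)] = value;
      }
    };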
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 74e291e2d0..c11e4bd448 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -57,6 +57,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "asm_support.h"
+#include "asm_support_check.h"
#include "atomic.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
@@ -217,6 +218,7 @@ Runtime::Runtime()
intern_table_(nullptr),
class_linker_(nullptr),
signal_catcher_(nullptr),
+ use_tombstoned_traces_(false),
java_vm_(nullptr),
fault_message_lock_("Fault message lock"),
fault_message_(""),
@@ -259,6 +261,9 @@ Runtime::Runtime()
process_state_(kProcessStateJankPerceptible),
zygote_no_threads_(false),
cha_(nullptr) {
+ static_assert(Runtime::kCalleeSaveSize ==
+ static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
+
CheckAsmSupportOffsetsAndSizes();
std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
interpreter::CheckInterpreterAsmConstants();
@@ -1331,8 +1336,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
// TODO: Should we move the following to InitWithoutImage?
SetInstructionSet(instruction_set_);
- for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
+ for (uint32_t i = 0; i < kCalleeSaveSize; i++) {
+ CalleeSaveType type = CalleeSaveType(i);
if (!HasCalleeSaveMethod(type)) {
SetCalleeSaveMethod(CreateCalleeSaveMethod(), type);
}
@@ -1797,7 +1802,7 @@ void Runtime::VisitConstantRoots(RootVisitor* visitor) {
if (imt_unimplemented_method_ != nullptr) {
imt_unimplemented_method_->VisitRoots(buffered_visitor, pointer_size);
}
- for (size_t i = 0; i < kLastCalleeSaveType; ++i) {
+ for (uint32_t i = 0; i < kCalleeSaveSize; ++i) {
auto* m = reinterpret_cast<ArtMethod*>(callee_save_methods_[i]);
if (m != nullptr) {
m->VisitRoots(buffered_visitor, pointer_size);
@@ -1973,32 +1978,32 @@ void Runtime::BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint) {
void Runtime::SetInstructionSet(InstructionSet instruction_set) {
instruction_set_ = instruction_set;
if ((instruction_set_ == kThumb2) || (instruction_set_ == kArm)) {
- for (int i = 0; i != kLastCalleeSaveType; ++i) {
+ for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
}
} else if (instruction_set_ == kMips) {
- for (int i = 0; i != kLastCalleeSaveType; ++i) {
+ for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
}
} else if (instruction_set_ == kMips64) {
- for (int i = 0; i != kLastCalleeSaveType; ++i) {
+ for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = mips64::Mips64CalleeSaveMethodFrameInfo(type);
}
} else if (instruction_set_ == kX86) {
- for (int i = 0; i != kLastCalleeSaveType; ++i) {
+ for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
}
} else if (instruction_set_ == kX86_64) {
- for (int i = 0; i != kLastCalleeSaveType; ++i) {
+ for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
}
} else if (instruction_set_ == kArm64) {
- for (int i = 0; i != kLastCalleeSaveType; ++i) {
+ for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
}
@@ -2012,15 +2017,14 @@ void Runtime::ClearInstructionSet() {
}
void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) {
- DCHECK_LT(static_cast<int>(type), static_cast<int>(kLastCalleeSaveType));
+ DCHECK_LT(static_cast<uint32_t>(type), kCalleeSaveSize);
CHECK(method != nullptr);
- callee_save_methods_[type] = reinterpret_cast<uintptr_t>(method);
+ callee_save_methods_[static_cast<size_t>(type)] = reinterpret_cast<uintptr_t>(method);
}
void Runtime::ClearCalleeSaveMethods() {
- for (size_t i = 0; i < static_cast<size_t>(kLastCalleeSaveType); ++i) {
- CalleeSaveType type = static_cast<CalleeSaveType>(i);
- callee_save_methods_[type] = reinterpret_cast<uintptr_t>(nullptr);
+ for (size_t i = 0; i < kCalleeSaveSize; ++i) {
+ callee_save_methods_[i] = reinterpret_cast<uintptr_t>(nullptr);
}
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 992c5c827f..2505d8706e 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -71,6 +71,7 @@ namespace verifier {
} // namespace verifier
class ArenaPool;
class ArtMethod;
+enum class CalleeSaveType : uint32_t;
class ClassHierarchyAnalysis;
class ClassLinker;
class CompilerCallbacks;
@@ -377,17 +378,8 @@ class Runtime {
imt_unimplemented_method_ = nullptr;
}
- // Returns a special method that describes all callee saves being spilled to the stack.
- enum CalleeSaveType {
- kSaveAllCalleeSaves, // All callee-save registers.
- kSaveRefsOnly, // Only those callee-save registers that can hold references.
- kSaveRefsAndArgs, // References (see above) and arguments (usually caller-save registers).
- kSaveEverything, // All registers, including both callee-save and caller-save.
- kLastCalleeSaveType // Value used for iteration
- };
-
bool HasCalleeSaveMethod(CalleeSaveType type) const {
- return callee_save_methods_[type] != 0u;
+ return callee_save_methods_[static_cast<size_t>(type)] != 0u;
}
ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
@@ -397,14 +389,14 @@ class Runtime {
REQUIRES_SHARED(Locks::mutator_lock_);
QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
- return callee_save_method_frame_infos_[type];
+ return callee_save_method_frame_infos_[static_cast<size_t>(type)];
}
QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
- return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
+ return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
}
InstructionSet GetInstructionSet() const {
@@ -724,8 +716,10 @@ class Runtime {
static constexpr int kProfileForground = 0;
static constexpr int kProfileBackground = 1;
+ static constexpr uint32_t kCalleeSaveSize = 4u;
+
// 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
- uint64_t callee_save_methods_[kLastCalleeSaveType];
+ uint64_t callee_save_methods_[kCalleeSaveSize];
GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
ArtMethod* resolution_method_;
@@ -739,7 +733,7 @@ class Runtime {
GcRoot<mirror::Object> sentinel_;
InstructionSet instruction_set_;
- QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType];
+ QuickMethodFrameInfo callee_save_method_frame_infos_[kCalleeSaveSize];
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
@@ -959,7 +953,6 @@ class Runtime {
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
-std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
} // namespace art
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
index 5511fb7b6d..940e4611f6 100644
--- a/runtime/runtime_common.cc
+++ b/runtime/runtime_common.cc
@@ -29,7 +29,8 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "native_stack_dump.h"
-#include "thread-inl.h"
+#include "runtime.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
namespace art {
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index e638fdb504..b54f587715 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -46,6 +46,7 @@ class SafeMap {
SafeMap() = default;
SafeMap(const SafeMap&) = default;
+ SafeMap(SafeMap&&) = default;
explicit SafeMap(const key_compare& cmp, const allocator_type& allocator = allocator_type())
: map_(cmp, allocator) {
}
@@ -151,6 +152,11 @@ class SafeMap {
return map_ == rhs.map_;
}
+ template <class... Args>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ return map_.emplace(std::forward<Args>(args)...);
+ }
+
private:
::std::map<K, V, Comparator, Allocator> map_;
};
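Note: the SafeMap additions above are a defaulted move constructor and a perfect-forwarding emplace that defers to the wrapped std::map. A compact sketch of the forwarding wrapper (MapWrapper is illustrative, not the ART class):

    #include <map>
    #include <string>
    #include <utility>

    template <typename K, typename V>
    class MapWrapper {
     public:
      MapWrapper() = default;
      MapWrapper(MapWrapper&&) = default;  // Moving just moves the underlying map.

      // Forward constructor arguments untouched to std::map::emplace.
      template <typename... Args>
      std::pair<typename std::map<K, V>::iterator, bool> emplace(Args&&... args) {
        return map_.emplace(std::forward<Args>(args)...);
      }

     private:
      std::map<K, V> map_;
    };

    // Usage: MapWrapper<int, std::string> m; m.emplace(1, "one");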
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index ed6e349de4..aa96871145 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -22,6 +22,7 @@
#include "base/casts.h"
#include "jni_env_ext-inl.h"
#include "obj_ptr-inl.h"
+#include "runtime.h"
#include "thread-inl.h"
namespace art {
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 5c6eead34b..eec0460015 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -20,6 +20,7 @@
#include "arch/context.h"
#include "art_method-inl.h"
+#include "base/callee_save_type.h"
#include "base/enums.h"
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
@@ -29,6 +30,7 @@
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "linear_alloc.h"
+#include "managed_stack.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -68,34 +70,6 @@ mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
}
}
-size_t ManagedStack::NumJniShadowFrameReferences() const {
- size_t count = 0;
- for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
- current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
- current_frame = current_frame->GetLink()) {
- if (current_frame->GetMethod()->IsNative()) {
- // The JNI ShadowFrame only contains references. (For indirect reference.)
- count += current_frame->NumberOfVRegs();
- }
- }
- }
- return count;
-}
-
-bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
- for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
- current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
- current_frame = current_frame->GetLink()) {
- if (current_frame->Contains(shadow_frame_entry)) {
- return true;
- }
- }
- }
- return false;
-}
-
StackVisitor::StackVisitor(Thread* thread,
Context* context,
StackWalkKind walk_kind,
@@ -648,6 +622,12 @@ static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
return;
}
+ Runtime* runtime = Runtime::Current();
+ if (runtime->UseJitCompilation() &&
+ runtime->GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(pc))) {
+ return;
+ }
+
const void* code = method->GetEntryPointFromQuickCompiledCode();
if (code == GetQuickInstrumentationEntryPoint() || code == GetInvokeObsoleteMethodStub()) {
return;
@@ -659,9 +639,6 @@ static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
return;
}
- // If we are the JIT then we may have just compiled the method after the
- // IsQuickToInterpreterBridge check.
- Runtime* runtime = Runtime::Current();
if (runtime->UseJitCompilation() && runtime->GetJit()->GetCodeCache()->ContainsPc(code)) {
return;
}
@@ -758,7 +735,7 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
Runtime* runtime = Runtime::Current();
if (method->IsAbstract()) {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);
+ return runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
}
// This goes before IsProxyMethod since runtime methods have a null declaring class.
@@ -772,7 +749,7 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
// compiled method without any stubs. Therefore the method must have a OatQuickMethodHeader.
DCHECK(!method->IsDirect() && !method->IsConstructor())
<< "Constructors of proxy classes must have a OatQuickMethodHeader";
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);
+ return runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
}
// The only remaining case is if the method is native and uses the generic JNI stub.
@@ -785,7 +762,7 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
size_t scope_size = HandleScope::SizeOf(handle_refs);
QuickMethodFrameInfo callee_info =
- runtime->GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);
+ runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
// Callee saves + handle scope + method ref + alignment
// Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
@@ -868,11 +845,11 @@ void StackVisitor::WalkStack(bool include_transitions) {
thread_->GetInstrumentationStack()->at(instrumentation_stack_depth);
instrumentation_stack_depth++;
if (GetMethod() ==
- Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAllCalleeSaves)) {
+ Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves)) {
// Skip runtime save all callee frames which are used to deliver exceptions.
} else if (instrumentation_frame.interpreter_entry_) {
ArtMethod* callee =
- Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs);
+ Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs);
CHECK_EQ(GetMethod(), callee) << "Expected: " << ArtMethod::PrettyMethod(callee)
<< " Found: " << ArtMethod::PrettyMethod(GetMethod());
} else {
diff --git a/runtime/stack.h b/runtime/stack.h
index bdaa4c3ca2..fd86f5d2b1 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -512,86 +512,6 @@ class JavaFrameRootInfo FINAL : public RootInfo {
const size_t vreg_;
};
-// The managed stack is used to record fragments of managed code stacks. Managed code stacks
-// may either be shadow frames or lists of frames using fixed frame sizes. Transition records are
-// necessary for transitions between code using different frame layouts and transitions into native
-// code.
-class PACKED(4) ManagedStack {
- public:
- ManagedStack()
- : top_quick_frame_(nullptr), link_(nullptr), top_shadow_frame_(nullptr) {}
-
- void PushManagedStackFragment(ManagedStack* fragment) {
- // Copy this top fragment into given fragment.
- memcpy(fragment, this, sizeof(ManagedStack));
- // Clear this fragment, which has become the top.
- memset(this, 0, sizeof(ManagedStack));
- // Link our top fragment onto the given fragment.
- link_ = fragment;
- }
-
- void PopManagedStackFragment(const ManagedStack& fragment) {
- DCHECK(&fragment == link_);
- // Copy this given fragment back to the top.
- memcpy(this, &fragment, sizeof(ManagedStack));
- }
-
- ManagedStack* GetLink() const {
- return link_;
- }
-
- ArtMethod** GetTopQuickFrame() const {
- return top_quick_frame_;
- }
-
- void SetTopQuickFrame(ArtMethod** top) {
- DCHECK(top_shadow_frame_ == nullptr);
- top_quick_frame_ = top;
- }
-
- static size_t TopQuickFrameOffset() {
- return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
- }
-
- ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
- DCHECK(top_quick_frame_ == nullptr);
- ShadowFrame* old_frame = top_shadow_frame_;
- top_shadow_frame_ = new_top_frame;
- new_top_frame->SetLink(old_frame);
- return old_frame;
- }
-
- ShadowFrame* PopShadowFrame() {
- DCHECK(top_quick_frame_ == nullptr);
- CHECK(top_shadow_frame_ != nullptr);
- ShadowFrame* frame = top_shadow_frame_;
- top_shadow_frame_ = frame->GetLink();
- return frame;
- }
-
- ShadowFrame* GetTopShadowFrame() const {
- return top_shadow_frame_;
- }
-
- void SetTopShadowFrame(ShadowFrame* top) {
- DCHECK(top_quick_frame_ == nullptr);
- top_shadow_frame_ = top;
- }
-
- static size_t TopShadowFrameOffset() {
- return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
- }
-
- size_t NumJniShadowFrameReferences() const REQUIRES_SHARED(Locks::mutator_lock_);
-
- bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
-
- private:
- ArtMethod** top_quick_frame_;
- ManagedStack* link_;
- ShadowFrame* top_shadow_frame_;
-};
-
class StackVisitor {
public:
// This enum defines a flag to control whether inlined frames are included
@@ -612,6 +532,8 @@ class StackVisitor {
public:
virtual ~StackVisitor() {}
+ StackVisitor(const StackVisitor&) = default;
+ StackVisitor(StackVisitor&&) = default;
// Return 'true' if we should continue to visit more frames, 'false' to stop.
virtual bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
diff --git a/runtime/stride_iterator.h b/runtime/stride_iterator.h
index ac04c3b403..0560c33eee 100644
--- a/runtime/stride_iterator.h
+++ b/runtime/stride_iterator.h
@@ -24,8 +24,11 @@
namespace art {
template<typename T>
-class StrideIterator : public std::iterator<std::forward_iterator_tag, T> {
+class StrideIterator : public std::iterator<std::random_access_iterator_tag, T> {
public:
+ using difference_type =
+ typename std::iterator<std::random_access_iterator_tag, T>::difference_type;
+
StrideIterator(const StrideIterator&) = default;
StrideIterator(StrideIterator&&) = default;
StrideIterator& operator=(const StrideIterator&) = default;
@@ -44,28 +47,56 @@ class StrideIterator : public std::iterator<std::forward_iterator_tag, T> {
return !(*this == other);
}
- StrideIterator operator++() { // Value after modification.
+ StrideIterator& operator++() { // Value after modification.
ptr_ += stride_;
return *this;
}
StrideIterator operator++(int) {
StrideIterator<T> temp = *this;
- ptr_ += stride_;
+ ++*this;
return temp;
}
- StrideIterator operator+(ssize_t delta) const {
+ StrideIterator& operator--() { // Value after modification.
+ ptr_ -= stride_;
+ return *this;
+ }
+
+ StrideIterator operator--(int) {
StrideIterator<T> temp = *this;
- temp += delta;
+ --*this;
return temp;
}
- StrideIterator& operator+=(ssize_t delta) {
+ StrideIterator& operator+=(difference_type delta) {
ptr_ += static_cast<ssize_t>(stride_) * delta;
return *this;
}
+ StrideIterator operator+(difference_type delta) const {
+ StrideIterator<T> temp = *this;
+ temp += delta;
+ return temp;
+ }
+
+ StrideIterator& operator-=(difference_type delta) {
+ ptr_ -= static_cast<ssize_t>(stride_) * delta;
+ return *this;
+ }
+
+ StrideIterator operator-(difference_type delta) const {
+ StrideIterator<T> temp = *this;
+ temp -= delta;
+ return temp;
+ }
+
+ difference_type operator-(const StrideIterator& rhs) {
+ DCHECK_EQ(stride_, rhs.stride_);
+ DCHECK_EQ((ptr_ - rhs.ptr_) % stride_, 0u);
+ return (ptr_ - rhs.ptr_) / stride_;
+ }
+
T& operator*() const {
return *reinterpret_cast<T*>(ptr_);
}
@@ -74,12 +105,46 @@ class StrideIterator : public std::iterator<std::forward_iterator_tag, T> {
return &**this;
}
+ T& operator[](difference_type n) {
+ return *(*this + n);
+ }
+
private:
uintptr_t ptr_;
// Not const for operator=.
size_t stride_;
+
+ template <typename U>
+ friend bool operator<(const StrideIterator<U>& lhs, const StrideIterator<U>& rhs);
};
+template <typename T>
+StrideIterator<T> operator+(typename StrideIterator<T>::difference_type dist,
+ const StrideIterator<T>& it) {
+ return it + dist;
+}
+
+template <typename T>
+bool operator<(const StrideIterator<T>& lhs, const StrideIterator<T>& rhs) {
+ DCHECK_EQ(lhs.stride_, rhs.stride_);
+ return lhs.ptr_ < rhs.ptr_;
+}
+
+template <typename T>
+bool operator>(const StrideIterator<T>& lhs, const StrideIterator<T>& rhs) {
+ return rhs < lhs;
+}
+
+template <typename T>
+bool operator<=(const StrideIterator<T>& lhs, const StrideIterator<T>& rhs) {
+ return !(rhs < lhs);
+}
+
+template <typename T>
+bool operator>=(const StrideIterator<T>& lhs, const StrideIterator<T>& rhs) {
+ return !(lhs < rhs);
+}
+
} // namespace art
#endif // ART_RUNTIME_STRIDE_ITERATOR_H_
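
A minimal standalone sketch (not ART's StrideIterator) of what promoting the iterator from forward to random-access buys: constant-time difference, subscripting, and ordering, so standard algorithms and bounds checks can use it directly. The IntStrideIter/Record names and the int buffer are illustrative assumptions only.

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <iostream>

  // Toy stride "iterator": walks every stride-th byte of a buffer and exposes
  // the random-access operations added in the patch above.
  struct IntStrideIter {
    std::uintptr_t ptr;
    std::size_t stride;  // in bytes

    int& operator*() const { return *reinterpret_cast<int*>(ptr); }
    IntStrideIter& operator+=(std::ptrdiff_t n) {
      ptr += static_cast<std::ptrdiff_t>(stride) * n;
      return *this;
    }
    IntStrideIter operator+(std::ptrdiff_t n) const {
      IntStrideIter t = *this;
      t += n;
      return t;
    }
    std::ptrdiff_t operator-(const IntStrideIter& rhs) const {
      // Constant-time distance: this is what makes the iterator random-access.
      return (static_cast<std::ptrdiff_t>(ptr) - static_cast<std::ptrdiff_t>(rhs.ptr)) /
             static_cast<std::ptrdiff_t>(stride);
    }
    int& operator[](std::ptrdiff_t n) const { return *(*this + n); }
    bool operator<(const IntStrideIter& rhs) const { return ptr < rhs.ptr; }
  };

  int main() {
    // Array of "records" where only the first int of each record is of interest.
    struct Record { int key; int payload; };
    Record records[4] = {{10, 0}, {20, 0}, {30, 0}, {40, 0}};

    IntStrideIter begin{reinterpret_cast<std::uintptr_t>(&records[0].key), sizeof(Record)};
    IntStrideIter end = begin + 4;

    assert(end - begin == 4);  // O(1) distance.
    assert(begin[2] == 30);    // Subscripting.
    assert(begin < end);       // Ordering.
    std::cout << "third key: " << begin[2] << "\n";
    return 0;
  }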
diff --git a/runtime/thread-current-inl.h b/runtime/thread-current-inl.h
new file mode 100644
index 0000000000..9241b1f875
--- /dev/null
+++ b/runtime/thread-current-inl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_THREAD_CURRENT_INL_H_
+#define ART_RUNTIME_THREAD_CURRENT_INL_H_
+
+#include "thread.h"
+
+#ifdef ART_TARGET_ANDROID
+#include <bionic_tls.h> // Access to our own TLS slot.
+#endif
+
+#include <pthread.h>
+
+namespace art {
+
+inline Thread* Thread::Current() {
+ // We rely on Thread::Current returning null for a detached thread, so it's not obvious
+ // that we can replace this with a direct %fs access on x86.
+ if (!is_started_) {
+ return nullptr;
+ } else {
+#ifdef ART_TARGET_ANDROID
+ void* thread = __get_tls()[TLS_SLOT_ART_THREAD_SELF];
+#else
+ void* thread = pthread_getspecific(Thread::pthread_key_self_);
+#endif
+ return reinterpret_cast<Thread*>(thread);
+ }
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_THREAD_CURRENT_INL_H_
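
The non-Android branch of Thread::Current() above is plain pthread thread-local storage. Below is a self-contained sketch of just that POSIX mechanism, with made-up Self/Worker names; it is not ART's key management, only the pattern it relies on.

  #include <pthread.h>
  #include <cstdio>

  // One process-wide key; each thread can attach its own value to it.
  static pthread_key_t g_self_key;

  struct Self { int id; };

  static void* Worker(void* arg) {
    Self self{*static_cast<int*>(arg)};
    pthread_setspecific(g_self_key, &self);  // "Attach" this thread.
    // Any code running on this thread can now recover its Self without
    // passing it around. A thread that never called pthread_setspecific
    // reads back nullptr, which is the "detached thread" case the ART
    // comment above relies on.
    Self* current = static_cast<Self*>(pthread_getspecific(g_self_key));
    std::printf("thread %d sees itself\n", current->id);
    return nullptr;
  }

  int main() {
    pthread_key_create(&g_self_key, nullptr);
    int ids[2] = {1, 2};
    pthread_t t1, t2;
    pthread_create(&t1, nullptr, Worker, &ids[0]);
    pthread_create(&t2, nullptr, Worker, &ids[1]);
    pthread_join(t1, nullptr);
    pthread_join(t2, nullptr);
    pthread_key_delete(g_self_key);
    return 0;
  }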
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 5c65da6d41..7da15d9f4c 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -19,18 +19,13 @@
#include "thread.h"
-#ifdef ART_TARGET_ANDROID
-#include <bionic_tls.h> // Access to our own TLS slot.
-#endif
-
-#include <pthread.h>
-
#include "base/casts.h"
#include "base/mutex-inl.h"
#include "base/time_utils.h"
#include "jni_env_ext.h"
+#include "managed_stack-inl.h"
#include "obj_ptr.h"
-#include "runtime.h"
+#include "thread-current-inl.h"
#include "thread_pool.h"
namespace art {
@@ -41,21 +36,6 @@ static inline Thread* ThreadForEnv(JNIEnv* env) {
return full_env->self;
}
-inline Thread* Thread::Current() {
- // We rely on Thread::Current returning null for a detached thread, so it's not obvious
- // that we can replace this with a direct %fs access on x86.
- if (!is_started_) {
- return nullptr;
- } else {
-#ifdef ART_TARGET_ANDROID
- void* thread = __get_tls()[TLS_SLOT_ART_THREAD_SELF];
-#else
- void* thread = pthread_getspecific(Thread::pthread_key_self_);
-#endif
- return reinterpret_cast<Thread*>(thread);
- }
-}
-
inline void Thread::AllowThreadSuspension() {
DCHECK_EQ(Thread::Current(), this);
if (UNLIKELY(TestAllFlags())) {
@@ -295,12 +275,6 @@ inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
return static_cast<ThreadState>(old_state);
}
-inline void Thread::VerifyStack() {
- if (kVerifyStack) {
- VerifyStackImpl();
- }
-}
-
inline mirror::Object* Thread::AllocTlab(size_t bytes) {
DCHECK_GE(TlabSize(), bytes);
++tlsPtr_.thread_local_objects;
@@ -384,6 +358,14 @@ inline bool Thread::ModifySuspendCount(Thread* self,
}
}
+inline ShadowFrame* Thread::PushShadowFrame(ShadowFrame* new_top_frame) {
+ return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
+}
+
+inline ShadowFrame* Thread::PopShadowFrame() {
+ return tlsPtr_.managed_stack.PopShadowFrame();
+}
+
} // namespace art
#endif // ART_RUNTIME_THREAD_INL_H_
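
PushShadowFrame/PopShadowFrame move out of the class body so thread.h only needs declarations while the inline bodies live in the -inl.h header. A single-file sketch of that header-splitting pattern, with comments standing in for the would-be files; Stack/Worker are made-up names under that assumption.

  #include <iostream>

  // --- stands in for "managed_stack.h": small, cheap to include ---
  struct Stack {
    int depth = 0;
    void Push();  // Definition deferred to the "-inl.h" part below.
  };

  // --- stands in for "thread.h": holds the member but only *declares* the
  // forwarding call, so including it never needs the Push() body ---
  struct Worker {
    Stack stack;
    inline void PushFrame();  // ALWAYS_INLINE-style forwarder, defined later.
  };

  // --- stands in for "managed_stack-inl.h" ---
  inline void Stack::Push() { ++depth; }

  // --- stands in for "thread-inl.h": only code that actually pushes frames
  // includes this part and pays for the inline bodies ---
  inline void Worker::PushFrame() { stack.Push(); }

  int main() {
    Worker w;
    w.PushFrame();
    std::cout << "depth = " << w.stack.depth << "\n";
    return 0;
  }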
diff --git a/runtime/thread.cc b/runtime/thread.cc
index c849a126ae..789f571253 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -55,6 +55,7 @@
#include "gc/allocator/rosalloc.h"
#include "gc/heap.h"
#include "gc/space/space-inl.h"
+#include "gc_root.h"
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
#include "java_vm_ext.h"
@@ -2160,7 +2161,7 @@ Thread::~Thread() {
TearDownAlternateSignalStack();
}
-void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
+void Thread::HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa) {
if (!IsExceptionPending()) {
return;
}
@@ -2180,7 +2181,7 @@ void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
tlsPtr_.jni_env->ExceptionClear();
}
-void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
+void Thread::RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa) {
// this.group.removeThread(this);
// group can be null if we're in the compiler or a test.
ObjPtr<mirror::Object> ogroup = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)
diff --git a/runtime/thread.h b/runtime/thread.h
index a60fd58ca0..e85ee0d2f3 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -33,15 +33,13 @@
#include "base/mutex.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
-#include "gc_root.h"
#include "globals.h"
#include "handle_scope.h"
#include "instrumentation.h"
#include "jvalue.h"
-#include "object_callbacks.h"
+#include "managed_stack.h"
#include "offsets.h"
#include "runtime_stats.h"
-#include "stack.h"
#include "thread_state.h"
class BacktraceMap;
@@ -87,12 +85,14 @@ class FrameIdToShadowFrame;
class JavaVMExt;
struct JNIEnvExt;
class Monitor;
+class RootVisitor;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class SingleStepControl;
class StackedShadowFrameRecord;
class Thread;
class ThreadList;
+enum VisitRootFlags : uint8_t;
// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
@@ -149,6 +149,7 @@ static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
class Thread {
public:
static const size_t kStackOverflowImplicitCheckSize;
+ static constexpr bool kVerifyStack = kIsDebugBuild;
// Creates a new native thread corresponding to the given managed peer.
// Used to implement Thread.start.
@@ -560,10 +561,14 @@ class Thread {
return tlsPtr_.frame_id_to_shadow_frame != nullptr;
}
- void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
+ void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_);
+ void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kVerifyStack) {
+ VerifyStackImpl();
+ }
+ }
//
// Offsets of various members of native Thread class, used by compiled code.
@@ -793,13 +798,8 @@ class Thread {
tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
}
- ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
- return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
- }
-
- ShadowFrame* PopShadowFrame() {
- return tlsPtr_.managed_stack.PopShadowFrame();
- }
+ ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
+ ALWAYS_INLINE ShadowFrame* PopShadowFrame();
template<PointerSize pointer_size>
static ThreadOffset<pointer_size> TopShadowFrameOffset() {
@@ -1250,9 +1250,10 @@ class Thread {
static void* CreateCallback(void* arg);
- void HandleUncaughtExceptions(ScopedObjectAccess& soa)
+ void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
REQUIRES_SHARED(Locks::mutator_lock_);
- void RemoveFromThreadGroup(ScopedObjectAccess& soa) REQUIRES_SHARED(Locks::mutator_lock_);
// Initialize a thread.
//
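
VerifyStack() now guards the expensive walk behind the class-level constexpr kVerifyStack flag, so release builds fold the call away entirely. A minimal sketch of that pattern under assumed names (kDebugChecks, CheckInvariants); the flag here keys off NDEBUG rather than ART's kIsDebugBuild.

  #include <iostream>

  #ifndef NDEBUG
  static constexpr bool kDebugChecks = true;   // debug build: run the slow check
  #else
  static constexpr bool kDebugChecks = false;  // release build: branch folds away
  #endif

  struct Container {
    int size = 0;

    void CheckInvariantsSlow() const {
      // Imagine an O(n) walk over internal structures here.
      std::cout << "running slow invariant check\n";
    }

    // The condition is a compile-time constant, so when kDebugChecks is false
    // the whole body compiles to nothing and callers pay zero cost.
    void CheckInvariants() const {
      if (kDebugChecks) {
        CheckInvariantsSlow();
      }
    }
  };

  int main() {
    Container c;
    c.CheckInvariants();
    return 0;
  }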
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ca8f7b648c..95aba79ed7 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -37,6 +37,7 @@
#include "gc/gc_pause_listener.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
+#include "gc_root.h"
#include "jni_internal.h"
#include "lock_word.h"
#include "monitor.h"
@@ -164,7 +165,7 @@ static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_s
if (dump_native_stack) {
DumpNativeStack(os, tid, nullptr, " native: ");
}
- os << "\n";
+ os << std::endl;
}
void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack) {
@@ -215,11 +216,10 @@ class DumpCheckpoint FINAL : public Closure {
ScopedObjectAccess soa(self);
thread->Dump(local_os, dump_native_stack_, backtrace_map_.get());
}
- local_os << "\n";
{
// Use the logging lock to ensure serialization when writing to the common ostream.
MutexLock mu(self, *Locks::logging_lock_);
- *os_ << local_os.str();
+ *os_ << local_os.str() << std::endl;
}
barrier_.Pass(self);
}
@@ -757,7 +757,7 @@ void ThreadList::SuspendAllInternal(Thread* self,
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
if ((errno != EAGAIN) && (errno != EINTR)) {
if (errno == ETIMEDOUT) {
- LOG(::android::base::FATAL)
+ LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
<< "Timed out waiting for threads to suspend, waited for "
<< PrettyDuration(NanoTime() - start_time);
} else {
@@ -1509,7 +1509,7 @@ void ThreadList::VisitRootsForSuspendedThreads(RootVisitor* visitor) {
// Visit roots without holding thread_list_lock_ and thread_suspend_count_lock_ to prevent lock
// order violations.
for (Thread* thread : threads_to_visit) {
- thread->VisitRoots(visitor);
+ thread->VisitRoots(visitor, kVisitRootFlagAllRoots);
}
// Restore suspend counts.
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 0ce1d78382..92702c6498 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -22,9 +22,7 @@
#include "base/mutex.h"
#include "base/time_utils.h"
#include "base/value_object.h"
-#include "gc_root.h"
#include "jni.h"
-#include "object_callbacks.h"
#include <bitset>
#include <list>
@@ -38,8 +36,10 @@ namespace gc {
class GcPauseListener;
} // namespace gc
class Closure;
+class RootVisitor;
class Thread;
class TimingLogger;
+enum VisitRootFlags : uint8_t;
class ThreadList {
public:
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index e051e7647f..8349f33028 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -30,7 +30,7 @@
#include "base/stl_util.h"
#include "base/time_utils.h"
#include "runtime.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/ti/agent.cc b/runtime/ti/agent.cc
index 86f5282664..82b9af33d5 100644
--- a/runtime/ti/agent.cc
+++ b/runtime/ti/agent.cc
@@ -18,6 +18,7 @@
#include "android-base/stringprintf.h"
+#include "base/strlcpy.h"
#include "java_vm_ext.h"
#include "runtime.h"
@@ -57,7 +58,7 @@ Agent::LoadError Agent::DoLoadHelper(bool attaching,
}
// Need to let the function fiddle with the array.
std::unique_ptr<char[]> copied_args(new char[args_.size() + 1]);
- strcpy(copied_args.get(), args_.c_str());
+ strlcpy(copied_args.get(), args_.c_str(), args_.size() + 1);
// TODO Need to do some checks that we are at a good spot etc.
*call_res = callback(Runtime::Current()->GetJavaVM(),
copied_args.get(),
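
The agent loader switches from strcpy to strlcpy so the copy can never run past the destination even if the buffer sizing and the string ever disagree. strlcpy is not in glibc, so this standalone sketch expresses the same bounded-copy contract with snprintf; BoundedCopy is an illustrative stand-in, not ART's base/strlcpy.h.

  #include <cstdio>
  #include <iostream>
  #include <memory>
  #include <string>

  // Bounded copy: writes at most dst_size bytes including the terminating NUL,
  // truncating rather than overflowing. Same contract as BSD strlcpy.
  static void BoundedCopy(char* dst, const char* src, std::size_t dst_size) {
    if (dst_size != 0) {
      std::snprintf(dst, dst_size, "%s", src);
    }
  }

  int main() {
    std::string args = "agent=libfoo.so,verbose";
    // Mirrors the call site: buffer sized to the string plus the NUL.
    std::unique_ptr<char[]> copied(new char[args.size() + 1]);
    BoundedCopy(copied.get(), args.c_str(), args.size() + 1);
    std::cout << copied.get() << "\n";

    // With a deliberately small buffer the copy truncates instead of
    // smashing adjacent memory.
    char tiny[6];
    BoundedCopy(tiny, args.c_str(), sizeof(tiny));
    std::cout << tiny << "\n";  // prints "agent"
    return 0;
  }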
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 3a9975a4e2..cabd1620a7 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -41,6 +41,7 @@
#include "os.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
+#include "stack.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
@@ -739,7 +740,7 @@ void Trace::FinishTracing() {
}
void Trace::DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Object* this_object ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method,
uint32_t new_dex_pc) {
// We're not recorded to listen to this kind of event, so complain.
@@ -748,7 +749,7 @@ void Trace::DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
}
void Trace::FieldRead(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Object* this_object ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method,
uint32_t dex_pc,
ArtField* field ATTRIBUTE_UNUSED)
@@ -759,7 +760,7 @@ void Trace::FieldRead(Thread* thread ATTRIBUTE_UNUSED,
}
void Trace::FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Object* this_object ATTRIBUTE_UNUSED,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method,
uint32_t dex_pc,
ArtField* field ATTRIBUTE_UNUSED,
@@ -770,8 +771,10 @@ void Trace::FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
<< " " << dex_pc;
}
-void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
- ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
+void Trace::MethodEntered(Thread* thread,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -779,8 +782,10 @@ void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_
thread_clock_diff, wall_clock_diff);
}
-void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
- ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED,
+void Trace::MethodExited(Thread* thread,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
const JValue& return_value ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
@@ -789,8 +794,10 @@ void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_U
thread_clock_diff, wall_clock_diff);
}
-void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
- ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
+void Trace::MethodUnwind(Thread* thread,
+ Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -799,7 +806,7 @@ void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_U
}
void Trace::ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
- mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
+ Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
@@ -811,7 +818,7 @@ void Trace::Branch(Thread* /*thread*/, ArtMethod* method,
}
void Trace::InvokeVirtualOrInterface(Thread*,
- mirror::Object*,
+ Handle<mirror::Object>,
ArtMethod* method,
uint32_t dex_pc,
ArtMethod*) {
diff --git a/runtime/trace.h b/runtime/trace.h
index 485e9a133a..ad1025045c 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -140,36 +140,54 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
// InstrumentationListener implementation.
- void MethodEntered(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc)
+ void MethodEntered(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
- void MethodExited(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc,
+ void MethodExited(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
const JValue& return_value)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
- void MethodUnwind(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc)
+ void MethodUnwind(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
- void DexPcMoved(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t new_dex_pc)
+ void DexPcMoved(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t new_dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
- void FieldRead(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field)
+ void FieldRead(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
- void FieldWritten(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field,
+ void FieldWritten(Thread* thread,
+ Handle<mirror::Object> this_object,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field,
const JValue& field_value)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
- void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
+ void ExceptionCaught(Thread* thread,
+ Handle<mirror::Throwable> exception_object)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
- void Branch(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
+ void Branch(Thread* thread,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ int32_t dex_pc_offset)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void InvokeVirtualOrInterface(Thread* thread,
- mirror::Object* this_object,
+ Handle<mirror::Object> this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee)
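
The listener callbacks now take Handle<mirror::Object> instead of raw mirror::Object*, so the receiver stays reachable through an extra indirection if the GC relocates objects while the callback runs. The sketch below shows only that generic indirection with a toy two-space heap and handle table; it is not ART's Handle/HandleScope, and ToyHeap/Collect are invented names.

  #include <cstddef>
  #include <iostream>

  struct Object { int value; };

  // Toy two-space "heap": a collection copies live objects to the other space
  // and patches the handle table.
  struct ToyHeap {
    Object space_a[4];
    Object space_b[4];
    Object* current = space_a;
    Object* handle_table[4] = {nullptr, nullptr, nullptr, nullptr};
    std::size_t count = 0;

    std::size_t Alloc(int value) {
      current[count].value = value;
      handle_table[count] = &current[count];
      return count++;
    }

    void Collect() {
      Object* to = (current == space_a) ? space_b : space_a;
      for (std::size_t i = 0; i < count; ++i) {
        to[i] = *handle_table[i];     // copy (i.e. move) the object
        handle_table[i]->value = -1;  // poison the stale copy
        handle_table[i] = &to[i];     // fix up the handle table
      }
      current = to;
    }
  };

  // A handle remembers a slot, not an address, so it survives Collect().
  struct Handle {
    ToyHeap* heap;
    std::size_t slot;
    Object* Get() const { return heap->handle_table[slot]; }
  };

  int main() {
    ToyHeap heap;
    std::size_t slot = heap.Alloc(42);

    Object* raw = heap.handle_table[slot];  // raw pointer cached before a "GC"
    Handle handle{&heap, slot};

    heap.Collect();  // the object moves

    std::cout << "raw sees    " << raw->value << "\n";           // stale: -1
    std::cout << "handle sees " << handle.Get()->value << "\n";  // still 42
    return 0;
  }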
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 56ff0a13ac..907d37ef31 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -19,8 +19,10 @@
#include "base/stl_util.h"
#include "base/logging.h"
#include "gc/accounting/card_table-inl.h"
+#include "gc_root-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 921de03754..747c2d0f38 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -22,7 +22,6 @@
#include "base/value_object.h"
#include "dex_file_types.h"
#include "gc_root.h"
-#include "object_callbacks.h"
#include "offsets.h"
#include "primitive.h"
#include "safe_map.h"
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 20a53b7c69..c4b044110c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -303,7 +303,7 @@ std::string PrintableChar(uint16_t ch) {
if (NeedsEscaping(ch)) {
StringAppendF(&result, "\\u%04x", ch);
} else {
- result += ch;
+ result += static_cast<std::string::value_type>(ch);
}
result += '\'';
return result;
@@ -330,7 +330,7 @@ std::string PrintableString(const char* utf) {
if (NeedsEscaping(leading)) {
StringAppendF(&result, "\\u%04x", leading);
} else {
- result += leading;
+ result += static_cast<std::string::value_type>(leading);
}
const uint32_t trailing = GetTrailingUtf16Char(ch);
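
Appending a uint16_t code unit to std::string already resolves to the char overload of operator+=, but only through an implicit narrowing conversion; the casts above presumably exist to make that narrowing explicit for -Wconversion-style checks. Minimal sketch of the same idiom:

  #include <cstdint>
  #include <iostream>
  #include <string>

  int main() {
    std::string result = "'";
    std::uint16_t ch = 0x41;  // 'A', already known to be printable ASCII here

    // Without the cast this still picks operator+=(char), but via an implicit
    // uint16_t -> char narrowing that conversion warnings flag.
    result += static_cast<std::string::value_type>(ch);
    result += '\'';

    std::cout << result << "\n";  // 'A'
    return 0;
  }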
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 7490611cb6..12f791c1f1 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -50,6 +50,7 @@
#include "register_line-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
+#include "stack.h"
#include "utils.h"
#include "verifier_deps.h"
#include "verifier_compiler_binding.h"
@@ -883,10 +884,13 @@ bool MethodVerifier::Verify() {
InstructionFlags());
// Run through the instructions and see if the width checks out.
bool result = ComputeWidthsAndCountOps();
+ bool allow_runtime_only_instructions = !Runtime::Current()->IsAotCompiler() || verify_to_dump_;
// Flag instructions guarded by a "try" block and check exception handlers.
result = result && ScanTryCatchBlocks();
// Perform static instruction verification.
- result = result && VerifyInstructions();
+ result = result && (allow_runtime_only_instructions
+ ? VerifyInstructions<true>()
+ : VerifyInstructions<false>());
// Perform code-flow analysis and return.
result = result && VerifyCodeFlow();
@@ -1102,6 +1106,7 @@ bool MethodVerifier::ScanTryCatchBlocks() {
return true;
}
+template <bool kAllowRuntimeOnlyInstructions>
bool MethodVerifier::VerifyInstructions() {
const Instruction* inst = Instruction::At(code_item_->insns_);
@@ -1110,9 +1115,8 @@ bool MethodVerifier::VerifyInstructions() {
GetInstructionFlags(0).SetCompileTimeInfoPoint();
uint32_t insns_size = code_item_->insns_size_in_code_units_;
- bool allow_runtime_only_instructions = !Runtime::Current()->IsAotCompiler() || verify_to_dump_;
for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
- if (!VerifyInstruction(inst, dex_pc, allow_runtime_only_instructions)) {
+ if (!VerifyInstruction<kAllowRuntimeOnlyInstructions>(inst, dex_pc)) {
DCHECK_NE(failures_.size(), 0U);
return false;
}
@@ -1139,9 +1143,8 @@ bool MethodVerifier::VerifyInstructions() {
return true;
}
-bool MethodVerifier::VerifyInstruction(const Instruction* inst,
- uint32_t code_offset,
- bool allow_runtime_only_instructions) {
+template <bool kAllowRuntimeOnlyInstructions>
+bool MethodVerifier::VerifyInstruction(const Instruction* inst, uint32_t code_offset) {
if (Instruction::kHaveExperimentalInstructions && UNLIKELY(inst->IsExperimental())) {
// Experimental instructions don't yet have verifier support implementation.
// While it is possible to use them by themselves, when we try to use stable instructions
@@ -1250,7 +1253,7 @@ bool MethodVerifier::VerifyInstruction(const Instruction* inst,
result = false;
break;
}
- if (!allow_runtime_only_instructions && inst->GetVerifyIsRuntimeOnly()) {
+ if (!kAllowRuntimeOnlyInstructions && inst->GetVerifyIsRuntimeOnly()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "opcode only expected at runtime " << inst->Name();
result = false;
}
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 9ef98f70e4..46fdc5419d 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -24,7 +24,6 @@
#include "base/arena_allocator.h"
#include "base/macros.h"
#include "base/scoped_arena_containers.h"
-#include "base/stl_util.h"
#include "base/value_object.h"
#include "dex_file.h"
#include "dex_file_types.h"
@@ -360,6 +359,7 @@ class MethodVerifier {
*
* Walks through instructions in a method calling VerifyInstruction on each.
*/
+ template <bool kAllowRuntimeOnlyInstructions>
bool VerifyInstructions();
/*
@@ -395,9 +395,8 @@ class MethodVerifier {
* - (earlier) for each exception handler, the handler must start at a valid
* instruction
*/
- bool VerifyInstruction(const Instruction* inst,
- uint32_t code_offset,
- bool allow_runtime_only_instructions);
+ template <bool kAllowRuntimeOnlyInstructions>
+ bool VerifyInstruction(const Instruction* inst, uint32_t code_offset);
/* Ensure that the register index is valid for this code item. */
bool CheckRegisterIndex(uint32_t idx);
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 25baac5094..6c01a7982a 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -30,7 +30,6 @@
#include "gc_root.h"
#include "handle_scope.h"
#include "obj_ptr.h"
-#include "object_callbacks.h"
#include "primitive.h"
namespace art {
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 49dac26bb4..b0ea6c857c 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -25,7 +25,7 @@
#include "reg_type_cache-inl.h"
#include "reg_type-inl.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace verifier {
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index 70ce0c4a29..43eb948c64 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -23,6 +23,7 @@
#include "base/array_ref.h"
#include "base/mutex.h"
+#include "dex_file_types.h"
#include "handle.h"
#include "method_resolution_kind.h"
#include "obj_ptr.h"
diff --git a/runtime/verify_object.h b/runtime/verify_object.h
index 519f7f5f5a..e4c01d0f78 100644
--- a/runtime/verify_object.h
+++ b/runtime/verify_object.h
@@ -48,7 +48,6 @@ enum VerifyObjectFlags {
kVerifyAll = kVerifyThis | kVerifyReads | kVerifyWrites,
};
-static constexpr bool kVerifyStack = kIsDebugBuild;
static constexpr VerifyObjectFlags kDefaultVerifyFlags = kVerifyNone;
static constexpr VerifyObjectMode kVerifyObjectSupport =
kDefaultVerifyFlags != 0 ? kVerifyObjectModeFast : kVerifyObjectModeDisabled;
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 5aef062728..24f194b5ee 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -30,7 +30,7 @@
#include "obj_ptr-inl.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index 0d0d5c73ae..df1012ea3c 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -25,6 +25,7 @@
#include <vector>
#include "android-base/stringprintf.h"
+#include "base/bit_utils.h"
#include "base/unix_file/fd_file.h"
namespace art {
diff --git a/test/003-omnibus-opcodes/src/Main.java b/test/003-omnibus-opcodes/src/Main.java
index a30ec15c66..4e1ffe2fde 100644
--- a/test/003-omnibus-opcodes/src/Main.java
+++ b/test/003-omnibus-opcodes/src/Main.java
@@ -67,7 +67,7 @@ public class Main {
} catch (Throwable th) {
// We and the RI throw ClassNotFoundException, but that isn't declared so javac
// won't let us try to catch it.
- th.printStackTrace();
+ th.printStackTrace(System.out);
}
InternedString.run();
GenSelect.run();
diff --git a/test/008-exceptions/src/Main.java b/test/008-exceptions/src/Main.java
index 74af00ccf7..89fe016856 100644
--- a/test/008-exceptions/src/Main.java
+++ b/test/008-exceptions/src/Main.java
@@ -155,7 +155,7 @@ public class Main {
} catch (BadError e) {
System.out.println(e);
} catch (Throwable t) {
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
try {
// Before splitting mirror::Class::kStatusError into
@@ -171,11 +171,11 @@ public class Main {
throw new IllegalStateException("Should not reach here.");
} catch (NoClassDefFoundError ncdfe) {
if (!(ncdfe.getCause() instanceof BadError)) {
- ncdfe.getCause().printStackTrace();
+ ncdfe.getCause().printStackTrace(System.out);
}
} catch (VerifyError e) {
} catch (Throwable t) {
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
}
@@ -186,7 +186,7 @@ public class Main {
} catch (Error e) {
System.out.println(e);
} catch (Throwable t) {
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
// Before splitting mirror::Class::kStatusError into
// kStatusErrorUnresolved and kStatusErrorResolved,
@@ -200,7 +200,7 @@ public class Main {
System.out.println(ncdfe);
System.out.println(" cause: " + ncdfe.getCause());
} catch (Throwable t) {
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
try {
MultiDexBadInitWrapper2.setDummy(1);
@@ -209,7 +209,7 @@ public class Main {
System.out.println(ncdfe);
System.out.println(" cause: " + ncdfe.getCause());
} catch (Throwable t) {
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
}
}
diff --git a/test/023-many-interfaces/src/ManyInterfaces.java b/test/023-many-interfaces/src/ManyInterfaces.java
index d69a490f67..8ec4566802 100644
--- a/test/023-many-interfaces/src/ManyInterfaces.java
+++ b/test/023-many-interfaces/src/ManyInterfaces.java
@@ -355,7 +355,7 @@ public class ManyInterfaces
static void testInstance001(Object obj, int count) {
if (!(obj instanceof Interface001))
- System.err.println("BAD");
+ System.out.println("BAD");
while (count-- != 0) {
boolean is;
is = obj instanceof Interface001;
@@ -379,7 +379,7 @@ public class ManyInterfaces
static void testInstance049(Object obj, int count) {
if (!(obj instanceof Interface049))
- System.err.println("BAD");
+ System.out.println("BAD");
while (count-- != 0) {
boolean is;
is = obj instanceof Interface049;
@@ -403,7 +403,7 @@ public class ManyInterfaces
static void testInstance099(Object obj, int count) {
if (!(obj instanceof Interface099))
- System.err.println("BAD");
+ System.out.println("BAD");
while (count-- != 0) {
boolean is;
is = obj instanceof Interface099;
diff --git a/test/024-illegal-access/src/Main.java b/test/024-illegal-access/src/Main.java
index 84c7114cb4..de9ad5b694 100644
--- a/test/024-illegal-access/src/Main.java
+++ b/test/024-illegal-access/src/Main.java
@@ -18,7 +18,7 @@ public class Main {
static public void main(String[] args) {
try {
PublicAccess.accessStaticField();
- System.err.println("ERROR: call 1 not expected to succeed");
+ System.out.println("ERROR: call 1 not expected to succeed");
} catch (VerifyError ve) {
// dalvik
System.out.println("Got expected failure 1");
@@ -29,7 +29,7 @@ public class Main {
try {
PublicAccess.accessStaticMethod();
- System.err.println("ERROR: call 2 not expected to succeed");
+ System.out.println("ERROR: call 2 not expected to succeed");
} catch (IllegalAccessError iae) {
// reference
System.out.println("Got expected failure 2");
@@ -37,7 +37,7 @@ public class Main {
try {
PublicAccess.accessInstanceField();
- System.err.println("ERROR: call 3 not expected to succeed");
+ System.out.println("ERROR: call 3 not expected to succeed");
} catch (VerifyError ve) {
// dalvik
System.out.println("Got expected failure 3");
@@ -48,7 +48,7 @@ public class Main {
try {
PublicAccess.accessInstanceMethod();
- System.err.println("ERROR: call 4 not expected to succeed");
+ System.out.println("ERROR: call 4 not expected to succeed");
} catch (IllegalAccessError iae) {
// reference
System.out.println("Got expected failure 4");
@@ -56,7 +56,7 @@ public class Main {
try {
CheckInstanceof.main(new Object());
- System.err.println("ERROR: call 5 not expected to succeed");
+ System.out.println("ERROR: call 5 not expected to succeed");
} catch (VerifyError ve) {
// dalvik
System.out.println("Got expected failure 5");
diff --git a/test/031-class-attributes/src/ClassAttrs.java b/test/031-class-attributes/src/ClassAttrs.java
index 39e69a3066..8489a2c222 100644
--- a/test/031-class-attributes/src/ClassAttrs.java
+++ b/test/031-class-attributes/src/ClassAttrs.java
@@ -133,12 +133,12 @@ public class ClassAttrs {
System.out.println("field signature: "
+ getSignatureAttribute(field));
} catch (NoSuchMethodException nsme) {
- System.err.println("FAILED: " + nsme);
+ System.out.println("FAILED: " + nsme);
} catch (NoSuchFieldException nsfe) {
- System.err.println("FAILED: " + nsfe);
+ System.out.println("FAILED: " + nsfe);
} catch (RuntimeException re) {
- System.err.println("FAILED: " + re);
- re.printStackTrace();
+ System.out.println("FAILED: " + re);
+ re.printStackTrace(System.out);
}
test_isAssignableFrom();
@@ -228,7 +228,7 @@ public class ClassAttrs {
method = c.getDeclaredMethod("getSignatureAttribute");
method.setAccessible(true);
} catch (Exception ex) {
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
return "<unknown>";
}
diff --git a/test/032-concrete-sub/src/ConcreteSub.java b/test/032-concrete-sub/src/ConcreteSub.java
index 95adf63c37..61d1602492 100644
--- a/test/032-concrete-sub/src/ConcreteSub.java
+++ b/test/032-concrete-sub/src/ConcreteSub.java
@@ -45,7 +45,7 @@ public class ConcreteSub extends AbstractBase {
try {
meth = absClass.getMethod("redefineMe");
} catch (NoSuchMethodException nsme) {
- nsme.printStackTrace();
+ nsme.printStackTrace(System.out);
return;
}
System.out.println("meth modifiers=" + meth.getModifiers());
diff --git a/test/032-concrete-sub/src/Main.java b/test/032-concrete-sub/src/Main.java
index 4a5193d635..7d3be1525d 100644
--- a/test/032-concrete-sub/src/Main.java
+++ b/test/032-concrete-sub/src/Main.java
@@ -26,7 +26,7 @@ public class Main {
ConcreteSub2 blah = new ConcreteSub2();
// other VMs fail here (AbstractMethodError)
blah.doStuff();
- System.err.println("Succeeded unexpectedly");
+ System.out.println("Succeeded unexpectedly");
} catch (VerifyError ve) {
System.out.println("Got expected failure");
} catch (AbstractMethodError ame) {
diff --git a/test/036-finalizer/src/Main.java b/test/036-finalizer/src/Main.java
index 0de56f9a7c..734830f7de 100644
--- a/test/036-finalizer/src/Main.java
+++ b/test/036-finalizer/src/Main.java
@@ -120,7 +120,7 @@ public class Main {
static void printNonFinalized() {
for (int i = 0; i < maxCount; ++i) {
if (!FinalizeCounter.finalized[i]) {
- System.err.println("Element " + i + " was not finalized");
+ System.out.println("Element " + i + " was not finalized");
}
}
}
diff --git a/test/042-new-instance/src/Main.java b/test/042-new-instance/src/Main.java
index 755d62ebb5..34d1f5a68d 100644
--- a/test/042-new-instance/src/Main.java
+++ b/test/042-new-instance/src/Main.java
@@ -37,31 +37,31 @@ public class Main {
Object obj = c.newInstance();
System.out.println("LocalClass succeeded");
} catch (Exception ex) {
- System.err.println("LocalClass failed");
- ex.printStackTrace();
+ System.out.println("LocalClass failed");
+ ex.printStackTrace(System.out);
}
// should fail
try {
Class<?> c = Class.forName("otherpackage.PackageAccess");
Object obj = c.newInstance();
- System.err.println("ERROR: PackageAccess succeeded unexpectedly");
+ System.out.println("ERROR: PackageAccess succeeded unexpectedly");
} catch (IllegalAccessException iae) {
System.out.println("Got expected PackageAccess complaint");
} catch (Exception ex) {
- System.err.println("Got unexpected PackageAccess failure");
- ex.printStackTrace();
+ System.out.println("Got unexpected PackageAccess failure");
+ ex.printStackTrace(System.out);
}
LocalClass3.main();
try {
MaybeAbstract ma = new MaybeAbstract();
- System.err.println("ERROR: MaybeAbstract succeeded unexpectedly");
+ System.out.println("ERROR: MaybeAbstract succeeded unexpectedly");
} catch (InstantiationError ie) {
System.out.println("Got expected InstantationError");
} catch (Exception ex) {
- System.err.println("Got unexpected MaybeAbstract failure");
+ System.out.println("Got unexpected MaybeAbstract failure");
}
}
@@ -73,12 +73,12 @@ public class Main {
try {
Class<?> c = Class.forName("LocalClass");
Constructor<?> cons = c.getConstructor();
- System.err.println("Cons LocalClass succeeded unexpectedly");
+ System.out.println("Cons LocalClass succeeded unexpectedly");
} catch (NoSuchMethodException nsme) {
System.out.println("Cons LocalClass failed as expected");
} catch (Exception ex) {
- System.err.println("Cons LocalClass failed strangely");
- ex.printStackTrace();
+ System.out.println("Cons LocalClass failed strangely");
+ ex.printStackTrace(System.out);
}
// should succeed
@@ -88,8 +88,8 @@ public class Main {
Object obj = cons.newInstance();
System.out.println("Cons LocalClass2 succeeded");
} catch (Exception ex) {
- System.err.println("Cons LocalClass2 failed");
- ex.printStackTrace();
+ System.out.println("Cons LocalClass2 failed");
+ ex.printStackTrace(System.out);
}
// should succeed
@@ -99,8 +99,8 @@ public class Main {
Object obj = cons.newInstance(new Main());
System.out.println("Cons InnerClass succeeded");
} catch (Exception ex) {
- System.err.println("Cons InnerClass failed");
- ex.printStackTrace();
+ System.out.println("Cons InnerClass failed");
+ ex.printStackTrace(System.out);
}
// should succeed
@@ -110,21 +110,21 @@ public class Main {
Object obj = cons.newInstance();
System.out.println("Cons StaticInnerClass succeeded");
} catch (Exception ex) {
- System.err.println("Cons StaticInnerClass failed");
- ex.printStackTrace();
+ System.out.println("Cons StaticInnerClass failed");
+ ex.printStackTrace(System.out);
}
// should fail
try {
Class<?> c = Class.forName("otherpackage.PackageAccess");
Constructor<?> cons = c.getConstructor();
- System.err.println("ERROR: Cons PackageAccess succeeded unexpectedly");
+ System.out.println("ERROR: Cons PackageAccess succeeded unexpectedly");
} catch (NoSuchMethodException nsme) {
// constructor isn't public
System.out.println("Cons got expected PackageAccess complaint");
} catch (Exception ex) {
- System.err.println("Cons got unexpected PackageAccess failure");
- ex.printStackTrace();
+ System.out.println("Cons got unexpected PackageAccess failure");
+ ex.printStackTrace(System.out);
}
// should fail
@@ -132,13 +132,13 @@ public class Main {
Class<?> c = Class.forName("MaybeAbstract");
Constructor<?> cons = c.getConstructor();
Object obj = cons.newInstance();
- System.err.println("ERROR: Cons MaybeAbstract succeeded unexpectedly");
+ System.out.println("ERROR: Cons MaybeAbstract succeeded unexpectedly");
} catch (InstantiationException ie) {
// note InstantiationException vs. InstantiationError
System.out.println("Cons got expected InstantationException");
} catch (Exception ex) {
- System.err.println("Cons got unexpected MaybeAbstract failure");
- ex.printStackTrace();
+ System.out.println("Cons got unexpected MaybeAbstract failure");
+ ex.printStackTrace(System.out);
}
// should fail
@@ -147,13 +147,13 @@ public class Main {
Constructor<?> cons = c.getConstructor();
if (!FULL_ACCESS_CHECKS) { throw new IllegalAccessException(); }
Object obj = cons.newInstance();
- System.err.println("ERROR: Cons PackageAccess2 succeeded unexpectedly");
+ System.out.println("ERROR: Cons PackageAccess2 succeeded unexpectedly");
} catch (IllegalAccessException iae) {
// constructor is public, but class has package scope
System.out.println("Cons got expected PackageAccess2 complaint");
} catch (Exception ex) {
- System.err.println("Cons got unexpected PackageAccess2 failure");
- ex.printStackTrace();
+ System.out.println("Cons got unexpected PackageAccess2 failure");
+ ex.printStackTrace(System.out);
}
// should succeed
@@ -161,8 +161,8 @@ public class Main {
otherpackage.ConstructorAccess.newConstructorInstance();
System.out.println("Cons ConstructorAccess succeeded");
} catch (Exception ex) {
- System.err.println("Cons ConstructorAccess failed");
- ex.printStackTrace();
+ System.out.println("Cons ConstructorAccess failed");
+ ex.printStackTrace(System.out);
}
}
@@ -187,8 +187,8 @@ class LocalClass3 {
CC.newInstance();
System.out.println("LocalClass3 succeeded");
} catch (Exception ex) {
- System.err.println("Got unexpected LocalClass3 failure");
- ex.printStackTrace();
+ System.out.println("Got unexpected LocalClass3 failure");
+ ex.printStackTrace(System.out);
}
}
@@ -200,7 +200,7 @@ class LocalClass3 {
Class<?> c = CC.class;
return c.newInstance();
} catch (Exception ex) {
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
return null;
}
}
diff --git a/test/044-proxy/src/BasicTest.java b/test/044-proxy/src/BasicTest.java
index 5f04b9344c..7f301f667b 100644
--- a/test/044-proxy/src/BasicTest.java
+++ b/test/044-proxy/src/BasicTest.java
@@ -34,9 +34,9 @@ public class BasicTest {
Object proxy = createProxy(proxyMe);
if (!Proxy.isProxyClass(proxy.getClass()))
- System.err.println("not a proxy class?");
+ System.out.println("not a proxy class?");
if (Proxy.getInvocationHandler(proxy) == null)
- System.err.println("ERROR: Proxy.getInvocationHandler is null");
+ System.out.println("ERROR: Proxy.getInvocationHandler is null");
/* take it for a spin; verifies instanceof constraint */
Shapes shapes = (Shapes) proxy;
@@ -110,13 +110,13 @@ public class BasicTest {
//System.out.println("Constructor is " + cons);
proxy = cons.newInstance(handler);
} catch (NoSuchMethodException nsme) {
- System.err.println("failed: " + nsme);
+ System.out.println("failed: " + nsme);
} catch (InstantiationException ie) {
- System.err.println("failed: " + ie);
+ System.out.println("failed: " + ie);
} catch (IllegalAccessException ie) {
- System.err.println("failed: " + ie);
+ System.out.println("failed: " + ie);
} catch (InvocationTargetException ite) {
- System.err.println("failed: " + ite);
+ System.out.println("failed: " + ite);
}
return proxy;
diff --git a/test/044-proxy/src/Clash.java b/test/044-proxy/src/Clash.java
index d000112fb6..7dabe927b0 100644
--- a/test/044-proxy/src/Clash.java
+++ b/test/044-proxy/src/Clash.java
@@ -32,7 +32,7 @@ public class Clash {
Proxy.newProxyInstance(Clash.class.getClassLoader(),
new Class<?>[] { Interface1A.class, Interface1A.class },
handler);
- System.err.println("Dupe did not throw expected exception");
+ System.out.println("Dupe did not throw expected exception");
} catch (IllegalArgumentException iae) {
System.out.println("Dupe threw expected exception");
}
@@ -41,7 +41,7 @@ public class Clash {
Proxy.newProxyInstance(Clash.class.getClassLoader(),
new Class<?>[] { Interface1A.class, Interface1B.class },
handler);
- System.err.println("Clash did not throw expected exception");
+ System.out.println("Clash did not throw expected exception");
} catch (IllegalArgumentException iae) {
System.out.println("Clash threw expected exception");
}
diff --git a/test/044-proxy/src/Clash2.java b/test/044-proxy/src/Clash2.java
index e405cfea03..51221f2656 100644
--- a/test/044-proxy/src/Clash2.java
+++ b/test/044-proxy/src/Clash2.java
@@ -31,7 +31,7 @@ public class Clash2 {
Proxy.newProxyInstance(Clash.class.getClassLoader(),
new Class<?>[] { Interface2A.class, Interface2B.class },
handler);
- System.err.println("Clash2 did not throw expected exception");
+ System.out.println("Clash2 did not throw expected exception");
} catch (IllegalArgumentException iae) {
System.out.println("Clash2 threw expected exception");
}
diff --git a/test/044-proxy/src/Clash3.java b/test/044-proxy/src/Clash3.java
index 44806cee7d..9d23059b9c 100644
--- a/test/044-proxy/src/Clash3.java
+++ b/test/044-proxy/src/Clash3.java
@@ -35,7 +35,7 @@ public class Clash3 {
Interface3aa.class,
Interface3b.class },
handler);
- System.err.println("Clash3 did not throw expected exception");
+ System.out.println("Clash3 did not throw expected exception");
} catch (IllegalArgumentException iae) {
System.out.println("Clash3 threw expected exception");
}
diff --git a/test/044-proxy/src/Clash4.java b/test/044-proxy/src/Clash4.java
index ca5c3ab6e6..45d48208e1 100644
--- a/test/044-proxy/src/Clash4.java
+++ b/test/044-proxy/src/Clash4.java
@@ -36,7 +36,7 @@ public class Clash4 {
Interface4b.class,
Interface4bb.class },
handler);
- System.err.println("Clash4 did not throw expected exception");
+ System.out.println("Clash4 did not throw expected exception");
} catch (IllegalArgumentException iae) {
System.out.println("Clash4 threw expected exception");
//System.out.println(iae);
diff --git a/test/044-proxy/src/ConstructorProxy.java b/test/044-proxy/src/ConstructorProxy.java
index 95d150cbbd..dfafbd804a 100644
--- a/test/044-proxy/src/ConstructorProxy.java
+++ b/test/044-proxy/src/ConstructorProxy.java
@@ -28,7 +28,7 @@ class ConstructorProxy implements InvocationHandler {
new ConstructorProxy().runTest();
} catch (Exception e) {
System.out.println("Unexpected failure occured");
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
diff --git a/test/044-proxy/src/WrappedThrow.java b/test/044-proxy/src/WrappedThrow.java
index 643ba053b9..afea26d5d5 100644
--- a/test/044-proxy/src/WrappedThrow.java
+++ b/test/044-proxy/src/WrappedThrow.java
@@ -43,29 +43,29 @@ public class WrappedThrow {
InterfaceW2 if2 = (InterfaceW2) proxy;
try {
if1.throwFunky();
- System.err.println("No exception thrown");
+ System.out.println("No exception thrown");
} catch (UndeclaredThrowableException ute) {
System.out.println("Got expected UTE");
} catch (Throwable t) {
- System.err.println("Got unexpected exception: " + t);
+ System.out.println("Got unexpected exception: " + t);
}
try {
if1.throwFunky2();
- System.err.println("No exception thrown");
+ System.out.println("No exception thrown");
} catch (IOException ioe) {
System.out.println("Got expected IOE");
} catch (Throwable t) {
- System.err.println("Got unexpected exception: " + t);
+ System.out.println("Got unexpected exception: " + t);
}
try {
if2.throwFunky2();
- System.err.println("No exception thrown");
+ System.out.println("No exception thrown");
} catch (IOException ioe) {
System.out.println("Got expected IOE");
} catch (Throwable t) {
- System.err.println("Got unexpected exception: " + t);
+ System.out.println("Got unexpected exception: " + t);
}
/*
@@ -73,38 +73,38 @@ public class WrappedThrow {
*/
try {
if1.throwException();
- System.err.println("No exception thrown");
+ System.out.println("No exception thrown");
} catch (UndeclaredThrowableException ute) {
System.out.println("Got expected UTE");
} catch (Throwable t) {
- System.err.println("Got unexpected exception: " + t);
+ System.out.println("Got unexpected exception: " + t);
}
try {
if1.throwBase();
- System.err.println("No exception thrown");
+ System.out.println("No exception thrown");
} catch (UndeclaredThrowableException ute) {
System.out.println("Got expected UTE");
} catch (Throwable t) {
- System.err.println("Got unexpected exception: " + t);
+ System.out.println("Got unexpected exception: " + t);
}
try {
if2.throwSub();
- System.err.println("No exception thrown");
+ System.out.println("No exception thrown");
} catch (SubException se) {
System.out.println("Got expected exception");
} catch (Throwable t) {
- System.err.println("Got unexpected exception: " + t);
+ System.out.println("Got unexpected exception: " + t);
}
try {
if2.throwSubSub();
- System.err.println("No exception thrown");
+ System.out.println("No exception thrown");
} catch (SubException se) {
System.out.println("Got expected exception");
} catch (Throwable t) {
- System.err.println("Got unexpected exception: " + t);
+ System.out.println("Got unexpected exception: " + t);
}
/*
@@ -113,11 +113,11 @@ public class WrappedThrow {
*/
try {
if1.bothThrowBase();
- System.err.println("No exception thrown");
+ System.out.println("No exception thrown");
} catch (BaseException se) {
System.out.println("Got expected exception");
} catch (Throwable t) {
- System.err.println("Got unexpected exception: " + t);
+ System.out.println("Got unexpected exception: " + t);
}
}
}
diff --git a/test/045-reflect-array/src/Main.java b/test/045-reflect-array/src/Main.java
index 7418eed824..4c321b3303 100644
--- a/test/045-reflect-array/src/Main.java
+++ b/test/045-reflect-array/src/Main.java
@@ -102,7 +102,7 @@ public class Main {
throw new RuntimeException("load should have worked");
}
} catch (IllegalArgumentException iae) {
- iae.printStackTrace();
+ iae.printStackTrace(System.out);
}
try {
Array.getByte(charArray, 2);
@@ -116,7 +116,7 @@ public class Main {
+ Array.getInt(charArray, 3));
}
} catch (IllegalArgumentException iae) {
- iae.printStackTrace();
+ iae.printStackTrace(System.out);
}
System.out.println("ReflectArrayTest.testSingleChar passed");
diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java
index 10dad8ddac..b8a48ea247 100644
--- a/test/046-reflect/src/Main.java
+++ b/test/046-reflect/src/Main.java
@@ -89,7 +89,7 @@ public class Main {
try {
meth = target.getMethod("packageMethod");
- System.err.println("succeeded on package-scope method");
+ System.out.println("succeeded on package-scope method");
} catch (NoSuchMethodException nsme) {
// good
}
@@ -101,7 +101,7 @@ public class Main {
try {
if (!FULL_ACCESS_CHECKS) { throw new IllegalAccessException(); }
meth.invoke(instance);
- System.err.println("inner-method invoke unexpectedly worked");
+ System.out.println("inner-method invoke unexpectedly worked");
} catch (IllegalAccessException iae) {
// good
}
@@ -110,13 +110,13 @@ public class Main {
try {
int x = field.getInt(instance);
if (!FULL_ACCESS_CHECKS) { throw new IllegalAccessException(); }
- System.err.println("field get unexpectedly worked: " + x);
+ System.out.println("field get unexpectedly worked: " + x);
} catch (IllegalAccessException iae) {
// good
}
} catch (Exception ex) {
System.out.println("----- unexpected exception -----");
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
}
@@ -171,7 +171,7 @@ public class Main {
}
catch (Exception ex) {
System.out.println("GLITCH: invoke got wrong exception:");
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
System.out.println("");
@@ -400,7 +400,7 @@ public class Main {
} catch (Exception ex) {
System.out.println("----- unexpected exception -----");
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
System.out.println("ReflectTest done!");
@@ -414,7 +414,7 @@ public class Main {
m = Collections.class.getDeclaredMethod("swap",
Object[].class, int.class, int.class);
} catch (NoSuchMethodException nsme) {
- nsme.printStackTrace();
+ nsme.printStackTrace(System.out);
return;
}
System.out.println(m + " accessible=" + m.isAccessible());
@@ -423,10 +423,10 @@ public class Main {
try {
m.invoke(null, objects, 0, 1);
} catch (IllegalAccessException iae) {
- iae.printStackTrace();
+ iae.printStackTrace(System.out);
return;
} catch (InvocationTargetException ite) {
- ite.printStackTrace();
+ ite.printStackTrace(System.out);
return;
}
@@ -434,10 +434,10 @@ public class Main {
String s = "Should be ignored";
m.invoke(s, objects, 0, 1);
} catch (IllegalAccessException iae) {
- iae.printStackTrace();
+ iae.printStackTrace(System.out);
return;
} catch (InvocationTargetException ite) {
- ite.printStackTrace();
+ ite.printStackTrace(System.out);
return;
}
@@ -449,7 +449,7 @@ public class Main {
} catch (InvocationTargetException ite) {
System.out.println("checkType got expected exception");
} catch (IllegalAccessException iae) {
- iae.printStackTrace();
+ iae.printStackTrace(System.out);
return;
}
}
@@ -826,7 +826,7 @@ class FieldNoisyInit {
static {
System.out.println("FieldNoisyInit is initializing");
//Throwable th = new Throwable();
- //th.printStackTrace();
+ //th.printStackTrace(System.out);
}
}
@@ -842,7 +842,7 @@ class MethodNoisyInit {
static {
System.out.println("MethodNoisyInit is initializing");
//Throwable th = new Throwable();
- //th.printStackTrace();
+ //th.printStackTrace(System.out);
}
}
diff --git a/test/048-reflect-v8/src/DefaultDeclared.java b/test/048-reflect-v8/src/DefaultDeclared.java
index 16e8a24c00..d49bdc91a5 100644
--- a/test/048-reflect-v8/src/DefaultDeclared.java
+++ b/test/048-reflect-v8/src/DefaultDeclared.java
@@ -52,7 +52,7 @@ public class DefaultDeclared {
System.out.println("NoSuchMethodException thrown for class " + klass.toString());
} catch (Throwable t) {
System.out.println("Unknown error thrown for class " + klass.toString());
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
}
diff --git a/test/050-sync-test/src/Main.java b/test/050-sync-test/src/Main.java
index 5364e2aaaa..734b51e811 100644
--- a/test/050-sync-test/src/Main.java
+++ b/test/050-sync-test/src/Main.java
@@ -39,7 +39,7 @@ public class Main {
Thread.sleep(1000);
} catch (InterruptedException ie) {
System.out.println("INTERRUPT!");
- ie.printStackTrace();
+ ie.printStackTrace(System.out);
}
System.out.println("GONE");
}
@@ -56,7 +56,7 @@ public class Main {
one.wait();
} catch (InterruptedException ie) {
System.out.println("INTERRUPT!");
- ie.printStackTrace();
+ ie.printStackTrace(System.out);
}
}
@@ -69,7 +69,7 @@ public class Main {
two.join();
} catch (InterruptedException ie) {
System.out.println("INTERRUPT!");
- ie.printStackTrace();
+ ie.printStackTrace(System.out);
}
System.out.println("main: all done");
}
@@ -167,7 +167,7 @@ class SleepyThread extends Thread {
" interrupted, flag=" + Thread.interrupted());
intr = true;
} catch (Exception ex) {
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
if (!intr)
diff --git a/test/050-sync-test/src/ThreadDeathHandler.java b/test/050-sync-test/src/ThreadDeathHandler.java
index 0a7437d307..58061f8d00 100644
--- a/test/050-sync-test/src/ThreadDeathHandler.java
+++ b/test/050-sync-test/src/ThreadDeathHandler.java
@@ -27,7 +27,7 @@ public class ThreadDeathHandler implements Thread.UncaughtExceptionHandler {
}
public void uncaughtException(Thread t, Throwable e) {
- System.err.println("Uncaught exception " + mMyMessage + "!");
- e.printStackTrace();
+ System.out.println("Uncaught exception " + mMyMessage + "!");
+ e.printStackTrace(System.out);
}
}
diff --git a/test/051-thread/src/Main.java b/test/051-thread/src/Main.java
index 08cb5deeac..fe1cafef56 100644
--- a/test/051-thread/src/Main.java
+++ b/test/051-thread/src/Main.java
@@ -79,7 +79,7 @@ public class Main {
try {
t.join();
} catch (InterruptedException ex) {
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
System.out.print("testThreadDaemons finished\n");
diff --git a/test/053-wait-some/src/Main.java b/test/053-wait-some/src/Main.java
index 377a5781ea..b8e6dfeeae 100644
--- a/test/053-wait-some/src/Main.java
+++ b/test/053-wait-some/src/Main.java
@@ -39,7 +39,7 @@ public class Main {
} catch (IllegalArgumentException iae) {
System.out.println("Caught expected exception on neg arg");
} catch (InterruptedException ie) {
- ie.printStackTrace();
+ ie.printStackTrace(System.out);
}
for (long delay : DELAYS) {
@@ -49,7 +49,7 @@ public class Main {
try {
sleepy.wait(delay);
} catch (InterruptedException ie) {
- ie.printStackTrace();
+ ie.printStackTrace(System.out);
}
end = System.currentTimeMillis();
diff --git a/test/054-uncaught/src/Main.java b/test/054-uncaught/src/Main.java
index 688a2a4d79..43de7ae258 100644
--- a/test/054-uncaught/src/Main.java
+++ b/test/054-uncaught/src/Main.java
@@ -33,7 +33,7 @@ public class Main {
try {
t.join();
} catch (InterruptedException ex) {
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
}
@@ -41,7 +41,7 @@ public class Main {
ThreadDeathHandler defHandler = new ThreadDeathHandler("DEFAULT");
ThreadDeathHandler threadHandler = new ThreadDeathHandler("THREAD");
- System.err.println("Test " + which);
+ System.out.println("Test " + which);
switch (which) {
case 1: {
Thread.setDefaultUncaughtExceptionHandler(defHandler);
diff --git a/test/054-uncaught/src/ThreadDeathHandler.java b/test/054-uncaught/src/ThreadDeathHandler.java
index 0a7437d307..58061f8d00 100644
--- a/test/054-uncaught/src/ThreadDeathHandler.java
+++ b/test/054-uncaught/src/ThreadDeathHandler.java
@@ -27,7 +27,7 @@ public class ThreadDeathHandler implements Thread.UncaughtExceptionHandler {
}
public void uncaughtException(Thread t, Throwable e) {
- System.err.println("Uncaught exception " + mMyMessage + "!");
- e.printStackTrace();
+ System.out.println("Uncaught exception " + mMyMessage + "!");
+ e.printStackTrace(System.out);
}
}
diff --git a/test/059-finalizer-throw/src/Main.java b/test/059-finalizer-throw/src/Main.java
index fa80fe3f7a..3bfbc2d5b6 100644
--- a/test/059-finalizer-throw/src/Main.java
+++ b/test/059-finalizer-throw/src/Main.java
@@ -46,7 +46,7 @@ public class Main {
try {
Thread.sleep(500);
} catch (InterruptedException ie) {
- System.err.println(ie);
+ System.out.println(ie);
}
}
@@ -54,7 +54,7 @@ public class Main {
try {
Thread.sleep(750);
} catch (InterruptedException ie) {
- System.err.println(ie);
+ System.out.println(ie);
}
System.out.println("done");
diff --git a/test/064-field-access/src/Main.java b/test/064-field-access/src/Main.java
index 50ad5b9a20..b08f3ae305 100644
--- a/test/064-field-access/src/Main.java
+++ b/test/064-field-access/src/Main.java
@@ -28,7 +28,7 @@ public class Main {
try {
GetNonexistent.main(null);
- System.err.println("Not expected to succeed");
+ System.out.println("Not expected to succeed");
} catch (VerifyError fe) {
// dalvik
System.out.println("Got expected failure");
@@ -101,22 +101,22 @@ public class Main {
/* success; expected? */
if (expectedException != null) {
- System.err.println("ERROR: call succeeded for field " + field +
+ System.out.println("ERROR: call succeeded for field " + field +
" with a read of type '" + type +
"', was expecting " + expectedException);
Thread.dumpStack();
}
} catch (Exception ex) {
if (expectedException == null) {
- System.err.println("ERROR: call failed unexpectedly: "
+ System.out.println("ERROR: call failed unexpectedly: "
+ ex.getClass());
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
} else {
if (!expectedException.equals(ex.getClass())) {
- System.err.println("ERROR: incorrect exception: wanted "
+ System.out.println("ERROR: incorrect exception: wanted "
+ expectedException.getName() + ", got "
+ ex.getClass());
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
}
}
@@ -675,22 +675,22 @@ class SubClass extends PublicClass {
/* success; expected? */
if (expectedException != null) {
- System.err.println("ERROR: call succeeded for field " + field +
+ System.out.println("ERROR: call succeeded for field " + field +
" with a read of type '" + type +
"', was expecting " + expectedException);
Thread.dumpStack();
}
} catch (Exception ex) {
if (expectedException == null) {
- System.err.println("ERROR: call failed unexpectedly: "
+ System.out.println("ERROR: call failed unexpectedly: "
+ ex.getClass());
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
} else {
if (!expectedException.equals(ex.getClass())) {
- System.err.println("ERROR: incorrect exception: wanted "
+ System.out.println("ERROR: incorrect exception: wanted "
+ expectedException.getName() + ", got "
+ ex.getClass());
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
}
}
@@ -704,19 +704,19 @@ class SubClass extends PublicClass {
result = method.invoke(obj);
/* success; expected? */
if (expectedException != null) {
- System.err.println("ERROR: call succeeded for method " + method + "', was expecting " +
+ System.out.println("ERROR: call succeeded for method " + method + "', was expecting " +
expectedException);
Thread.dumpStack();
}
} catch (Exception ex) {
if (expectedException == null) {
- System.err.println("ERROR: call failed unexpectedly: " + ex.getClass());
- ex.printStackTrace();
+ System.out.println("ERROR: call failed unexpectedly: " + ex.getClass());
+ ex.printStackTrace(System.out);
} else {
if (!expectedException.equals(ex.getClass())) {
- System.err.println("ERROR: incorrect exception: wanted " + expectedException.getName() +
+ System.out.println("ERROR: incorrect exception: wanted " + expectedException.getName() +
", got " + ex.getClass());
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
}
}
diff --git a/test/065-mismatched-implements/src/Main.java b/test/065-mismatched-implements/src/Main.java
index 5975b99e92..55d0babbef 100644
--- a/test/065-mismatched-implements/src/Main.java
+++ b/test/065-mismatched-implements/src/Main.java
@@ -21,7 +21,7 @@ public class Main {
public static void main(String[] args) {
try {
Indirect.main();
- System.err.println("Succeeded unexpectedly");
+ System.out.println("Succeeded unexpectedly");
} catch (IncompatibleClassChangeError icce) {
System.out.println("Got expected ICCE");
}
diff --git a/test/066-mismatched-super/src/Main.java b/test/066-mismatched-super/src/Main.java
index 5975b99e92..55d0babbef 100644
--- a/test/066-mismatched-super/src/Main.java
+++ b/test/066-mismatched-super/src/Main.java
@@ -21,7 +21,7 @@ public class Main {
public static void main(String[] args) {
try {
Indirect.main();
- System.err.println("Succeeded unexpectedly");
+ System.out.println("Succeeded unexpectedly");
} catch (IncompatibleClassChangeError icce) {
System.out.println("Got expected ICCE");
}
diff --git a/test/068-classloader/src/Main.java b/test/068-classloader/src/Main.java
index 01539b7172..0aaa1528c0 100644
--- a/test/068-classloader/src/Main.java
+++ b/test/068-classloader/src/Main.java
@@ -129,7 +129,7 @@ public class Main {
throw new RuntimeException("target 2 has unexpected value " + value);
}
} catch (Exception ex) {
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
}
@@ -153,8 +153,8 @@ public class Main {
try {
altClass = loader.loadClass("Inaccessible1");
} catch (ClassNotFoundException cnfe) {
- System.err.println("loadClass failed");
- cnfe.printStackTrace();
+ System.out.println("loadClass failed");
+ cnfe.printStackTrace(System.out);
return;
}
@@ -162,9 +162,9 @@ public class Main {
Object obj;
try {
obj = altClass.newInstance();
- System.err.println("ERROR: Inaccessible1 was accessible");
+ System.out.println("ERROR: Inaccessible1 was accessible");
} catch (InstantiationException ie) {
- System.err.println("newInstance failed: " + ie);
+ System.out.println("newInstance failed: " + ie);
return;
} catch (IllegalAccessException iae) {
System.out.println("Got expected access exception #1");
@@ -182,14 +182,14 @@ public class Main {
try {
altClass = loader.loadClass("Inaccessible2");
- System.err.println("ERROR: Inaccessible2 was accessible: " + altClass);
+ System.out.println("ERROR: Inaccessible2 was accessible: " + altClass);
} catch (ClassNotFoundException cnfe) {
Throwable cause = cnfe.getCause();
if (cause instanceof IllegalAccessError) {
System.out.println("Got expected CNFE/IAE #2");
} else {
- System.err.println("Got unexpected CNFE/IAE #2");
- cnfe.printStackTrace();
+ System.out.println("Got unexpected CNFE/IAE #2");
+ cnfe.printStackTrace(System.out);
}
}
}
@@ -202,14 +202,14 @@ public class Main {
try {
altClass = loader.loadClass("Inaccessible3");
- System.err.println("ERROR: Inaccessible3 was accessible: " + altClass);
+ System.out.println("ERROR: Inaccessible3 was accessible: " + altClass);
} catch (ClassNotFoundException cnfe) {
Throwable cause = cnfe.getCause();
if (cause instanceof IllegalAccessError) {
System.out.println("Got expected CNFE/IAE #3");
} else {
- System.err.println("Got unexpected CNFE/IAE #3");
- cnfe.printStackTrace();
+ System.out.println("Got unexpected CNFE/IAE #3");
+ cnfe.printStackTrace(System.out);
}
}
}
@@ -227,7 +227,7 @@ public class Main {
//System.out.println("+++ DoubledExtend is " + doubledExtendClass
// + " in " + doubledExtendClass.getClassLoader());
} catch (ClassNotFoundException cnfe) {
- System.err.println("loadClass failed: " + cnfe);
+ System.out.println("loadClass failed: " + cnfe);
return;
}
@@ -235,10 +235,10 @@ public class Main {
try {
obj = doubledExtendClass.newInstance();
} catch (InstantiationException ie) {
- System.err.println("newInstance failed: " + ie);
+ System.out.println("newInstance failed: " + ie);
return;
} catch (IllegalAccessException iae) {
- System.err.println("newInstance failed: " + iae);
+ System.out.println("newInstance failed: " + iae);
return;
} catch (LinkageError le) {
System.out.println("Got expected LinkageError on DE");
@@ -254,8 +254,8 @@ public class Main {
String result;
result = Base.doStuff(de);
- System.err.println("ERROR: did not get LinkageError on DE");
- System.err.println("(result=" + result + ")");
+ System.out.println("ERROR: did not get LinkageError on DE");
+ System.out.println("(result=" + result + ")");
} catch (LinkageError le) {
System.out.println("Got expected LinkageError on DE");
return;
@@ -274,7 +274,7 @@ public class Main {
try {
doubledExtendOkayClass = loader.loadClass("DoubledExtendOkay");
} catch (ClassNotFoundException cnfe) {
- System.err.println("loadClass failed: " + cnfe);
+ System.out.println("loadClass failed: " + cnfe);
return;
}
@@ -282,14 +282,14 @@ public class Main {
try {
obj = doubledExtendOkayClass.newInstance();
} catch (InstantiationException ie) {
- System.err.println("newInstance failed: " + ie);
+ System.out.println("newInstance failed: " + ie);
return;
} catch (IllegalAccessException iae) {
- System.err.println("newInstance failed: " + iae);
+ System.out.println("newInstance failed: " + iae);
return;
} catch (LinkageError le) {
- System.err.println("Got unexpected LinkageError on DEO");
- le.printStackTrace();
+ System.out.println("Got unexpected LinkageError on DEO");
+ le.printStackTrace(System.out);
return;
}
@@ -304,8 +304,8 @@ public class Main {
result = BaseOkay.doStuff(de);
System.out.println("Got DEO result " + result);
} catch (LinkageError le) {
- System.err.println("Got unexpected LinkageError on DEO");
- le.printStackTrace();
+ System.out.println("Got unexpected LinkageError on DEO");
+ le.printStackTrace(System.out);
return;
}
}
@@ -322,7 +322,7 @@ public class Main {
try {
getDoubledClass = loader.loadClass("GetDoubled");
} catch (ClassNotFoundException cnfe) {
- System.err.println("loadClass failed: " + cnfe);
+ System.out.println("loadClass failed: " + cnfe);
return;
}
@@ -330,10 +330,10 @@ public class Main {
try {
obj = getDoubledClass.newInstance();
} catch (InstantiationException ie) {
- System.err.println("newInstance failed: " + ie);
+ System.out.println("newInstance failed: " + ie);
return;
} catch (IllegalAccessException iae) {
- System.err.println("newInstance failed: " + iae);
+ System.out.println("newInstance failed: " + iae);
return;
} catch (LinkageError le) {
// Dalvik bails here
@@ -354,7 +354,7 @@ public class Main {
System.out.println("Got LinkageError on GD");
return;
}
- System.err.println("Should have failed by now on GetDoubled");
+ System.out.println("Should have failed by now on GetDoubled");
}
/**
@@ -368,7 +368,7 @@ public class Main {
try {
abstractGetClass = loader.loadClass("AbstractGet");
} catch (ClassNotFoundException cnfe) {
- System.err.println("loadClass ta failed: " + cnfe);
+ System.out.println("loadClass ta failed: " + cnfe);
return;
}
@@ -376,10 +376,10 @@ public class Main {
try {
obj = abstractGetClass.newInstance();
} catch (InstantiationException ie) {
- System.err.println("newInstance failed: " + ie);
+ System.out.println("newInstance failed: " + ie);
return;
} catch (IllegalAccessException iae) {
- System.err.println("newInstance failed: " + iae);
+ System.out.println("newInstance failed: " + iae);
return;
} catch (LinkageError le) {
System.out.println("Got LinkageError on TA");
@@ -399,7 +399,7 @@ public class Main {
System.out.println("Got LinkageError on TA");
return;
}
- System.err.println("Should have failed by now in testAbstract");
+ System.out.println("Should have failed by now in testAbstract");
}
/**
@@ -415,7 +415,7 @@ public class Main {
try {
doubledImplementClass = loader.loadClass("DoubledImplement");
} catch (ClassNotFoundException cnfe) {
- System.err.println("loadClass failed: " + cnfe);
+ System.out.println("loadClass failed: " + cnfe);
return;
}
@@ -423,10 +423,10 @@ public class Main {
try {
obj = doubledImplementClass.newInstance();
} catch (InstantiationException ie) {
- System.err.println("newInstance failed: " + ie);
+ System.out.println("newInstance failed: " + ie);
return;
} catch (IllegalAccessException iae) {
- System.err.println("newInstance failed: " + iae);
+ System.out.println("newInstance failed: " + iae);
return;
} catch (LinkageError le) {
System.out.println("Got LinkageError on DI (early)");
@@ -447,7 +447,7 @@ public class Main {
try {
di.one();
if (!isOne) {
- System.err.println("ERROR: did not get LinkageError on DI");
+ System.out.println("ERROR: did not get LinkageError on DI");
}
} catch (LinkageError le) {
if (!isOne) {
@@ -476,7 +476,7 @@ public class Main {
ifaceImplClass = loader.loadClass("IfaceImpl");
ifaceImplClass = loader.loadClass("DoubledImplement2");
} catch (ClassNotFoundException cnfe) {
- System.err.println("loadClass failed: " + cnfe);
+ System.out.println("loadClass failed: " + cnfe);
return;
}
@@ -484,10 +484,10 @@ public class Main {
try {
obj = ifaceImplClass.newInstance();
} catch (InstantiationException ie) {
- System.err.println("newInstance failed: " + ie);
+ System.out.println("newInstance failed: " + ie);
return;
} catch (IllegalAccessException iae) {
- System.err.println("newInstance failed: " + iae);
+ System.out.println("newInstance failed: " + iae);
return;
} catch (LinkageError le) {
System.out.println("Got LinkageError on IDI (early)");
diff --git a/test/069-field-type/src/Main.java b/test/069-field-type/src/Main.java
index f9885e64b2..d9aa9e11bf 100644
--- a/test/069-field-type/src/Main.java
+++ b/test/069-field-type/src/Main.java
@@ -19,7 +19,7 @@ public class Main {
/* try to use the reference; should fail */
try {
holder.mValue.run();
- System.err.println("ERROR: did not get expected ICCE");
+ System.out.println("ERROR: did not get expected ICCE");
} catch (IncompatibleClassChangeError icce) {
System.out.println("Got expected IncompatibleClassChangeError");
}
diff --git a/test/070-nio-buffer/src/Main.java b/test/070-nio-buffer/src/Main.java
index a7433b8516..a3eeb3fda6 100644
--- a/test/070-nio-buffer/src/Main.java
+++ b/test/070-nio-buffer/src/Main.java
@@ -58,7 +58,7 @@ public class Main {
try {
shortBuf.put(myShorts, 0, 1); // should fail
- System.err.println("ERROR: out-of-bounds put succeeded\n");
+ System.out.println("ERROR: out-of-bounds put succeeded\n");
} catch (BufferOverflowException boe) {
System.out.println("Got expected buffer overflow exception");
}
@@ -66,7 +66,7 @@ public class Main {
try {
shortBuf.position(0);
shortBuf.put(myShorts, 0, 33); // should fail
- System.err.println("ERROR: out-of-bounds put succeeded\n");
+ System.out.println("ERROR: out-of-bounds put succeeded\n");
} catch (IndexOutOfBoundsException ioobe) {
System.out.println("Got expected out-of-bounds exception");
}
@@ -74,7 +74,7 @@ public class Main {
try {
shortBuf.position(16);
shortBuf.put(myShorts, 0, 17); // should fail
- System.err.println("ERROR: out-of-bounds put succeeded\n");
+ System.out.println("ERROR: out-of-bounds put succeeded\n");
} catch (BufferOverflowException boe) {
System.out.println("Got expected buffer overflow exception");
}
diff --git a/test/073-mismatched-field/src/Main.java b/test/073-mismatched-field/src/Main.java
index 70709c0c86..2d6b9eb51e 100644
--- a/test/073-mismatched-field/src/Main.java
+++ b/test/073-mismatched-field/src/Main.java
@@ -23,7 +23,7 @@ public class Main extends SuperMain implements IMain {
void doit() {
try {
System.out.println("value=" + this.f);
- System.err.println("Succeeded unexpectedly");
+ System.out.println("Succeeded unexpectedly");
} catch (IncompatibleClassChangeError icce) {
System.out.println("Got expected failure");
}
diff --git a/test/074-gc-thrash/src/Main.java b/test/074-gc-thrash/src/Main.java
index df0479365f..5165df7bd3 100644
--- a/test/074-gc-thrash/src/Main.java
+++ b/test/074-gc-thrash/src/Main.java
@@ -52,9 +52,9 @@ public class Main {
try {
dumpHprofDataMethod.invoke(null, dumpFile);
} catch (IllegalAccessException iae) {
- System.err.println(iae);
+ System.out.println(iae);
} catch (InvocationTargetException ite) {
- System.err.println(ite);
+ System.out.println(ite);
}
}
@@ -80,7 +80,7 @@ public class Main {
try {
meth = vmdClass.getMethod("dumpHprofData", String.class);
} catch (NoSuchMethodException nsme) {
- System.err.println("Found VMDebug but not dumpHprofData method");
+ System.out.println("Found VMDebug but not dumpHprofData method");
return null;
}
@@ -126,7 +126,7 @@ public class Main {
deep.join();
large.join();
} catch (InterruptedException ie) {
- System.err.println("join was interrupted");
+ System.out.println("join was interrupted");
}
}
@@ -137,7 +137,7 @@ public class Main {
try {
Thread.sleep(ms);
} catch (InterruptedException ie) {
- System.err.println("sleep was interrupted");
+ System.out.println("sleep was interrupted");
}
}
@@ -213,7 +213,7 @@ class Deep extends Thread {
}
if (!once) {
- System.err.println("not even once?");
+ System.out.println("not even once?");
return;
}
@@ -229,7 +229,7 @@ class Deep extends Thread {
for (int i = 0; i < MAX_DEPTH; i++) {
if (weak[i].get() != null) {
- System.err.println("Deep: weak still has " + i);
+ System.out.println("Deep: weak still has " + i);
}
}
@@ -251,7 +251,7 @@ class Deep extends Thread {
private static void checkStringReferences() {
for (int i = 0; i < MAX_DEPTH; i++) {
if (strong[i] != weak[i].get()) {
- System.err.println("Deep: " + i + " strong=" + strong[i] +
+ System.out.println("Deep: " + i + " strong=" + strong[i] +
", weak=" + weak[i].get());
}
}
diff --git a/test/075-verification-error/src/Main.java b/test/075-verification-error/src/Main.java
index 9b66a8d5e1..3f2881eb10 100644
--- a/test/075-verification-error/src/Main.java
+++ b/test/075-verification-error/src/Main.java
@@ -36,12 +36,12 @@ public class Main {
static void testClassNewInstance() {
try {
MaybeAbstract ma = new MaybeAbstract();
- System.err.println("ERROR: MaybeAbstract succeeded unexpectedly");
+ System.out.println("ERROR: MaybeAbstract succeeded unexpectedly");
} catch (InstantiationError ie) {
System.out.println("Got expected InstantationError");
if (VERBOSE) System.out.println("--- " + ie);
} catch (Exception ex) {
- System.err.println("Got unexpected MaybeAbstract failure");
+ System.out.println("Got unexpected MaybeAbstract failure");
}
}
@@ -88,7 +88,7 @@ public class Main {
try {
int x = mutant.inaccessibleField;
- System.err.println("ERROR: bad access succeeded (ifield)");
+ System.out.println("ERROR: bad access succeeded (ifield)");
} catch (IllegalAccessError iae) {
System.out.println("Got expected IllegalAccessError (ifield)");
if (VERBOSE) System.out.println("--- " + iae);
@@ -96,7 +96,7 @@ public class Main {
try {
int y = Mutant.inaccessibleStaticField;
- System.err.println("ERROR: bad access succeeded (sfield)");
+ System.out.println("ERROR: bad access succeeded (sfield)");
} catch (IllegalAccessError iae) {
System.out.println("Got expected IllegalAccessError (sfield)");
if (VERBOSE) System.out.println("--- " + iae);
@@ -104,7 +104,7 @@ public class Main {
try {
mutant.inaccessibleMethod();
- System.err.println("ERROR: bad access succeeded (method)");
+ System.out.println("ERROR: bad access succeeded (method)");
} catch (IllegalAccessError iae) {
System.out.println("Got expected IllegalAccessError (method)");
if (VERBOSE) System.out.println("--- " + iae);
@@ -112,7 +112,7 @@ public class Main {
try {
Mutant.inaccessibleStaticMethod();
- System.err.println("ERROR: bad access succeeded (smethod)");
+ System.out.println("ERROR: bad access succeeded (smethod)");
} catch (IllegalAccessError iae) {
System.out.println("Got expected IllegalAccessError (smethod)");
if (VERBOSE) System.out.println("--- " + iae);
@@ -121,7 +121,7 @@ public class Main {
try {
/* accessible static method in an inaccessible class */
InaccessibleClass.test();
- System.err.println("ERROR: bad meth-class access succeeded (meth-class)");
+ System.out.println("ERROR: bad meth-class access succeeded (meth-class)");
} catch (IllegalAccessError iae) {
System.out.println("Got expected IllegalAccessError (meth-class)");
if (VERBOSE) System.out.println("--- " + iae);
@@ -130,7 +130,7 @@ public class Main {
try {
/* accessible static field in an inaccessible class */
int blah = InaccessibleClass.blah;
- System.err.println("ERROR: bad field-class access succeeded (field-class)");
+ System.out.println("ERROR: bad field-class access succeeded (field-class)");
} catch (IllegalAccessError iae) {
System.out.println("Got expected IllegalAccessError (field-class)");
if (VERBOSE) System.out.println("--- " + iae);
@@ -139,7 +139,7 @@ public class Main {
try {
/* inaccessible static method in an accessible class */
InaccessibleMethod.test();
- System.err.println("ERROR: bad access succeeded (meth-meth)");
+ System.out.println("ERROR: bad access succeeded (meth-meth)");
} catch (IllegalAccessError iae) {
System.out.println("Got expected IllegalAccessError (meth-meth)");
if (VERBOSE) System.out.println("--- " + iae);
diff --git a/test/077-method-override/src/Main.java b/test/077-method-override/src/Main.java
index 84bdf35c29..3a3c528d61 100644
--- a/test/077-method-override/src/Main.java
+++ b/test/077-method-override/src/Main.java
@@ -37,8 +37,8 @@ public class Main {
((Base)derived).overrideVirtualWithStatic();
} catch (NoSuchMethodError nsme) {
/* NSME is subclass of ICCE, so check it explicitly */
- System.err.println("Got NSME - ovws");
- nsme.printStackTrace(System.err);
+ System.out.println("Got NSME - ovws");
+ nsme.printStackTrace(System.out);
} catch (IncompatibleClassChangeError icce) {
System.out.println("Got expected exception - ovws");
}
@@ -46,8 +46,8 @@ public class Main {
try {
((Base)derived).overrideStaticWithVirtual();
} catch (NoSuchMethodError nsme) {
- System.err.println("Got NSME - oswv");
- nsme.printStackTrace(System.err);
+ System.out.println("Got NSME - oswv");
+ nsme.printStackTrace(System.out);
} catch (IncompatibleClassChangeError icce) {
System.out.println("Got expected exception - oswv");
}
diff --git a/test/079-phantom/src/Main.java b/test/079-phantom/src/Main.java
index c54bc0be68..daead2e1dd 100644
--- a/test/079-phantom/src/Main.java
+++ b/test/079-phantom/src/Main.java
@@ -21,7 +21,7 @@ public class Main {
try {
Thread.sleep(ms);
} catch (InterruptedException ie) {
- System.err.println("sleep interrupted");
+ System.out.println("sleep interrupted");
}
}
diff --git a/test/084-class-init/src/Main.java b/test/084-class-init/src/Main.java
index 28eb3e923f..a60fbac84f 100644
--- a/test/084-class-init/src/Main.java
+++ b/test/084-class-init/src/Main.java
@@ -24,7 +24,7 @@ public class Main {
// that is currently a resolution stub because it's running on behalf of <clinit>.
try {
throwDuringClinit();
- System.err.println("didn't throw!");
+ System.out.println("didn't throw!");
} catch (NullPointerException ex) {
System.out.println("caught exception thrown during clinit");
}
@@ -44,34 +44,34 @@ public class Main {
try {
Thread.sleep(msec);
} catch (InterruptedException ie) {
- System.err.println("sleep interrupted");
+ System.out.println("sleep interrupted");
}
}
static void checkExceptions() {
try {
System.out.println(PartialInit.FIELD0);
- System.err.println("Construction of PartialInit succeeded unexpectedly");
+ System.out.println("Construction of PartialInit succeeded unexpectedly");
} catch (ExceptionInInitializerError eiie) {
System.out.println("Got expected EIIE for FIELD0");
}
try {
System.out.println(PartialInit.FIELD0);
- System.err.println("Load of FIELD0 succeeded unexpectedly");
+ System.out.println("Load of FIELD0 succeeded unexpectedly");
} catch (NoClassDefFoundError ncdfe) {
System.out.println("Got expected NCDFE for FIELD0");
}
try {
System.out.println(PartialInit.FIELD1);
- System.err.println("Load of FIELD1 succeeded unexpectedly");
+ System.out.println("Load of FIELD1 succeeded unexpectedly");
} catch (NoClassDefFoundError ncdfe) {
System.out.println("Got expected NCDFE for FIELD1");
}
try {
System.out.println(Exploder.FIELD);
- System.err.println("Load of FIELD succeeded unexpectedly");
+ System.out.println("Load of FIELD succeeded unexpectedly");
} catch (AssertionError expected) {
System.out.println("Got expected '" + expected.getMessage() + "' from Exploder");
}
@@ -92,7 +92,7 @@ public class Main {
fieldThread.join();
methodThread.join();
} catch (InterruptedException ie) {
- System.err.println(ie);
+ System.out.println(ie);
}
/* print all values */
diff --git a/test/086-null-super/src/Main.java b/test/086-null-super/src/Main.java
index 8bd17860ea..039a9599e1 100644
--- a/test/086-null-super/src/Main.java
+++ b/test/086-null-super/src/Main.java
@@ -149,14 +149,14 @@ public class Main {
loader = new BrokenDexLoader(ClassLoader.getSystemClassLoader());
loader.findBrokenClass();
- System.err.println("ERROR: Inaccessible was accessible");
+ System.out.println("ERROR: Inaccessible was accessible");
} catch (InvocationTargetException ite) {
Throwable cause = ite.getCause();
if (cause instanceof NullPointerException) {
- System.err.println("Got expected ITE/NPE");
+ System.out.println("Got expected ITE/NPE");
} else {
- System.err.println("Got unexpected ITE");
- ite.printStackTrace();
+ System.out.println("Got unexpected ITE");
+ ite.printStackTrace(System.out);
}
}
}
diff --git a/test/088-monitor-verification/src/Main.java b/test/088-monitor-verification/src/Main.java
index a6f0e642d4..bca3df6828 100644
--- a/test/088-monitor-verification/src/Main.java
+++ b/test/088-monitor-verification/src/Main.java
@@ -41,7 +41,7 @@ public class Main {
m.nestedMayThrow(false);
try {
m.nestedMayThrow(true);
- System.err.println("nestedThrow(true) did not throw");
+ System.out.println("nestedThrow(true) did not throw");
} catch (MyException me) {}
System.out.println("nestedMayThrow ok");
diff --git a/test/092-locale/src/Main.java b/test/092-locale/src/Main.java
index 8916a29c47..60c0551b84 100644
--- a/test/092-locale/src/Main.java
+++ b/test/092-locale/src/Main.java
@@ -34,31 +34,31 @@ public class Main {
try {
testCalendar();
} catch (Exception ex) {
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
try {
testDateFormatSymbols();
} catch (Exception ex) {
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
try {
testCurrency();
} catch (Exception ex) {
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
try {
testNormalizer();
} catch (Exception ex) {
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
try {
testIso3();
} catch (Exception ex) {
- ex.printStackTrace();
+ ex.printStackTrace(System.out);
}
}
@@ -125,13 +125,13 @@ public class Main {
res = Normalizer.normalize(composed, Normalizer.Form.NFD);
if (!decomposed.equals(res)) {
- System.err.println("Bad decompose: '" + composed + "' --> '"
+ System.out.println("Bad decompose: '" + composed + "' --> '"
+ res + "'");
}
res = Normalizer.normalize(decomposed, Normalizer.Form.NFC);
if (!composed.equals(res)) {
- System.err.println("Bad compose: '" + decomposed + "' --> '"
+ System.out.println("Bad compose: '" + decomposed + "' --> '"
+ res + "'");
}
@@ -153,7 +153,7 @@ public class Main {
try {
System.out.println(" iso3=" + loc.getISO3Language());
} catch (MissingResourceException mre) {
- System.err.println("couldn't get iso3 language");
+ System.out.println("couldn't get iso3 language");
}
}
}
diff --git a/test/095-switch-MAX_INT/src/Main.java b/test/095-switch-MAX_INT/src/Main.java
index d1171ea6bc..a004a1aede 100644
--- a/test/095-switch-MAX_INT/src/Main.java
+++ b/test/095-switch-MAX_INT/src/Main.java
@@ -2,7 +2,7 @@ public class Main {
static public void main(String[] args) throws Exception {
switch (0x7fffffff) {
case 0x7fffffff:
- System.err.println("good");
+ System.out.println("good");
break;
default:
throw new AssertionError();
diff --git a/test/100-reflect2/src/Main.java b/test/100-reflect2/src/Main.java
index 91ba3075f4..5f6ffa8ae9 100644
--- a/test/100-reflect2/src/Main.java
+++ b/test/100-reflect2/src/Main.java
@@ -292,7 +292,7 @@ class Main {
// Expected.
} catch (Exception e) {
// Error.
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
@@ -304,7 +304,7 @@ class Main {
cons.newInstance();
} catch (Exception e) {
// Error.
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
diff --git a/test/101-fibonacci/src/Main.java b/test/101-fibonacci/src/Main.java
index c594edbded..9c57ba76eb 100644
--- a/test/101-fibonacci/src/Main.java
+++ b/test/101-fibonacci/src/Main.java
@@ -51,7 +51,7 @@ class Main {
y = fibonacci(x + 1);
System.out.printf("fibonacci(%d)=%d\n", x + 1, y);
} catch (NumberFormatException ex) {
- System.err.println(ex);
+ System.out.println(ex);
System.exit(1);
}
}
diff --git a/test/109-suspend-check/src/Main.java b/test/109-suspend-check/src/Main.java
index 3c3353b4db..e140a59173 100644
--- a/test/109-suspend-check/src/Main.java
+++ b/test/109-suspend-check/src/Main.java
@@ -55,7 +55,7 @@ public class Main {
try {
Thread.sleep(ms);
} catch (InterruptedException ie) {
- System.err.println("sleep was interrupted");
+ System.out.println("sleep was interrupted");
}
}
}
diff --git a/test/114-ParallelGC/src/Main.java b/test/114-ParallelGC/src/Main.java
index 159dd5c926..2199872ba6 100644
--- a/test/114-ParallelGC/src/Main.java
+++ b/test/114-ParallelGC/src/Main.java
@@ -82,7 +82,7 @@ public class Main implements Runnable {
// Any exception or error getting here is bad.
try {
// May need allocations...
- t.printStackTrace(System.err);
+ t.printStackTrace(System.out);
} catch (Throwable tInner) {
}
System.exit(1);
diff --git a/test/115-native-bridge/check b/test/115-native-bridge/check
new file mode 100755
index 0000000000..1ecf3348ca
--- /dev/null
+++ b/test/115-native-bridge/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ASAN prints a warning here.
+
+sed -e '/WARNING: ASan is ignoring requested __asan_handle_no_return/,+2d' "$2" | \
+ diff --strip-trailing-cr -q "$1" - >/dev/null
diff --git a/test/120-hashcode/src/Main.java b/test/120-hashcode/src/Main.java
index d2435cee62..0955f50c1a 100644
--- a/test/120-hashcode/src/Main.java
+++ b/test/120-hashcode/src/Main.java
@@ -30,7 +30,7 @@ public class Main {
// Make sure that all the hashes agree.
if (hashOrig != hashInflated || hashOrig != hashSystemOrig ||
hashSystemOrig != hashSystemInflated) {
- System.err.println("hash codes dont match: " + hashOrig + " " + hashInflated + " " +
+ System.out.println("hash codes dont match: " + hashOrig + " " + hashInflated + " " +
hashSystemOrig + " " + hashSystemInflated);
}
System.out.println("Done.");
diff --git a/test/130-hprof/src/Main.java b/test/130-hprof/src/Main.java
index 5899dd1183..a8597f1391 100644
--- a/test/130-hprof/src/Main.java
+++ b/test/130-hprof/src/Main.java
@@ -140,7 +140,7 @@ public class Main {
allocator.join();
dumper.join();
} catch (InterruptedException e) {
- System.err.println("join interrupted");
+ System.out.println("join interrupted");
}
}
@@ -178,7 +178,7 @@ public class Main {
try {
Thread.sleep(ms);
} catch (InterruptedException e) {
- System.err.println("sleep interrupted");
+ System.out.println("sleep interrupted");
}
}
@@ -223,7 +223,7 @@ public class Main {
try {
meth = vmdClass.getMethod("dumpHprofData", String.class);
} catch (NoSuchMethodException nsme) {
- System.err.println("Found VMDebug but not dumpHprofData method");
+ System.out.println("Found VMDebug but not dumpHprofData method");
return null;
}
diff --git a/test/1337-gc-coverage/gc_coverage.cc b/test/1337-gc-coverage/gc_coverage.cc
index 1cb2fb0976..ac959f68e8 100644
--- a/test/1337-gc-coverage/gc_coverage.cc
+++ b/test/1337-gc-coverage/gc_coverage.cc
@@ -18,7 +18,7 @@
#include "jni.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace {
diff --git a/test/135-MirandaDispatch/src/Main.java b/test/135-MirandaDispatch/src/Main.java
index ada8cefead..ab2a90b3b7 100644
--- a/test/135-MirandaDispatch/src/Main.java
+++ b/test/135-MirandaDispatch/src/Main.java
@@ -53,7 +53,7 @@ public class Main {
} catch (VerifyError expected) {
System.out.println("b/21646347");
} catch (Throwable t) {
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
System.out.println("Finishing");
}
diff --git a/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
index b7293015cf..7d40f5773d 100644
--- a/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
+++ b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
@@ -21,7 +21,7 @@
#include "base/macros.h"
#include "java_vm_ext.h"
#include "jni_env_ext.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace {
diff --git a/test/138-duplicate-classes-check/src/Main.java b/test/138-duplicate-classes-check/src/Main.java
index 5ffceb94fe..b32f0bcc46 100644
--- a/test/138-duplicate-classes-check/src/Main.java
+++ b/test/138-duplicate-classes-check/src/Main.java
@@ -42,7 +42,7 @@ public class Main {
Method test = testEx.getDeclaredMethod("test");
test.invoke(null);
} catch (Exception exc) {
- exc.printStackTrace();
+ exc.printStackTrace(System.out);
}
}
}
diff --git a/test/138-duplicate-classes-check2/src/Main.java b/test/138-duplicate-classes-check2/src/Main.java
index a0d6977405..faf8b5d337 100644
--- a/test/138-duplicate-classes-check2/src/Main.java
+++ b/test/138-duplicate-classes-check2/src/Main.java
@@ -37,7 +37,7 @@ public class Main {
Method test = testEx.getDeclaredMethod("test");
test.invoke(null);
} catch (Exception exc) {
- exc.printStackTrace();
+ exc.printStackTrace(System.out);
}
}
}
diff --git a/test/141-class-unload/jni_unload.cc b/test/141-class-unload/jni_unload.cc
index 9b7e171a95..355457d68d 100644
--- a/test/141-class-unload/jni_unload.cc
+++ b/test/141-class-unload/jni_unload.cc
@@ -20,7 +20,7 @@
#include "jit/jit.h"
#include "runtime.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace {
diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java
index 7e8431fb52..9072c8b538 100644
--- a/test/141-class-unload/src/Main.java
+++ b/test/141-class-unload/src/Main.java
@@ -50,7 +50,7 @@ public class Main {
// Test that objects keep class loader live for sticky GC.
testStickyUnload(constructor);
} catch (Exception e) {
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
diff --git a/test/142-classloader2/src/Main.java b/test/142-classloader2/src/Main.java
index a0c77645a3..193fd5dea0 100644
--- a/test/142-classloader2/src/Main.java
+++ b/test/142-classloader2/src/Main.java
@@ -91,7 +91,7 @@ public class Main {
if (e.getCause() instanceof VerifyError) {
System.out.println("Caught wrapped VerifyError.");
} else {
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
diff --git a/test/146-bad-interface/src/Main.java b/test/146-bad-interface/src/Main.java
index 5534bb4bba..958ec7c3c4 100644
--- a/test/146-bad-interface/src/Main.java
+++ b/test/146-bad-interface/src/Main.java
@@ -37,7 +37,7 @@ public class Main {
} catch (Throwable t) {
System.out.println("Error occurred");
System.out.println(t);
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
}
}
diff --git a/test/148-multithread-gc-annotations/gc_coverage.cc b/test/148-multithread-gc-annotations/gc_coverage.cc
index 4862b87057..f48493c684 100644
--- a/test/148-multithread-gc-annotations/gc_coverage.cc
+++ b/test/148-multithread-gc-annotations/gc_coverage.cc
@@ -18,7 +18,7 @@
#include "jni.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace {
diff --git a/test/155-java-set-resolved-type/src/Main.java b/test/155-java-set-resolved-type/src/Main.java
index 8f79bd7ecd..44278a1c9a 100644
--- a/test/155-java-set-resolved-type/src/Main.java
+++ b/test/155-java-set-resolved-type/src/Main.java
@@ -61,7 +61,7 @@ public class Main {
// to be resolved and found through simple lookup.
timpl.newInstance();
} catch (Throwable t) {
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
}
diff --git a/test/156-register-dex-file-multi-loader/src/Main.java b/test/156-register-dex-file-multi-loader/src/Main.java
index ff5a2bd570..6aa1d780d3 100644
--- a/test/156-register-dex-file-multi-loader/src/Main.java
+++ b/test/156-register-dex-file-multi-loader/src/Main.java
@@ -81,7 +81,7 @@ public class Main {
!message.endsWith(" with multiple class loaders");
}
if (unexpected) {
- cnfe.getCause().printStackTrace();
+ cnfe.getCause().printStackTrace(System.out);
}
}
}
diff --git a/test/158-app-image-class-table/src/Main.java b/test/158-app-image-class-table/src/Main.java
index 804468fe91..97aa14d4b1 100644
--- a/test/158-app-image-class-table/src/Main.java
+++ b/test/158-app-image-class-table/src/Main.java
@@ -39,7 +39,7 @@ public class Main {
// to be resolved and found through simple lookup.
timpl.newInstance();
} catch (Throwable t) {
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
}
diff --git a/test/159-app-image-fields/src/Main.java b/test/159-app-image-fields/src/Main.java
index d06a50204a..47d0116a19 100644
--- a/test/159-app-image-fields/src/Main.java
+++ b/test/159-app-image-fields/src/Main.java
@@ -57,7 +57,7 @@ public class Main {
System.out.println("another_value: " + another_value);
}
} catch (Throwable t) {
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
}
diff --git a/test/301-abstract-protected/src/Main.java b/test/301-abstract-protected/src/Main.java
index 9b19a9d56b..f12026791e 100644
--- a/test/301-abstract-protected/src/Main.java
+++ b/test/301-abstract-protected/src/Main.java
@@ -16,7 +16,7 @@
public class Main {
public static void main(String args[]) throws Exception {
- System.err.println(new C().m());
+ System.out.println(new C().m());
}
}
diff --git a/test/487-checker-inline-calls/src/Main.java b/test/487-checker-inline-calls/src/Main.java
index 70384d5a7f..00694f359f 100644
--- a/test/487-checker-inline-calls/src/Main.java
+++ b/test/487-checker-inline-calls/src/Main.java
@@ -20,7 +20,7 @@ public class Main {
try {
doTopCall();
} catch (Error e) {
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
diff --git a/test/488-checker-inline-recursive-calls/src/Main.java b/test/488-checker-inline-recursive-calls/src/Main.java
index 441dbbfcb6..1137837048 100644
--- a/test/488-checker-inline-recursive-calls/src/Main.java
+++ b/test/488-checker-inline-recursive-calls/src/Main.java
@@ -20,7 +20,7 @@ public class Main {
try {
doTopCall(true);
} catch (Error e) {
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
diff --git a/test/492-checker-inline-invoke-interface/src/Main.java b/test/492-checker-inline-invoke-interface/src/Main.java
index a919690000..785c0db0eb 100644
--- a/test/492-checker-inline-invoke-interface/src/Main.java
+++ b/test/492-checker-inline-invoke-interface/src/Main.java
@@ -21,7 +21,7 @@ interface Itf {
class ForceStatic {
static {
System.out.println("Hello from clinit");
- new Exception().printStackTrace();
+ new Exception().printStackTrace(System.out);
}
static int field;
}
diff --git a/test/493-checker-inline-invoke-interface/src/Main.java b/test/493-checker-inline-invoke-interface/src/Main.java
index 171405ca44..0570b20a40 100644
--- a/test/493-checker-inline-invoke-interface/src/Main.java
+++ b/test/493-checker-inline-invoke-interface/src/Main.java
@@ -21,7 +21,7 @@ interface Itf {
class ForceStatic {
static {
System.out.println("Hello from clinit");
- new Exception().printStackTrace();
+ new Exception().printStackTrace(System.out);
}
static int field;
}
diff --git a/test/497-inlining-and-class-loader/src/Main.java b/test/497-inlining-and-class-loader/src/Main.java
index 1e27e77786..01b4bcd391 100644
--- a/test/497-inlining-and-class-loader/src/Main.java
+++ b/test/497-inlining-and-class-loader/src/Main.java
@@ -121,7 +121,7 @@ class Main {
// Because we cleared dex cache entries, we will have to find
// classes again, which require to use the correct class loader
// in the presence of inlining.
- new Exception().printStackTrace();
+ new Exception().printStackTrace(System.out);
}
static Object savedResolvedMethods;
diff --git a/test/522-checker-regression-monitor-exit/src/Main.java b/test/522-checker-regression-monitor-exit/src/Main.java
index c4f80fc9c6..5c26f36fe8 100644
--- a/test/522-checker-regression-monitor-exit/src/Main.java
+++ b/test/522-checker-regression-monitor-exit/src/Main.java
@@ -43,8 +43,8 @@ public class Main {
Method m = c.getMethod("synchronizedHashCode", Object.class);
result = (Integer) m.invoke(null, m_obj);
} catch (Exception e) {
- System.err.println("Hash code query exception");
- e.printStackTrace();
+ System.out.println("Hash code query exception");
+ e.printStackTrace(System.out);
result = -1;
}
return result;
@@ -77,7 +77,7 @@ public class Main {
}
pool.shutdown();
} catch (CancellationException ex) {
- System.err.println("Job timeout");
+ System.out.println("Job timeout");
System.exit(1);
}
}
diff --git a/test/551-checker-shifter-operand/src/Main.java b/test/551-checker-shifter-operand/src/Main.java
index bf09a6aa5e..951889ab9f 100644
--- a/test/551-checker-shifter-operand/src/Main.java
+++ b/test/551-checker-shifter-operand/src/Main.java
@@ -234,8 +234,8 @@ public class Main {
/// CHECK-START-ARM: void Main.$opt$noinline$testAnd(long, long) disassembly (after)
/// CHECK: and lsl
/// CHECK: sbfx
- /// CHECK: asr
- /// CHECK: and
+ /// CHECK: asr{{s?}}
+ /// CHECK: and{{s?}}
/// CHECK-START-ARM64: void Main.$opt$noinline$testAnd(long, long) instruction_simplifier_arm64 (after)
/// CHECK: DataProcWithShifterOp
@@ -259,7 +259,7 @@ public class Main {
/// CHECK-START-ARM: void Main.$opt$noinline$testOr(int, int) disassembly (after)
/// CHECK: orr asr
/// CHECK: ubfx
- /// CHECK: orr
+ /// CHECK: orr{{s?}}
/// CHECK-START-ARM64: void Main.$opt$noinline$testOr(int, int) instruction_simplifier_arm64 (after)
/// CHECK: DataProcWithShifterOp
@@ -282,9 +282,8 @@ public class Main {
/// CHECK-START-ARM: void Main.$opt$noinline$testXor(long, long) disassembly (after)
/// CHECK: eor lsr
- /// CHECK: mov
- /// CHECK: asr
- /// CHECK: eor
+ /// CHECK: asr{{s?}}
+ /// CHECK: eor{{s?}}
/// CHECK-START-ARM64: void Main.$opt$noinline$testXor(long, long) instruction_simplifier_arm64 (after)
/// CHECK: DataProcWithShifterOp
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index 3f81fd644a..7408e6d263 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -42,31 +42,30 @@ public class Main {
}
/// CHECK-START: int Main.testSimple(int) sharpening (before)
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCacheViaMethod
+ /// CHECK: InvokeStaticOrDirect method_load_kind:RuntimeCall
/// CHECK-START-ARM: int Main.testSimple(int) sharpening (after)
- /// CHECK-NOT: ArmDexCacheArraysBase
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-ARM64: int Main.testSimple(int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-MIPS: int Main.testSimple(int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-MIPS64: int Main.testSimple(int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-X86: int Main.testSimple(int) sharpening (after)
/// CHECK-NOT: X86ComputeBaseMethodAddress
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-X86_64: int Main.testSimple(int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
- /// CHECK-START-ARM: int Main.testSimple(int) dex_cache_array_fixups_arm (after)
- /// CHECK: ArmDexCacheArraysBase
- /// CHECK-NOT: ArmDexCacheArraysBase
+ /// CHECK-START-MIPS: int Main.testSimple(int) pc_relative_fixups_mips (after)
+ /// CHECK: MipsComputeBaseMethodAddress
+ /// CHECK-NOT: MipsComputeBaseMethodAddress
/// CHECK-START-X86: int Main.testSimple(int) pc_relative_fixups_x86 (after)
/// CHECK: X86ComputeBaseMethodAddress
@@ -78,40 +77,39 @@ public class Main {
}
/// CHECK-START: int Main.testDiamond(boolean, int) sharpening (before)
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCacheViaMethod
+ /// CHECK: InvokeStaticOrDirect method_load_kind:RuntimeCall
/// CHECK-START-ARM: int Main.testDiamond(boolean, int) sharpening (after)
- /// CHECK-NOT: ArmDexCacheArraysBase
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-ARM64: int Main.testDiamond(boolean, int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-MIPS: int Main.testDiamond(boolean, int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-MIPS64: int Main.testDiamond(boolean, int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-X86: int Main.testDiamond(boolean, int) sharpening (after)
/// CHECK-NOT: X86ComputeBaseMethodAddress
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-X86_64: int Main.testDiamond(boolean, int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
- /// CHECK-START-ARM: int Main.testDiamond(boolean, int) dex_cache_array_fixups_arm (after)
- /// CHECK: ArmDexCacheArraysBase
- /// CHECK-NOT: ArmDexCacheArraysBase
+ /// CHECK-START-MIPS: int Main.testDiamond(boolean, int) pc_relative_fixups_mips (after)
+ /// CHECK: MipsComputeBaseMethodAddress
+ /// CHECK-NOT: MipsComputeBaseMethodAddress
- /// CHECK-START-ARM: int Main.testDiamond(boolean, int) dex_cache_array_fixups_arm (after)
- /// CHECK: ArmDexCacheArraysBase
+ /// CHECK-START-MIPS: int Main.testDiamond(boolean, int) pc_relative_fixups_mips (after)
+ /// CHECK: MipsComputeBaseMethodAddress
/// CHECK-NEXT: If
/// CHECK-START-X86: int Main.testDiamond(boolean, int) pc_relative_fixups_x86 (after)
@@ -123,8 +121,8 @@ public class Main {
/// CHECK-NEXT: If
public static int testDiamond(boolean negate, int x) {
- // These calls should use PC-relative dex cache array loads to retrieve the target method.
- // PC-relative bases used by ARM, MIPS and X86 should be pulled before the If.
+ // These calls should use PC-relative loads to retrieve the target method.
+ // PC-relative bases used by MIPS and X86 should be pulled before the If.
if (negate) {
return $noinline$foo(-x);
} else {
@@ -132,72 +130,72 @@ public class Main {
}
}
- /// CHECK-START-X86: int Main.testLoop(int[], int) pc_relative_fixups_x86 (before)
- /// CHECK-NOT: X86ComputeBaseMethodAddress
+ /// CHECK-START-MIPS: int Main.testLoop(int[], int) pc_relative_fixups_mips (before)
+ /// CHECK-NOT: MipsComputeBaseMethodAddress
- /// CHECK-START-X86: int Main.testLoop(int[], int) pc_relative_fixups_x86 (after)
- /// CHECK: X86ComputeBaseMethodAddress
- /// CHECK-NOT: X86ComputeBaseMethodAddress
+ /// CHECK-START-MIPS: int Main.testLoop(int[], int) pc_relative_fixups_mips (after)
+ /// CHECK: MipsComputeBaseMethodAddress
+ /// CHECK-NOT: MipsComputeBaseMethodAddress
- /// CHECK-START-X86: int Main.testLoop(int[], int) pc_relative_fixups_x86 (after)
+ /// CHECK-START-MIPS: int Main.testLoop(int[], int) pc_relative_fixups_mips (after)
/// CHECK: InvokeStaticOrDirect
/// CHECK-NOT: InvokeStaticOrDirect
- /// CHECK-START-X86: int Main.testLoop(int[], int) pc_relative_fixups_x86 (after)
+ /// CHECK-START-MIPS: int Main.testLoop(int[], int) pc_relative_fixups_mips (after)
/// CHECK: ArrayLength
- /// CHECK-NEXT: X86ComputeBaseMethodAddress
+ /// CHECK-NEXT: MipsComputeBaseMethodAddress
/// CHECK-NEXT: Goto
/// CHECK: begin_block
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
- /// CHECK-START-ARM: int Main.testLoop(int[], int) dex_cache_array_fixups_arm (before)
- /// CHECK-NOT: ArmDexCacheArraysBase
+ /// CHECK-START-X86: int Main.testLoop(int[], int) pc_relative_fixups_x86 (before)
+ /// CHECK-NOT: X86ComputeBaseMethodAddress
- /// CHECK-START-ARM: int Main.testLoop(int[], int) dex_cache_array_fixups_arm (after)
- /// CHECK: ArmDexCacheArraysBase
- /// CHECK-NOT: ArmDexCacheArraysBase
+ /// CHECK-START-X86: int Main.testLoop(int[], int) pc_relative_fixups_x86 (after)
+ /// CHECK: X86ComputeBaseMethodAddress
+ /// CHECK-NOT: X86ComputeBaseMethodAddress
- /// CHECK-START-ARM: int Main.testLoop(int[], int) dex_cache_array_fixups_arm (after)
+ /// CHECK-START-X86: int Main.testLoop(int[], int) pc_relative_fixups_x86 (after)
/// CHECK: InvokeStaticOrDirect
/// CHECK-NOT: InvokeStaticOrDirect
- /// CHECK-START-ARM: int Main.testLoop(int[], int) dex_cache_array_fixups_arm (after)
+ /// CHECK-START-X86: int Main.testLoop(int[], int) pc_relative_fixups_x86 (after)
/// CHECK: ArrayLength
- /// CHECK-NEXT: ArmDexCacheArraysBase
+ /// CHECK-NEXT: X86ComputeBaseMethodAddress
/// CHECK-NEXT: Goto
/// CHECK: begin_block
- /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
public static int testLoop(int[] array, int x) {
- // PC-relative bases used by ARM, MIPS and X86 should be pulled before the loop.
+ // PC-relative bases used by MIPS and X86 should be pulled before the loop.
for (int i : array) {
x += $noinline$foo(i);
}
return x;
}
- /// CHECK-START-X86: int Main.testLoopWithDiamond(int[], boolean, int) pc_relative_fixups_x86 (before)
- /// CHECK-NOT: X86ComputeBaseMethodAddress
+ /// CHECK-START-MIPS: int Main.testLoopWithDiamond(int[], boolean, int) pc_relative_fixups_mips (before)
+ /// CHECK-NOT: MipsComputeBaseMethodAddress
- /// CHECK-START-X86: int Main.testLoopWithDiamond(int[], boolean, int) pc_relative_fixups_x86 (after)
+ /// CHECK-START-MIPS: int Main.testLoopWithDiamond(int[], boolean, int) pc_relative_fixups_mips (after)
/// CHECK: If
/// CHECK: begin_block
/// CHECK: ArrayLength
- /// CHECK-NEXT: X86ComputeBaseMethodAddress
+ /// CHECK-NEXT: MipsComputeBaseMethodAddress
/// CHECK-NEXT: Goto
- /// CHECK-START-ARM: int Main.testLoopWithDiamond(int[], boolean, int) dex_cache_array_fixups_arm (before)
- /// CHECK-NOT: ArmDexCacheArraysBase
+ /// CHECK-START-X86: int Main.testLoopWithDiamond(int[], boolean, int) pc_relative_fixups_x86 (before)
+ /// CHECK-NOT: X86ComputeBaseMethodAddress
- /// CHECK-START-ARM: int Main.testLoopWithDiamond(int[], boolean, int) dex_cache_array_fixups_arm (after)
+ /// CHECK-START-X86: int Main.testLoopWithDiamond(int[], boolean, int) pc_relative_fixups_x86 (after)
/// CHECK: If
/// CHECK: begin_block
/// CHECK: ArrayLength
- /// CHECK-NEXT: ArmDexCacheArraysBase
+ /// CHECK-NEXT: X86ComputeBaseMethodAddress
/// CHECK-NEXT: Goto
public static int testLoopWithDiamond(int[] array, boolean negate, int x) {
- // PC-relative bases used by ARM, MIPS and X86 should be pulled before the loop
+ // PC-relative bases used by MIPS and X86 should be pulled before the loop
// but not outside the if.
if (array != null) {
for (int i : array) {
@@ -212,7 +210,7 @@ public class Main {
}
/// CHECK-START: java.lang.String Main.$noinline$getBootImageString() sharpening (before)
- /// CHECK: LoadString load_kind:DexCacheViaMethod
+ /// CHECK: LoadString load_kind:RuntimeCall
/// CHECK-START-X86: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
// Note: load kind depends on PIC/non-PIC
@@ -246,7 +244,7 @@ public class Main {
}
/// CHECK-START: java.lang.String Main.$noinline$getNonBootImageString() sharpening (before)
- /// CHECK: LoadString load_kind:DexCacheViaMethod
+ /// CHECK: LoadString load_kind:RuntimeCall
/// CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
/// CHECK: LoadString load_kind:BssEntry
diff --git a/test/564-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/564-checker-irreducible-loop/smali/IrreducibleLoop.smali
index e4bf236266..5f73bbe759 100644
--- a/test/564-checker-irreducible-loop/smali/IrreducibleLoop.smali
+++ b/test/564-checker-irreducible-loop/smali/IrreducibleLoop.smali
@@ -18,8 +18,8 @@
## CHECK-START-X86: int IrreducibleLoop.simpleLoop(int) dead_code_elimination$initial (before)
## CHECK-DAG: <<Constant:i\d+>> IntConstant 42
-## CHECK-DAG: InvokeStaticOrDirect [<<Constant>>] loop:{{B\d+}} irreducible:true
-## CHECK-DAG: InvokeStaticOrDirect [<<Constant>>] loop:none
+## CHECK-DAG: InvokeStaticOrDirect [<<Constant>>{{(,[ij]\d+)?}}] loop:{{B\d+}} irreducible:true
+## CHECK-DAG: InvokeStaticOrDirect [<<Constant>>{{(,[ij]\d+)?}}] loop:none
.method public static simpleLoop(I)I
.registers 3
const/16 v0, 42
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index 8eca6b2ccb..45ead6b204 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -21,6 +21,7 @@
#include "oat_quick_method_header.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedUtfChars.h"
+#include "stack.h"
#include "stack_map.h"
namespace art {
diff --git a/test/570-checker-osr/src/DeoptimizationController.java b/test/570-checker-osr/src/DeoptimizationController.java
index 907d133d3b..e272607ec4 100644
--- a/test/570-checker-osr/src/DeoptimizationController.java
+++ b/test/570-checker-osr/src/DeoptimizationController.java
@@ -53,7 +53,7 @@ public class DeoptimizationController {
throw new IllegalStateException("Not tracing.");
}
} catch (Exception exc) {
- exc.printStackTrace(System.err);
+ exc.printStackTrace(System.out);
} finally {
if (tempFile != null) {
tempFile.delete();
@@ -68,7 +68,7 @@ public class DeoptimizationController {
throw new IllegalStateException("Still tracing.");
}
} catch (Exception exc) {
- exc.printStackTrace(System.err);
+ exc.printStackTrace(System.out);
}
}
diff --git a/test/588-checker-irreducib-lifetime-hole/smali/IrreducibleLoop.smali b/test/588-checker-irreducib-lifetime-hole/smali/IrreducibleLoop.smali
index 9b8aa510a4..3058358033 100644
--- a/test/588-checker-irreducib-lifetime-hole/smali/IrreducibleLoop.smali
+++ b/test/588-checker-irreducib-lifetime-hole/smali/IrreducibleLoop.smali
@@ -19,8 +19,8 @@
## CHECK-START-X86: int IrreducibleLoop.simpleLoop1(int) dead_code_elimination$initial (before)
## CHECK-DAG: <<Constant:i\d+>> IntConstant 42
## CHECK-DAG: Goto irreducible:true
-## CHECK-DAG: InvokeStaticOrDirect [<<Constant>>] loop:none
-## CHECK-DAG: InvokeStaticOrDirect [{{i\d+}}] loop:none
+## CHECK-DAG: InvokeStaticOrDirect [<<Constant>>{{(,[ij]\d+)?}}] loop:none
+## CHECK-DAG: InvokeStaticOrDirect [{{i\d+}}{{(,[ij]\d+)?}}] loop:none
.method public static simpleLoop1(I)I
.registers 3
const/16 v0, 42
@@ -59,8 +59,8 @@
## CHECK-START-X86: int IrreducibleLoop.simpleLoop2(int) dead_code_elimination$initial (before)
## CHECK-DAG: <<Constant:i\d+>> IntConstant 42
## CHECK-DAG: Goto irreducible:true
-## CHECK-DAG: InvokeStaticOrDirect [<<Constant>>] loop:none
-## CHECK-DAG: InvokeStaticOrDirect [{{i\d+}}] loop:none
+## CHECK-DAG: InvokeStaticOrDirect [<<Constant>>{{(,[ij]\d+)?}}] loop:none
+## CHECK-DAG: InvokeStaticOrDirect [{{i\d+}}{{(,[ij]\d+)?}}] loop:none
.method public static simpleLoop2(I)I
.registers 3
const/16 v0, 42
diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc
index 0f8dd57385..0bdbadef48 100644
--- a/test/595-profile-saving/profile-saving.cc
+++ b/test/595-profile-saving/profile-saving.cc
@@ -22,6 +22,7 @@
#include "jni.h"
#include "method_reference.h"
#include "mirror/class-inl.h"
+#include "mirror/executable.h"
#include "oat_file_assistant.h"
#include "oat_file_manager.h"
#include "scoped_thread_state_change-inl.h"
@@ -31,53 +32,31 @@
namespace art {
namespace {
-class CreateProfilingInfoVisitor : public StackVisitor {
- public:
- explicit CreateProfilingInfoVisitor(Thread* thread, const char* method_name)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_name_(method_name) {}
-
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- if (m_name.compare(method_name_) == 0) {
- ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
- method_index_ = m->GetDexMethodIndex();
- return false;
- }
- return true;
- }
-
- int method_index_ = -1;
- const char* const method_name_;
-};
-
-extern "C" JNIEXPORT jint JNICALL Java_Main_ensureProfilingInfo(JNIEnv* env,
+extern "C" JNIEXPORT void JNICALL Java_Main_ensureProfilingInfo(JNIEnv* env,
jclass,
- jstring method_name) {
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- CreateProfilingInfoVisitor visitor(soa.Self(), chars.c_str());
- visitor.WalkStack();
- return visitor.method_index_;
+ jobject method) {
+ CHECK(method != nullptr);
+ ScopedObjectAccess soa(env);
+ ObjPtr<mirror::Executable> exec = soa.Decode<mirror::Executable>(method);
+ ProfilingInfo::Create(soa.Self(), exec->GetArtMethod(), /* retry_allocation */ true);
}
extern "C" JNIEXPORT void JNICALL Java_Main_ensureProfileProcessing(JNIEnv*, jclass) {
ProfileSaver::ForceProcessProfiles();
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_presentInProfile(
- JNIEnv* env, jclass cls, jstring filename, jint method_index) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_presentInProfile(JNIEnv* env,
+ jclass,
+ jstring filename,
+ jobject method) {
ScopedUtfChars filename_chars(env, filename);
CHECK(filename_chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- const DexFile* dex_file = soa.Decode<mirror::Class>(cls)->GetDexCache()->GetDexFile();
+ ScopedObjectAccess soa(env);
+ ObjPtr<mirror::Executable> exec = soa.Decode<mirror::Executable>(method);
+ ArtMethod* art_method = exec->GetArtMethod();
return ProfileSaver::HasSeenMethod(std::string(filename_chars.c_str()),
- dex_file,
- static_cast<uint16_t>(method_index));
+ art_method->GetDexFile(),
+ art_method->GetDexMethodIndex());
}
} // namespace
diff --git a/test/595-profile-saving/run b/test/595-profile-saving/run
index fce6ac15d8..055035b3e0 100644
--- a/test/595-profile-saving/run
+++ b/test/595-profile-saving/run
@@ -24,4 +24,5 @@ exec ${RUN} \
--runtime-option '-Xcompiler-option --compiler-filter=quicken' \
--runtime-option -Xjitsaveprofilinginfo \
--runtime-option -Xusejit:false \
+ --runtime-option -Xps-profile-boot-class-path \
"${@}"
diff --git a/test/595-profile-saving/src/Main.java b/test/595-profile-saving/src/Main.java
index faf94c4fcc..18c0598bef 100644
--- a/test/595-profile-saving/src/Main.java
+++ b/test/595-profile-saving/src/Main.java
@@ -31,11 +31,17 @@ public class Main {
VMRuntime.registerAppInfo(file.getPath(),
new String[] {codePath});
- int methodIdx = $opt$noinline$testProfile();
- ensureProfileProcessing();
- if (!presentInProfile(file.getPath(), methodIdx)) {
- throw new RuntimeException("Method with index " + methodIdx + " not in the profile");
+ // Test that the profile saves an app method with a profiling info.
+ Method appMethod = Main.class.getDeclaredMethod("testAddMethodToProfile",
+ File.class, Method.class);
+ testAddMethodToProfile(file, appMethod);
+
+ // Test that the profile saves a boot class path method with a profiling info.
+ Method bootMethod = File.class.getDeclaredMethod("delete");
+ if (bootMethod.getDeclaringClass().getClassLoader() != Object.class.getClassLoader()) {
+ System.out.println("Class loader does not match boot class");
}
+ testAddMethodToProfile(file, bootMethod);
} finally {
if (file != null) {
file.delete();
@@ -43,20 +49,24 @@ public class Main {
}
}
- public static int $opt$noinline$testProfile() {
- if (doThrow) throw new Error();
+ static void testAddMethodToProfile(File file, Method m) {
// Make sure we have a profile info for this method without the need to loop.
- return ensureProfilingInfo("$opt$noinline$testProfile");
+ ensureProfilingInfo(m);
+ // Make sure the profile gets saved.
+ ensureProfileProcessing();
+ // Verify that the profile was saved and contains the method.
+ if (!presentInProfile(file.getPath(), m)) {
+ throw new RuntimeException("Method with index " + m + " not in the profile");
+ }
}
- // Return the dex method index.
- public static native int ensureProfilingInfo(String methodName);
+ // Ensure a method has a profiling info.
+ public static native void ensureProfilingInfo(Method method);
// Ensures the profile saver does its usual processing.
public static native void ensureProfileProcessing();
// Checks if the profiles saver knows about the method.
- public static native boolean presentInProfile(String profile, int methodIdx);
+ public static native boolean presentInProfile(String profile, Method method);
- public static boolean doThrow = false;
private static final String TEMP_FILE_NAME_PREFIX = "dummy";
private static final String TEMP_FILE_NAME_SUFFIX = "-file";
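Note on the reworked 595-profile-saving test: the natives now take a java.lang.reflect.Method instead of a method name and a returned dex method index, and the run script adds -Xps-profile-boot-class-path, which is what lets the profile saver record boot-class-path methods at all. For reference, the boot-class-path half of the new test unrolls to roughly the following; this is a sketch that assumes the imports and native declarations of Main.java above, not a verbatim excerpt:

    // Pick a boot class path method and push it through the new helpers.
    Method bootMethod = File.class.getDeclaredMethod("delete");
    ensureProfilingInfo(bootMethod);     // create a ProfilingInfo for the ArtMethod behind it
    ensureProfileProcessing();           // force the ProfileSaver to flush
    if (!presentInProfile(file.getPath(), bootMethod)) {
      throw new RuntimeException(bootMethod + " not in the profile");
    }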
diff --git a/test/596-app-images/src/Main.java b/test/596-app-images/src/Main.java
index 674ba4d037..8ee3c888b0 100644
--- a/test/596-app-images/src/Main.java
+++ b/test/596-app-images/src/Main.java
@@ -14,10 +14,6 @@
* limitations under the License.
*/
-import java.lang.reflect.Field;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Method;
-
class Main {
static class Inner {
final public static int abc = 10;
@@ -50,76 +46,13 @@ class Main {
if (!checkInitialized(StaticFieldsInit.class))
System.out.println("StaticFieldsInit class is not initialized!");
- if (!checkInitialized(StaticInternString.class))
- System.out.println("StaticInternString class is not initialized!");
-
- StringBuffer sb = new StringBuffer();
- sb.append("java.");
- sb.append("abc.");
- sb.append("Action");
-
- String tmp = sb.toString();
- String intern = tmp.intern();
-
- assertNotEqual(tmp, intern, "Dynamically constructed String, not interned.");
- assertEqual(intern, StaticInternString.intent, "Static encoded literal String not interned.");
- assertEqual(BootInternedString.boot, BootInternedString.boot.intern(),
- "Static encoded literal String not moved back to runtime intern table.");
-
- try {
- Field f = StaticInternString.class.getDeclaredField("intent");
- assertEqual(intern, f.get(null), "String Literals are not interned properly.");
-
- } catch (Exception e) {
- System.out.println("Exception");
- }
-
- assertEqual(StaticInternString.getIntent(), StaticInternString2.getIntent(),
- "String Literals are not intenred properly, App image static strings duplicated.");
-
- // reload the class StaticInternString, check whether static strings interned properly
- final String DEX_FILE = System.getenv("DEX_LOCATION") + "/596-app-images.jar";
- final String LIBRARY_SEARCH_PATH = System.getProperty("java.library.path");
-
- try {
- Class<?> pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
- if (pathClassLoader == null) {
- throw new AssertionError("Counldn't find path class loader class");
- }
- Constructor<?> ctor =
- pathClassLoader.getDeclaredConstructor(String.class, String.class, ClassLoader.class);
- ClassLoader loader = (ClassLoader) ctor.newInstance(
- DEX_FILE, LIBRARY_SEARCH_PATH, null);
-
- Class<?> staticInternString = loader.loadClass("StaticInternString");
-
- if (!checkAppImageContains(staticInternString)) {
- System.out.println("Not loaded again.");
- }
- Method getIntent = staticInternString.getDeclaredMethod("getIntent");
-
- assertEqual(StaticInternString.getIntent(), getIntent.invoke(staticInternString),
- "Dynamically loaded app image's literal strings not interned properly.");
- } catch (Exception e) {
- e.printStackTrace();
- }
-
+ if (checkInitialized(StaticInternString.class))
+ System.out.println("StaticInternString class is initialized!");
}
public static native boolean checkAppImageLoaded();
public static native boolean checkAppImageContains(Class<?> klass);
public static native boolean checkInitialized(Class<?> klass);
-
- public static void assertEqual(Object a, Object b, String msg) {
- if (a != b)
- System.out.println(msg);
- }
-
- public static void assertNotEqual(Object a, Object b, String msg) {
- if (a == b)
- System.out.println(msg);
- }
-
}
class StaticFields{
@@ -135,21 +68,6 @@ class StaticFieldsInit{
}
class StaticInternString {
- final public static String intent = "java.abc.Action";
- static public String getIntent() {
- return intent;
- }
-}
-
-class BootInternedString {
- final public static String boot = "double";
-}
-
-class StaticInternString2 {
- final public static String intent = "java.abc.Action";
-
- static String getIntent() {
- return intent;
- }
+ final public static String intern = "java.abc.Action";
}
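The deleted assertions in 596-app-images checked that string literals placed in the app image still behave as interned strings at runtime. In miniature, and assuming only standard String.intern() semantics plus the renamed StaticInternString.intern field above, the invariant they exercised was:

    // A string built at runtime is a distinct object...
    String built = new StringBuffer("java.").append("abc.").append("Action").toString();
    assert built != StaticInternString.intern;
    // ...but interning it must return the literal already sitting in the intern table.
    assert built.intern() == StaticInternString.intern;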
diff --git a/test/596-monitor-inflation/monitor_inflation.cc b/test/596-monitor-inflation/monitor_inflation.cc
index fb4275b711..07d1ddbe69 100644
--- a/test/596-monitor-inflation/monitor_inflation.cc
+++ b/test/596-monitor-inflation/monitor_inflation.cc
@@ -18,7 +18,7 @@
#include "jni.h"
#include "monitor.h"
#include "runtime.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
namespace {
diff --git a/test/602-deoptimizeable/src/Main.java b/test/602-deoptimizeable/src/Main.java
index 743a5796c9..d995923f88 100644
--- a/test/602-deoptimizeable/src/Main.java
+++ b/test/602-deoptimizeable/src/Main.java
@@ -99,7 +99,7 @@ public class Main {
System.exit(0);
}
} catch (Exception e) {
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
});
@@ -127,7 +127,7 @@ public class Main {
map.put(new DummyObject(10), Long.valueOf(100));
assertIsInterpreted(); // Every deoptimizeable method is deoptimized.
} catch (Exception e) {
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
});
diff --git a/test/617-clinit-oome/src/Main.java b/test/617-clinit-oome/src/Main.java
index 749a2325ef..94cb7ce7db 100644
--- a/test/617-clinit-oome/src/Main.java
+++ b/test/617-clinit-oome/src/Main.java
@@ -37,7 +37,7 @@ public class Main {
Other.print();
} catch (OutOfMemoryError e) {
} catch (Exception e) {
- System.err.println(e);
+ System.out.println(e);
}
}
}
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index 3a2145bf2b..af205b074f 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -291,6 +291,9 @@ public class Main {
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
// NOTE: should correctly deal with compressed and uncompressed cases.
+ //
+ /// CHECK-START-MIPS64: void Main.string2Bytes(char[], java.lang.String) loop_optimization (after)
+ /// CHECK-NOT: VecLoad
private static void string2Bytes(char[] a, String b) {
int min = Math.min(a.length, b.length());
for (int i = 0; i < min; i++) {
@@ -333,6 +336,13 @@ public class Main {
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START-MIPS64: void Main.oneBoth(short[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
// Bug b/37764324: integral same-length packed types can be mixed freely.
private static void oneBoth(short[] a, char[] b) {
for (int i = 0; i < Math.min(a.length, b.length); i++) {
@@ -372,6 +382,19 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>] loop:<<Loop2>> outer_loop:none
//
+ /// CHECK-START-MIPS64: void Main.typeConv(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Vadd:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi1>>,<<Vadd>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>] loop:<<Loop2>> outer_loop:none
+ //
// Scalar code in cleanup loop uses correct byte type on array get and type conversion.
private static void typeConv(byte[] a, byte[] b) {
int len = Math.min(a.length, b.length);
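The MIPS64 expectations added here describe a vectorized main loop (VecLoad, VecAdd with a replicated constant 1, VecStore) followed by a scalar cleanup loop that redoes the byte ArrayGet/Add/TypeConversion/ArraySet. The body of typeConv is truncated by the hunk; based on those directives it increments each byte, roughly as follows (a sketch, not a verbatim excerpt):

    for (int i = 0; i < len; i++) {
      b[i] = (byte) (a[i] + 1);  // ArrayGet, Add with IntConstant 1, narrowing TypeConversion, ArraySet
    }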
diff --git a/test/626-const-class-linking/src/RacyMisbehavingHelper.java b/test/626-const-class-linking/src/RacyMisbehavingHelper.java
index 45252789e4..9acd3c3ff6 100644
--- a/test/626-const-class-linking/src/RacyMisbehavingHelper.java
+++ b/test/626-const-class-linking/src/RacyMisbehavingHelper.java
@@ -26,7 +26,7 @@ public class RacyMisbehavingHelper {
Method reportAfterLoading = loader.getClass().getDeclaredMethod("reportAfterLoading");
reportAfterLoading.invoke(loader);
} catch (Throwable t) {
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
return new ClassPair(helper1_class, test_class);
}
diff --git a/test/638-no-line-number/src/Main.java b/test/638-no-line-number/src/Main.java
index 7fe0404204..851f049ba6 100644
--- a/test/638-no-line-number/src/Main.java
+++ b/test/638-no-line-number/src/Main.java
@@ -19,12 +19,12 @@ public class Main {
try {
doThrow(new Error());
} catch (Error e) {
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
try {
doThrow(null);
} catch (Throwable t) {
- t.printStackTrace();
+ t.printStackTrace(System.out);
}
}
diff --git a/test/640-checker-boolean-simd/src/Main.java b/test/640-checker-boolean-simd/src/Main.java
index f8239faaf3..64b76f8516 100644
--- a/test/640-checker-boolean-simd/src/Main.java
+++ b/test/640-checker-boolean-simd/src/Main.java
@@ -35,6 +35,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecAnd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.and(boolean) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAnd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void and(boolean x) {
for (int i = 0; i < 128; i++)
a[i] &= x; // NOTE: bitwise and, not the common &&
@@ -50,6 +56,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecOr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.or(boolean) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecOr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void or(boolean x) {
for (int i = 0; i < 128; i++)
a[i] |= x; // NOTE: bitwise or, not the common ||
@@ -65,6 +77,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecXor loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.xor(boolean) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecXor loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void xor(boolean x) {
for (int i = 0; i < 128; i++)
a[i] ^= x; // NOTE: bitwise xor
@@ -80,6 +98,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
for (int i = 0; i < 128; i++)
a[i] = !a[i];
diff --git a/test/640-checker-byte-simd/src/Main.java b/test/640-checker-byte-simd/src/Main.java
index 21d71e8a13..283c2c907d 100644
--- a/test/640-checker-byte-simd/src/Main.java
+++ b/test/640-checker-byte-simd/src/Main.java
@@ -35,6 +35,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
for (int i = 0; i < 128; i++)
a[i] += x;
@@ -50,6 +56,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
for (int i = 0; i < 128; i++)
a[i] -= x;
@@ -65,6 +77,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
for (int i = 0; i < 128; i++)
a[i] *= x;
@@ -94,6 +112,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
for (int i = 0; i < 128; i++)
a[i] = (byte) -a[i];
@@ -109,6 +133,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
for (int i = 0; i < 128; i++)
a[i] = (byte) ~a[i];
@@ -124,6 +154,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
for (int i = 0; i < 128; i++)
a[i] <<= 4;
@@ -139,6 +175,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
diff --git a/test/640-checker-char-simd/src/Main.java b/test/640-checker-char-simd/src/Main.java
index 89d4b6b84e..dd879b40cd 100644
--- a/test/640-checker-char-simd/src/Main.java
+++ b/test/640-checker-char-simd/src/Main.java
@@ -35,6 +35,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
for (int i = 0; i < 128; i++)
a[i] += x;
@@ -50,6 +56,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
for (int i = 0; i < 128; i++)
a[i] -= x;
@@ -65,6 +77,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
for (int i = 0; i < 128; i++)
a[i] *= x;
@@ -94,6 +112,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
for (int i = 0; i < 128; i++)
a[i] = (char) -a[i];
@@ -109,6 +133,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
for (int i = 0; i < 128; i++)
a[i] = (char) ~a[i];
@@ -124,6 +154,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
for (int i = 0; i < 128; i++)
a[i] <<= 4;
@@ -152,6 +188,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shr2() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
diff --git a/test/640-checker-double-simd/src/Main.java b/test/640-checker-double-simd/src/Main.java
index 5709b5dab8..f7492d5578 100644
--- a/test/640-checker-double-simd/src/Main.java
+++ b/test/640-checker-double-simd/src/Main.java
@@ -36,6 +36,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.add(double) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(double x) {
for (int i = 0; i < 128; i++)
a[i] += x;
@@ -51,6 +57,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.sub(double) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(double x) {
for (int i = 0; i < 128; i++)
a[i] -= x;
@@ -66,6 +78,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.mul(double) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(double x) {
for (int i = 0; i < 128; i++)
a[i] *= x;
@@ -81,6 +99,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.div(double) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void div(double x) {
for (int i = 0; i < 128; i++)
a[i] /= x;
@@ -96,6 +120,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
for (int i = 0; i < 128; i++)
a[i] = -a[i];
@@ -111,6 +141,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.abs() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void abs() {
for (int i = 0; i < 128; i++)
a[i] = Math.abs(a[i]);
@@ -125,6 +161,10 @@ public class Main {
/// CHECK-NOT: VecLoad
/// CHECK-NOT: VecStore
//
+ /// CHECK-START-MIPS64: void Main.conv(long[]) loop_optimization (after)
+ /// CHECK-NOT: VecLoad
+ /// CHECK-NOT: VecStore
+ //
// TODO: fill in when long2double is supported
static void conv(long[] b) {
for (int i = 0; i < 128; i++)
diff --git a/test/640-checker-float-simd/src/Main.java b/test/640-checker-float-simd/src/Main.java
index 4bcb7e2c1b..4fe9675afe 100644
--- a/test/640-checker-float-simd/src/Main.java
+++ b/test/640-checker-float-simd/src/Main.java
@@ -36,6 +36,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.add(float) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(float x) {
for (int i = 0; i < 128; i++)
a[i] += x;
@@ -51,6 +57,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.sub(float) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(float x) {
for (int i = 0; i < 128; i++)
a[i] -= x;
@@ -66,6 +78,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.mul(float) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(float x) {
for (int i = 0; i < 128; i++)
a[i] *= x;
@@ -81,6 +99,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.div(float) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void div(float x) {
for (int i = 0; i < 128; i++)
a[i] /= x;
@@ -96,6 +120,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
for (int i = 0; i < 128; i++)
a[i] = -a[i];
@@ -106,6 +136,12 @@ public class Main {
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START-MIPS64: void Main.abs() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
/// CHECK-START-ARM64: void Main.abs() loop_optimization (after)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
@@ -126,6 +162,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecCnv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.conv(int[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecCnv loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void conv(int[] b) {
for (int i = 0; i < 128; i++)
a[i] = b[i];
diff --git a/test/640-checker-int-simd/src/Main.java b/test/640-checker-int-simd/src/Main.java
index 9ee553c469..9abf60d6fa 100644
--- a/test/640-checker-int-simd/src/Main.java
+++ b/test/640-checker-int-simd/src/Main.java
@@ -35,6 +35,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
for (int i = 0; i < 128; i++)
a[i] += x;
@@ -50,6 +56,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
for (int i = 0; i < 128; i++)
a[i] -= x;
@@ -65,6 +77,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
for (int i = 0; i < 128; i++)
a[i] *= x;
@@ -95,6 +113,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
for (int i = 0; i < 128; i++)
a[i] = -a[i];
@@ -110,6 +134,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
for (int i = 0; i < 128; i++)
a[i] = ~a[i];
@@ -125,6 +155,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
for (int i = 0; i < 128; i++)
a[i] <<= 4;
@@ -134,12 +170,18 @@ public class Main {
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
- //
+ //
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -155,6 +197,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shr2() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
@@ -185,6 +233,11 @@ public class Main {
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shr32() loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
static void shr32() {
// TODO: remove a[i] = a[i] altogether?
for (int i = 0; i < 128; i++)
@@ -211,6 +264,13 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shr33() loop_optimization (after)
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
static void shr33() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstant33(); // 1, since & 31
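The "// 1, since & 31" comment relies on Java's shift rule: for int operands only the low five bits of the shift distance are used, so shifting by 33 behaves exactly like shifting by 1, which is why the CHECK blocks above expect IntConstant 1 as the vector shift distance. A one-line illustration of that language rule (plain JLS behaviour, not part of the test):

    int x = 0x80000000;
    // Only the low 5 bits of the distance count for int shifts: 33 & 31 == 1.
    assert (x >>> 33) == (x >>> 1);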
@@ -236,6 +296,13 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shrMinus254() loop_optimization (after)
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
static void shrMinus254() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstantMinus254(); // 2, since & 31
diff --git a/test/640-checker-long-simd/src/Main.java b/test/640-checker-long-simd/src/Main.java
index 8f6af9d012..05dcae6f83 100644
--- a/test/640-checker-long-simd/src/Main.java
+++ b/test/640-checker-long-simd/src/Main.java
@@ -35,6 +35,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.add(long) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(long x) {
for (int i = 0; i < 128; i++)
a[i] += x;
@@ -50,6 +56,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.sub(long) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(long x) {
for (int i = 0; i < 128; i++)
a[i] -= x;
@@ -60,6 +72,12 @@ public class Main {
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START-MIPS64: void Main.mul(long) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
// Not supported for longs.
/// CHECK-START-ARM64: void Main.mul(long) loop_optimization (after)
/// CHECK-NOT: VecMul
@@ -93,6 +111,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
for (int i = 0; i < 128; i++)
a[i] = -a[i];
@@ -108,6 +132,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
for (int i = 0; i < 128; i++)
a[i] = ~a[i];
@@ -123,6 +153,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
for (int i = 0; i < 128; i++)
a[i] <<= 4;
@@ -138,6 +174,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -153,6 +195,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shr2() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
@@ -183,6 +231,11 @@ public class Main {
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shr64() loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
static void shr64() {
// TODO: remove a[i] = a[i] altogether?
for (int i = 0; i < 128; i++)
@@ -209,6 +262,13 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shr65() loop_optimization (after)
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
static void shr65() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstant65(); // 1, since & 63
@@ -234,6 +294,13 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shrMinus254() loop_optimization (after)
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
static void shrMinus254() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstantMinus254(); // 2, since & 63
diff --git a/test/640-checker-short-simd/src/Main.java b/test/640-checker-short-simd/src/Main.java
index f62c726c05..4cca837efb 100644
--- a/test/640-checker-short-simd/src/Main.java
+++ b/test/640-checker-short-simd/src/Main.java
@@ -35,6 +35,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
for (int i = 0; i < 128; i++)
a[i] += x;
@@ -50,6 +56,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
for (int i = 0; i < 128; i++)
a[i] -= x;
@@ -65,6 +77,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
for (int i = 0; i < 128; i++)
a[i] *= x;
@@ -94,6 +112,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
for (int i = 0; i < 128; i++)
a[i] = (short) -a[i];
@@ -109,6 +133,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
for (int i = 0; i < 128; i++)
a[i] = (short) ~a[i];
@@ -124,6 +154,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
for (int i = 0; i < 128; i++)
a[i] <<= 4;
@@ -139,6 +175,12 @@ public class Main {
/// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
index 5a63d9f539..9714a46630 100644
--- a/test/645-checker-abs-simd/src/Main.java
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -39,6 +39,18 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ /// CHECK-START-MIPS64: void Main.doitByte(byte[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
private static void doitByte(byte[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = (byte) Math.abs(x[i]);
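Note that the scalar cleanup loop in the MIPS64 block matches intrinsic:MathAbsInt even though the array is byte[]: the byte operand is promoted to int before overload resolution picks Math.abs(int), and only the store narrows back to byte. A small illustration of that promotion (ordinary Java semantics, not test code):

    byte b = -128;
    int abs = Math.abs(b);    // byte promoted to int, Math.abs(int) selected, result 128
    byte back = (byte) abs;   // narrowing wraps back to -128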
@@ -77,6 +89,18 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ /// CHECK-START-MIPS64: void Main.doitShort(short[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
private static void doitShort(short[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = (short) Math.abs(x[i]);
@@ -100,6 +124,18 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ /// CHECK-START-MIPS64: void Main.doitInt(int[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
private static void doitInt(int[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = Math.abs(x[i]);
@@ -123,6 +159,18 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ /// CHECK-START-MIPS64: void Main.doitLong(long[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsLong loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
private static void doitLong(long[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = Math.abs(x[i]);
@@ -146,6 +194,18 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ /// CHECK-START-MIPS64: void Main.doitFloat(float[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsFloat loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
private static void doitFloat(float[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = Math.abs(x[i]);
@@ -169,6 +229,18 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ /// CHECK-START-MIPS64: void Main.doitDouble(double[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsDouble loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
private static void doitDouble(double[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = Math.abs(x[i]);
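
The Math.abs checks added above all share one shape: Loop1 matches the vectorized main loop (VecLoad, VecAbs, VecStore) and Loop2 the scalar cleanup loop that still goes through ArrayGet, the MathAbs* intrinsic, and ArraySet, with CHECK-EVAL asserting the two loop blocks are distinct. The sketch below is only an illustration of that two-loop shape at the source level, not code from the patch; the class name and the four-lane width are assumptions.

    class AbsVectorizationSketch {
      // Conceptual shape of doitInt(int[]) after loop_optimization:
      // a vector main loop plus a scalar cleanup loop (Loop1 != Loop2).
      static void doitIntShape(int[] x) {
        int i = 0;
        int vectorized = x.length & ~3;        // assuming 4 int lanes per 128-bit vector
        for (; i < vectorized; i += 4) {
          // stands in for one VecLoad -> VecAbs -> VecStore over x[i..i+3]
          x[i]     = Math.abs(x[i]);
          x[i + 1] = Math.abs(x[i + 1]);
          x[i + 2] = Math.abs(x[i + 2]);
          x[i + 3] = Math.abs(x[i + 3]);
        }
        for (; i < x.length; i++) {            // scalar cleanup: ArrayGet, MathAbsInt, ArraySet
          x[i] = Math.abs(x[i]);
        }
      }
    }
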
diff --git a/test/646-checker-hadd-alt-byte/src/Main.java b/test/646-checker-hadd-alt-byte/src/Main.java
index d1b33ea0da..9cc68287c3 100644
--- a/test/646-checker-hadd-alt-byte/src/Main.java
+++ b/test/646-checker-hadd-alt-byte/src/Main.java
@@ -45,6 +45,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -71,6 +78,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -95,6 +109,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -122,6 +143,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -146,6 +174,14 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -171,6 +207,14 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
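
The unsigned/rounded attributes on VecHalvingAdd in the checks above correspond to the four scalar halving-add idioms these tests are built around, and the *_constant variants additionally check that the constant operand is broadcast with VecReplicateScalar. The loop bodies themselves are elided by the diff context; the helpers below are only an illustration of the general pattern, with made-up names, not code from the test.

    class HalvingAddSketch {
      // unsigned:false rounded:false
      static byte hadd(byte a, byte b)   { return (byte) ((a + b) >> 1); }
      // unsigned:true  rounded:false — operands treated as 0..255 first
      static byte uhadd(byte a, byte b)  { return (byte) (((a & 0xff) + (b & 0xff)) >> 1); }
      // unsigned:false rounded:true — the +1 makes the halving round up
      static byte rhadd(byte a, byte b)  { return (byte) ((a + b + 1) >> 1); }
      // unsigned:true  rounded:true
      static byte urhadd(byte a, byte b) { return (byte) (((a & 0xff) + (b & 0xff) + 1) >> 1); }
    }
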
diff --git a/test/646-checker-hadd-alt-char/src/Main.java b/test/646-checker-hadd-alt-char/src/Main.java
index 1ea8d3fe07..3f81299476 100644
--- a/test/646-checker-hadd-alt-char/src/Main.java
+++ b/test/646-checker-hadd-alt-char/src/Main.java
@@ -45,6 +45,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -72,6 +79,13 @@ public class Main {
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START-MIPS64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
// Note: HAnd has no impact (already a zero extension).
//
private static void halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
@@ -98,6 +112,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -126,6 +147,13 @@ public class Main {
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
// Note: HAnd has no impact (already a zero extension).
//
private static void rounding_halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
@@ -152,6 +180,14 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -178,6 +214,14 @@ public class Main {
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START-MIPS64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
// Note: HAnd has no impact (already a zero extension).
//
private static void halving_add_also_unsigned_constant(char[] b1, char[] bo) {
diff --git a/test/646-checker-hadd-alt-short/src/Main.java b/test/646-checker-hadd-alt-short/src/Main.java
index 269e6183b4..150626cdd7 100644
--- a/test/646-checker-hadd-alt-short/src/Main.java
+++ b/test/646-checker-hadd-alt-short/src/Main.java
@@ -45,6 +45,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -71,6 +78,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -95,6 +109,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -122,6 +143,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -146,6 +174,14 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -171,6 +207,14 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
diff --git a/test/646-checker-hadd-byte/src/Main.java b/test/646-checker-hadd-byte/src/Main.java
index 7e29a7e60b..5a615a429e 100644
--- a/test/646-checker-hadd-byte/src/Main.java
+++ b/test/646-checker-hadd-byte/src/Main.java
@@ -42,6 +42,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -68,6 +75,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -92,6 +106,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -119,6 +140,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -143,6 +171,14 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -168,6 +204,14 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
diff --git a/test/646-checker-hadd-char/src/Main.java b/test/646-checker-hadd-char/src/Main.java
index d24608f5af..bb8a01f2cc 100644
--- a/test/646-checker-hadd-char/src/Main.java
+++ b/test/646-checker-hadd-char/src/Main.java
@@ -42,6 +42,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -69,6 +76,13 @@ public class Main {
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START-MIPS64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
// Note: HAnd has no impact (already a zero extension).
//
private static void halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
@@ -95,6 +109,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -123,6 +144,13 @@ public class Main {
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
// Note: HAnd has no impact (already a zero extension).
//
private static void rounding_halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
@@ -149,6 +177,14 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -175,6 +211,14 @@ public class Main {
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START-MIPS64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
// Note: HAnd has no impact (already a zero extension).
//
private static void halving_add_also_unsigned_constant(char[] b1, char[] bo) {
diff --git a/test/646-checker-hadd-short/src/Main.java b/test/646-checker-hadd-short/src/Main.java
index 4e6b4bde8b..07845a6038 100644
--- a/test/646-checker-hadd-short/src/Main.java
+++ b/test/646-checker-hadd-short/src/Main.java
@@ -42,6 +42,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -69,6 +76,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -96,6 +110,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -120,6 +141,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -144,6 +172,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -172,6 +207,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed_alt2(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -200,6 +242,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -227,6 +276,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -252,6 +308,14 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -277,6 +341,14 @@ public class Main {
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
diff --git a/test/651-checker-byte-simd-minmax/src/Main.java b/test/651-checker-byte-simd-minmax/src/Main.java
index fe4580784a..4711214c9d 100644
--- a/test/651-checker-byte-simd-minmax/src/Main.java
+++ b/test/651-checker-byte-simd-minmax/src/Main.java
@@ -33,6 +33,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -57,6 +64,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMinUnsigned(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -78,6 +92,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -102,6 +123,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMaxUnsigned(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
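
As in the halving-add tests, unsigned:true on VecMin/VecMax marks byte lanes compared as 0..255 rather than as signed values. A minimal sketch of the signed versus unsigned scalar idioms the checks distinguish (illustrative only; the actual loop bodies are elided above and the helper names are invented):

    class ByteMinMaxSketch {
      // VecMin/VecMax unsigned:false — ordinary signed byte comparison.
      static byte minSigned(byte a, byte b)   { return (byte) Math.min(a, b); }
      // VecMin/VecMax unsigned:true — lanes compared as unsigned 0..255.
      static byte minUnsigned(byte a, byte b) { return (byte) Math.min(a & 0xff, b & 0xff); }
    }
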
diff --git a/test/651-checker-char-simd-minmax/src/Main.java b/test/651-checker-char-simd-minmax/src/Main.java
index e2998dadf6..79795ee0bd 100644
--- a/test/651-checker-char-simd-minmax/src/Main.java
+++ b/test/651-checker-char-simd-minmax/src/Main.java
@@ -33,6 +33,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -54,6 +61,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-double-simd-minmax/src/Main.java b/test/651-checker-double-simd-minmax/src/Main.java
index cf04f85906..23a6d54d9e 100644
--- a/test/651-checker-double-simd-minmax/src/Main.java
+++ b/test/651-checker-double-simd-minmax/src/Main.java
@@ -27,6 +27,7 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
// TODO x86: 0.0 vs -0.0?
+ // TODO MIPS64: min(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMin(double[], double[], double[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -49,6 +50,7 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
// TODO x86: 0.0 vs -0.0?
+ // TODO MIPS64: max(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMax(double[], double[], double[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
diff --git a/test/651-checker-float-simd-minmax/src/Main.java b/test/651-checker-float-simd-minmax/src/Main.java
index bd412e02e9..3959c821c4 100644
--- a/test/651-checker-float-simd-minmax/src/Main.java
+++ b/test/651-checker-float-simd-minmax/src/Main.java
@@ -27,6 +27,7 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
// TODO x86: 0.0 vs -0.0?
+ // TODO MIPS64: min(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMin(float[], float[], float[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -49,6 +50,7 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
// TODO x86: 0.0 vs -0.0?
+ // TODO MIPS64: max(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMax(float[], float[], float[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
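
The new "TODO MIPS64: min(x, NaN)?" notes sit next to the existing "TODO x86: 0.0 vs -0.0?" ones: both flag floating-point corner cases that a vectorized min/max would have to preserve before MIPS64 checks can be added for float and double. The snippet below only demonstrates the plain JDK semantics in question; it is not part of the patch.

    class FpMinCornerCases {
      public static void main(String[] args) {
        System.out.println(Math.min(1.0, Double.NaN)); // NaN — Math.min propagates NaN
        System.out.println(Math.min(-0.0, 0.0));       // -0.0 — negative zero counts as smaller
      }
    }
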
diff --git a/test/651-checker-int-simd-minmax/src/Main.java b/test/651-checker-int-simd-minmax/src/Main.java
index 6cee7b5484..2a97009ae9 100644
--- a/test/651-checker-int-simd-minmax/src/Main.java
+++ b/test/651-checker-int-simd-minmax/src/Main.java
@@ -32,6 +32,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -52,6 +59,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-long-simd-minmax/src/Main.java b/test/651-checker-long-simd-minmax/src/Main.java
index 51cf67ee00..6289a1e3bb 100644
--- a/test/651-checker-long-simd-minmax/src/Main.java
+++ b/test/651-checker-long-simd-minmax/src/Main.java
@@ -28,8 +28,16 @@ public class Main {
//
// Not directly supported for longs.
//
- /// CHECK-START: void Main.doitMin(long[], long[], long[]) loop_optimization (after)
+ /// CHECK-START-ARM64: void Main.doitMin(long[], long[], long[]) loop_optimization (after)
/// CHECK-NOT: VecMin
+ //
+ /// CHECK-START-MIPS64: void Main.doitMin(long[], long[], long[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+
private static void doitMin(long[] x, long[] y, long[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -46,8 +54,15 @@ public class Main {
//
// Not directly supported for longs.
//
- /// CHECK-START: void Main.doitMax(long[], long[], long[]) loop_optimization (after)
+ /// CHECK-START-ARM64: void Main.doitMax(long[], long[], long[]) loop_optimization (after)
/// CHECK-NOT: VecMax
+ //
+ /// CHECK-START-MIPS64: void Main.doitMax(long[], long[], long[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(long[] x, long[] y, long[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
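
These two hunks narrow the previously generic negative checks to ARM64, where 64-bit lane min/max is not directly supported (so VecMin/VecMax must not appear), and add positive MIPS64 checks, since MSA is expected to provide doubleword min/max. The sketch below shows the ordinary scalar idiom involved, assuming the usual Math.min loop body; the class and method names are made up for illustration.

    class LongMinMaxSketch {
      // Expected to stay scalar on ARM64 but to become a VecMin over two
      // 64-bit lanes on MIPS64, per the checks above.
      static void doitMinShape(long[] x, long[] y, long[] z) {
        int n = Math.min(x.length, Math.min(y.length, z.length));
        for (int i = 0; i < n; i++) {
          x[i] = Math.min(y[i], z[i]);
        }
      }
    }
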
diff --git a/test/651-checker-short-simd-minmax/src/Main.java b/test/651-checker-short-simd-minmax/src/Main.java
index 09485a2d8a..3bd1305e3e 100644
--- a/test/651-checker-short-simd-minmax/src/Main.java
+++ b/test/651-checker-short-simd-minmax/src/Main.java
@@ -33,6 +33,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -57,6 +64,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMinUnsigned(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -78,6 +92,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -102,6 +123,13 @@ public class Main {
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMaxUnsigned(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/654-checker-periodic/expected.txt b/test/654-checker-periodic/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/654-checker-periodic/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/654-checker-periodic/info.txt b/test/654-checker-periodic/info.txt
new file mode 100644
index 0000000000..7c8a7770ae
--- /dev/null
+++ b/test/654-checker-periodic/info.txt
@@ -0,0 +1 @@
+Periodic sequences on integer and floating-point values.
diff --git a/test/654-checker-periodic/src/Main.java b/test/654-checker-periodic/src/Main.java
new file mode 100644
index 0000000000..7a0c98cfae
--- /dev/null
+++ b/test/654-checker-periodic/src/Main.java
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for last value of a few periodic sequences
+ * (found by fuzz testing).
+ */
+public class Main {
+
+ /// CHECK-START: int Main.doitUpInt(int) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.doitUpInt(int) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ static int doitUpInt(int n) {
+ // Complete loop is replaced by last-value.
+ int lI = 1;
+ for (int i1 = 0; i1 < n; i1++) {
+ lI = (1486662021 - lI);
+ }
+ return lI;
+ }
+
+ /// CHECK-START: int Main.doitDownInt(int) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.doitDownInt(int) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ static int doitDownInt(int n) {
+ // Complete loop is replaced by last-value.
+ int lI = 1;
+ for (int i1 = n - 1; i1 >= 0; i1--) {
+ lI = (1486662021 - lI);
+ }
+ return lI;
+ }
+
+ /// CHECK-START: float Main.doitUpFloat(int) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: float Main.doitUpFloat(int) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ static float doitUpFloat(int n) {
+ // FP arithmetic is not sufficiently precise.
+ // The loop remains.
+ float lF = 1.0f;
+ for (int i1 = 0; i1 < n; i1++) {
+ lF = (1486662021.0f - lF);
+ }
+ return lF;
+ }
+
+ /// CHECK-START: float Main.doitDownFloat(int) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: float Main.doitDownFloat(int) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ static float doitDownFloat(int n) {
+ // FP arithmetic is not sufficiently precise.
+ // The loop remains.
+ float lF = 1.0f;
+ for (int i1 = n - 1; i1 >= 0; i1--) {
+ lF = (1486662021.0f - lF);
+ }
+ return lF;
+ }
+
+ /// CHECK-START: float Main.doitUpFloatAlt(int) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: float Main.doitUpFloatAlt(int) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ static float doitUpFloatAlt(int n) {
+ // Complete loop is replaced by last-value
+ // since the values are now precise.
+ float lF = 1.0f;
+ float l2 = 1486662020.0f;
+ for (int i1 = 0; i1 < n; i1++) {
+ float old = lF;
+ lF = l2;
+ l2 = old;
+ }
+ return lF;
+ }
+
+ /// CHECK-START: float Main.doitDownFloatAlt(int) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: float Main.doitDownFloatAlt(int) loop_optimization (after)
+ /// CHECK-NOT: Phi
+ static float doitDownFloatAlt(int n) {
+ // Complete loop is replaced by last-value
+ // since the values are now precise.
+ float lF = 1.0f;
+ float l2 = 1486662020.0f;
+ for (int i1 = n - 1; i1 >= 0; i1--) {
+ float old = lF;
+ lF = l2;
+ l2 = old;
+ }
+ return lF;
+ }
+
+ // Main driver.
+ public static void main(String[] args) {
+ for (int i = 0; i < 10; i++) {
+ int ei = (i & 1) == 0 ? 1 : 1486662020;
+ int ci = doitUpInt(i);
+ expectEquals(ei, ci);
+ }
+ for (int i = 0; i < 10; i++) {
+ int ei = (i & 1) == 0 ? 1 : 1486662020;
+ int ci = doitDownInt(i);
+ expectEquals(ei, ci);
+ }
+ for (int i = 0; i < 10; i++) {
+ float ef = i == 0 ? 1.0f : ((i & 1) == 0 ? 0.0f : 1486662021.0f);
+ float cf = doitUpFloat(i);
+ expectEquals(ef, cf);
+ }
+ for (int i = 0; i < 10; i++) {
+ float ef = i == 0 ? 1.0f : ((i & 1) == 0 ? 0.0f : 1486662021.0f);
+ float cf = doitDownFloat(i);
+ expectEquals(ef, cf);
+ }
+ for (int i = 0; i < 10; i++) {
+ float ef = (i & 1) == 0 ? 1.0f : 1486662020.0f;
+ float cf = doitUpFloatAlt(i);
+ expectEquals(ef, cf);
+ }
+ for (int i = 0; i < 10; i++) {
+ float ef = (i & 1) == 0 ? 1.0f : 1486662020.0f;
+ float cf = doitDownFloatAlt(i);
+ expectEquals(ef, cf);
+ }
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
+
+
diff --git a/test/655-checker-simd-arm-opt/expected.txt b/test/655-checker-simd-arm-opt/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/655-checker-simd-arm-opt/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/655-checker-simd-arm-opt/info.txt b/test/655-checker-simd-arm-opt/info.txt
new file mode 100644
index 0000000000..198cc952b2
--- /dev/null
+++ b/test/655-checker-simd-arm-opt/info.txt
@@ -0,0 +1 @@
+Checker test for arm and arm64 simd optimizations.
diff --git a/test/655-checker-simd-arm-opt/src/Main.java b/test/655-checker-simd-arm-opt/src/Main.java
new file mode 100644
index 0000000000..7b61dd7951
--- /dev/null
+++ b/test/655-checker-simd-arm-opt/src/Main.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Checker test for arm and arm64 simd optimizations.
+ */
+public class Main {
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ /// CHECK-START-ARM64: void Main.encodableConstants(byte[], short[], char[], int[], long[], float[], double[]) disassembly (after)
+ /// CHECK-DAG: <<C1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<C2:i\d+>> IntConstant 2
+ /// CHECK-DAG: <<C3:i\d+>> IntConstant 3
+ /// CHECK-DAG: <<C4:i\d+>> IntConstant 4
+ /// CHECK-DAG: <<L5:j\d+>> LongConstant 5
+ /// CHECK-DAG: <<F2:f\d+>> FloatConstant 2
+ /// CHECK-DAG: <<D20:d\d+>> DoubleConstant 20
+ //
+ /// CHECK-DAG: VecReplicateScalar [<<C1>>]
+ /// CHECK-DAG: movi v{{[0-9]+}}.16b, #0x1
+ /// CHECK-DAG: VecReplicateScalar [<<C2>>]
+ /// CHECK-DAG: movi v{{[0-9]+}}.8h, #0x2, lsl #0
+ /// CHECK-DAG: VecReplicateScalar [<<C3>>]
+ /// CHECK-DAG: movi v{{[0-9]+}}.8h, #0x3, lsl #0
+ /// CHECK-DAG: VecReplicateScalar [<<C4>>]
+ /// CHECK-DAG: movi v{{[0-9]+}}.4s, #0x4, lsl #0
+ /// CHECK-DAG: VecReplicateScalar [<<L5>>]
+ /// CHECK-DAG: dup v{{[0-9]+}}.2d, x{{[0-9]+}}
+ /// CHECK-DAG: VecReplicateScalar [<<F2>>]
+ /// CHECK-DAG: fmov v{{[0-9]+}}.4s, #0x0
+ /// CHECK-DAG: VecReplicateScalar [<<D20>>]
+ /// CHECK-DAG: fmov v{{[0-9]+}}.2d, #0x34
+ private static void encodableConstants(byte[] b, short[] s, char[] c, int[] a, long[] l, float[] f, double[] d) {
+ for (int i = 0; i < ARRAY_SIZE; i++) {
+ b[i] += 1;
+ }
+ for (int i = 0; i < ARRAY_SIZE; i++) {
+ s[i] += 2;
+ }
+ for (int i = 0; i < ARRAY_SIZE; i++) {
+ c[i] += 3;
+ }
+ for (int i = 0; i < ARRAY_SIZE; i++) {
+ a[i] += 4;
+ }
+ for (int i = 0; i < ARRAY_SIZE; i++) {
+ l[i] += 5;
+ }
+ for (int i = 0; i < ARRAY_SIZE; i++) {
+ f[i] += 2.0f;
+ }
+ for (int i = 0; i < ARRAY_SIZE; i++) {
+ d[i] += 20.0;
+ }
+ }
+
+ private static int sumArray(byte[] b, short[] s, char[] c, int[] a, long[] l, float[] f, double[] d) {
+ int sum = 0;
+ for (int i = 0; i < ARRAY_SIZE; i++) {
+ sum += b[i] + s[i] + c[i] + a[i] + l[i] + f[i] + d[i];
+ }
+ return sum;
+ }
+
+ public static final int ARRAY_SIZE = 100;
+
+ public static void main(String[] args) {
+ byte[] b = new byte[ARRAY_SIZE];
+ short[] s = new short[ARRAY_SIZE];
+ char[] c = new char[ARRAY_SIZE];
+ int[] a = new int[ARRAY_SIZE];
+ long[] l = new long[ARRAY_SIZE];
+ float[] f = new float[ARRAY_SIZE];
+ double[] d = new double[ARRAY_SIZE];
+
+ encodableConstants(b, s, c, a, l, f, d);
+ expectEquals(3700, sumArray(b, s, c, a, l, f, d));
+
+ System.out.println("passed");
+ }
+}
diff --git a/test/655-jit-clinit/expected.txt b/test/655-jit-clinit/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/655-jit-clinit/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/655-jit-clinit/info.txt b/test/655-jit-clinit/info.txt
new file mode 100644
index 0000000000..5c81d9b014
--- /dev/null
+++ b/test/655-jit-clinit/info.txt
@@ -0,0 +1,3 @@
+Regression test for the JIT compiler, which used to wait
+on a class object, meaning application code could just block
+all JIT compilations.
diff --git a/test/655-jit-clinit/src/Main.java b/test/655-jit-clinit/src/Main.java
new file mode 100644
index 0000000000..44b315478f
--- /dev/null
+++ b/test/655-jit-clinit/src/Main.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ if (!hasJit()) {
+ return;
+ }
+ Foo.hotMethod();
+ }
+
+ public native static boolean isJitCompiled(Class<?> cls, String methodName);
+ private native static boolean hasJit();
+}
+
+class Foo {
+ static void hotMethod() {
+ for (int i = 0; i < array.length; ++i) {
+ array[i] = array;
+ }
+ }
+
+ static {
+ array = new Object[10000];
+ while (!Main.isJitCompiled(Foo.class, "hotMethod")) {
+ Foo.hotMethod();
+ try {
+ // Sleep to give a chance for the JIT to compile `hotMethod`.
+ Thread.sleep(100);
+ } catch (Exception e) {
+ // Ignore
+ }
+ }
+ }
+
+ static Object[] array;
+}
diff --git a/test/802-deoptimization/src/DeoptimizationController.java b/test/802-deoptimization/src/DeoptimizationController.java
index d6e662d04d..88579ded07 100644
--- a/test/802-deoptimization/src/DeoptimizationController.java
+++ b/test/802-deoptimization/src/DeoptimizationController.java
@@ -50,7 +50,7 @@ public class DeoptimizationController {
throw new IllegalStateException("Not tracing.");
}
} catch (Exception exc) {
- exc.printStackTrace(System.err);
+ exc.printStackTrace(System.out);
} finally {
if (tempFile != null) {
tempFile.delete();
@@ -65,7 +65,7 @@ public class DeoptimizationController {
throw new IllegalStateException("Still tracing.");
}
} catch (Exception exc) {
- exc.printStackTrace(System.err);
+ exc.printStackTrace(System.out);
}
}
diff --git a/test/909-attach-agent/attach.cc b/test/909-attach-agent/attach.cc
index 0150e0962f..3a6788a8e3 100644
--- a/test/909-attach-agent/attach.cc
+++ b/test/909-attach-agent/attach.cc
@@ -27,18 +27,22 @@
namespace art {
namespace Test909AttachAgent {
+static void Println(const char* c) {
+ fprintf(stdout, "%s\n", c);
+ fflush(stdout);
+}
+
jint OnAttach(JavaVM* vm,
char* options ATTRIBUTE_UNUSED,
void* reserved ATTRIBUTE_UNUSED) {
- fprintf(stderr, "Attached Agent for test 909-attach-agent\n");
- fsync(1);
+ Println("Attached Agent for test 909-attach-agent");
jvmtiEnv* env = nullptr;
jvmtiEnv* env2 = nullptr;
#define CHECK_CALL_SUCCESS(c) \
do { \
if ((c) != JNI_OK) { \
- fprintf(stderr, "call " #c " did not succeed\n"); \
+ Println("call " #c " did not succeed"); \
return -1; \
} \
} while (false)
@@ -46,7 +50,7 @@ jint OnAttach(JavaVM* vm,
CHECK_CALL_SUCCESS(vm->GetEnv(reinterpret_cast<void**>(&env), JVMTI_VERSION_1_0));
CHECK_CALL_SUCCESS(vm->GetEnv(reinterpret_cast<void**>(&env2), JVMTI_VERSION_1_0));
if (env == env2) {
- fprintf(stderr, "GetEnv returned same environment twice!\n");
+ Println("GetEnv returned same environment twice!");
return -1;
}
unsigned char* local_data = nullptr;
@@ -56,19 +60,19 @@ jint OnAttach(JavaVM* vm,
unsigned char* get_data = nullptr;
CHECK_CALL_SUCCESS(env->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&get_data)));
if (get_data != local_data) {
- fprintf(stderr, "Got different data from local storage then what was set!\n");
+    Println("Got different data from local storage than what was set!");
return -1;
}
CHECK_CALL_SUCCESS(env2->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&get_data)));
if (get_data != nullptr) {
- fprintf(stderr, "env2 did not have nullptr local storage.\n");
+ Println("env2 did not have nullptr local storage.");
return -1;
}
CHECK_CALL_SUCCESS(env->Deallocate(local_data));
jint version = 0;
CHECK_CALL_SUCCESS(env->GetVersionNumber(&version));
if ((version & JVMTI_VERSION_1) != JVMTI_VERSION_1) {
- fprintf(stderr, "Unexpected version number!\n");
+ Println("Unexpected version number!");
return -1;
}
CHECK_CALL_SUCCESS(env->DisposeEnvironment());
diff --git a/test/909-attach-agent/src/Main.java b/test/909-attach-agent/src/Main.java
index 569b89ad7d..25ebd57236 100644
--- a/test/909-attach-agent/src/Main.java
+++ b/test/909-attach-agent/src/Main.java
@@ -19,17 +19,17 @@ import java.io.IOException;
public class Main {
public static void main(String[] args) {
- System.err.println("Hello, world!");
+ System.out.println("Hello, world!");
for(String a : args) {
if(a.startsWith("agent:")) {
String agent = a.substring(6);
try {
VMDebug.attachAgent(agent);
} catch(IOException e) {
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
}
- System.err.println("Goodbye!");
+ System.out.println("Goodbye!");
}
}
diff --git a/test/916-obsolete-jit/src/Main.java b/test/916-obsolete-jit/src/Main.java
index 17a7a86ccc..d7b32ba102 100644
--- a/test/916-obsolete-jit/src/Main.java
+++ b/test/916-obsolete-jit/src/Main.java
@@ -132,7 +132,7 @@ public class Main {
"sayHi", Runnable.class, Consumer.class);
} catch (Exception e) {
System.out.println("Unable to find methods!");
- e.printStackTrace();
+ e.printStackTrace(System.out);
return;
}
// Makes sure the stack is the way we want it for the test and does the redefinition. It will
diff --git a/test/934-load-transform/src/Main.java b/test/934-load-transform/src/Main.java
index 1401b7df01..2d0c2978e2 100644
--- a/test/934-load-transform/src/Main.java
+++ b/test/934-load-transform/src/Main.java
@@ -86,7 +86,7 @@ class Main {
run_test.invoke(null);
} catch (Exception e) {
System.out.println(e.toString());
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
}
diff --git a/test/935-non-retransformable/src/Main.java b/test/935-non-retransformable/src/Main.java
index f240224977..5098712044 100644
--- a/test/935-non-retransformable/src/Main.java
+++ b/test/935-non-retransformable/src/Main.java
@@ -97,7 +97,7 @@ class Main {
run_test.invoke(null);
} catch (Exception e) {
System.out.println(e.toString());
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
}
diff --git a/test/938-load-transform-bcp/src-ex/TestMain.java b/test/938-load-transform-bcp/src-ex/TestMain.java
index 3757a0f778..b60fe36556 100644
--- a/test/938-load-transform-bcp/src-ex/TestMain.java
+++ b/test/938-load-transform-bcp/src-ex/TestMain.java
@@ -29,7 +29,7 @@ public class TestMain {
System.out.println(
"Exception occured (did something load OptionalLong before this test method!: "
+ e.toString());
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
}
diff --git a/test/938-load-transform-bcp/src/Main.java b/test/938-load-transform-bcp/src/Main.java
index 69658c0cec..939bdbe02b 100644
--- a/test/938-load-transform-bcp/src/Main.java
+++ b/test/938-load-transform-bcp/src/Main.java
@@ -111,7 +111,7 @@ class Main {
run_test.invoke(null);
} catch (Exception e) {
System.out.println(e.toString());
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
}
diff --git a/test/941-recurive-obsolete-jit/src/Main.java b/test/941-recurive-obsolete-jit/src/Main.java
index 89d593b7cd..e3065a7117 100644
--- a/test/941-recurive-obsolete-jit/src/Main.java
+++ b/test/941-recurive-obsolete-jit/src/Main.java
@@ -116,7 +116,7 @@ public class Main {
"sayHi", int.class, Consumer.class, Runnable.class);
} catch (Exception e) {
System.out.println("Unable to find methods!");
- e.printStackTrace();
+ e.printStackTrace(System.out);
return;
}
// Makes sure the stack is the way we want it for the test and does the redefinition. It will
diff --git a/test/943-private-recursive-jit/src/Main.java b/test/943-private-recursive-jit/src/Main.java
index 871c63674f..09337bae26 100644
--- a/test/943-private-recursive-jit/src/Main.java
+++ b/test/943-private-recursive-jit/src/Main.java
@@ -129,7 +129,7 @@ public class Main {
"privateSayHi", int.class, Consumer.class, Runnable.class);
} catch (Exception e) {
System.out.println("Unable to find methods!");
- e.printStackTrace();
+ e.printStackTrace(System.out);
return;
}
// Makes sure the stack is the way we want it for the test and does the redefinition. It will
diff --git a/test/947-reflect-method/src/art/Test947.java b/test/947-reflect-method/src/art/Test947.java
index 8cb515e492..90e0f81989 100644
--- a/test/947-reflect-method/src/art/Test947.java
+++ b/test/947-reflect-method/src/art/Test947.java
@@ -76,7 +76,7 @@ public class Test947 {
Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
say_hi_method.invoke(t);
} catch (Exception e) {
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
}
diff --git a/test/953-invoke-polymorphic-compiler/src/Main.java b/test/953-invoke-polymorphic-compiler/src/Main.java
index 20a8fec112..ce3f4db5d4 100644
--- a/test/953-invoke-polymorphic-compiler/src/Main.java
+++ b/test/953-invoke-polymorphic-compiler/src/Main.java
@@ -70,30 +70,30 @@ public class Main {
}
public static void fail() {
- System.err.println("fail");
+ System.out.println("fail");
Thread.dumpStack();
}
public static void fail(String message) {
- System.err.println("fail: " + message);
+ System.out.println("fail: " + message);
Thread.dumpStack();
}
public static int Min2Print2(int a, int b) {
int[] values = new int[] { a, b };
- System.err.println("Running Main.Min2Print2(" + Arrays.toString(values) + ")");
+ System.out.println("Running Main.Min2Print2(" + Arrays.toString(values) + ")");
return a > b ? a : b;
}
public static int Min2Print3(int a, int b, int c) {
int[] values = new int[] { a, b, c };
- System.err.println("Running Main.Min2Print3(" + Arrays.toString(values) + ")");
+ System.out.println("Running Main.Min2Print3(" + Arrays.toString(values) + ")");
return a > b ? a : b;
}
public static int Min2Print6(int a, int b, int c, int d, int e, int f) {
int[] values = new int[] { a, b, c, d, e, f };
- System.err.println("Running Main.Min2Print6(" + Arrays.toString(values) + ")");
+ System.out.println("Running Main.Min2Print6(" + Arrays.toString(values) + ")");
return a > b ? a : b;
}
@@ -106,7 +106,7 @@ public class Main {
int y, int z) {
int[] values = new int[] { a, b, c, d, e, f, g, h, i, j, k, l, m,
n, o, p, q, r, s, t, u, v, w, x, y, z };
- System.err.println("Running Main.Min2Print26(" + Arrays.toString(values) + ")");
+ System.out.println("Running Main.Min2Print26(" + Arrays.toString(values) + ")");
return a > b ? a : b;
}
@@ -176,7 +176,7 @@ public class Main {
fail("No NPE for you");
} catch (NullPointerException npe) {}
- System.err.println("BasicTest done.");
+ System.out.println("BasicTest done.");
}
private static boolean And(boolean lhs, boolean rhs) {
@@ -248,7 +248,7 @@ public class Main {
assertEquals(true, (boolean) mh.invoke(false, true));
assertEquals(false, (boolean) mh.invoke(false, false));
- System.err.println("$opt$ReturnBooleanTest done.");
+ System.out.println("$opt$ReturnBooleanTest done.");
}
public static void $opt$ReturnCharTest() throws Throwable {
@@ -257,7 +257,7 @@ public class Main {
MethodType.methodType(char.class, char.class));
assertEquals('B', (char) mh.invokeExact('A'));
assertEquals((char) -55, (char) mh.invokeExact((char) -56));
- System.err.println("$opt$ReturnCharTest done.");
+ System.out.println("$opt$ReturnCharTest done.");
}
public static void $opt$ReturnByteTest() throws Throwable {
@@ -266,7 +266,7 @@ public class Main {
MethodType.methodType(byte.class, byte.class, byte.class));
assertEquals((byte) 30, (byte) mh.invokeExact((byte) 10, (byte) 3));
assertEquals((byte) -90, (byte) mh.invoke((byte) -10, (byte) 9));
- System.err.println("$opt$ReturnByteTest done.");
+ System.out.println("$opt$ReturnByteTest done.");
}
public static void $opt$ReturnShortTest() throws Throwable {
@@ -275,7 +275,7 @@ public class Main {
MethodType.methodType(short.class, short.class, short.class));
assertEquals((short) 3000, (short) mh.invokeExact((short) 1000, (short) 3));
assertEquals((short) -3000, (short) mh.invoke((short) -1000, (short) 3));
- System.err.println("$opt$ReturnShortTest done.");
+ System.out.println("$opt$ReturnShortTest done.");
}
public static void $opt$ReturnIntTest() throws Throwable {
@@ -284,7 +284,7 @@ public class Main {
MethodType.methodType(int.class, int.class, int.class));
assertEquals(3_000_000, (int) mh.invokeExact(1_000_000, 3));
assertEquals(-3_000_000, (int) mh.invoke(-1_000, 3_000));
- System.err.println("$opt$ReturnIntTest done.");
+ System.out.println("$opt$ReturnIntTest done.");
}
public static void $opt$ReturnLongTest() throws Throwable {
@@ -293,7 +293,7 @@ public class Main {
MethodType.methodType(long.class, long.class, long.class));
assertEquals(4_294_967_295_000L, (long) mh.invokeExact(1000L, 4_294_967_295L));
assertEquals(-4_294_967_295_000L, (long) mh.invoke(-1000L, 4_294_967_295L));
- System.err.println("$opt$ReturnLongTest done.");
+ System.out.println("$opt$ReturnLongTest done.");
}
public static void $opt$ReturnFloatTest() throws Throwable {
@@ -302,7 +302,7 @@ public class Main {
MethodType.methodType(float.class, float.class, float.class));
assertEquals(3.0F, (float) mh.invokeExact(1000.0F, 3e-3F));
assertEquals(-3.0F, (float) mh.invoke(-1000.0F, 3e-3F));
- System.err.println("$opt$ReturnFloatTest done.");
+ System.out.println("$opt$ReturnFloatTest done.");
}
public static void $opt$ReturnDoubleTest() throws Throwable {
@@ -311,7 +311,7 @@ public class Main {
MethodType.methodType(double.class, double.class, double.class));
assertEquals(3033000.0, (double) mh.invokeExact(1000.0, 3.033e3));
assertEquals(-3033000.0, (double) mh.invoke(-1000.0, 3.033e3));
- System.err.println("$opt$ReturnDoubleTest done.");
+ System.out.println("$opt$ReturnDoubleTest done.");
}
public static void $opt$ReturnStringTest() throws Throwable {
@@ -320,7 +320,7 @@ public class Main {
MethodType.methodType(String.class, String.class, int.class));
assertEquals("100010001000", (String) mh.invokeExact("1000", 3));
assertEquals("100010001000", (String) mh.invoke("1000", 3));
- System.err.println("$opt$ReturnStringTest done.");
+ System.out.println("$opt$ReturnStringTest done.");
}
public static void ReturnValuesTest() throws Throwable {
@@ -333,7 +333,7 @@ public class Main {
$opt$ReturnFloatTest();
$opt$ReturnDoubleTest();
$opt$ReturnStringTest();
- System.err.println("ReturnValuesTest done.");
+ System.out.println("ReturnValuesTest done.");
}
static class ValueHolder {
diff --git a/test/972-default-imt-collision/src/Main.java b/test/972-default-imt-collision/src/Main.java
index 6819e43ae7..043cef1147 100644
--- a/test/972-default-imt-collision/src/Main.java
+++ b/test/972-default-imt-collision/src/Main.java
@@ -24,7 +24,7 @@ public class Main {
Method test = c.getMethod("testMe", iface);
test.invoke(null, o);
} catch (Exception e) {
- e.printStackTrace();
+ e.printStackTrace(System.out);
System.out.println("FAILED: could not run testMe!");
}
}
diff --git a/test/972-iface-super-multidex/src/Main.java b/test/972-iface-super-multidex/src/Main.java
index 3fb3f45428..dea5f1db68 100644
--- a/test/972-iface-super-multidex/src/Main.java
+++ b/test/972-iface-super-multidex/src/Main.java
@@ -22,7 +22,7 @@ public class Main {
c = Class.forName("ConcreteClass");
} catch (Exception e) {
System.out.println("Could not load class");
- e.printStackTrace();
+ e.printStackTrace(System.out);
return;
}
try {
@@ -30,7 +30,7 @@ public class Main {
System.out.println((String)m.invoke(c.newInstance(), new Object[0]));
} catch (Exception e) {
System.out.println("Unknown exception occurred");
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
try {
Method m = c.getMethod("runConflict");
@@ -41,15 +41,15 @@ public class Main {
}
} catch (AbstractMethodError e) {
System.out.println("Unexpected AME caught");
- e.printStackTrace();
+ e.printStackTrace(System.out);
} catch (NoSuchMethodError e) {
System.out.println("Unexpected NSME caught");
- e.printStackTrace();
+ e.printStackTrace(System.out);
} catch (IncompatibleClassChangeError e) {
System.out.println("Expected ICCE caught");
} catch (Throwable e) {
System.out.println("Unknown exception caught!");
- e.printStackTrace();
+ e.printStackTrace(System.out);
}
}
}
diff --git a/test/973-default-multidex/src/Main.java b/test/973-default-multidex/src/Main.java
index b93265a5b8..c7dd6dc9a3 100644
--- a/test/973-default-multidex/src/Main.java
+++ b/test/973-default-multidex/src/Main.java
@@ -23,7 +23,7 @@ public class Main {
Method m = c.getMethod("callMethod");
System.out.println(m.invoke(c.newInstance(), new Object[0]));
} catch (Exception e) {
- e.printStackTrace();
+ e.printStackTrace(System.out);
System.out.println("FAILED: Could not call method");
return;
}
diff --git a/test/983-source-transform-verify/source_transform.cc b/test/983-source-transform-verify/source_transform.cc
index 3ef3c7cb45..a433dc9b75 100644
--- a/test/983-source-transform-verify/source_transform.cc
+++ b/test/983-source-transform-verify/source_transform.cc
@@ -34,7 +34,7 @@
#include "jvmti.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
#include "thread_list.h"
// Test infrastructure
diff --git a/test/988-method-trace/expected.txt b/test/988-method-trace/expected.txt
new file mode 100644
index 0000000000..d3d9249b1f
--- /dev/null
+++ b/test/988-method-trace/expected.txt
@@ -0,0 +1,276 @@
+<= public static native void art.Trace.enableMethodTracing(java.lang.Class,java.lang.reflect.Method,java.lang.reflect.Method,java.lang.Thread) -> <null: null>
+=> art.Test988$IterOp()
+.=> public java.lang.Object()
+.<= public java.lang.Object() -> <null: null>
+<= art.Test988$IterOp() -> <null: null>
+=> public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator)
+.=> public int art.Test988$IterOp.applyAsInt(int)
+..=> static int art.Test988.iter_fibonacci(int)
+..<= static int art.Test988.iter_fibonacci(int) -> <class java.lang.Integer: 832040>
+.<= public int art.Test988$IterOp.applyAsInt(int) -> <class java.lang.Integer: 832040>
+.=> public art.Test988$FibResult(java.lang.String,int,int)
+..=> public java.lang.Object()
+..<= public java.lang.Object() -> <null: null>
+.<= public art.Test988$FibResult(java.lang.String,int,int) -> <null: null>
+.=> public boolean java.util.ArrayList.add(java.lang.Object)
+..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+fibonacci(30)=832040
+.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
+=> art.Test988$RecurOp()
+.=> public java.lang.Object()
+.<= public java.lang.Object() -> <null: null>
+<= art.Test988$RecurOp() -> <null: null>
+=> public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator)
+.=> public int art.Test988$RecurOp.applyAsInt(int)
+..=> static int art.Test988.fibonacci(int)
+...=> static int art.Test988.fibonacci(int)
+....=> static int art.Test988.fibonacci(int)
+.....=> static int art.Test988.fibonacci(int)
+......=> static int art.Test988.fibonacci(int)
+......<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 1>
+......=> static int art.Test988.fibonacci(int)
+......<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 0>
+.....<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 1>
+.....=> static int art.Test988.fibonacci(int)
+.....<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 1>
+....<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 2>
+....=> static int art.Test988.fibonacci(int)
+.....=> static int art.Test988.fibonacci(int)
+.....<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 1>
+.....=> static int art.Test988.fibonacci(int)
+.....<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 0>
+....<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 1>
+...<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 3>
+...=> static int art.Test988.fibonacci(int)
+....=> static int art.Test988.fibonacci(int)
+.....=> static int art.Test988.fibonacci(int)
+.....<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 1>
+.....=> static int art.Test988.fibonacci(int)
+.....<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 0>
+....<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 1>
+....=> static int art.Test988.fibonacci(int)
+....<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 1>
+...<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 2>
+..<= static int art.Test988.fibonacci(int) -> <class java.lang.Integer: 5>
+.<= public int art.Test988$RecurOp.applyAsInt(int) -> <class java.lang.Integer: 5>
+.=> public art.Test988$FibResult(java.lang.String,int,int)
+..=> public java.lang.Object()
+..<= public java.lang.Object() -> <null: null>
+.<= public art.Test988$FibResult(java.lang.String,int,int) -> <null: null>
+.=> public boolean java.util.ArrayList.add(java.lang.Object)
+..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+fibonacci(5)=5
+.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
+=> art.Test988$IterOp()
+.=> public java.lang.Object()
+.<= public java.lang.Object() -> <null: null>
+<= art.Test988$IterOp() -> <null: null>
+=> public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator)
+.=> public int art.Test988$IterOp.applyAsInt(int)
+..=> static int art.Test988.iter_fibonacci(int)
+...=> public java.lang.StringBuilder()
+....=> java.lang.AbstractStringBuilder(int)
+.....=> public java.lang.Object()
+.....<= public java.lang.Object() -> <null: null>
+....<= java.lang.AbstractStringBuilder(int) -> <null: null>
+...<= public java.lang.StringBuilder() -> <null: null>
+...=> public java.lang.StringBuilder java.lang.StringBuilder.append(java.lang.String)
+....=> public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(java.lang.String)
+.....=> public int java.lang.String.length()
+.....<= public int java.lang.String.length() -> <class java.lang.Integer: 14>
+.....=> private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int)
+.....<= private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int) -> <null: null>
+.....=> public void java.lang.String.getChars(int,int,char[],int)
+......=> public int java.lang.String.length()
+......<= public int java.lang.String.length() -> <class java.lang.Integer: 14>
+......=> native void java.lang.String.getCharsNoCheck(int,int,char[],int)
+......<= native void java.lang.String.getCharsNoCheck(int,int,char[],int) -> <null: null>
+.....<= public void java.lang.String.getChars(int,int,char[],int) -> <null: null>
+....<= public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...<= public java.lang.StringBuilder java.lang.StringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...=> public java.lang.StringBuilder java.lang.StringBuilder.append(int)
+....=> public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(int)
+.....=> static int java.lang.Integer.stringSize(int)
+.....<= static int java.lang.Integer.stringSize(int) -> <class java.lang.Integer: 2>
+.....=> private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int)
+......=> private int java.lang.AbstractStringBuilder.newCapacity(int)
+......<= private int java.lang.AbstractStringBuilder.newCapacity(int) -> <class java.lang.Integer: 34>
+......=> public static char[] java.util.Arrays.copyOf(char[],int)
+.......=> public static int java.lang.Math.min(int,int)
+.......<= public static int java.lang.Math.min(int,int) -> <class java.lang.Integer: 16>
+.......=> public static void java.lang.System.arraycopy(char[],int,char[],int,int)
+.......<= public static void java.lang.System.arraycopy(char[],int,char[],int,int) -> <null: null>
+......<= public static char[] java.util.Arrays.copyOf(char[],int) -> <class [C: [B, a, d, , a, r, g, u, m, e, n, t, :, , -, 1, 9, , <, , 0, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>]>
+.....<= private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int) -> <null: null>
+.....=> static void java.lang.Integer.getChars(int,int,char[])
+.....<= static void java.lang.Integer.getChars(int,int,char[]) -> <null: null>
+....<= public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(int) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...<= public java.lang.StringBuilder java.lang.StringBuilder.append(int) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...=> public java.lang.StringBuilder java.lang.StringBuilder.append(java.lang.String)
+....=> public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(java.lang.String)
+.....=> public int java.lang.String.length()
+.....<= public int java.lang.String.length() -> <class java.lang.Integer: 4>
+.....=> private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int)
+.....<= private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int) -> <null: null>
+.....=> public void java.lang.String.getChars(int,int,char[],int)
+......=> public int java.lang.String.length()
+......<= public int java.lang.String.length() -> <class java.lang.Integer: 4>
+......=> native void java.lang.String.getCharsNoCheck(int,int,char[],int)
+......<= native void java.lang.String.getCharsNoCheck(int,int,char[],int) -> <null: null>
+.....<= public void java.lang.String.getChars(int,int,char[],int) -> <null: null>
+....<= public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...<= public java.lang.StringBuilder java.lang.StringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...=> public java.lang.String java.lang.StringBuilder.toString()
+....=> static native java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[])
+....<= static native java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[]) -> <class java.lang.String: Bad argument: -19 < 0>
+...<= public java.lang.String java.lang.StringBuilder.toString() -> <class java.lang.String: Bad argument: -19 < 0>
+...=> public java.lang.Error(java.lang.String)
+....=> public java.lang.Throwable(java.lang.String)
+.....=> public java.lang.Object()
+.....<= public java.lang.Object() -> <null: null>
+.....=> public static final java.util.List java.util.Collections.emptyList()
+.....<= public static final java.util.List java.util.Collections.emptyList() -> <class java.util.Collections$EmptyList: []>
+.....=> public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace()
+......=> private static native java.lang.Object java.lang.Throwable.nativeFillInStackTrace()
+......<= private static native java.lang.Object java.lang.Throwable.nativeFillInStackTrace() -> <class [Ljava.lang.Object;: <non-deterministic>>
+.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: Bad argument: -19 < 0
+ at art.Test988.iter_fibonacci(Test988.java:207)
+ at art.Test988$IterOp.applyAsInt(Test988.java:202)
+ at art.Test988.doFibTest(Test988.java:295)
+ at art.Test988.run(Test988.java:265)
+ at Main.main(Main.java:19)
+>
+....<= public java.lang.Throwable(java.lang.String) -> <null: null>
+...<= public java.lang.Error(java.lang.String) -> <null: null>
+..<= static int art.Test988.iter_fibonacci(int) EXCEPTION
+.<= public int art.Test988$IterOp.applyAsInt(int) EXCEPTION
+.=> public art.Test988$FibThrow(java.lang.String,int,java.lang.Throwable)
+..=> public java.lang.Object()
+..<= public java.lang.Object() -> <null: null>
+.<= public art.Test988$FibThrow(java.lang.String,int,java.lang.Throwable) -> <null: null>
+.=> public boolean java.util.ArrayList.add(java.lang.Object)
+..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
+ at art.Test988.iter_fibonacci(Test988.java:207)
+ at art.Test988$IterOp.applyAsInt(Test988.java:202)
+ at art.Test988.doFibTest(Test988.java:295)
+ at art.Test988.run(Test988.java:265)
+ at Main.main(Main.java:19)
+
+.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
+=> art.Test988$RecurOp()
+.=> public java.lang.Object()
+.<= public java.lang.Object() -> <null: null>
+<= art.Test988$RecurOp() -> <null: null>
+=> public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator)
+.=> public int art.Test988$RecurOp.applyAsInt(int)
+..=> static int art.Test988.fibonacci(int)
+...=> public java.lang.StringBuilder()
+....=> java.lang.AbstractStringBuilder(int)
+.....=> public java.lang.Object()
+.....<= public java.lang.Object() -> <null: null>
+....<= java.lang.AbstractStringBuilder(int) -> <null: null>
+...<= public java.lang.StringBuilder() -> <null: null>
+...=> public java.lang.StringBuilder java.lang.StringBuilder.append(java.lang.String)
+....=> public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(java.lang.String)
+.....=> public int java.lang.String.length()
+.....<= public int java.lang.String.length() -> <class java.lang.Integer: 14>
+.....=> private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int)
+.....<= private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int) -> <null: null>
+.....=> public void java.lang.String.getChars(int,int,char[],int)
+......=> public int java.lang.String.length()
+......<= public int java.lang.String.length() -> <class java.lang.Integer: 14>
+......=> native void java.lang.String.getCharsNoCheck(int,int,char[],int)
+......<= native void java.lang.String.getCharsNoCheck(int,int,char[],int) -> <null: null>
+.....<= public void java.lang.String.getChars(int,int,char[],int) -> <null: null>
+....<= public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...<= public java.lang.StringBuilder java.lang.StringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...=> public java.lang.StringBuilder java.lang.StringBuilder.append(int)
+....=> public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(int)
+.....=> static int java.lang.Integer.stringSize(int)
+.....<= static int java.lang.Integer.stringSize(int) -> <class java.lang.Integer: 2>
+.....=> private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int)
+......=> private int java.lang.AbstractStringBuilder.newCapacity(int)
+......<= private int java.lang.AbstractStringBuilder.newCapacity(int) -> <class java.lang.Integer: 34>
+......=> public static char[] java.util.Arrays.copyOf(char[],int)
+.......=> public static int java.lang.Math.min(int,int)
+.......<= public static int java.lang.Math.min(int,int) -> <class java.lang.Integer: 16>
+.......=> public static void java.lang.System.arraycopy(char[],int,char[],int,int)
+.......<= public static void java.lang.System.arraycopy(char[],int,char[],int,int) -> <null: null>
+......<= public static char[] java.util.Arrays.copyOf(char[],int) -> <class [C: [B, a, d, , a, r, g, u, m, e, n, t, :, , -, 1, 9, , <, , 0, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>]>
+.....<= private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int) -> <null: null>
+.....=> static void java.lang.Integer.getChars(int,int,char[])
+.....<= static void java.lang.Integer.getChars(int,int,char[]) -> <null: null>
+....<= public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(int) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...<= public java.lang.StringBuilder java.lang.StringBuilder.append(int) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...=> public java.lang.StringBuilder java.lang.StringBuilder.append(java.lang.String)
+....=> public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(java.lang.String)
+.....=> public int java.lang.String.length()
+.....<= public int java.lang.String.length() -> <class java.lang.Integer: 4>
+.....=> private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int)
+.....<= private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int) -> <null: null>
+.....=> public void java.lang.String.getChars(int,int,char[],int)
+......=> public int java.lang.String.length()
+......<= public int java.lang.String.length() -> <class java.lang.Integer: 4>
+......=> native void java.lang.String.getCharsNoCheck(int,int,char[],int)
+......<= native void java.lang.String.getCharsNoCheck(int,int,char[],int) -> <null: null>
+.....<= public void java.lang.String.getChars(int,int,char[],int) -> <null: null>
+....<= public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...<= public java.lang.StringBuilder java.lang.StringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
+...=> public java.lang.String java.lang.StringBuilder.toString()
+....=> static native java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[])
+....<= static native java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[]) -> <class java.lang.String: Bad argument: -19 < 0>
+...<= public java.lang.String java.lang.StringBuilder.toString() -> <class java.lang.String: Bad argument: -19 < 0>
+...=> public java.lang.Error(java.lang.String)
+....=> public java.lang.Throwable(java.lang.String)
+.....=> public java.lang.Object()
+.....<= public java.lang.Object() -> <null: null>
+.....=> public static final java.util.List java.util.Collections.emptyList()
+.....<= public static final java.util.List java.util.Collections.emptyList() -> <class java.util.Collections$EmptyList: []>
+.....=> public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace()
+......=> private static native java.lang.Object java.lang.Throwable.nativeFillInStackTrace()
+......<= private static native java.lang.Object java.lang.Throwable.nativeFillInStackTrace() -> <class [Ljava.lang.Object;: <non-deterministic>>
+.....<= public synchronized java.lang.Throwable java.lang.Throwable.fillInStackTrace() -> <class java.lang.Error: java.lang.Error: Bad argument: -19 < 0
+ at art.Test988.fibonacci(Test988.java:229)
+ at art.Test988$RecurOp.applyAsInt(Test988.java:224)
+ at art.Test988.doFibTest(Test988.java:295)
+ at art.Test988.run(Test988.java:266)
+ at Main.main(Main.java:19)
+>
+....<= public java.lang.Throwable(java.lang.String) -> <null: null>
+...<= public java.lang.Error(java.lang.String) -> <null: null>
+..<= static int art.Test988.fibonacci(int) EXCEPTION
+.<= public int art.Test988$RecurOp.applyAsInt(int) EXCEPTION
+.=> public art.Test988$FibThrow(java.lang.String,int,java.lang.Throwable)
+..=> public java.lang.Object()
+..<= public java.lang.Object() -> <null: null>
+.<= public art.Test988$FibThrow(java.lang.String,int,java.lang.Throwable) -> <null: null>
+.=> public boolean java.util.ArrayList.add(java.lang.Object)
+..=> private void java.util.ArrayList.ensureCapacityInternal(int)
+...=> private void java.util.ArrayList.ensureExplicitCapacity(int)
+...<= private void java.util.ArrayList.ensureExplicitCapacity(int) -> <null: null>
+..<= private void java.util.ArrayList.ensureCapacityInternal(int) -> <null: null>
+fibonacci(-19) -> java.lang.Error: Bad argument: -19 < 0
+ at art.Test988.fibonacci(Test988.java:229)
+ at art.Test988$RecurOp.applyAsInt(Test988.java:224)
+ at art.Test988.doFibTest(Test988.java:295)
+ at art.Test988.run(Test988.java:266)
+ at Main.main(Main.java:19)
+
+.<= public boolean java.util.ArrayList.add(java.lang.Object) -> <class java.lang.Boolean: true>
+<= public static void art.Test988.doFibTest(int,java.util.function.IntUnaryOperator) -> <null: null>
+=> public static native java.lang.Thread java.lang.Thread.currentThread()
+<= public static native java.lang.Thread java.lang.Thread.currentThread() -> <class java.lang.Thread: <non-deterministic>>
+=> public static native void art.Trace.disableMethodTracing(java.lang.Thread)
diff --git a/test/988-method-trace/info.txt b/test/988-method-trace/info.txt
new file mode 100644
index 0000000000..f0a200dc18
--- /dev/null
+++ b/test/988-method-trace/info.txt
@@ -0,0 +1,15 @@
+Tests method tracing in JVMTI
+
+This test is sensitive to the internal implementations of:
+ * java.lang.Error
+ * java.lang.Integer
+ * java.lang.Math
+ * java.lang.String
+ * java.lang.System
+ * java.util.ArrayList
+ * java.util.Arrays
+  * java.lang.StringBuilder
+ * all super-classes and super-interfaces of the above types.
+
+Changes to the internal implementation of these classes might (or might not)
+change the output of this test.
diff --git a/test/988-method-trace/run b/test/988-method-trace/run
new file mode 100755
index 0000000000..51875a7e86
--- /dev/null
+++ b/test/988-method-trace/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run the test with the JVMTI agent enabled.
+./default-run "$@" --jvmti
diff --git a/test/988-method-trace/src/Main.java b/test/988-method-trace/src/Main.java
new file mode 100644
index 0000000000..9dd1142bb6
--- /dev/null
+++ b/test/988-method-trace/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test988.run();
+ }
+}
diff --git a/test/988-method-trace/src/art/Test988.java b/test/988-method-trace/src/art/Test988.java
new file mode 100644
index 0000000000..37ff136b6c
--- /dev/null
+++ b/test/988-method-trace/src/art/Test988.java
@@ -0,0 +1,301 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.Arrays;
+import java.lang.reflect.Method;
+import java.util.List;
+import java.util.Set;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.function.IntUnaryOperator;
+import java.util.function.Function;
+
+public class Test988 {
+
+ // Methods with non-deterministic output that should not be printed.
+ static Set<Method> NON_DETERMINISTIC_OUTPUT_METHODS = new HashSet<>();
+
+ static {
+ try {
+ NON_DETERMINISTIC_OUTPUT_METHODS.add(
+ Throwable.class.getDeclaredMethod("nativeFillInStackTrace"));
+ } catch (Exception e) {}
+ try {
+ NON_DETERMINISTIC_OUTPUT_METHODS.add(Thread.class.getDeclaredMethod("currentThread"));
+ } catch (Exception e) {}
+ }
+
+ static interface Printable {
+ public void Print();
+ }
+
+ static final class MethodEntry implements Printable {
+ private Object m;
+ private int cnt;
+ public MethodEntry(Object m, int cnt) {
+ this.m = m;
+ this.cnt = cnt;
+ }
+ @Override
+ public void Print() {
+ System.out.println(whitespace(cnt) + "=> " + m);
+ }
+ }
+
+ private static String genericToString(Object val) {
+ if (val == null) {
+ return "null";
+ } else if (val.getClass().isArray()) {
+ return arrayToString(val);
+ } else if (val instanceof Throwable) {
+ StringWriter w = new StringWriter();
+ ((Throwable) val).printStackTrace(new PrintWriter(w));
+ return w.toString();
+ } else {
+ return val.toString();
+ }
+ }
+
+ private static String charArrayToString(char[] src) {
+ String[] res = new String[src.length];
+ for (int i = 0; i < src.length; i++) {
+ if (Character.isISOControl(src[i])) {
+ res[i] = Character.getName(src[i]);
+ } else {
+ res[i] = Character.toString(src[i]);
+ }
+ }
+ return Arrays.toString(res);
+ }
+
+ private static String arrayToString(Object val) {
+ Class<?> klass = val.getClass();
+ if ((new Object[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString(
+ Arrays.stream((Object[])val).map(new Function<Object, String>() {
+ public String apply(Object o) {
+ return Test988.genericToString(o);
+ }
+ }).toArray());
+ } else if ((new byte[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((byte[])val);
+ } else if ((new char[0]).getClass().isAssignableFrom(klass)) {
+ return charArrayToString((char[])val);
+ } else if ((new short[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((short[])val);
+ } else if ((new int[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((int[])val);
+ } else if ((new long[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((long[])val);
+ } else if ((new float[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((float[])val);
+ } else if ((new double[0]).getClass().isAssignableFrom(klass)) {
+ return Arrays.toString((double[])val);
+ } else {
+ throw new Error("Unknown type " + klass);
+ }
+ }
+
+ static final class MethodReturn implements Printable {
+ private Object m;
+ private Object val;
+ private int cnt;
+ public MethodReturn(Object m, Object val, int cnt) {
+ this.m = m;
+ this.val = val;
+ this.cnt = cnt;
+ }
+ @Override
+ public void Print() {
+ String print;
+ if (NON_DETERMINISTIC_OUTPUT_METHODS.contains(m)) {
+ print = "<non-deterministic>";
+ } else {
+ print = genericToString(val);
+ }
+ Class<?> klass = null;
+ if (val != null) {
+ klass = val.getClass();
+ }
+ System.out.println(
+ whitespace(cnt) + "<= " + m + " -> <" + klass + ": " + print + ">");
+ }
+ }
+
+ static final class MethodThrownThrough implements Printable {
+ private Object m;
+ private int cnt;
+ public MethodThrownThrough(Object m, int cnt) {
+ this.m = m;
+ this.cnt = cnt;
+ }
+ @Override
+ public void Print() {
+ System.out.println(whitespace(cnt) + "<= " + m + " EXCEPTION");
+ }
+ }
+
+ private static String whitespace(int n) {
+ String out = "";
+ while (n > 0) {
+ n--;
+ out += ".";
+ }
+ return out;
+ }
+
+ static final class FibThrow implements Printable {
+ private String format;
+ private int arg;
+ private Throwable res;
+ public FibThrow(String format, int arg, Throwable res) {
+ this.format = format;
+ this.arg = arg;
+ this.res = res;
+ }
+
+ @Override
+ public void Print() {
+ System.out.printf(format, arg, genericToString(res));
+ }
+ }
+
+ static final class FibResult implements Printable {
+ private String format;
+ private int arg;
+ private int res;
+ public FibResult(String format, int arg, int res) {
+ this.format = format;
+ this.arg = arg;
+ this.res = res;
+ }
+
+ @Override
+ public void Print() {
+ System.out.printf(format, arg, res);
+ }
+ }
+
+ private static List<Printable> results = new ArrayList<>();
+ private static int cnt = 1;
+
+ // Iterative version
+ static final class IterOp implements IntUnaryOperator {
+ public int applyAsInt(int x) {
+ return iter_fibonacci(x);
+ }
+ }
+ static int iter_fibonacci(int n) {
+ if (n < 0) {
+ throw new Error("Bad argument: " + n + " < 0");
+ } else if (n == 0) {
+ return 0;
+ }
+ int x = 1;
+ int y = 1;
+ for (int i = 3; i <= n; i++) {
+ int z = x + y;
+ x = y;
+ y = z;
+ }
+ return y;
+ }
+
+ // Recursive version
+ static final class RecurOp implements IntUnaryOperator {
+ public int applyAsInt(int x) {
+ return fibonacci(x);
+ }
+ }
+ static int fibonacci(int n) {
+ if (n < 0) {
+ throw new Error("Bad argument: " + n + " < 0");
+ } else if ((n == 0) || (n == 1)) {
+ return n;
+ } else {
+ return fibonacci(n - 1) + (fibonacci(n - 2));
+ }
+ }
+
+ public static void notifyMethodEntry(Object m) {
+ // Called by native code when a method is entered. This method is ignored by the native
+ // entry and exit hooks.
+ results.add(new MethodEntry(m, cnt));
+ cnt++;
+ }
+
+ public static void notifyMethodExit(Object m, boolean exception, Object result) {
+ cnt--;
+ if (exception) {
+ results.add(new MethodThrownThrough(m, cnt));
+ } else {
+ results.add(new MethodReturn(m, result, cnt));
+ }
+ }
+
+ public static void run() throws Exception {
+ // Call this here so it is linked. It doesn't actually do anything here.
+ loadAllClasses();
+ Trace.disableMethodTracing(Thread.currentThread());
+ Trace.enableMethodTracing(
+ Test988.class,
+ Test988.class.getDeclaredMethod("notifyMethodEntry", Object.class),
+ Test988.class.getDeclaredMethod(
+ "notifyMethodExit", Object.class, Boolean.TYPE, Object.class),
+ Thread.currentThread());
+ doFibTest(30, new IterOp());
+ doFibTest(5, new RecurOp());
+ doFibTest(-19, new IterOp());
+ doFibTest(-19, new RecurOp());
+ // Turn off method tracing so we don't have to deal with print internals.
+ Trace.disableMethodTracing(Thread.currentThread());
+ printResults();
+ }
+
+ // This ensures that all classes we touch are loaded before we start recording traces. This
+ // eliminates a major source of divergence between the RI and ART.
+ public static void loadAllClasses() {
+ MethodThrownThrough.class.toString();
+ MethodEntry.class.toString();
+ MethodReturn.class.toString();
+ FibResult.class.toString();
+ FibThrow.class.toString();
+ Printable.class.toString();
+ ArrayList.class.toString();
+ RecurOp.class.toString();
+ IterOp.class.toString();
+ StringBuilder.class.toString();
+ }
+
+ public static void printResults() {
+ for (Printable p : results) {
+ p.Print();
+ }
+ }
+
+ public static void doFibTest(int x, IntUnaryOperator op) {
+ try {
+ int y = op.applyAsInt(x);
+ results.add(new FibResult("fibonacci(%d)=%d\n", x, y));
+ } catch (Throwable t) {
+ results.add(new FibThrow("fibonacci(%d) -> %s\n", x, t));
+ }
+ }
+}
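
The Test988 harness above buffers Printable events while tracing is live and replays them only after Trace.disableMethodTracing(), so the println machinery never shows up in the recorded trace. A minimal sketch of that record-then-replay pattern, stripped of the JVMTI wiring, follows; the TraceBufferSketch class and its helper names are illustrative only and are not part of this patch.

    // Minimal, illustrative sketch of the record-then-replay pattern used by Test988.
    // Class and method names here are invented for illustration.
    import java.util.ArrayList;
    import java.util.List;

    class TraceBufferSketch {
      interface Printable { void print(); }

      private static final List<Printable> results = new ArrayList<>();
      private static int depth = 0;

      static void notifyEntry(String method) {
        final int d = depth++;
        results.add(() -> System.out.println(dots(d) + "=> " + method));
      }

      static void notifyExit(String method, Object value) {
        final int d = --depth;
        results.add(() -> System.out.println(dots(d) + "<= " + method + " -> " + value));
      }

      private static String dots(int n) {
        StringBuilder out = new StringBuilder();
        for (int i = 0; i < n; i++) {
          out.append('.');
        }
        return out.toString();
      }

      public static void main(String[] args) {
        notifyEntry("fibonacci(2)");
        notifyEntry("fibonacci(1)");
        notifyExit("fibonacci(1)", 1);
        notifyExit("fibonacci(2)", 1);
        // Only now is anything printed, mirroring printResults() running after
        // Trace.disableMethodTracing(...) in Test988.run().
        results.forEach(Printable::print);
      }
    }
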
diff --git a/test/988-method-trace/src/art/Trace.java b/test/988-method-trace/src/art/Trace.java
new file mode 100644
index 0000000000..3370996df3
--- /dev/null
+++ b/test/988-method-trace/src/art/Trace.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Method;
+
+public class Trace {
+ public static native void enableMethodTracing(
+ Class<?> methodClass, Method entryMethod, Method exitMethod, Thread thr);
+ public static native void disableMethodTracing(Thread thr);
+}
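
The natives above are what these tests bind when their run scripts pass --jvmti. A usage sketch modeled on Test988.run() earlier in this patch is shown below; TraceUsageSketch and its empty hook bodies are illustrative assumptions, not part of the patch.

    // Illustrative only: wiring entry/exit hooks through the Trace natives above,
    // modeled on Test988.run(). Assumes it lives in the same art package and that
    // the test is run with --jvmti so the natives are registered.
    package art;

    public class TraceUsageSketch {
      public static void notifyMethodEntry(Object method) { /* record entry */ }
      public static void notifyMethodExit(Object method, boolean exception, Object result) {
        /* record exit */
      }

      public static void main(String[] args) throws Exception {
        Trace.enableMethodTracing(
            TraceUsageSketch.class,
            TraceUsageSketch.class.getDeclaredMethod("notifyMethodEntry", Object.class),
            TraceUsageSketch.class.getDeclaredMethod(
                "notifyMethodExit", Object.class, Boolean.TYPE, Object.class),
            Thread.currentThread());  // hooks fire only for this thread
        try {
          // ... run the code whose method entries and exits should be observed ...
        } finally {
          Trace.disableMethodTracing(Thread.currentThread());
        }
      }
    }
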
diff --git a/test/988-redefine-use-after-free/expected.txt b/test/988-redefine-use-after-free/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/988-redefine-use-after-free/expected.txt
diff --git a/test/988-redefine-use-after-free/info.txt b/test/988-redefine-use-after-free/info.txt
new file mode 100644
index 0000000000..2b683dd75e
--- /dev/null
+++ b/test/988-redefine-use-after-free/info.txt
@@ -0,0 +1,13 @@
+Regression test for b/62237378
+
+It was possible for the JVMTI class redefinition to encounter a use-after-free
+bug if there had been an attempted redefinition that failed due to a
+verification error in the same class loader. Actually encountering the bug
+required that a later redefinition happen to get the same native pointer for its
+dex-file as the failed redefinition.
+
+Hitting this use-after-free can cause many strange outcomes, from CHECK failures
+to segfaults to incorrect redefinition failures. For example, on buggy builds
+this test fails a DCHECK on debug builds, segfaults on x86_64 hosts, and causes
+the redefinition of LDexCacheSmash$Transform; to erroneously fail with
+JVMTI_ERROR_FAILS_VERIFICATION on 32-bit hosts.
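
Concretely, the trigger pattern is: attempt a redefinition that is expected to fail verification, then redefine another class from the same class loader and require it to succeed. The condensed sketch below is distilled from DexCacheSmash.run() further down in this patch and is shown here only to make the description above concrete.

    // Condensed from DexCacheSmash.run() later in this patch; TRANSFORM2_INVALID and
    // TRANSFORM_INITIAL are the CommonClassDefinitions that test builds from
    // base64-encoded class/dex bytes.
    public static void run() throws Exception {
      try {
        // A redefinition that must fail verification in this class loader.
        Redefinition.doMultiClassRedefinition(TRANSFORM2_INVALID);
      } catch (Exception e) {
        if (!e.getMessage().endsWith("JVMTI_ERROR_FAILS_VERIFICATION")) {
          throw new Error("Expected failure due to JVMTI_ERROR_FAILS_VERIFICATION", e);
        }
      }
      // A later redefinition in the same loader; on buggy builds this could reuse the
      // freed native dex-file pointer and misbehave in the ways described above.
      Redefinition.doMultiClassRedefinition(TRANSFORM_INITIAL);
    }
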
diff --git a/test/988-redefine-use-after-free/run b/test/988-redefine-use-after-free/run
new file mode 100755
index 0000000000..c6e62ae6cd
--- /dev/null
+++ b/test/988-redefine-use-after-free/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/988-redefine-use-after-free/src-ex/DexCacheSmash.java b/test/988-redefine-use-after-free/src-ex/DexCacheSmash.java
new file mode 100644
index 0000000000..2193a631cd
--- /dev/null
+++ b/test/988-redefine-use-after-free/src-ex/DexCacheSmash.java
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import art.Redefinition;
+import java.util.Base64;
+
+public class DexCacheSmash {
+ static class Transform {
+ public void foo() {}
+ public void bar() {}
+ public String getId() {
+ return "TRANSFORM_INITIAL";
+ }
+ }
+
+ static class Transform2 {
+ public String getId() {
+ return "TRANSFORM2_INITIAL";
+ }
+ }
+
+ /**
+ * A base64 encoding of the dex/class file of the Transform class above.
+ */
+ static final Redefinition.CommonClassDefinition TRANSFORM_INITIAL =
+ new Redefinition.CommonClassDefinition(Transform.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAFwoABAAPCAAQBwASBwAVAQAGPGluaXQ+AQADKClWAQAEQ29kZQEAD0xpbmVOdW1i" +
+ "ZXJUYWJsZQEAA2ZvbwEAA2JhcgEABWdldElkAQAUKClMamF2YS9sYW5nL1N0cmluZzsBAApTb3Vy" +
+ "Y2VGaWxlAQASRGV4Q2FjaGVTbWFzaC5qYXZhDAAFAAYBABFUUkFOU0ZPUk1fSU5JVElBTAcAFgEA" +
+ "F0RleENhY2hlU21hc2gkVHJhbnNmb3JtAQAJVHJhbnNmb3JtAQAMSW5uZXJDbGFzc2VzAQAQamF2" +
+ "YS9sYW5nL09iamVjdAEADURleENhY2hlU21hc2gAIAADAAQAAAAAAAQAAAAFAAYAAQAHAAAAHQAB" +
+ "AAEAAAAFKrcAAbEAAAABAAgAAAAGAAEAAAATAAEACQAGAAEABwAAABkAAAABAAAAAbEAAAABAAgA" +
+ "AAAGAAEAAAAUAAEACgAGAAEABwAAABkAAAABAAAAAbEAAAABAAgAAAAGAAEAAAAVAAEACwAMAAEA" +
+ "BwAAABsAAQABAAAAAxICsAAAAAEACAAAAAYAAQAAABcAAgANAAAAAgAOABQAAAAKAAEAAwARABMA" +
+ "CA=="),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQDhg9CfghG1SRlLClguRuFYsqihr4F7NsGQAwAAcAAAAHhWNBIAAAAAAAAAAOQCAAAS" +
+ "AAAAcAAAAAcAAAC4AAAAAgAAANQAAAAAAAAAAAAAAAUAAADsAAAAAQAAABQBAABcAgAANAEAAKgB" +
+ "AACwAQAAxAEAAMcBAADiAQAA8wEAABcCAAA3AgAASwIAAF8CAAByAgAAfQIAAIACAACNAgAAkgIA" +
+ "AJcCAACeAgAApAIAAAMAAAAEAAAABQAAAAYAAAAHAAAACAAAAAsAAAACAAAABQAAAAAAAAALAAAA" +
+ "BgAAAAAAAAAAAAEAAAAAAAAAAQANAAAAAAABAA4AAAAAAAAADwAAAAQAAQAAAAAAAAAAAAAAAAAE" +
+ "AAAAAAAAAAEAAACYAQAAzgIAAAAAAAACAAAAvwIAAMUCAAABAAEAAQAAAKsCAAAEAAAAcBAEAAAA" +
+ "DgABAAEAAAAAALACAAABAAAADgAAAAEAAQAAAAAAtQIAAAEAAAAOAAAAAgABAAAAAAC6AgAAAwAA" +
+ "ABoACQARAAAANAEAAAAAAAAAAAAAAAAAAAY8aW5pdD4AEkRleENhY2hlU21hc2guamF2YQABTAAZ" +
+ "TERleENhY2hlU21hc2gkVHJhbnNmb3JtOwAPTERleENhY2hlU21hc2g7ACJMZGFsdmlrL2Fubm90" +
+ "YXRpb24vRW5jbG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90YXRpb24vSW5uZXJDbGFzczsAEkxq" +
+ "YXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABFUUkFOU0ZPUk1fSU5JVElBTAAJ" +
+ "VHJhbnNmb3JtAAFWAAthY2Nlc3NGbGFncwADYmFyAANmb28ABWdldElkAARuYW1lAAV2YWx1ZQAT" +
+ "AAcOABUABw4AFAAHDgAXAAcOAAICAREYAQIDAgwECBAXCgAAAQMAgIAEwAIBAdgCAQHsAgEBgAMO" +
+ "AAAAAAAAAAEAAAAAAAAAAQAAABIAAABwAAAAAgAAAAcAAAC4AAAAAwAAAAIAAADUAAAABQAAAAUA" +
+ "AADsAAAABgAAAAEAAAAUAQAAAxAAAAEAAAA0AQAAASAAAAQAAABAAQAABiAAAAEAAACYAQAAAiAA" +
+ "ABIAAACoAQAAAyAAAAQAAACrAgAABCAAAAIAAAC/AgAAACAAAAEAAADOAgAAABAAAAEAAADkAgAA"));
+
+ /**
+ * A base64 encoding of the following (invalid) class.
+ *
+ * .class LDexCacheSmash$Transform2;
+ * .super Ljava/lang/Object;
+ * .source "DexCacheSmash.java"
+ *
+ * # annotations
+ * .annotation system Ldalvik/annotation/EnclosingClass;
+ * value = LDexCacheSmash;
+ * .end annotation
+ *
+ * .annotation system Ldalvik/annotation/InnerClass;
+ * accessFlags = 0x8
+ * name = "Transform2"
+ * .end annotation
+ *
+ *
+ * # direct methods
+ * .method constructor <init>()V
+ * .registers 1
+ *
+ * .prologue
+ * .line 26
+ * invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ *
+ * return-void
+ * .end method
+ *
+ *
+ * # virtual methods
+ * .method public getId()Ljava/lang/String;
+ * .registers 2
+ *
+ * .prologue
+ * .line 28
+ * # NB Fails verification due to this function not returning a String.
+ * return-void
+ * .end method
+ */
+ static final Redefinition.CommonClassDefinition TRANSFORM2_INVALID =
+ new Redefinition.CommonClassDefinition(Transform2.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAEwcAEgcAEQEABjxpbml0PgEAAygpVgEABENvZGUKAAIAEAEAD0xpbmVOdW1iZXJU" +
+ "YWJsZQEABWdldElkAQAUKClMamF2YS9sYW5nL1N0cmluZzsBAApTb3VyY2VGaWxlAQASRGV4Q2Fj" +
+ "aGVTbWFzaC5qYXZhAQAMSW5uZXJDbGFzc2VzBwAPAQAKVHJhbnNmb3JtMgEADURleENhY2hlU21h" +
+ "c2gMAAMABAEAEGphdmEvbGFuZy9PYmplY3QBABhEZXhDYWNoZVNtYXNoJFRyYW5zZm9ybTIAIAAB" +
+ "AAIAAAAAAAIAAAADAAQAAQAFAAAAHQABAAEAAAAFKrcABrEAAAABAAcAAAAGAAEAAAAaAAEACAAJ" +
+ "AAEABQAAABkAAQABAAAAAbEAAAABAAcAAAAGAAEAAAAcAAIACgAAAAIACwAMAAAACgABAAEADQAO" +
+ "AAg="),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQCFcegr6Ns+I7iEF4uLRkUX4yGrLhP6soEgAwAAcAAAAHhWNBIAAAAAAAAAAHQCAAAP" +
+ "AAAAcAAAAAcAAACsAAAAAgAAAMgAAAAAAAAAAAAAAAMAAADgAAAAAQAAAPgAAAAIAgAAGAEAABgB" +
+ "AAAgAQAANAEAADcBAABTAQAAZAEAAIgBAACoAQAAvAEAANABAADcAQAA3wEAAOwBAADzAQAA+QEA" +
+ "AAMAAAAEAAAABQAAAAYAAAAHAAAACAAAAAoAAAACAAAABQAAAAAAAAAKAAAABgAAAAAAAAAAAAEA" +
+ "AAAAAAAAAAAMAAAABAABAAAAAAAAAAAAAAAAAAQAAAAAAAAAAQAAACACAABmAgAAAAAAAAY8aW5p" +
+ "dD4AEkRleENhY2hlU21hc2guamF2YQABTAAaTERleENhY2hlU21hc2gkVHJhbnNmb3JtMjsAD0xE" +
+ "ZXhDYWNoZVNtYXNoOwAiTGRhbHZpay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZp" +
+ "ay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcv" +
+ "U3RyaW5nOwAKVHJhbnNmb3JtMgABVgALYWNjZXNzRmxhZ3MABWdldElkAARuYW1lAAV2YWx1ZQAC" +
+ "AwILBAgNFwkCAgEOGAEAAAAAAAIAAAAJAgAAAAIAABQCAAAAAAAAAAAAAAAAAAAaAAcOABwABw4A" +
+ "AAABAAEAAQAAADACAAAEAAAAcBACAAAADgACAAEAAAAAADUCAAABAAAADgAAAAEBAICABLwEAQHU" +
+ "BA4AAAAAAAAAAQAAAAAAAAABAAAADwAAAHAAAAACAAAABwAAAKwAAAADAAAAAgAAAMgAAAAFAAAA" +
+ "AwAAAOAAAAAGAAAAAQAAAPgAAAACIAAADwAAABgBAAAEIAAAAgAAAAACAAADEAAAAgAAABACAAAG" +
+ "IAAAAQAAACACAAADIAAAAgAAADACAAABIAAAAgAAADwCAAAAIAAAAQAAAGYCAAAAEAAAAQAAAHQC" +
+ "AAA="));
+
+ public static void run() throws Exception {
+ try {
+ Redefinition.doMultiClassRedefinition(TRANSFORM2_INVALID);
+ } catch (Exception e) {
+ if (!e.getMessage().endsWith("JVMTI_ERROR_FAILS_VERIFICATION")) {
+ throw new Error(
+ "Unexpected error: Expected failure due to JVMTI_ERROR_FAILS_VERIFICATION", e);
+ }
+ }
+ // Doing this redefinition after a redefinition that failed due to FAILS_VERIFICATION could
+ // cause the redefinition code to use Transform2's DexCache after it has been freed, if the
+ // native pointer of the art::DexFile created for the Transform redefinition happens to alias
+ // the one created for Transform2's failed redefinition.
+ //
+ // Due to the order of checks performed by the redefinition code, FAILS_VERIFICATION is the
+ // only failure mode that can cause use-after-frees in this way.
+ //
+ // This should never throw any exceptions (except perhaps OOME in very strange circumstances).
+ Redefinition.doMultiClassRedefinition(TRANSFORM_INITIAL);
+ }
+}
diff --git a/test/988-redefine-use-after-free/src-ex/art/Redefinition.java b/test/988-redefine-use-after-free/src-ex/art/Redefinition.java
new file mode 100644
index 0000000000..56d2938a01
--- /dev/null
+++ b/test/988-redefine-use-after-free/src-ex/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+ public static final class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
+
+ public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+ }
+
+ // A set of possible test configurations. Tests should set this if they need to.
+ // This must be kept in sync with the defines in ti-agent/common_helper.cc
+ public static enum Config {
+ COMMON_REDEFINE(0),
+ COMMON_RETRANSFORM(1),
+ COMMON_TRANSFORM(2);
+
+ private final int val;
+ private Config(int val) {
+ this.val = val;
+ }
+ }
+
+ public static void setTestConfiguration(Config type) {
+ nativeSetTestConfiguration(type.val);
+ }
+
+ private static native void nativeSetTestConfiguration(int type);
+
+ // Transforms the class
+ public static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+ for (CommonClassDefinition d : defs) {
+ addCommonTransformationResult(d.target.getCanonicalName(),
+ d.class_file_bytes,
+ d.dex_file_bytes);
+ }
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+ public static native void doCommonClassRetransformation(Class<?>... target);
+ public static native void setPopRetransformations(boolean pop);
+ public static native void popTransformationFor(String name);
+ public static native void enableCommonRetransformation(boolean enable);
+ public static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/988-redefine-use-after-free/src/Main.java b/test/988-redefine-use-after-free/src/Main.java
new file mode 100644
index 0000000000..d88c471a07
--- /dev/null
+++ b/test/988-redefine-use-after-free/src/Main.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.*;
+
+public class Main {
+ public static final String TEST_NAME = "988-redefine-use-after-free";
+ public static final int REPS = 1000;
+ public static final int STEP = 100;
+
+ public static void main(String[] args) throws Exception {
+ for (int i = 0; i < REPS; i += STEP) {
+ runSeveralTimes(STEP);
+ }
+ }
+
+ public static ClassLoader getClassLoaderFor(String location) throws Exception {
+ try {
+ Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
+ Constructor<?> ctor = class_loader_class.getConstructor(String.class, ClassLoader.class);
+ return (ClassLoader)ctor.newInstance(location + "/" + TEST_NAME + "-ex.jar",
+ Main.class.getClassLoader());
+ } catch (ClassNotFoundException e) {
+ // Running on RI. Use URLClassLoader.
+ return new java.net.URLClassLoader(
+ new java.net.URL[] { new java.net.URL("file://" + location + "/classes-ex/") });
+ }
+ }
+
+ // Run the redefinition several times on a single class loader to try to trigger the
+ // use-after-free bug b/62237378.
+ public static void runSeveralTimes(int times) throws Exception {
+ ClassLoader c = getClassLoaderFor(System.getenv("DEX_LOCATION"));
+
+ Class<?> klass = (Class<?>)c.loadClass("DexCacheSmash");
+ Method m = klass.getDeclaredMethod("run");
+ for (int i = 0 ; i < times; i++) {
+ m.invoke(null);
+ }
+ }
+}
diff --git a/test/989-method-trace-throw/expected.txt b/test/989-method-trace-throw/expected.txt
new file mode 100644
index 0000000000..0911bc35e8
--- /dev/null
+++ b/test/989-method-trace-throw/expected.txt
@@ -0,0 +1,188 @@
+Normal: Entering public static void art.Test989.doNothing()
+Normal: Leaving public static void art.Test989.doNothing() returned null
+Received no exception as expected for test[class art.Test989$NormalTracer, class art.Test989$doNothingClass].
+Normal: Entering public static native void art.Test989.doNothingNative()
+Normal: Leaving public static native void art.Test989.doNothingNative() returned null
+Received no exception as expected for test[class art.Test989$NormalTracer, class art.Test989$doNothingNativeClass].
+Normal: Entering public static void art.Test989.throwA()
+Normal: Leaving public static void art.Test989.throwA() returned <exception>
+Received expected error for test[class art.Test989$NormalTracer, class art.Test989$throwAClass] - art.Test989$ErrorA: Throwing Error A
+Normal: Entering public static native void art.Test989.throwANative()
+Normal: Leaving public static native void art.Test989.throwANative() returned <exception>
+Received expected error for test[class art.Test989$NormalTracer, class art.Test989$throwANativeClass] - art.Test989$ErrorA: Throwing Error A
+Normal: Entering public static java.lang.Object art.Test989.returnValue()
+Normal: Leaving public static java.lang.Object art.Test989.returnValue() returned TestObject(0)
+returnValue returned: TestObject(0)
+Received no exception as expected for test[class art.Test989$NormalTracer, class art.Test989$returnValueClass].
+Normal: Entering public static native java.lang.Object art.Test989.returnValueNative()
+Normal: Leaving public static native java.lang.Object art.Test989.returnValueNative() returned TestObject(1)
+returnValueNative returned: TestObject(1)
+Received no exception as expected for test[class art.Test989$NormalTracer, class art.Test989$returnValueNativeClass].
+Normal: Entering public static void art.Test989.acceptValue(java.lang.Object)
+Received TestObject(2)
+Normal: Leaving public static void art.Test989.acceptValue(java.lang.Object) returned null
+Received no exception as expected for test[class art.Test989$NormalTracer, class art.Test989$acceptValueClass].
+Normal: Entering public static native void art.Test989.acceptValueNative(java.lang.Object)
+Received TestObject(3)
+Normal: Leaving public static native void art.Test989.acceptValueNative(java.lang.Object) returned null
+Received no exception as expected for test[class art.Test989$NormalTracer, class art.Test989$acceptValueNativeClass].
+Normal: Entering public static void art.Test989.tryCatchExit()
+Normal: Leaving public static void art.Test989.tryCatchExit() returned null
+Received no exception as expected for test[class art.Test989$NormalTracer, class art.Test989$tryCatchExitClass].
+Normal: Entering public static float art.Test989.returnFloat()
+Normal: Leaving public static float art.Test989.returnFloat() returned 1.618
+returnFloat returned: 1.618
+Received no exception as expected for test[class art.Test989$NormalTracer, class art.Test989$returnFloatClass].
+Normal: Entering public static native float art.Test989.returnFloatNative()
+Normal: Leaving public static native float art.Test989.returnFloatNative() returned 1.618
+returnFloatNative returned: 1.618
+Received no exception as expected for test[class art.Test989$NormalTracer, class art.Test989$returnFloatNativeClass].
+Normal: Entering public static double art.Test989.returnDouble()
+Normal: Leaving public static double art.Test989.returnDouble() returned 3.14159628
+returnDouble returned: 3.14159628
+Received no exception as expected for test[class art.Test989$NormalTracer, class art.Test989$returnDoubleClass].
+Normal: Entering public static native double art.Test989.returnDoubleNative()
+Normal: Leaving public static native double art.Test989.returnDoubleNative() returned 3.14159628
+returnDoubleNative returned: 3.14159628
+Received no exception as expected for test[class art.Test989$NormalTracer, class art.Test989$returnDoubleNativeClass].
+ThrowEnter: Entering public static void art.Test989.doNothing()
+ThrowEnter: Leaving public static void art.Test989.doNothing() returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$doNothingClass] - art.Test989$ErrorB: Throwing error while entering public static void art.Test989.doNothing()
+ThrowEnter: Entering public static native void art.Test989.doNothingNative()
+ThrowEnter: Leaving public static native void art.Test989.doNothingNative() returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$doNothingNativeClass] - art.Test989$ErrorB: Throwing error while entering public static native void art.Test989.doNothingNative()
+ThrowEnter: Entering public static void art.Test989.throwA()
+ThrowEnter: Leaving public static void art.Test989.throwA() returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$throwAClass] - art.Test989$ErrorB: Throwing error while entering public static void art.Test989.throwA()
+ThrowEnter: Entering public static native void art.Test989.throwANative()
+ThrowEnter: Leaving public static native void art.Test989.throwANative() returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$throwANativeClass] - art.Test989$ErrorB: Throwing error while entering public static native void art.Test989.throwANative()
+ThrowEnter: Entering public static java.lang.Object art.Test989.returnValue()
+ThrowEnter: Leaving public static java.lang.Object art.Test989.returnValue() returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$returnValueClass] - art.Test989$ErrorB: Throwing error while entering public static java.lang.Object art.Test989.returnValue()
+ThrowEnter: Entering public static native java.lang.Object art.Test989.returnValueNative()
+ThrowEnter: Leaving public static native java.lang.Object art.Test989.returnValueNative() returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$returnValueNativeClass] - art.Test989$ErrorB: Throwing error while entering public static native java.lang.Object art.Test989.returnValueNative()
+ThrowEnter: Entering public static void art.Test989.acceptValue(java.lang.Object)
+ThrowEnter: Leaving public static void art.Test989.acceptValue(java.lang.Object) returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$acceptValueClass] - art.Test989$ErrorB: Throwing error while entering public static void art.Test989.acceptValue(java.lang.Object)
+ThrowEnter: Entering public static native void art.Test989.acceptValueNative(java.lang.Object)
+ThrowEnter: Leaving public static native void art.Test989.acceptValueNative(java.lang.Object) returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$acceptValueNativeClass] - art.Test989$ErrorB: Throwing error while entering public static native void art.Test989.acceptValueNative(java.lang.Object)
+ThrowEnter: Entering public static void art.Test989.tryCatchExit()
+ThrowEnter: Leaving public static void art.Test989.tryCatchExit() returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$tryCatchExitClass] - art.Test989$ErrorB: Throwing error while entering public static void art.Test989.tryCatchExit()
+ThrowEnter: Entering public static float art.Test989.returnFloat()
+ThrowEnter: Leaving public static float art.Test989.returnFloat() returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$returnFloatClass] - art.Test989$ErrorB: Throwing error while entering public static float art.Test989.returnFloat()
+ThrowEnter: Entering public static native float art.Test989.returnFloatNative()
+ThrowEnter: Leaving public static native float art.Test989.returnFloatNative() returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$returnFloatNativeClass] - art.Test989$ErrorB: Throwing error while entering public static native float art.Test989.returnFloatNative()
+ThrowEnter: Entering public static double art.Test989.returnDouble()
+ThrowEnter: Leaving public static double art.Test989.returnDouble() returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$returnDoubleClass] - art.Test989$ErrorB: Throwing error while entering public static double art.Test989.returnDouble()
+ThrowEnter: Entering public static native double art.Test989.returnDoubleNative()
+ThrowEnter: Leaving public static native double art.Test989.returnDoubleNative() returned <exception>
+Received expected error for test[class art.Test989$ThrowEnterTracer, class art.Test989$returnDoubleNativeClass] - art.Test989$ErrorB: Throwing error while entering public static native double art.Test989.returnDoubleNative()
+ThrowExit: Entering public static void art.Test989.doNothing()
+ThrowExit: Leaving public static void art.Test989.doNothing() returned null
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$doNothingClass] - art.Test989$ErrorB: Throwing error while exit public static void art.Test989.doNothing() returned null
+ThrowExit: Entering public static native void art.Test989.doNothingNative()
+ThrowExit: Leaving public static native void art.Test989.doNothingNative() returned null
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$doNothingNativeClass] - art.Test989$ErrorB: Throwing error while exit public static native void art.Test989.doNothingNative() returned null
+ThrowExit: Entering public static void art.Test989.throwA()
+ThrowExit: Leaving public static void art.Test989.throwA() returned <exception>
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$throwAClass] - art.Test989$ErrorB: Throwing error while exit public static void art.Test989.throwA() returned <exception>
+ThrowExit: Entering public static native void art.Test989.throwANative()
+ThrowExit: Leaving public static native void art.Test989.throwANative() returned <exception>
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$throwANativeClass] - art.Test989$ErrorB: Throwing error while exit public static native void art.Test989.throwANative() returned <exception>
+ThrowExit: Entering public static java.lang.Object art.Test989.returnValue()
+ThrowExit: Leaving public static java.lang.Object art.Test989.returnValue() returned TestObject(7)
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$returnValueClass] - art.Test989$ErrorB: Throwing error while exit public static java.lang.Object art.Test989.returnValue() returned TestObject(7)
+ThrowExit: Entering public static native java.lang.Object art.Test989.returnValueNative()
+ThrowExit: Leaving public static native java.lang.Object art.Test989.returnValueNative() returned TestObject(8)
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$returnValueNativeClass] - art.Test989$ErrorB: Throwing error while exit public static native java.lang.Object art.Test989.returnValueNative() returned TestObject(8)
+ThrowExit: Entering public static void art.Test989.acceptValue(java.lang.Object)
+Received TestObject(9)
+ThrowExit: Leaving public static void art.Test989.acceptValue(java.lang.Object) returned null
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$acceptValueClass] - art.Test989$ErrorB: Throwing error while exit public static void art.Test989.acceptValue(java.lang.Object) returned null
+ThrowExit: Entering public static native void art.Test989.acceptValueNative(java.lang.Object)
+Received TestObject(10)
+ThrowExit: Leaving public static native void art.Test989.acceptValueNative(java.lang.Object) returned null
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$acceptValueNativeClass] - art.Test989$ErrorB: Throwing error while exit public static native void art.Test989.acceptValueNative(java.lang.Object) returned null
+ThrowExit: Entering public static void art.Test989.tryCatchExit()
+ThrowExit: Leaving public static void art.Test989.tryCatchExit() returned null
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$tryCatchExitClass] - art.Test989$ErrorB: Throwing error while exit public static void art.Test989.tryCatchExit() returned null
+ThrowExit: Entering public static float art.Test989.returnFloat()
+ThrowExit: Leaving public static float art.Test989.returnFloat() returned 1.618
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$returnFloatClass] - art.Test989$ErrorB: Throwing error while exit public static float art.Test989.returnFloat() returned 1.618
+ThrowExit: Entering public static native float art.Test989.returnFloatNative()
+ThrowExit: Leaving public static native float art.Test989.returnFloatNative() returned 1.618
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$returnFloatNativeClass] - art.Test989$ErrorB: Throwing error while exit public static native float art.Test989.returnFloatNative() returned 1.618
+ThrowExit: Entering public static double art.Test989.returnDouble()
+ThrowExit: Leaving public static double art.Test989.returnDouble() returned 3.14159628
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$returnDoubleClass] - art.Test989$ErrorB: Throwing error while exit public static double art.Test989.returnDouble() returned 3.14159628
+ThrowExit: Entering public static native double art.Test989.returnDoubleNative()
+ThrowExit: Leaving public static native double art.Test989.returnDoubleNative() returned 3.14159628
+Received expected error for test[class art.Test989$ThrowExitTracer, class art.Test989$returnDoubleNativeClass] - art.Test989$ErrorB: Throwing error while exit public static native double art.Test989.returnDoubleNative() returned 3.14159628
+ThrowBoth: Entering public static void art.Test989.doNothing()
+ThrowBoth: Leaving public static void art.Test989.doNothing() returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$doNothingClass] - art.Test989$ErrorC: Throwing error while exit public static void art.Test989.doNothing() returned <exception>
+ThrowBoth: Entering public static native void art.Test989.doNothingNative()
+ThrowBoth: Leaving public static native void art.Test989.doNothingNative() returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$doNothingNativeClass] - art.Test989$ErrorC: Throwing error while exit public static native void art.Test989.doNothingNative() returned <exception>
+ThrowBoth: Entering public static void art.Test989.throwA()
+ThrowBoth: Leaving public static void art.Test989.throwA() returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$throwAClass] - art.Test989$ErrorC: Throwing error while exit public static void art.Test989.throwA() returned <exception>
+ThrowBoth: Entering public static native void art.Test989.throwANative()
+ThrowBoth: Leaving public static native void art.Test989.throwANative() returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$throwANativeClass] - art.Test989$ErrorC: Throwing error while exit public static native void art.Test989.throwANative() returned <exception>
+ThrowBoth: Entering public static java.lang.Object art.Test989.returnValue()
+ThrowBoth: Leaving public static java.lang.Object art.Test989.returnValue() returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$returnValueClass] - art.Test989$ErrorC: Throwing error while exit public static java.lang.Object art.Test989.returnValue() returned <exception>
+ThrowBoth: Entering public static native java.lang.Object art.Test989.returnValueNative()
+ThrowBoth: Leaving public static native java.lang.Object art.Test989.returnValueNative() returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$returnValueNativeClass] - art.Test989$ErrorC: Throwing error while exit public static native java.lang.Object art.Test989.returnValueNative() returned <exception>
+ThrowBoth: Entering public static void art.Test989.acceptValue(java.lang.Object)
+ThrowBoth: Leaving public static void art.Test989.acceptValue(java.lang.Object) returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$acceptValueClass] - art.Test989$ErrorC: Throwing error while exit public static void art.Test989.acceptValue(java.lang.Object) returned <exception>
+ThrowBoth: Entering public static native void art.Test989.acceptValueNative(java.lang.Object)
+ThrowBoth: Leaving public static native void art.Test989.acceptValueNative(java.lang.Object) returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$acceptValueNativeClass] - art.Test989$ErrorC: Throwing error while exit public static native void art.Test989.acceptValueNative(java.lang.Object) returned <exception>
+ThrowBoth: Entering public static void art.Test989.tryCatchExit()
+ThrowBoth: Leaving public static void art.Test989.tryCatchExit() returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$tryCatchExitClass] - art.Test989$ErrorC: Throwing error while exit public static void art.Test989.tryCatchExit() returned <exception>
+ThrowBoth: Entering public static float art.Test989.returnFloat()
+ThrowBoth: Leaving public static float art.Test989.returnFloat() returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$returnFloatClass] - art.Test989$ErrorC: Throwing error while exit public static float art.Test989.returnFloat() returned <exception>
+ThrowBoth: Entering public static native float art.Test989.returnFloatNative()
+ThrowBoth: Leaving public static native float art.Test989.returnFloatNative() returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$returnFloatNativeClass] - art.Test989$ErrorC: Throwing error while exit public static native float art.Test989.returnFloatNative() returned <exception>
+ThrowBoth: Entering public static double art.Test989.returnDouble()
+ThrowBoth: Leaving public static double art.Test989.returnDouble() returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$returnDoubleClass] - art.Test989$ErrorC: Throwing error while exit public static double art.Test989.returnDouble() returned <exception>
+ThrowBoth: Entering public static native double art.Test989.returnDoubleNative()
+ThrowBoth: Leaving public static native double art.Test989.returnDoubleNative() returned <exception>
+Received expected error for test[class art.Test989$ThrowBothTracer, class art.Test989$returnDoubleNativeClass] - art.Test989$ErrorC: Throwing error while exit public static native double art.Test989.returnDoubleNative() returned <exception>
+Received no exception as expected for test[class art.Test989$ForceGCTracer, class art.Test989$doNothingClass].
+Received no exception as expected for test[class art.Test989$ForceGCTracer, class art.Test989$doNothingNativeClass].
+Received expected error for test[class art.Test989$ForceGCTracer, class art.Test989$throwAClass] - art.Test989$ErrorA: Throwing Error A
+Received expected error for test[class art.Test989$ForceGCTracer, class art.Test989$throwANativeClass] - art.Test989$ErrorA: Throwing Error A
+returnValue returned: TestObject(14)
+Received no exception as expected for test[class art.Test989$ForceGCTracer, class art.Test989$returnValueClass].
+returnValueNative returned: TestObject(15)
+Received no exception as expected for test[class art.Test989$ForceGCTracer, class art.Test989$returnValueNativeClass].
+Received TestObject(16)
+Received no exception as expected for test[class art.Test989$ForceGCTracer, class art.Test989$acceptValueClass].
+Received TestObject(17)
+Received no exception as expected for test[class art.Test989$ForceGCTracer, class art.Test989$acceptValueNativeClass].
+Received no exception as expected for test[class art.Test989$ForceGCTracer, class art.Test989$tryCatchExitClass].
+returnFloat returned: 1.618
+Received no exception as expected for test[class art.Test989$ForceGCTracer, class art.Test989$returnFloatClass].
+returnFloatNative returned: 1.618
+Received no exception as expected for test[class art.Test989$ForceGCTracer, class art.Test989$returnFloatNativeClass].
+returnDouble returned: 3.14159628
+Received no exception as expected for test[class art.Test989$ForceGCTracer, class art.Test989$returnDoubleClass].
+returnDoubleNative returned: 3.14159628
+Received no exception as expected for test[class art.Test989$ForceGCTracer, class art.Test989$returnDoubleNativeClass].
+Finished!
diff --git a/test/989-method-trace-throw/info.txt b/test/989-method-trace-throw/info.txt
new file mode 100644
index 0000000000..f0a200dc18
--- /dev/null
+++ b/test/989-method-trace-throw/info.txt
@@ -0,0 +1,15 @@
+Tests method tracing in JVMTI
+
+This test is sensitive to the internal implementations of:
+ * java.lang.Error
+ * java.lang.Integer
+ * java.lang.Math
+ * java.lang.String
+ * java.lang.System
+ * java.util.ArrayList
+ * java.util.Arrays
+ * java.lang.StringBuilder
+ * all super-classes and super-interfaces of the above types.
+
+Changes to the internal implementation of these classes might (or might not)
+change the output of this test.
diff --git a/test/989-method-trace-throw/method_trace.cc b/test/989-method-trace-throw/method_trace.cc
new file mode 100644
index 0000000000..554784effe
--- /dev/null
+++ b/test/989-method-trace-throw/method_trace.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+#include <memory>
+#include <stdio.h>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+
+// Test infrastructure
+#include "jni_binder.h"
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace Test989StackTraceThrow {
+
+extern "C" JNIEXPORT
+jfloat JNICALL Java_art_Test989_returnFloatNative(JNIEnv* env, jclass klass) {
+ jmethodID targetMethod = env->GetStaticMethodID(klass, "doGetFloat", "()F");
+ return env->CallStaticFloatMethod(klass, targetMethod);
+}
+extern "C" JNIEXPORT
+jdouble JNICALL Java_art_Test989_returnDoubleNative(JNIEnv* env, jclass klass) {
+ jmethodID targetMethod = env->GetStaticMethodID(klass, "doGetDouble", "()D");
+ return env->CallStaticDoubleMethod(klass, targetMethod);
+}
+
+extern "C" JNIEXPORT jobject JNICALL Java_art_Test989_returnValueNative(JNIEnv* env, jclass klass) {
+ jmethodID targetMethod = env->GetStaticMethodID(klass, "mkTestObject", "()Ljava/lang/Object;");
+ return env->CallStaticObjectMethod(klass, targetMethod);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test989_doNothingNative(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ return;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test989_throwANative(JNIEnv* env,
+ jclass klass) {
+ jmethodID targetMethod = env->GetStaticMethodID(klass, "doThrowA", "()V");
+ env->CallStaticVoidMethod(klass, targetMethod);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test989_acceptValueNative(JNIEnv* env,
+ jclass klass,
+ jobject arg) {
+ jmethodID targetMethod = env->GetStaticMethodID(klass, "printObject", "(Ljava/lang/Object;)V");
+ env->CallStaticVoidMethod(klass, targetMethod, arg);
+}
+
+} // namespace Test989StackTraceThrow
+} // namespace art
+
diff --git a/test/989-method-trace-throw/run b/test/989-method-trace-throw/run
new file mode 100755
index 0000000000..51875a7e86
--- /dev/null
+++ b/test/989-method-trace-throw/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run the test with the jvmti agent enabled.
+./default-run "$@" --jvmti
diff --git a/test/989-method-trace-throw/src/Main.java b/test/989-method-trace-throw/src/Main.java
new file mode 100644
index 0000000000..29b9de1027
--- /dev/null
+++ b/test/989-method-trace-throw/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test989.run();
+ }
+}
diff --git a/test/989-method-trace-throw/src/art/Test989.java b/test/989-method-trace-throw/src/art/Test989.java
new file mode 100644
index 0000000000..18421bd08b
--- /dev/null
+++ b/test/989-method-trace-throw/src/art/Test989.java
@@ -0,0 +1,465 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Method;
+import java.util.Set;
+import java.util.HashSet;
+
+public class Test989 {
+ static boolean PRINT_STACK_TRACE = false;
+ static Set<Method> testMethods = new HashSet<>();
+
+ static MethodTracer currentTracer = new MethodTracer() {
+ public void methodEntry(Object o) { return; }
+ public void methodExited(Object o, boolean e, Object r) { return; }
+ };
+
+ private static boolean DISABLE_TRACING = false;
+
+ static {
+ try {
+ testMethods.add(Test989.class.getDeclaredMethod("doNothing"));
+ testMethods.add(Test989.class.getDeclaredMethod("doNothingNative"));
+ testMethods.add(Test989.class.getDeclaredMethod("throwA"));
+ testMethods.add(Test989.class.getDeclaredMethod("throwANative"));
+ testMethods.add(Test989.class.getDeclaredMethod("returnFloat"));
+ testMethods.add(Test989.class.getDeclaredMethod("returnFloatNative"));
+ testMethods.add(Test989.class.getDeclaredMethod("returnDouble"));
+ testMethods.add(Test989.class.getDeclaredMethod("returnDoubleNative"));
+ testMethods.add(Test989.class.getDeclaredMethod("returnValue"));
+ testMethods.add(Test989.class.getDeclaredMethod("returnValueNative"));
+ testMethods.add(Test989.class.getDeclaredMethod("acceptValue", Object.class));
+ testMethods.add(Test989.class.getDeclaredMethod("acceptValueNative", Object.class));
+ testMethods.add(Test989.class.getDeclaredMethod("tryCatchExit"));
+ } catch (Exception e) {
+ throw new Error("Bad static!", e);
+ }
+ }
+
+ // Disables tracing only on the RI. Works around an annoying piece of RI behavior where
+ // throwing an exception in an exit hook causes the exit hook to be re-executed, which
+ // leads to an infinite loop on the RI.
+ private static void disableTraceForRI() {
+ if (!System.getProperty("java.vm.name").equals("Dalvik")) {
+ Trace.disableMethodTracing(Thread.currentThread());
+ }
+ }
+
+ private static String getInfo(Object m, boolean exception, Object result) {
+ String out = m.toString() + " returned ";
+ if (exception) {
+ out += "<exception>";
+ } else {
+ out += result;
+ }
+ return out;
+ }
+
+ public static interface MethodTracer {
+ public void methodEntry(Object m);
+ public void methodExited(Object m, boolean exception, Object result);
+ public default Class<?> entryException() { return null; }
+ public default Class<?> exitException() { return null; }
+ }
+
+ public static class NormalTracer implements MethodTracer {
+ public void methodEntry(Object m) {
+ if (testMethods.contains(m)) {
+ System.out.println("Normal: Entering " + m);
+ }
+ }
+ public void methodExited(Object m, boolean exception, Object result) {
+ if (testMethods.contains(m)) {
+ System.out.println("Normal: Leaving " + getInfo(m, exception, result));
+ }
+ }
+ }
+
+ public static class ThrowEnterTracer implements MethodTracer {
+ public void methodEntry(Object m) {
+ if (testMethods.contains(m)) {
+ System.out.println("ThrowEnter: Entering " + m);
+ throw new ErrorB("Throwing error while entering " + m);
+ }
+ }
+ public void methodExited(Object m, boolean exception, Object result) {
+ if (testMethods.contains(m)) {
+ System.out.println("ThrowEnter: Leaving " + getInfo(m, exception, result));
+ }
+ }
+ public Class<?> entryException() { return ErrorB.class; }
+ }
+
+ public static class ThrowExitTracer implements MethodTracer {
+ public void methodEntry(Object m) {
+ if (testMethods.contains(m)) {
+ System.out.println("ThrowExit: Entering " + m);
+ }
+ }
+ public void methodExited(Object m, boolean exception, Object result) {
+ if (testMethods.contains(m)) {
+ // The RI goes into an infinite loop if we throw exceptions in an ExitHook. See
+ // disableTraceForRI for explanation.
+ disableTraceForRI();
+ System.out.println("ThrowExit: Leaving " + getInfo(m, exception, result));
+ throw new ErrorB("Throwing error while exit " + getInfo(m, exception, result));
+ }
+ }
+ public Class<?> exitException() { return ErrorB.class; }
+ }
+
+ public static class ThrowBothTracer implements MethodTracer {
+ public void methodEntry(Object m) {
+ if (testMethods.contains(m)) {
+ System.out.println("ThrowBoth: Entering " + m);
+ throw new ErrorB("Throwing error while entering " + m);
+ }
+ }
+ public void methodExited(Object m, boolean exception, Object result) {
+ if (testMethods.contains(m)) {
+ // The RI goes into an infinite loop if we throw exceptions in an ExitHook. See
+ // disableTraceForRI for explanation.
+ disableTraceForRI();
+ System.out.println("ThrowBoth: Leaving " + getInfo(m, exception, result));
+ throw new ErrorC("Throwing error while exit " + getInfo(m, exception, result));
+ }
+ }
+ public Class<?> entryException() { return ErrorB.class; }
+ public Class<?> exitException() { return ErrorC.class; }
+ }
+
+ public static class ForceGCTracer implements MethodTracer {
+ public void methodEntry(Object m) {
+ if (System.getProperty("java.vm.name").equals("Dalvik")) {
+ System.gc();
+ }
+ }
+ public void methodExited(Object m, boolean exception, Object result) {
+ if (System.getProperty("java.vm.name").equals("Dalvik")) {
+ System.gc();
+ }
+ }
+ }
+
+ private static void maybeDisableTracing() throws Exception {
+ if (DISABLE_TRACING) {
+ Trace.disableMethodTracing(Thread.currentThread());
+ }
+ }
+
+ public static void baseNotifyMethodEntry(Object o) {
+ currentTracer.methodEntry(o);
+ }
+ public static void baseNotifyMethodExit(Object o, boolean exception, Object res) {
+ currentTracer.methodExited(o, exception, res);
+ }
+
+ private static void setupTracing() throws Exception {
+ Trace.enableMethodTracing(
+ Test989.class,
+ Test989.class.getDeclaredMethod("baseNotifyMethodEntry", Object.class),
+ Test989.class.getDeclaredMethod(
+ "baseNotifyMethodExit", Object.class, Boolean.TYPE, Object.class),
+ Thread.currentThread());
+ }
+ private static void setEntry(MethodTracer type) throws Exception {
+ if (DISABLE_TRACING || !System.getProperty("java.vm.name").equals("Dalvik")) {
+ Trace.disableMethodTracing(Thread.currentThread());
+ setupTracing();
+ }
+ currentTracer = type;
+ }
+
+ private static String testDescription(MethodTracer type, Runnable test) {
+ return "test[" + type.getClass() + ", " + test.getClass() + "]";
+ }
+
+ private static Class<?> getExpectedError(MethodTracer t, MyRunnable r) {
+ if (t.exitException() != null) {
+ return t.exitException();
+ } else if (t.entryException() != null) {
+ return t.entryException();
+ } else {
+ return r.expectedThrow();
+ }
+ }
+
+ private static void doTest(MethodTracer type, MyRunnable test) throws Exception {
+ Class<?> expected = getExpectedError(type, test);
+
+ setEntry(type);
+ try {
+ test.run();
+ // Disabling method tracing just makes this test somewhat faster.
+ maybeDisableTracing();
+ if (expected == null) {
+ System.out.println(
+ "Received no exception as expected for " + testDescription(type, test) + ".");
+ return;
+ }
+ } catch (Error t) {
+ // Disabling method tracing just makes this test somewhat faster.
+ maybeDisableTracing();
+ if (expected == null) {
+ throw new Error("Unexpected error occurred: " + t + " for " + testDescription(type, test), t);
+ } else if (!expected.isInstance(t)) {
+ throw new Error("Expected error of type " + expected + " not " + t +
+ " for " + testDescription(type, test), t);
+ } else {
+ System.out.println(
+ "Received expected error for " + testDescription(type, test) + " - " + t);
+ if (PRINT_STACK_TRACE) {
+ t.printStackTrace();
+ }
+ return;
+ }
+ }
+ System.out.println("Expected an error of type " + expected + " but got no exception for "
+ + testDescription(type, test));
+ // throw new Error("Expected an error of type " + expected + " but got no exception for "
+ // + testDescription(type, test));
+ }
+
+ public static interface MyRunnable extends Runnable {
+ public default Class<?> expectedThrow() {
+ return null;
+ }
+ }
+
+ public static void run() throws Exception {
+ MyRunnable[] testCases = new MyRunnable[] {
+ new doNothingClass(),
+ new doNothingNativeClass(),
+ new throwAClass(),
+ new throwANativeClass(),
+ new returnValueClass(),
+ new returnValueNativeClass(),
+ new acceptValueClass(),
+ new acceptValueNativeClass(),
+ new tryCatchExitClass(),
+ new returnFloatClass(),
+ new returnFloatNativeClass(),
+ new returnDoubleClass(),
+ new returnDoubleNativeClass(),
+ };
+ MethodTracer[] tracers = new MethodTracer[] {
+ new NormalTracer(),
+ new ThrowEnterTracer(),
+ new ThrowExitTracer(),
+ new ThrowBothTracer(),
+ new ForceGCTracer(),
+ };
+
+ setupTracing();
+ for (MethodTracer t : tracers) {
+ for (MyRunnable r : testCases) {
+ doTest(t, r);
+ }
+ }
+
+ maybeDisableTracing();
+ System.out.println("Finished!");
+ Trace.disableMethodTracing(Thread.currentThread());
+ }
+
+ private static final class throwAClass implements MyRunnable {
+ public void run() {
+ throwA();
+ }
+ @Override
+ public Class<?> expectedThrow() {
+ return ErrorA.class;
+ }
+ }
+
+ private static final class throwANativeClass implements MyRunnable {
+ public void run() {
+ throwANative();
+ }
+ @Override
+ public Class<?> expectedThrow() {
+ return ErrorA.class;
+ }
+ }
+
+ private static final class tryCatchExitClass implements MyRunnable {
+ public void run() {
+ tryCatchExit();
+ }
+ }
+
+ private static final class doNothingClass implements MyRunnable {
+ public void run() {
+ doNothing();
+ }
+ }
+
+ private static final class doNothingNativeClass implements MyRunnable {
+ public void run() {
+ doNothingNative();
+ }
+ }
+
+ private static final class acceptValueClass implements MyRunnable {
+ public void run() {
+ acceptValue(mkTestObject());
+ }
+ }
+
+ private static final class acceptValueNativeClass implements MyRunnable {
+ public void run() {
+ acceptValueNative(mkTestObject());
+ }
+ }
+
+ private static final class returnValueClass implements MyRunnable {
+ public void run() {
+ Object o = returnValue();
+ System.out.println("returnValue returned: " + o);
+ }
+ }
+
+ private static final class returnValueNativeClass implements MyRunnable {
+ public void run() {
+ Object o = returnValueNative();
+ System.out.println("returnValueNative returned: " + o);
+ }
+ }
+
+ private static final class returnFloatClass implements MyRunnable {
+ public void run() {
+ float d = returnFloat();
+ System.out.println("returnFloat returned: " + d);
+ }
+ }
+
+ private static final class returnFloatNativeClass implements MyRunnable {
+ public void run() {
+ float d = returnFloatNative();
+ System.out.println("returnFloatNative returned: " + d);
+ }
+ }
+
+ private static final class returnDoubleClass implements MyRunnable {
+ public void run() {
+ double d = returnDouble();
+ System.out.println("returnDouble returned: " + d);
+ }
+ }
+
+ private static final class returnDoubleNativeClass implements MyRunnable {
+ public void run() {
+ double d = returnDoubleNative();
+ System.out.println("returnDoubleNative returned: " + d);
+ }
+ }
+
+ private static class ErrorA extends Error {
+ private static final long serialVersionUID = 0;
+ public ErrorA(String s) { super(s); }
+ }
+
+ private static class ErrorB extends Error {
+ private static final long serialVersionUID = 1;
+ public ErrorB(String s) { super(s); }
+ }
+
+ private static class ErrorC extends Error {
+ private static final long serialVersionUID = 2;
+ public ErrorC(String s) { super(s); }
+ }
+
+ // Does nothing.
+ public static void doNothing() { }
+
+ public static void tryCatchExit() {
+ try {
+ Object o = mkTestObject();
+ return;
+ } catch (ErrorB b) {
+ System.out.println("ERROR: Caught " + b);
+ b.printStackTrace();
+ } catch (ErrorC c) {
+ System.out.println("ERROR: Caught " + c);
+ c.printStackTrace();
+ }
+ }
+
+ public static float returnFloat() {
+ return doGetFloat();
+ }
+
+ public static double returnDouble() {
+ return doGetDouble();
+ }
+
+ // Throws an ErrorA.
+ public static void throwA() {
+ doThrowA();
+ }
+
+ public static void doThrowA() {
+ throw new ErrorA("Throwing Error A");
+ }
+
+ static final class TestObject {
+ private int idx;
+ public TestObject(int v) {
+ this.idx = v;
+ }
+ @Override
+ public String toString() {
+ return "TestObject(" + idx + ")";
+ }
+ }
+
+ static int counter = 0;
+ public static Object mkTestObject() {
+ return new TestObject(counter++);
+ }
+
+ public static void printObject(Object o) {
+ System.out.println("Recieved " + o);
+ }
+
+ // Returns a newly allocated value.
+ public static Object returnValue() {
+ return mkTestObject();
+ }
+
+ public static void acceptValue(Object o) {
+ printObject(o);
+ }
+
+ public static float doGetFloat() {
+ return 1.618f;
+ }
+
+ public static double doGetDouble() {
+ return 3.14159628;
+ }
+
+ // Calls mkTestObject from native code and returns it.
+ public static native Object returnValueNative();
+ // Calls printObject from native code.
+ public static native void acceptValueNative(Object t);
+ public static native void doNothingNative();
+ public static native void throwANative();
+ public static native float returnFloatNative();
+ public static native double returnDoubleNative();
+}
diff --git a/test/989-method-trace-throw/src/art/Trace.java b/test/989-method-trace-throw/src/art/Trace.java
new file mode 100644
index 0000000000..3370996df3
--- /dev/null
+++ b/test/989-method-trace-throw/src/art/Trace.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Method;
+
+public class Trace {
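+  // These natives are implemented by the test's agent (Java_art_Trace_enableMethodTracing /
+  // Java_art_Trace_disableMethodTracing in ti-agent/common_helper.cc): entryMethod/exitMethod are
+  // expected to be static methods on methodClass and are invoked on each method entry/exit that
+  // occurs on the given thread while tracing is enabled.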
+ public static native void enableMethodTracing(
+ Class<?> methodClass, Method entryMethod, Method exitMethod, Thread thr);
+ public static native void disableMethodTracing(Thread thr);
+}
diff --git a/test/Android.bp b/test/Android.bp
index d9b309174d..0937c62469 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -278,6 +278,7 @@ art_cc_defaults {
"984-obsolete-invoke/obsolete_invoke.cc",
"986-native-method-bind/native_bind.cc",
"987-agent-bind/agent_bind.cc",
+ "989-method-trace-throw/method_trace.cc",
],
shared_libs: [
"libbase",
diff --git a/test/Instrumentation/Instrumentation.java b/test/Instrumentation/Instrumentation.java
index 09d434213b..b44f78fc3b 100644
--- a/test/Instrumentation/Instrumentation.java
+++ b/test/Instrumentation/Instrumentation.java
@@ -15,8 +15,21 @@
*/
public class Instrumentation {
+ private static int primitiveField;
+ private static Object referenceField;
+
// Direct method
private void instanceMethod() {
System.out.println("instanceMethod");
}
+
+ private Object returnReference() {
+ System.out.println("returnReference");
+ return null;
+ }
+
+ private int returnPrimitive() {
+ System.out.println("returnPrimitive");
+ return 0;
+ }
}
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index d2cfbffc30..d8e5b571bd 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -29,7 +29,7 @@
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedUtfChars.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
@@ -148,12 +148,24 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isAotCompiled(JNIEnv* env,
CHECK(chars.c_str() != nullptr);
ArtMethod* method = soa.Decode<mirror::Class>(cls)->FindDeclaredDirectMethodByName(
chars.c_str(), kRuntimePointerSize);
- const void* code = method->GetOatMethodQuickCode(kRuntimePointerSize);
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr && jit->GetCodeCache()->ContainsPc(code)) {
- return true;
+ return method->GetOatMethodQuickCode(kRuntimePointerSize) != nullptr;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isJitCompiled(JNIEnv* env,
+ jclass,
+ jclass cls,
+ jstring method_name) {
+ jit::Jit* jit = GetJitIfEnabled();
+ if (jit == nullptr) {
+ return false;
}
- return code != nullptr;
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ ScopedUtfChars chars(env, method_name);
+ CHECK(chars.c_str() != nullptr);
+ ArtMethod* method = soa.Decode<mirror::Class>(cls)->FindDeclaredDirectMethodByName(
+ chars.c_str(), kRuntimePointerSize);
+ return jit->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
}
extern "C" JNIEXPORT void JNICALL Java_Main_ensureJitCompiled(JNIEnv* env,
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index ceb4ba241b..80a278012d 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -25,7 +25,7 @@
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
-#include "thread-inl.h"
+#include "thread-current-inl.h"
namespace art {
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index ca52a996ac..8aacc8c9b7 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -63,6 +63,8 @@ TEST_VDEX="n"
TEST_IS_NDEBUG="n"
APP_IMAGE="y"
JVMTI_STRESS="n"
+JVMTI_TRACE_STRESS="n"
+JVMTI_REDEFINE_STRESS="n"
VDEX_FILTER=""
PROFILE="n"
RANDOM_PROFILE="n"
@@ -151,10 +153,15 @@ while true; do
elif [ "x$1" = "x--prebuild" ]; then
PREBUILD="y"
shift
- elif [ "x$1" = "x--jvmti-stress" ]; then
- # APP_IMAGE doesn't really work with jvmti-torture
+ elif [ "x$1" = "x--jvmti-redefine-stress" ]; then
+ # APP_IMAGE doesn't really work with jvmti redefine stress
APP_IMAGE="n"
JVMTI_STRESS="y"
+ JVMTI_REDEFINE_STRESS="y"
+ shift
+ elif [ "x$1" = "x--jvmti-trace-stress" ]; then
+ JVMTI_STRESS="y"
+ JVMTI_TRACE_STRESS="y"
shift
elif [ "x$1" = "x--no-app-image" ]; then
APP_IMAGE="n"
@@ -397,13 +404,25 @@ if [[ "$JVMTI_STRESS" = "y" ]]; then
plugin=libopenjdkjvmti.so
fi
- file_1=$(mktemp --tmpdir=${DEX_LOCATION})
- file_2=$(mktemp --tmpdir=${DEX_LOCATION})
+ # Just give it a default start so we can always add ',' to it.
+ agent_args="jvmti-stress"
+ if [[ "$JVMTI_REDEFINE_STRESS" = "y" ]]; then
+      # We really cannot do this on RI, so don't bother passing it in that case.
+ if [[ "$USE_JVM" = "n" ]]; then
+ file_1=$(mktemp --tmpdir=${DEX_LOCATION})
+ file_2=$(mktemp --tmpdir=${DEX_LOCATION})
+ # TODO Remove need for DEXTER_BINARY!
+ agent_args="${agent_args},redefine,${DEXTER_BINARY},${file_1},${file_2}"
+ fi
+ fi
+ if [[ "$JVMTI_TRACE_STRESS" = "y" ]]; then
+ agent_args="${agent_args},trace"
+ fi
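+  # Sketch of the resulting value (paths are placeholders): with both stress modes enabled on
+  # device, agent_args ends up as "jvmti-stress,redefine,<dexter>,<tmp1>,<tmp2>,trace"; with only
+  # trace stress it is simply "jvmti-stress,trace".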
+  # Additional agent options can be appended to agent_args here in the future.
if [[ "$USE_JVM" = "y" ]]; then
- FLAGS="${FLAGS} -agentpath:${ANDROID_HOST_OUT}/nativetest64/${agent}=/bin/false,${file_1},${file_2}"
+ FLAGS="${FLAGS} -agentpath:${ANDROID_HOST_OUT}/nativetest64/${agent}=${agent_args}"
else
- # TODO Remove need for DEXTER_BINARY!
- FLAGS="${FLAGS} -agentpath:${agent}=${DEXTER_BINARY},${file_1},${file_2}"
+ FLAGS="${FLAGS} -agentpath:${agent}=${agent_args}"
if [ "$IS_JVMTI_TEST" = "n" ]; then
FLAGS="${FLAGS} -Xplugin:${plugin}"
FLAGS="${FLAGS} -Xcompiler-option --debuggable"
@@ -610,7 +629,7 @@ if [ "$PREBUILD" = "y" ]; then
if [ "$HOST" != "n" ]; then
# Use SIGRTMIN+2 to try to dump threads.
# Use -k 1m to SIGKILL it a minute later if it hasn't ended.
- dex2oat_cmdline="timeout -k 1m -s SIGRTMIN+2 1m ${dex2oat_cmdline}"
+ dex2oat_cmdline="timeout -k 1m -s SIGRTMIN+2 90s ${dex2oat_cmdline} --watchdog-timeout=60000"
fi
if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
vdex_cmdline="${dex2oat_cmdline} ${VDEX_FILTER} --input-vdex=$DEX_LOCATION/oat/$ISA/$TEST_NAME.vdex --output-vdex=$DEX_LOCATION/oat/$ISA/$TEST_NAME.vdex"
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 36ac307c5e..67ca316425 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -511,7 +511,7 @@
"645-checker-abs-simd",
"706-checker-scheduler"],
"description": ["Checker tests are not compatible with jvmti."],
- "variant": "jvmti-stress"
+ "variant": "jvmti-stress | redefine-stress | trace-stress"
},
{
"tests": [
@@ -519,7 +519,7 @@
"964-default-iface-init-gen"
],
"description": ["Tests that just take too long with jvmti-stress"],
- "variant": "jvmti-stress"
+ "variant": "jvmti-stress | redefine-stress | trace-stress"
},
{
"tests": [
@@ -539,7 +539,7 @@
"dexter/slicer."
],
"bug": "b/37272822",
- "variant": "jvmti-stress"
+ "variant": "jvmti-stress | redefine-stress"
},
{
"tests": [
@@ -550,7 +550,7 @@
"981-dedup-original-dex"
],
"description": ["Tests that require exact knowledge of the number of plugins and agents."],
- "variant": "jvmti-stress"
+ "variant": "jvmti-stress | redefine-stress | trace-stress"
},
{
"tests": [
@@ -564,7 +564,7 @@
"description": [
"Tests that use illegal dex files or otherwise break dexter assumptions"
],
- "variant": "jvmti-stress"
+ "variant": "jvmti-stress | redefine-stress"
},
{
"tests": [
@@ -581,7 +581,7 @@
"Tests that use custom class loaders or other features not supported ",
"by our JVMTI implementation"
],
- "variant": "jvmti-stress"
+ "variant": "jvmti-stress | redefine-stress"
},
{
"tests": [
@@ -592,7 +592,7 @@
"Tests that use annotations and debug data that is not kept around by dexter."
],
"bug": "b/37239009",
- "variant": "jvmti-stress"
+ "variant": "jvmti-stress | redefine-stress"
},
{
"tests": [
@@ -680,5 +680,35 @@
"The java.lang.Integer.valueOf intrinsic is not supported in PIC mode."
],
"variant": "optimizing & pictest | speed-profile & pictest"
+ },
+ {
+ "tests": "202-thread-oome",
+ "description": "ASAN aborts when large thread stacks are requested.",
+ "variant": "host",
+ "env_vars": {"SANITIZE_HOST": "address"}
+ },
+ {
+ "tests": "202-thread-oome",
+ "description": "ASAN aborts when large thread stacks are requested.",
+ "variant": "target",
+ "env_vars": {"SANITIZE_TARGET": "address"}
+ },
+ {
+ "tests": "071-dexfile-map-clean",
+ "description": [ "We use prebuilt zipalign on master-art-host to avoid pulling in a lot",
+ "of the framework. But a non-sanitized zipalign binary does not work with",
+ "a sanitized libc++."],
+ "env_vars": {"SANITIZE_HOST": "address"}
+ },
+ {
+ "tests": ["988-method-trace"],
+ "variant": "redefine-stress | jvmti-stress",
+ "description": "Test disabled due to redefine-stress disabling intrinsics which changes the trace output slightly."
+ },
+ {
+ "tests": ["137-cfi", "629-vdex-speed"],
+ "description": [ "Tests require speed compilation which is no longer the default for",
+ "no-prebuild or no-image configs."],
+ "variant": "no-prebuild | no-image"
}
]
diff --git a/test/run-test b/test/run-test
index 933a7febac..41a0dc2a84 100755
--- a/test/run-test
+++ b/test/run-test
@@ -137,7 +137,8 @@ trace_stream="false"
basic_verify="false"
gc_verify="false"
gc_stress="false"
-jvmti_stress="false"
+jvmti_trace_stress="false"
+jvmti_redefine_stress="false"
strace="false"
always_clean="no"
never_clean="no"
@@ -234,8 +235,11 @@ while true; do
basic_verify="true"
gc_stress="true"
shift
- elif [ "x$1" = "x--jvmti-stress" ]; then
- jvmti_stress="true"
+ elif [ "x$1" = "x--jvmti-redefine-stress" ]; then
+ jvmti_redefine_stress="true"
+ shift
+ elif [ "x$1" = "x--jvmti-trace-stress" ]; then
+ jvmti_trace_stress="true"
shift
elif [ "x$1" = "x--suspend-timeout" ]; then
shift
@@ -447,8 +451,11 @@ fi
if [ "$gc_stress" = "true" ]; then
run_args="${run_args} --gc-stress --runtime-option -Xgc:gcstress --runtime-option -Xms2m --runtime-option -Xmx16m"
fi
-if [ "$jvmti_stress" = "true" ]; then
- run_args="${run_args} --no-app-image --jvmti-stress"
+if [ "$jvmti_redefine_stress" = "true" ]; then
+ run_args="${run_args} --no-app-image --jvmti-redefine-stress"
+fi
+if [ "$jvmti_trace_stress" = "true" ]; then
+ run_args="${run_args} --no-app-image --jvmti-trace-stress"
fi
if [ "$trace" = "true" ]; then
run_args="${run_args} --runtime-option -Xmethod-trace --runtime-option -Xmethod-trace-file-size:2000000"
@@ -658,7 +665,9 @@ if [ "$usage" = "yes" ]; then
echo " --stream Run method tracing in streaming mode (requires --trace)"
echo " --gcstress Run with gc stress testing"
echo " --gcverify Run with gc verification"
- echo " --jvmti-stress Run with jvmti stress testing"
+ echo " --jvmti-trace-stress Run with jvmti method tracing stress testing"
+ echo " --jvmti-redefine-stress"
+ echo " Run with jvmti method redefinition stress testing"
echo " --always-clean Delete the test files even if the test fails."
echo " --never-clean Keep the test files even if the test succeeds."
echo " --android-root [path] The path on target for the android root. (/system by default)."
@@ -728,10 +737,8 @@ export TEST_NAME=`basename ${test_dir}`
# Checker when compiled with Optimizing on host.
if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
if [ "$runtime" = "art" -a "$image_suffix" = "" -a "$USE_JACK" = "true" ]; then
- # In no-prebuild mode, the compiler is only invoked if both dex2oat and
- # patchoat are available. Disable Checker otherwise (b/22552692).
- if [ "$prebuild_mode" = "yes" ] \
- || [ "$have_patchoat" = "yes" -a "$have_dex2oat" = "yes" ]; then
+ # In no-prebuild or no-image mode, the compiler only quickens so disable the checker.
+ if [ "$prebuild_mode" = "yes" -a "$have_image" = "yes" ]; then
run_checker="yes"
if [ "$target_mode" = "no" ]; then
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 6e47c5eb7a..654fb06d02 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -42,6 +42,12 @@ target_config = {
# ART run-test configurations
# (calls testrunner which builds and then runs the test targets)
+ 'art-ndebug' : {
+ 'run-test' : ['--ndebug'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'true'
+ }
+ },
'art-interpreter' : {
'run-test' : ['--interpreter'],
'env' : {
@@ -316,6 +322,23 @@ target_config = {
}
},
+ # ASAN (host) configurations.
+
+ 'art-gtest-asan': {
+ 'make' : 'test-art-host-gtest',
+ 'env': {
+ 'SANITIZE_HOST' : 'address'
+ }
+ },
+ 'art-asan': {
+ 'run-test' : ['--interpreter',
+ '--optimizing',
+ '--jit'],
+ 'env': {
+ 'SANITIZE_HOST' : 'address'
+ }
+ },
+
# ART Golem build targets used by go/lem (continuous ART benchmarking),
# (art-opt-cc is used by default since it mimics the default preopt config),
#
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 77ef25a75b..344507115b 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -147,7 +147,7 @@ def gather_test_info():
VARIANT_TYPE_DICT['relocate'] = {'relocate-npatchoat', 'relocate', 'no-relocate'}
VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
- VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress'}
+ VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress'}
VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'optimizing',
'regalloc_gc', 'speed-profile'}
@@ -437,7 +437,11 @@ def run_tests(tests):
options_test += ' --debuggable'
if jvmti == 'jvmti-stress':
- options_test += ' --jvmti-stress'
+ options_test += ' --jvmti-trace-stress --jvmti-redefine-stress'
+ elif jvmti == 'trace-stress':
+ options_test += ' --jvmti-trace-stress'
+ elif jvmti == 'redefine-stress':
+ options_test += ' --jvmti-redefine-stress'
if address_size == '64':
options_test += ' --64'
@@ -954,6 +958,10 @@ def parse_option():
IMAGE_TYPES.add('multipicimage')
if options['jvmti_stress']:
JVMTI_TYPES.add('jvmti-stress')
+ if options['redefine_stress']:
+ JVMTI_TYPES.add('redefine-stress')
+ if options['trace_stress']:
+ JVMTI_TYPES.add('trace-stress')
if options['no_jvmti']:
JVMTI_TYPES.add('no-jvmti')
if options['verbose']:
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index bfd4d254f4..6eaa5c37df 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -69,6 +69,214 @@ static void throwCommonRedefinitionError(jvmtiEnv* jvmti,
env->ThrowNew(env->FindClass("java/lang/Exception"), message.c_str());
}
+namespace common_trace {
+
+// Taken from art/runtime/modifiers.h
+static constexpr uint32_t kAccStatic = 0x0008; // field, method, ic
+
+struct TraceData {
+ jclass test_klass;
+ jmethodID enter_method;
+ jmethodID exit_method;
+ bool in_callback;
+};
+
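+// Turns a jmethodID into a java.lang.reflect.Method/Constructor object via ToReflectedMethod;
+// the modifiers are queried first because ToReflectedMethod needs to know whether the method is
+// static.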
+static jobject GetJavaMethod(jvmtiEnv* jvmti, JNIEnv* env, jmethodID m) {
+ jint mods = 0;
+ if (JvmtiErrorToException(env, jvmti, jvmti->GetMethodModifiers(m, &mods))) {
+ return nullptr;
+ }
+
+ bool is_static = (mods & kAccStatic) != 0;
+ jclass method_klass = nullptr;
+ if (JvmtiErrorToException(env, jvmti, jvmti->GetMethodDeclaringClass(m, &method_klass))) {
+ return nullptr;
+ }
+ jobject res = env->ToReflectedMethod(method_klass, m, is_static);
+ env->DeleteLocalRef(method_klass);
+ return res;
+}
+
+static jobject GetJavaValue(jvmtiEnv* jvmtienv,
+ JNIEnv* env,
+ jmethodID m,
+ jvalue value) {
+ char *fname, *fsig, *fgen;
+ if (JvmtiErrorToException(env, jvmtienv, jvmtienv->GetMethodName(m, &fname, &fsig, &fgen))) {
+ return nullptr;
+ }
+ std::string type(fsig);
+ type = type.substr(type.find(")") + 1);
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fsig));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fgen));
+ std::string name;
+ switch (type[0]) {
+ case 'V':
+ return nullptr;
+ case '[':
+ case 'L':
+ return value.l;
+ case 'Z':
+ name = "java/lang/Boolean";
+ break;
+ case 'B':
+ name = "java/lang/Byte";
+ break;
+ case 'C':
+ name = "java/lang/Character";
+ break;
+ case 'S':
+ name = "java/lang/Short";
+ break;
+ case 'I':
+ name = "java/lang/Integer";
+ break;
+ case 'J':
+ name = "java/lang/Long";
+ break;
+ case 'F':
+ name = "java/lang/Float";
+ break;
+ case 'D':
+ name = "java/lang/Double";
+ break;
+ default:
+ LOG(FATAL) << "Unable to figure out type!";
+ return nullptr;
+ }
+ std::ostringstream oss;
+ oss << "(" << type[0] << ")L" << name << ";";
+ std::string args = oss.str();
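+  // For example, an int return value produces the signature "(I)Ljava/lang/Integer;", i.e. the
+  // static method Integer.valueOf(int).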
+ jclass target = env->FindClass(name.c_str());
+ jmethodID valueOfMethod = env->GetStaticMethodID(target, "valueOf", args.c_str());
+
+ CHECK(valueOfMethod != nullptr) << args;
+ jobject res = env->CallStaticObjectMethodA(target, valueOfMethod, &value);
+ env->DeleteLocalRef(target);
+ return res;
+}
+
+static void methodExitCB(jvmtiEnv* jvmti,
+ JNIEnv* jnienv,
+ jthread thr ATTRIBUTE_UNUSED,
+ jmethodID method,
+ jboolean was_popped_by_exception,
+ jvalue return_value) {
+ TraceData* data = nullptr;
+ if (JvmtiErrorToException(jnienv, jvmti,
+ jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ if (method == data->exit_method || method == data->enter_method || data->in_callback) {
+ // Don't do callback for either of these to prevent an infinite loop.
+ return;
+ }
+ data->in_callback = true;
+ jobject method_arg = GetJavaMethod(jvmti, jnienv, method);
+ jobject result =
+ was_popped_by_exception ? nullptr : GetJavaValue(jvmti, jnienv, method, return_value);
+ if (jnienv->ExceptionCheck()) {
+ data->in_callback = false;
+ return;
+ }
+ jnienv->CallStaticVoidMethod(data->test_klass,
+ data->exit_method,
+ method_arg,
+ was_popped_by_exception,
+ result);
+ jnienv->DeleteLocalRef(method_arg);
+ data->in_callback = false;
+}
+
+static void methodEntryCB(jvmtiEnv* jvmti,
+ JNIEnv* jnienv,
+ jthread thr ATTRIBUTE_UNUSED,
+ jmethodID method) {
+ TraceData* data = nullptr;
+ if (JvmtiErrorToException(jnienv, jvmti,
+ jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ if (method == data->exit_method || method == data->enter_method || data->in_callback) {
+ // Don't do callback for either of these to prevent an infinite loop.
+ return;
+ }
+ data->in_callback = true;
+ jobject method_arg = GetJavaMethod(jvmti, jnienv, method);
+  if (jnienv->ExceptionCheck()) {
+    // Reset the guard so later events are not suppressed if resolving the method failed.
+    data->in_callback = false;
+    return;
+  }
+ jnienv->CallStaticVoidMethod(data->test_klass, data->enter_method, method_arg);
+ jnienv->DeleteLocalRef(method_arg);
+ data->in_callback = false;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Trace_enableMethodTracing(
+ JNIEnv* env,
+ jclass trace ATTRIBUTE_UNUSED,
+ jclass klass,
+ jobject enter,
+ jobject exit,
+ jthread thr) {
+ TraceData* data = nullptr;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->Allocate(sizeof(TraceData),
+ reinterpret_cast<unsigned char**>(&data)))) {
+ return;
+ }
+ memset(data, 0, sizeof(TraceData));
+ data->test_klass = reinterpret_cast<jclass>(env->NewGlobalRef(klass));
+ data->enter_method = env->FromReflectedMethod(enter);
+ data->exit_method = env->FromReflectedMethod(exit);
+ data->in_callback = false;
+
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEnvironmentLocalStorage(data))) {
+ return;
+ }
+
+ jvmtiEventCallbacks cb;
+ memset(&cb, 0, sizeof(cb));
+ cb.MethodEntry = methodEntryCB;
+ cb.MethodExit = methodExitCB;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventCallbacks(&cb, sizeof(cb)))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_METHOD_ENTRY,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_METHOD_EXIT,
+ thr))) {
+ return;
+ }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Trace_disableMethodTracing(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
+ if (JvmtiErrorToException(env, jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_METHOD_ENTRY,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env, jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_METHOD_EXIT,
+ thr))) {
+ return;
+ }
+}
+
+} // namespace common_trace
+
namespace common_redefine {
static void throwRedefinitionError(jvmtiEnv* jvmti,
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index e8e3cc7fa2..497db1cd3e 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -20,6 +20,7 @@
#include <fstream>
#include <stdio.h>
#include <sstream>
+#include <strstream>
#include "jvmti.h"
#include "exec_utils.h"
@@ -35,6 +36,8 @@ struct StressData {
std::string out_temp_dex;
std::string in_temp_dex;
bool vm_class_loader_initialized;
+ bool trace_stress;
+ bool redefine_stress;
};
static void WriteToFile(const std::string& fname, jint data_len, const unsigned char* data) {
@@ -95,7 +98,6 @@ static void doJvmtiMethodBind(jvmtiEnv* jvmtienv,
if (thread == nullptr) {
info.name = const_cast<char*>("<NULLPTR>");
} else if (jvmtienv->GetThreadInfo(thread, &info) != JVMTI_ERROR_NONE) {
- LOG(WARNING) << "Unable to get thread info!";
info.name = const_cast<char*>("<UNKNOWN THREAD>");
}
char *fname, *fsig, *fgen;
@@ -115,8 +117,8 @@ static void doJvmtiMethodBind(jvmtiEnv* jvmtienv,
env->DeleteLocalRef(klass);
return;
}
- LOG(INFO) << "Loading native method \"" << cname << "->" << fname << fsig << "\". Thread is "
- << info.name;
+ LOG(INFO) << "Loading native method \"" << cname << "->" << fname << fsig << "\". Thread is \""
+ << info.name << "\"";
jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cname));
jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cgen));
jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
@@ -126,6 +128,151 @@ static void doJvmtiMethodBind(jvmtiEnv* jvmtienv,
return;
}
+static std::string GetName(jvmtiEnv* jvmtienv, JNIEnv* jnienv, jobject obj) {
+ jclass klass = jnienv->GetObjectClass(obj);
+ char *cname, *cgen;
+ if (jvmtienv->GetClassSignature(klass, &cname, &cgen) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to get class name!";
+ jnienv->DeleteLocalRef(klass);
+ return "<UNKNOWN>";
+ }
+ std::string name(cname);
+ if (name == "Ljava/lang/String;") {
+ jstring str = reinterpret_cast<jstring>(obj);
+ const char* val = jnienv->GetStringUTFChars(str, nullptr);
+ if (val == nullptr) {
+ name += " (unable to get value)";
+ } else {
+ std::ostringstream oss;
+ oss << name << " (value: \"" << val << "\")";
+ name = oss.str();
+ jnienv->ReleaseStringUTFChars(str, val);
+ }
+ }
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cname));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cgen));
+ jnienv->DeleteLocalRef(klass);
+ return name;
+}
+
+static std::string GetValOf(jvmtiEnv* env, JNIEnv* jnienv, std::string type, jvalue val) {
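+  // 'type' is the JNI return-type descriptor, e.g. "I" for int or "Ljava/lang/String;" for a
+  // String; 'val' is read through the union member matching that descriptor.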
+ std::ostringstream oss;
+ switch (type[0]) {
+ case '[':
+ case 'L':
+ return val.l != nullptr ? GetName(env, jnienv, val.l) : "null";
+ case 'Z':
+ return val.z == JNI_TRUE ? "true" : "false";
+ case 'B':
+ oss << val.b;
+ return oss.str();
+ case 'C':
+ oss << val.c;
+ return oss.str();
+ case 'S':
+ oss << val.s;
+ return oss.str();
+ case 'I':
+ oss << val.i;
+ return oss.str();
+ case 'J':
+ oss << val.j;
+ return oss.str();
+ case 'F':
+ oss << val.f;
+ return oss.str();
+ case 'D':
+ oss << val.d;
+ return oss.str();
+ case 'V':
+ return "<void>";
+ default:
+ return "<ERROR Found type " + type + ">";
+ }
+}
+
+void JNICALL MethodExitHook(jvmtiEnv* jvmtienv,
+ JNIEnv* env,
+ jthread thread,
+ jmethodID m,
+ jboolean was_popped_by_exception,
+ jvalue val) {
+ jvmtiThreadInfo info;
+ if (thread == nullptr) {
+ info.name = const_cast<char*>("<NULLPTR>");
+ } else if (jvmtienv->GetThreadInfo(thread, &info) != JVMTI_ERROR_NONE) {
+ // LOG(WARNING) << "Unable to get thread info!";
+ info.name = const_cast<char*>("<UNKNOWN THREAD>");
+ }
+ char *fname, *fsig, *fgen;
+ char *cname, *cgen;
+ jclass klass = nullptr;
+ if (jvmtienv->GetMethodDeclaringClass(m, &klass) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to get method declaring class!";
+ return;
+ }
+ if (jvmtienv->GetMethodName(m, &fname, &fsig, &fgen) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to get method name!";
+ env->DeleteLocalRef(klass);
+ return;
+ }
+ if (jvmtienv->GetClassSignature(klass, &cname, &cgen) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to get class name!";
+ env->DeleteLocalRef(klass);
+ return;
+ }
+ std::string type(fsig);
+ type = type.substr(type.find(")") + 1);
+ std::string out_val(was_popped_by_exception ? "" : GetValOf(jvmtienv, env, type, val));
+ LOG(INFO) << "Leaving method \"" << cname << "->" << fname << fsig << "\". Thread is \""
+ << info.name << "\"." << std::endl
+ << " Cause: " << (was_popped_by_exception ? "exception" : "return ")
+ << out_val << ".";
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cname));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cgen));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fsig));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fgen));
+ env->DeleteLocalRef(klass);
+}
+
+void JNICALL MethodEntryHook(jvmtiEnv* jvmtienv,
+ JNIEnv* env,
+ jthread thread,
+ jmethodID m) {
+ jvmtiThreadInfo info;
+ if (thread == nullptr) {
+ info.name = const_cast<char*>("<NULLPTR>");
+ } else if (jvmtienv->GetThreadInfo(thread, &info) != JVMTI_ERROR_NONE) {
+ info.name = const_cast<char*>("<UNKNOWN THREAD>");
+ }
+ char *fname, *fsig, *fgen;
+ char *cname, *cgen;
+ jclass klass = nullptr;
+ if (jvmtienv->GetMethodDeclaringClass(m, &klass) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to get method declaring class!";
+ return;
+ }
+ if (jvmtienv->GetMethodName(m, &fname, &fsig, &fgen) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to get method name!";
+ env->DeleteLocalRef(klass);
+ return;
+ }
+ if (jvmtienv->GetClassSignature(klass, &cname, &cgen) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to get class name!";
+ env->DeleteLocalRef(klass);
+ return;
+ }
+ LOG(INFO) << "Entering method \"" << cname << "->" << fname << fsig << "\". Thread is \""
+ << info.name << "\"";
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cname));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cgen));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fsig));
+ jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fgen));
+ env->DeleteLocalRef(klass);
+}
+
// The hook we are using.
void JNICALL ClassFileLoadHookSecretNoOp(jvmtiEnv* jvmti,
JNIEnv* jni_env ATTRIBUTE_UNUSED,
@@ -163,27 +310,57 @@ void JNICALL ClassFileLoadHookSecretNoOp(jvmtiEnv* jvmti,
}
}
-// Options are ${DEXTER_BINARY},${TEMP_FILE_1},${TEMP_FILE_2}
+static std::string AdvanceOption(const std::string& ops) {
+ return ops.substr(ops.find(',') + 1);
+}
+
+static bool HasNextOption(const std::string& ops) {
+ return ops.find(',') != std::string::npos;
+}
+
+static std::string GetOption(const std::string& in) {
+ return in.substr(0, in.find(','));
+}
+
+// Options are
+// jvmti-stress,[redefine,${DEXTER_BINARY},${TEMP_FILE_1},${TEMP_FILE_2},][trace]
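+// For example (paths are illustrative): "jvmti-stress,trace" enables only the tracing stress,
+// while "jvmti-stress,redefine,/path/to/dexter,/tmp/in.dex,/tmp/out.dex,trace" enables both.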
static void ReadOptions(StressData* data, char* options) {
std::string ops(options);
- data->dexter_cmd = ops.substr(0, ops.find(','));
- ops = ops.substr(ops.find(',') + 1);
- data->in_temp_dex = ops.substr(0, ops.find(','));
- ops = ops.substr(ops.find(',') + 1);
- data->out_temp_dex = ops;
+ CHECK_EQ(GetOption(ops), "jvmti-stress") << "Options should start with jvmti-stress";
+ do {
+ ops = AdvanceOption(ops);
+ std::string cur = GetOption(ops);
+ if (cur == "trace") {
+ data->trace_stress = true;
+ } else if (cur == "redefine") {
+ data->redefine_stress = true;
+ ops = AdvanceOption(ops);
+ data->dexter_cmd = GetOption(ops);
+ ops = AdvanceOption(ops);
+ data->in_temp_dex = GetOption(ops);
+ ops = AdvanceOption(ops);
+ data->out_temp_dex = GetOption(ops);
+ } else {
+ LOG(FATAL) << "Unknown option: " << GetOption(ops);
+ }
+ } while (HasNextOption(ops));
}
-// We need to make sure that VMClassLoader is initialized before we start redefining anything since
-// it can give (non-fatal) error messages if it's initialized after we've redefined BCP classes.
-// These error messages are expected and no problem but they will mess up our testing
-// infrastructure.
-static void JNICALL EnsureVMClassloaderInitializedCB(jvmtiEnv *jvmti_env,
- JNIEnv* jni_env,
- jthread thread ATTRIBUTE_UNUSED) {
+// Do final setup during the VMInit callback. By this time most of the runtime is set up.
+static void JNICALL PerformFinalSetupVMInit(jvmtiEnv *jvmti_env,
+ JNIEnv* jni_env,
+ jthread thread ATTRIBUTE_UNUSED) {
// Load the VMClassLoader class. We will get a ClassNotFound exception because we don't have
// visibility but the class will be loaded behind the scenes.
LOG(INFO) << "manual load & initialization of class java/lang/VMClassLoader!";
jclass klass = jni_env->FindClass("java/lang/VMClassLoader");
+ StressData* data = nullptr;
+ CHECK_EQ(jvmti_env->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)),
+ JVMTI_ERROR_NONE);
+ // We need to make sure that VMClassLoader is initialized before we start redefining anything
+ // since it can give (non-fatal) error messages if it's initialized after we've redefined BCP
+  // classes. These error messages are expected and harmless, but they would mess up our testing
+  // infrastructure.
if (klass == nullptr) {
// Probably on RI. Clear the exception so we can continue but don't mark vmclassloader as
// initialized.
@@ -193,11 +370,20 @@ static void JNICALL EnsureVMClassloaderInitializedCB(jvmtiEnv *jvmti_env,
// GetMethodID is spec'd to cause the class to be initialized.
jni_env->GetMethodID(klass, "hashCode", "()I");
jni_env->DeleteLocalRef(klass);
- StressData* data = nullptr;
- CHECK_EQ(jvmti_env->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)),
- JVMTI_ERROR_NONE);
data->vm_class_loader_initialized = true;
}
+ if (data->trace_stress) {
+ if (jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_METHOD_ENTRY,
+ nullptr) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to enable JVMTI_EVENT_METHOD_ENTRY event!";
+ }
+ if (jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_METHOD_EXIT,
+ nullptr) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to enable JVMTI_EVENT_METHOD_EXIT event!";
+ }
+ }
}
extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm,
@@ -233,7 +419,9 @@ extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm,
memset(&cb, 0, sizeof(cb));
cb.ClassFileLoadHook = ClassFileLoadHookSecretNoOp;
cb.NativeMethodBind = doJvmtiMethodBind;
- cb.VMInit = EnsureVMClassloaderInitializedCB;
+ cb.VMInit = PerformFinalSetupVMInit;
+ cb.MethodEntry = MethodEntryHook;
+ cb.MethodExit = MethodExitHook;
if (jvmti->SetEventCallbacks(&cb, sizeof(cb)) != JVMTI_ERROR_NONE) {
LOG(ERROR) << "Unable to set class file load hook cb!";
return 1;
@@ -250,11 +438,13 @@ extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm,
LOG(ERROR) << "Unable to enable JVMTI_EVENT_VM_INIT event!";
return 1;
}
- if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
- JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
- nullptr) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to enable CLASS_FILE_LOAD_HOOK event!";
- return 1;
+ if (data->redefine_stress) {
+ if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
+ nullptr) != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to enable CLASS_FILE_LOAD_HOOK event!";
+ return 1;
+ }
}
return 0;
}
diff --git a/tools/add_package_property.sh b/tools/add_package_property.sh
new file mode 100644
index 0000000000..e9294a9ed2
--- /dev/null
+++ b/tools/add_package_property.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Sets the property of an Android package
+
+if [ "$#" -ne 2 ] ; then
+ echo "USAGE: sh add_package_property.sh [PACKAGE_NAME] [PROPERTY_SCRIPT_PATH]"
+ exit 1
+fi
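+# Example invocation (package name is illustrative; tools/asan.sh below is designed to be used
+# as the property script):
+#   sh add_package_property.sh com.example.myapp tools/asan.sh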
+PACKAGE_NAME=$1
+PROPERTY_SCRIPT_PATH=$2
+PROPERTY_SCRIPT_NAME=`basename $PROPERTY_SCRIPT_PATH`
+adb push $PROPERTY_SCRIPT_PATH /data/data/$PACKAGE_NAME/
+adb shell chmod o+x /data/data/$PACKAGE_NAME/$PROPERTY_SCRIPT_NAME
+adb shell restorecon /data/data/$PACKAGE_NAME/$PROPERTY_SCRIPT_NAME
+adb shell setprop wrap.$PACKAGE_NAME /data/data/$PACKAGE_NAME/$PROPERTY_SCRIPT_NAME
diff --git a/tools/asan.sh b/tools/asan.sh
new file mode 100644
index 0000000000..b74954510f
--- /dev/null
+++ b/tools/asan.sh
@@ -0,0 +1,21 @@
+#!/system/bin/sh
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# NOTE: This script is used by add_package_property.sh and not meant to be executed directly
+#
+# This script contains the property and the options required to log poisoned
+# memory accesses (found in logcat)
+ASAN_OPTIONS=halt_on_error=0:verbosity=0:print_legend=0:print_full_thread_history=0:print_stats=0:print_summary=0:suppress_equal_pcs=0:fast_unwind_on_fatal=1 asanwrapper "$@"
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 963efa49a5..bf7692ab15 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -30,8 +30,13 @@ else
out_dir=${OUT_DIR}
fi
+using_jack=true
+if [[ $ANDROID_COMPILE_WITH_JACK == false ]]; then
+ using_jack=false
+fi
+
java_libraries_dir=${out_dir}/target/common/obj/JAVA_LIBRARIES
-common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target ${out_dir}/host/linux-x86/bin/jack"
+common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target"
mode="target"
j_arg="-j$(nproc)"
showcommands=
@@ -58,6 +63,10 @@ while true; do
fi
done
+if $using_jack; then
+ common_targets="$common_targets ${out_dir}/host/linux-x86/bin/jack"
+fi
+
if [[ $mode == "host" ]]; then
make_command="make $j_arg $showcommands build-art-host-tests $common_targets"
make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so "
diff --git a/tools/cpp-define-generator/offset_runtime.def b/tools/cpp-define-generator/offset_runtime.def
index 17167a0605..41e7e40af5 100644
--- a/tools/cpp-define-generator/offset_runtime.def
+++ b/tools/cpp-define-generator/offset_runtime.def
@@ -17,7 +17,8 @@
// Offsets within ShadowFrame.
#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "runtime.h" // art::Runtime
+#include "base/callee_save_type.h" // art::CalleeSaveType
+#include "runtime.h" // art::Runtime
#endif
#include "common.def" // DEFINE_OFFSET_EXPR
@@ -25,17 +26,20 @@
// Note: these callee save methods loads require read barriers.
#define DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(field_name, constant_name) \
- DEFINE_OFFSET_EXPR(Runtime, field_name ## _METHOD, size_t, art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: constant_name))
+ DEFINE_OFFSET_EXPR(Runtime, \
+ field_name ## _METHOD, \
+ size_t, \
+ art::Runtime::GetCalleeSaveMethodOffset(constant_name))
// Macro substring Constant name
// Offset of field Runtime::callee_save_methods_[kSaveAllCalleeSaves]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_ALL_CALLEE_SAVES, kSaveAllCalleeSaves)
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_ALL_CALLEE_SAVES, art::CalleeSaveType::kSaveAllCalleeSaves)
// Offset of field Runtime::callee_save_methods_[kSaveRefsOnly]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_REFS_ONLY, kSaveRefsOnly)
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_REFS_ONLY, art::CalleeSaveType::kSaveRefsOnly)
// Offset of field Runtime::callee_save_methods_[kSaveRefsAndArgs]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_REFS_AND_ARGS, kSaveRefsAndArgs)
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_REFS_AND_ARGS, art::CalleeSaveType::kSaveRefsAndArgs)
// Offset of field Runtime::callee_save_methods_[kSaveEverything]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_EVERYTHING, kSaveEverything)
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_EVERYTHING, art::CalleeSaveType::kSaveEverything)
#undef DEFINE_RUNTIME_CALLEE_SAVE_OFFSET
#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/dexfuzz/src/dexfuzz/program/Mutation.java b/tools/dexfuzz/src/dexfuzz/program/Mutation.java
index 2eba7181c9..436fcc4a98 100644
--- a/tools/dexfuzz/src/dexfuzz/program/Mutation.java
+++ b/tools/dexfuzz/src/dexfuzz/program/Mutation.java
@@ -39,8 +39,21 @@ public abstract class Mutation {
this.mutatableCode = mutatableCode;
this.mutatableCodeIdx = mutatableCode.mutatableCodeIdx;
}
-
+ /**
+ * Serializes the field(s) of the mutation to string format.
+ * The fields are separated by a space.
+ * @return the serialized string representation of the field(s) of the mutation.
+ */
public abstract String getString();
+ /**
+   * Deserializes the given string array back into the field(s) of the mutation.
+   * The string array contains the individual elements that were previously produced by
+   * the getString() method. elements[0] stores the class name and elements[1]
+   * stores the mutatable code index, both of which are handled by MutationSerializer.java.
+   * Subclasses deserialize the string representation of their fields from elements[2] onwards.
+   * @param elements string array with serialized representations of the field(s) of the mutation.
+   */
public abstract void parseString(String[] elements);
 }
\ No newline at end of file
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 07d7fb8e3a..0c58585701 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -209,5 +209,12 @@
modes: [device],
names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit",
"libcore.java.lang.ProcessBuilderTest#testRedirect_nullStreams"]
+},
+{
+ description: "Test is timing sensitive",
+ result: EXEC_FAILED,
+ bug: 62528691,
+ modes: [device],
+ names: ["libcore.java.util.TimeZoneTest#testSetDefaultRace"]
}
]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index d48d8579be..f7427676eb 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -23,10 +23,24 @@ if [ -z "$ANDROID_HOST_OUT" ] ; then
ANDROID_HOST_OUT=${OUT_DIR-$ANDROID_BUILD_TOP/out}/host/linux-x86
fi
+using_jack=true
+if [[ $ANDROID_COMPILE_WITH_JACK == false ]]; then
+ using_jack=false
+fi
+
+function jlib_suffix {
+ local str=$1
+ local suffix="jar"
+ if $using_jack; then
+ suffix="jack"
+ fi
+ echo "$str.$suffix"
+}
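+# e.g. jlib_suffix "path/classes" yields "path/classes.jack" when building with Jack and
+# "path/classes.jar" otherwise.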
+
# Jar containing all the tests.
-test_jack=${ANDROID_HOST_OUT}/../common/obj/JAVA_LIBRARIES/apache-harmony-jdwp-tests-hostdex_intermediates/classes.jack
+test_jar=$(jlib_suffix "${ANDROID_HOST_OUT}/../common/obj/JAVA_LIBRARIES/apache-harmony-jdwp-tests-hostdex_intermediates/classes")
-if [ ! -f $test_jack ]; then
+if [ ! -f $test_jar ]; then
echo "Before running, you must build jdwp tests and vogar:" \
"make apache-harmony-jdwp-tests-hostdex vogar"
exit 1
@@ -147,6 +161,12 @@ if [[ $verbose == "yes" ]]; then
art_debugee="$art_debugee -verbose:jdwp"
fi
+if $using_jack; then
+ toolchain_args="--toolchain jack --language JN --jack-arg -g"
+else
+ toolchain_args="--toolchain jdk --language CUR"
+fi
+
# Run the tests using vogar.
vogar $vm_command \
$vm_args \
@@ -160,10 +180,9 @@ vogar $vm_command \
--vm-arg -Djpda.settings.waitingTime=$jdwp_test_timeout \
--vm-arg -Djpda.settings.transportAddress=127.0.0.1:55107 \
--vm-arg -Djpda.settings.debuggeeJavaPath="$art_debugee $image $debuggee_args" \
- --classpath $test_jack \
- --toolchain jack --language JN \
+ --classpath "$test_jar" \
+ $toolchain_args \
--vm-arg -Xcompiler-option --vm-arg --debuggable \
- --jack-arg -g \
$test
vogar_exit_status=$?
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index b860a6273f..f9f375457b 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -25,10 +25,26 @@ else
JAVA_LIBRARIES=${ANDROID_PRODUCT_OUT}/../../common/obj/JAVA_LIBRARIES
fi
+using_jack=true
+if [[ $ANDROID_COMPILE_WITH_JACK == false ]]; then
+ using_jack=false
+fi
+
+function classes_jar_path {
+ local var="$1"
+ local suffix="jar"
+
+ if $using_jack; then
+ suffix="jack"
+ fi
+
+ echo "${JAVA_LIBRARIES}/${var}_intermediates/classes.${suffix}"
+}
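+# e.g. classes_jar_path core-tests yields ${JAVA_LIBRARIES}/core-tests_intermediates/classes.jack
+# (or classes.jar when not building with Jack).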
+
function cparg {
for var
do
- printf -- "--classpath ${JAVA_LIBRARIES}/${var}_intermediates/classes.jack ";
+ printf -- "--classpath $(classes_jar_path "$var") ";
done
}
@@ -36,7 +52,7 @@ DEPS="core-tests jsr166-tests mockito-target"
for lib in $DEPS
do
- if [ ! -f "${JAVA_LIBRARIES}/${lib}_intermediates/classes.jack" ]; then
+ if [[ ! -f "$(classes_jar_path "$lib")" ]]; then
echo "${lib} is missing. Before running, you must run art/tools/buildbot-build.sh"
exit 1
fi
@@ -122,8 +138,12 @@ done
# the default timeout.
vogar_args="$vogar_args --timeout 480"
-# Use Jack with "1.8" configuration.
-vogar_args="$vogar_args --toolchain jack --language JO"
+# Switch between using jack or javac+desugar+dx
+if $using_jack; then
+ vogar_args="$vogar_args --toolchain jack --language JO"
+else
+ vogar_args="$vogar_args --toolchain jdk --language CUR"
+fi
# JIT settings.
if $use_jit; then